varnish-cache/bin/varnishd/cache/cache_req_body.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "cache_objhead.h"
#include "cache_transport.h"

#include "vtim.h"
#include "storage/storage.h"

/*----------------------------------------------------------------------
 * Pull the req.body in via/into an objcore
 *
 * This can be called only once per request
 *
 */

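/*
 * Two modes, selected by the caller:
 *  - func != NULL: stream the body; each chunk is handed to func() and the
 *    temporary objcore is dropped again afterwards, so the body is consumed
 *    exactly once.
 *  - func == NULL: store the body into req->body_oc so it can be replayed
 *    later; maxsize caps how much we are willing to cache.
 */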
static ssize_t
vrb_pull(struct req *req, ssize_t maxsize, objiterate_f *func, void *priv)
{
        ssize_t l, r = 0, yet;
        struct vrt_ctx ctx[1];
        struct vfp_ctx *vfc;
        uint8_t *ptr;
        enum vfp_status vfps = VFP_ERROR;
        const struct stevedore *stv;
        ssize_t req_bodybytes = 0;
        unsigned flush = OBJ_ITER_FLUSH;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        CHECK_OBJ_NOTNULL(req->htc, HTTP_CONN_MAGIC);
        CHECK_OBJ_NOTNULL(req->vfc, VFP_CTX_MAGIC);
        vfc = req->vfc;

        req->body_oc = HSH_Private(req->wrk);
        AN(req->body_oc);

        if (req->storage != NULL)
                stv = req->storage;
        else
                stv = stv_transient;

        req->storage = NULL;

        if (STV_NewObject(req->wrk, req->body_oc, stv, 0) == 0) {
                req->req_body_status = BS_ERROR;
                HSH_DerefBoc(req->wrk, req->body_oc);
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                (void)VFP_Error(vfc, "Object allocation failed:"
                    " Ran out of space in %s", stv->vclname);
                return (-1);
        }

        vfc->oc = req->body_oc;

        INIT_OBJ(ctx, VRT_CTX_MAGIC);
        VCL_Req2Ctx(ctx, req);

        if (req->vfp_filter_list != NULL &&
            VCL_StackVFP(vfc, req->vcl, req->vfp_filter_list)) {
                (void)VFP_Error(vfc, "req.body filters failed");
                req->req_body_status = BS_ERROR;
                HSH_DerefBoc(req->wrk, req->body_oc);
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                return (-1);
        }

        if (VFP_Open(ctx, vfc) < 0) {
                req->req_body_status = BS_ERROR;
                HSH_DerefBoc(req->wrk, req->body_oc);
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                return (-1);
        }

        AN(req->htc);
        yet = req->htc->content_length;
        if (yet != 0 && req->want100cont) {
                req->want100cont = 0;
                (void)req->transport->minimal_response(req, 100);
        }
        yet = vmax_t(ssize_t, yet, 0);
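        /*
         * Pull the body through the VFP stack one storage segment at a
         * time: get a buffer from storage, fill it with VFP_Suck(), then
         * either pass it to func() (streaming) or append it to body_oc
         * with ObjExtend() (caching).
         */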
        do {
                AZ(vfc->failed);
                if (maxsize >= 0 && req_bodybytes > maxsize) {
                        (void)VFP_Error(vfc, "Request body too big to cache");
                        break;
                }
                /* NB: only attempt a full allocation when caching. */
                l = maxsize > 0 ? yet : 0;
                if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK)
                        break;
                AZ(vfc->failed);
                AN(ptr);
                AN(l);
                vfps = VFP_Suck(vfc, ptr, &l);
                if (l > 0 && vfps != VFP_ERROR) {
                        req_bodybytes += l;
                        if (yet >= l)
                                yet -= l;
                        else if (yet > 0)
                                yet = 0;
                        if (func != NULL) {
                                if (vfps == VFP_END)
                                        flush |= OBJ_ITER_END;
                                r = func(priv, flush, ptr, l);
                                if (r)
                                        break;
                        } else {
                                ObjExtend(req->wrk, req->body_oc, l,
                                    vfps == VFP_END ? 1 : 0);
                        }
                }

        } while (vfps == VFP_OK);
        req->acct.req_bodybytes += VFP_Close(vfc);
        VSLb_ts_req(req, "ReqBody", VTIM_real());
        if (func != NULL) {
                HSH_DerefBoc(req->wrk, req->body_oc);
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                if (vfps == VFP_END && r == 0 && (flush & OBJ_ITER_END) == 0)
                        r = func(priv, flush | OBJ_ITER_END, NULL, 0);
                if (vfps != VFP_END) {
                        req->req_body_status = BS_ERROR;
                        if (r == 0)
                                r = -1;
                }
                return (r);
        }

        AZ(ObjSetU64(req->wrk, req->body_oc, OA_LEN, req_bodybytes));
        HSH_DerefBoc(req->wrk, req->body_oc);

        if (vfps != VFP_END) {
                req->req_body_status = BS_ERROR;
                AZ(HSH_DerefObjCore(req->wrk, &req->body_oc, 0));
                return (-1);
        }

        assert(req_bodybytes >= 0);
        if (req_bodybytes != req->htc->content_length) {
                /* We must also update the "pristine" req.* copy */
                http_Unset(req->http0, H_Content_Length);
                http_Unset(req->http0, H_Transfer_Encoding);
                http_PrintfHeader(req->http0, "Content-Length: %ju",
                    (uintmax_t)req_bodybytes);

                http_Unset(req->http, H_Content_Length);
                http_Unset(req->http, H_Transfer_Encoding);
                http_PrintfHeader(req->http, "Content-Length: %ju",
                    (uintmax_t)req_bodybytes);
        }

        req->req_body_status = BS_CACHED;
        return (req_bodybytes);
}

/*----------------------------------------------------------------------
 * Iterate over the req.body.
 *
 * This can be done exactly once if uncached, and multiple times if the
 * req.body is cached.
 *
 * Returns 0 on success and non-zero (-1 or the iterator's return value)
 * on failure.
 */

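/*
 * The iterator callback is handed the body in chunks; a non-zero return
 * from func() aborts the iteration and makes VRB_Iterate() fail.  See
 * httpq_req_body_discard() below for a minimal no-op iterator.
 */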
ssize_t
VRB_Iterate(struct worker *wrk, struct vsl_log *vsl,
    struct req *req, objiterate_f *func, void *priv)
{
        int i;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        AN(func);

        if (req->req_body_status == BS_CACHED) {
                AN(req->body_oc);
                if (ObjIterate(wrk, req->body_oc, priv, func, 0))
                        return (-1);
                return (0);
        }
        if (req->req_body_status == BS_NONE)
                return (0);
        if (req->req_body_status == BS_TAKEN) {
                VSLb(vsl, SLT_VCL_Error,
                    "Uncached req.body can only be consumed once.");
                return (-1);
        }
        if (req->req_body_status == BS_ERROR) {
                VSLb(vsl, SLT_FetchError,
                    "Had failed reading req.body before.");
                return (-1);
        }
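        /*
         * Claim the uncached body under the session lock so that only one
         * consumer can move it from "available" to BS_TAKEN.
         */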
        Lck_Lock(&req->sp->mtx);
        if (req->req_body_status->avail > 0) {
                req->req_body_status = BS_TAKEN;
                i = 0;
        } else
                i = -1;
        Lck_Unlock(&req->sp->mtx);
        if (i) {
                VSLb(vsl, SLT_VCL_Error,
                    "Multiple attempts to access non-cached req.body");
                return (i);
        }
        return (vrb_pull(req, -1, func, priv));
}

/*----------------------------------------------------------------------
 * VRB_Ignore() is a dedicated function because, in some protocols, we
 * might be able to dissuade the client from sending the body or to
 * terminate its transmission.
 *
 * For HTTP1 we do nothing if we are going to close the connection anyway;
 * otherwise we just iterate the body into oblivion.
 */

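/* A no-op iterator: each chunk is simply discarded. */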
static int v_matchproto_(objiterate_f)
httpq_req_body_discard(void *priv, unsigned flush, const void *ptr, ssize_t len)
{

        (void)priv;
        (void)flush;
        (void)ptr;
        (void)len;
        return (0);
}

int
VRB_Ignore(struct req *req)
{

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        if (req->doclose != SC_NULL)
                return (0);
        if (req->req_body_status->avail > 0)
                (void)VRB_Iterate(req->wrk, req->vsl, req,
                    httpq_req_body_discard, NULL);
        if (req->req_body_status == BS_ERROR)
                req->doclose = SC_RX_BODY;
        return (0);
}

/*----------------------------------------------------------------------
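 * Release the reference the request holds on a cached req.body, if any;
 * each busyobj from a restart may have gained a reference of its own,
 * which is why the assertions below allow for req->restarts + 1.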
 */
void
VRB_Free(struct req *req)
{
        int r;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);

        if (req->body_oc == NULL)
                return;

        r = HSH_DerefObjCore(req->wrk, &req->body_oc, 0);

        // each busyobj may have gained a reference
        assert (r >= 0);
        assert ((unsigned)r <= req->restarts + 1);
}

/*----------------------------------------------------------------------
 * Cache the req.body if it is smaller than the given size
 *
 * This function must be called before any backend fetches are kicked
 * off to prevent parallelism.
 */

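/*
 * Sketch of the typical way to get here from VCL, assuming the usual
 * vmod_std path via VRT_CacheReqBody():
 *
 *      import std;
 *
 *      sub vcl_recv {
 *              std.cache_req_body(100KB);
 *      }
 */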
ssize_t
VRB_Cache(struct req *req, ssize_t maxsize)
{
        uint64_t u;

        CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
        assert (req->req_step == R_STP_RECV);
        assert(maxsize >= 0);

        /*
         * We only allow caching to happen the first time through vcl_recv{}
         * where we know we will have no competition or conflicts for the
         * updates to req.http.* etc.
         */
        if (req->restarts > 0 && req->req_body_status != BS_CACHED) {
                VSLb(req->vsl, SLT_VCL_Error,
                    "req.body must be cached before restarts");
                return (-1);
        }

        if (req->req_body_status == BS_CACHED) {
                AZ(ObjGetU64(req->wrk, req->body_oc, OA_LEN, &u));
                return (u);
        }

        if (req->req_body_status->avail <= 0)
                return (req->req_body_status->avail);

        if (req->htc->content_length > maxsize) {
                req->req_body_status = BS_ERROR;
                (void)VFP_Error(req->vfc, "Request body too big to cache");
                return (-1);
        }

        return (vrb_pull(req, maxsize, NULL, NULL));
}