varnish-cache/bin/varnishd/cache/cache_deliver_proc.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2015 Varnish Software AS
3
 * All rights reserved.
4
 *
5
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
6
 *
7
 * SPDX-License-Identifier: BSD-2-Clause
8
 *
9
 * Redistribution and use in source and binary forms, with or without
10
 * modification, are permitted provided that the following conditions
11
 * are met:
12
 * 1. Redistributions of source code must retain the above copyright
13
 *    notice, this list of conditions and the following disclaimer.
14
 * 2. Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in the
16
 *    documentation and/or other materials provided with the distribution.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 */
30
31
#include "config.h"
32
33
#include "cache_varnishd.h"
34
#include "cache_filter.h"
35
#include "cache_objhead.h"
36
37
void
38 240
VDP_Panic(struct vsb *vsb, const struct vdp_ctx *vdc)
39
{
40
        struct vdp_entry *vde;
41
42 240
        if (PAN_dump_struct(vsb, vdc, VDP_CTX_MAGIC, "vdc"))
43 160
                return;
44 80
        VSB_printf(vsb, "nxt = %p,\n", vdc->nxt);
45 80
        VSB_printf(vsb, "retval = %d,\n", vdc->retval);
46
47 80
        if (!VTAILQ_EMPTY(&vdc->vdp)) {
48 40
                VSB_cat(vsb, "filters = {\n");
49 40
                VSB_indent(vsb, 2);
50 200
                VTAILQ_FOREACH(vde, &vdc->vdp, list)
51 320
                        VSB_printf(vsb, "%s = %p { priv = %p }\n",
52 160
                            vde->vdp->name, vde, vde->priv);
53 40
                VSB_indent(vsb, -2);
54 40
                VSB_cat(vsb, "},\n");
55 40
        }
56
57 80
        VSB_indent(vsb, -2);
58 80
        VSB_cat(vsb, "},\n");
59 240
}
60
61
/*
62
 * Ensure that transports have called VDP_Close()
63
 * to avoid leaks in VDPs
64
 */
65
void
66 165822
VDP_Fini(const struct vdp_ctx *vdc)
67
{
68 165822
        assert(VTAILQ_EMPTY(&vdc->vdp));
69 165822
}
70
71
void
72 233646
VDP_Init(struct vdp_ctx *vdc, struct worker *wrk, struct vsl_log *vsl,
73
    const struct req *req, const struct busyobj *bo, intmax_t *clen)
74
{
75 233646
        AN(vdc);
76 233646
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
77 233646
        AN(vsl);
78
79 233646
        AN(clen);
80
81 233646
        assert((req ? 1 : 0) ^ (bo ? 1 : 0));
82
83 233646
        AN(clen);
84 233646
        assert(*clen >= -1);
85
86 233646
        INIT_OBJ(vdc, VDP_CTX_MAGIC);
87 233646
        VTAILQ_INIT(&vdc->vdp);
88 233646
        vdc->wrk = wrk;
89 233646
        vdc->vsl = vsl;
90 233646
        vdc->clen = clen;
91
92 233646
        if (req != NULL) {
93 147327
                CHECK_OBJ(req, REQ_MAGIC);
94 147327
                vdc->oc = req->objcore;
95 147327
                vdc->hp = req->resp;
96 147327
        }
97
        else {
98 86319
                CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
99 86319
                vdc->oc = bo->bereq_body;
100 86319
                vdc->hp = bo->bereq;
101
        }
102 233646
}
103
104
/* VDP_bytes
105
 *
106
 * Pushes len bytes at ptr down the delivery processor list.
107
 *
108
 * This function picks and calls the next delivery processor from the
109
 * list. The return value is the return value of the delivery
110
 * processor. Upon seeing a non-zero return value, that lowest value
111
 * observed is latched in ->retval and all subsequent calls to
112
 * VDP_bytes will return that value directly without calling the next
113
 * processor.
114
 *
115
 * VDP_END marks the end of successful processing, it is issued by
116
 * VDP_DeliverObj() and may also be sent downstream by processors ending the
117
 * stream (for return value != 0)
118
 *
119
 * VDP_END must at most be received once per processor, so any VDP sending it
120
 * downstream must itself not forward it a second time.
121
 *
122
 * Valid return values (of VDP_bytes and any VDP function):
123
 * r < 0:  Error, breaks out early on an error condition
124
 * r == 0: Continue
125
 * r > 0:  Stop, breaks out early without error condition
126
 */
127
128
int
VDP_bytes(struct vdp_ctx *vdc, enum vdp_action act,
    const void *ptr, ssize_t len)
{
	int retval;
	struct vdp_entry *vdpe;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	/* A previously latched non-zero return short-circuits the chain */
	if (vdc->retval)
		return (vdc->retval);
	vdpe = vdc->nxt;
	CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);

	/* at most one VDP_END call */
	assert(vdpe->end == VDP_NULL);

	if (act == VDP_NULL)
		assert(len > 0);
	else if (act == VDP_END)
		vdpe->end = VDP_END;
	else
		assert(act == VDP_FLUSH);

	/* Call the present layer, while pointing to the next layer down */
	vdc->nxt = VTAILQ_NEXT(vdpe, list);
	vdpe->calls++;
	/*
	 * NOTE(review): bytes_in is accounted from vdc->bytes_done after
	 * the callback rather than from len directly - presumably so the
	 * callee can adjust the figure; confirm against cache_filter.h.
	 */
	vdc->bytes_done = len;
	retval = vdpe->vdp->bytes(vdc, act, &vdpe->priv, ptr, len);
	vdpe->bytes_in += vdc->bytes_done;
	/* Keep the lowest non-zero return value observed (see header comment) */
	if (retval && (vdc->retval == 0 || retval < vdc->retval))
		vdc->retval = retval; /* Latch error value */
	vdc->nxt = vdpe;
	return (vdc->retval);
}
162
163
/*
 * Push a VDP onto the tail of the delivery chain, allocating its
 * entry from ws.  The VDP's init function (if any) is called; a
 * positive init return means the filter opted out and the entry is
 * removed again without error.  Returns vdc->retval: 0 on success,
 * negative on failure (including workspace overflow).
 */
int
VDP_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp,
    void *priv)
{
	struct vdp_entry *vdpe;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	assert(*vdc->clen >= -1);
	AN(ws);
	AN(vdp);
	AN(vdp->name);

	/* An already-failed chain accepts no further filters */
	if (vdc->retval)
		return (vdc->retval);

	if (DO_DEBUG(DBG_PROCESSORS))
		VSLb(vdc->vsl, SLT_Debug, "VDP_push(%s)", vdp->name);

	vdpe = WS_Alloc(ws, sizeof *vdpe);
	if (vdpe == NULL) {
		/* workspace overflow */
		AZ(vdc->retval);
		vdc->retval = -1;
		return (vdc->retval);
	}
	INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
	vdpe->vdp = vdp;
	vdpe->priv = priv;
	VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);

	AZ(vdc->retval);
	if (vdpe->vdp->init != NULL)
		vdc->retval = vdpe->vdp->init(ctx, vdc, &vdpe->priv);
	/* cleared after init: only the first pushed VDP's init sees the oc */
	vdc->oc = NULL;

	if (vdc->retval > 0) {
		/* positive init return: filter opted out, remove it again */
		VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
		vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
		vdc->retval = 0;
	}
	else if (vdc->retval == 0)
		AN(vdp->bytes);
	return (vdc->retval);
}
211
212
/*
 * Tear down the VDP chain: log per-filter accounting to VSL, run each
 * filter's fini callback, and - when oc is given - cancel the object
 * fetch.  Returns the bytes_in count of the last filter removed.
 */
uint64_t
VDP_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
{
	struct vdp_entry *vdpe;
	uint64_t rv = 0;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
	CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

	while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) {
		CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
		rv = vdpe->bytes_in;
		VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
		    (uintmax_t)vdpe->calls, (uintmax_t)rv);
		if (vdpe->vdp->fini != NULL)
			AZ(vdpe->vdp->fini(vdc, &vdpe->priv));
		/* fini must have released the filter's private state */
		AZ(vdpe->priv);
		VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
		vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
#ifdef VDP_PEDANTIC_ARMED
		// enable when we are confident to get VDP_END right
		if (vdc->nxt == NULL && vdc->retval >= 0)
			assert(vdpe->end == VDP_END);
#endif
	}
	if (oc != NULL)
		HSH_Cancel(vdc->wrk, oc, boc);
	return (rv);
}
243
244
/*--------------------------------------------------------------------*/
245
246
/*
247
 * Push a VDPIO vdp. This can only be used with only vdpio-enabled VDPs or
248
 * after a successful upgrade
249
 */
250
int
VDPIO_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp,
    void *priv)
{
	struct vdp_entry *vdpe;
	int r;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	assert(*vdc->clen >= -1);
	AN(ws);
	AN(vdp);
	AN(vdp->name);

	/* for VDPIO, a positive retval carries the chain's lease capacity */
	if (vdc->retval < 0)
		return (vdc->retval);

	AN(vdp->io_init);

	// the first VDP (which leases from storage) only gets the minimum
	// capacity requirement of 1
	if (vdc->retval == 0) {
		assert(VTAILQ_EMPTY(&vdc->vdp));
		vdc->retval = 1;
	}

	if (DO_DEBUG(DBG_PROCESSORS))
		VSLb(vdc->vsl, SLT_Debug, "VDPIO_push(%s)", vdp->name);

	vdpe = WS_Alloc(ws, sizeof *vdpe);
	if (vdpe == NULL) {
		/* workspace overflow */
		vdc->retval = -ENOMEM;
		return (vdc->retval);
	}
	INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
	vdpe->vdp = vdp;
	vdpe->priv = priv;
	VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);

	assert(vdc->retval > 0);
	/* NOTE(review): always true after the AN(vdp->io_init) above */
	if (vdpe->vdp->io_init != NULL) {
		r = vdpe->vdp->io_init(ctx, vdc, &vdpe->priv, vdc->retval);
		if (r <= 0) {
			/* io_init failed or opted out: remove entry again */
			VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
			vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
		}
		else
			AN(vdp->io_lease);
		/* non-zero io_init return becomes the new capacity / error */
		if (r != 0)
			vdc->retval = r;
	}
	/* cleared after init: only the first pushed VDP's init sees the oc */
	vdc->oc = NULL;
	return (vdc->retval);
}
308
309
/*
310
 * upgrade an already initialized VDP filter chain to VDPIO, if possible
311
 * returns:
312
 * > 0 cap
313
 * -ENOTSUP io_upgrade missing for at least one filter
314
 * vdc->retval if < 0
315
 */
316
int
VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc)
{
	struct vdp_entry *vdpe;
	int cap, r;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	/* all-or-nothing: every pushed filter must support io_upgrade */
	VTAILQ_FOREACH(vdpe, &vdc->vdp, list)
		if (vdpe->vdp->io_upgrade == NULL)
			return (-ENOTSUP);

	if (vdc->retval < 0)
		return (vdc->retval);

	// minimum capacity requirement for the first filter (after storage)
	r = cap = 1;
	VTAILQ_FOREACH(vdpe, &vdc->vdp, list) {
		r = vdpe->vdp->io_upgrade(ctx, vdc, &vdpe->priv, cap);
		if (DO_DEBUG(DBG_PROCESSORS)) {
			VSLb(vdc->vsl, SLT_Debug, "VDPIO_Upgrade "
			    "%d = %s(cap = %d)",
			    r, vdpe->vdp->name, cap);
		}
		if (r < 0)
			return ((vdc->retval = r));
		// XXX remove if filter does not want to be pushed?
		assert(r != 0);
		/* each filter's capacity result feeds the next one down */
		cap = r;
	}
	/* latch and return the final (positive) capacity */
	return ((vdc->retval = r));
}
349
350
uint64_t
351 3199
VDPIO_Close1(struct vdp_ctx *vdc, struct vdp_entry *vdpe)
352
{
353
        uint64_t rv;
354
355 3199
        CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
356 3199
        rv = vdpe->bytes_in;
357 6398
        VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
358 3199
            (uintmax_t)vdpe->calls, (uintmax_t)rv);
359 3199
        if (vdpe->vdp->io_fini != NULL)
360 640
                vdpe->vdp->io_fini(vdc, &vdpe->priv);
361 3199
        AZ(vdpe->priv);
362 3199
        VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
363 3199
        vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
364 3199
        return (rv);
365
}
366
367
uint64_t
368 1280
VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
369
{
370
        struct vdp_entry *vdpe;
371 1280
        uint64_t rv = 0;
372
373 1280
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
374 1280
        CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
375 1280
        CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
376 1280
        CHECK_OBJ_ORNULL(boc, BOC_MAGIC);
377
378 3200
        while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL)
379 1920
                rv = VDPIO_Close1(vdc, vdpe);
380
381 1280
        if (oc != NULL)
382 1280
                HSH_Cancel(vdc->wrk, oc, boc);
383 1280
        return (rv);
384
}
385
386
/*
387
 * ============================================================
388
 * VDPIO helpers: VAI management
389
 *
390
 * Transports should not need to talk to the VAI Object interface directly,
391
 * because its state is kept in the vdp_ctx
392
 *
393
 * So we wrap init, return and fini
394
 */
395
396
// return true if error
397
int
VDPIO_Init(struct vdp_ctx *vdc, struct objcore *oc, struct ws *ws,
    vai_notify_cb *notify_cb, void *notify_priv, struct vscaret *scaret)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	VSCARET_CHECK_NOTNULL(scaret);
	/* scaret must be an empty return buffer with room for leases */
	AN(scaret->capacity);
	AZ(scaret->used);

	/* only one VAI handle per context */
	AZ(vdc->vai_hdl);
	vdc->vai_hdl = ObjVAIinit(vdc->wrk, oc, ws, notify_cb, notify_priv);
	if (vdc->vai_hdl == NULL)
		return (1);
	vdc->scaret = scaret;
	return (0);
}
413
414
// return leases stashed in scaret
415
void
VDPIO_Return(const struct vdp_ctx *vdc)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	/* hand the leases collected in scaret back to the VAI object */
	ObjVAIreturn(vdc->wrk, vdc->vai_hdl, vdc->scaret);
}
422
423
void
VDPIO_Fini(struct vdp_ctx *vdc)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	/* return outstanding leases before tearing down the handle */
	VDPIO_Return(vdc);
	ObjVAIfini(vdc->wrk, &vdc->vai_hdl);
}
431
432
/*--------------------------------------------------------------------*/
433
int v_matchproto_(objiterate_f)
434 167543
VDP_ObjIterate(void *priv, unsigned flush, const void *ptr, ssize_t len)
435
{
436
        enum vdp_action act;
437
438 167543
        if (flush == 0)
439 15504
                act = VDP_NULL;
440 152039
        else if ((flush & OBJ_ITER_END) != 0)
441 97474
                act = VDP_END;
442
        else
443 54565
                act = VDP_FLUSH;
444
445 167543
        return (VDP_bytes(priv, act, ptr, len));
446
}
447
448
449
int
450 94947
VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc)
451
{
452
        int r, final;
453
454 94947
        CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
455 94947
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
456 94947
        CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
457 94947
        AN(vdc->vsl);
458 94947
        AZ(vdc->oc);
459 94947
        vdc->hp = NULL;
460 94947
        vdc->clen = NULL;
461 94947
        final = oc->flags & OC_F_TRANSIENT ? 1 : 0;
462 94947
        r = ObjIterate(vdc->wrk, oc, vdc, VDP_ObjIterate, final);
463 94947
        if (r < 0)
464 1611
                return (r);
465 93336
        return (0);
466 94947
}