varnish-cache/bin/varnishd/cache/cache_vrt_vcl.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2016 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache_varnishd.h"

#include "vcl.h"
#include "vtim.h"
#include "vbm.h"

#include "cache_director.h"
#include "cache_transport.h"
#include "cache_vcl.h"
#include "vcc_interface.h"

/*--------------------------------------------------------------------*/

const char *
VCL_Return_Name(unsigned r)
{

        switch (r) {
#define VCL_RET_MAC(l, U, B)    \
        case VCL_RET_##U:       \
                return(#l);
#include "tbl/vcl_returns.h"
        default:
                return (NULL);
        }
}

const char *
VCL_Method_Name(unsigned m)
{

        switch (m) {
#define VCL_MET_MAC(func, upper, typ, bitmap)   \
        case VCL_MET_##upper:                   \
                return (#upper);
#include "tbl/vcl_returns.h"
        default:
                return (NULL);
        }
}
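
/*
 * Illustrative sketch (not part of the upstream table): the two lookup
 * functions above rely on the X-macro table in "tbl/vcl_returns.h".
 * Assuming that table defines a "deliver" return action, the VCL_RET_MAC()
 * expansion inside VCL_Return_Name() amounts to:
 *
 *        case VCL_RET_DELIVER:
 *                return("deliver");
 */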

/*--------------------------------------------------------------------*/

void
VCL_Refresh(struct vcl **vcc)
{

        while (vcl_active == NULL)
                VTIM_sleep(0.1);

        ASSERT_VCL_ACTIVE();
        if (*vcc == vcl_active)
                return;

        VCL_Update(vcc, NULL);
}

void
VCL_Recache(const struct worker *wrk, struct vcl **vclp)
{

        AN(wrk);
        AN(vclp);
        CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC);
        ASSERT_VCL_ACTIVE();

        if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) {
                VCL_Rel(vclp);
                return;
        }
        if (wrk->wpriv->vcl != NULL)
                VCL_Rel(&wrk->wpriv->vcl);
        wrk->wpriv->vcl = *vclp;
        *vclp = NULL;
}

void
VCL_Ref(struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        assert(!vcl->temp->is_cold);
        Lck_Lock(&vcl_mtx);
        assert(vcl->busy > 0);
        vcl->busy++;
        Lck_Unlock(&vcl_mtx);
}

void
VCL_Rel(struct vcl **vcc)
{
        struct vcl *vcl;

        TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
        Lck_Lock(&vcl_mtx);
        assert(vcl->busy > 0);
        vcl->busy--;
        /*
         * We do not garbage collect discarded VCLs here; that happens
         * in VCL_Poll(), which is called from the CLI thread.
         */
        Lck_Unlock(&vcl_mtx);
}
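
/*
 * Illustrative sketch (not compiled into varnishd): the intended pairing of
 * the reference helpers above, for a hypothetical caller that needs a stable
 * reference to the active VCL for the duration of one piece of work.
 */
#if 0
static void
example_with_active_vcl(void)
{
        struct vcl *vcl = NULL;

        VCL_Refresh(&vcl);      /* waits for an active VCL, takes a reference */
        AN(vcl);
        /* ... use vcl ... */
        VCL_Rel(&vcl);          /* drops the reference and NULLs the pointer */
}
#endif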

/*--------------------------------------------------------------------*/

static void
vcldir_free(struct vcldir *vdir)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
        AZ(vdir->refcnt);
        Lck_Delete(&vdir->dlck);
        free(vdir->cli_name);
        FREE_OBJ(vdir->dir);
        FREE_OBJ(vdir);
}

static VCL_BACKEND
vcldir_surplus(struct vcldir *vdir)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 1);
        vdir->refcnt = 0;
        vcldir_free(vdir);
        return (NULL);
}

VCL_BACKEND
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
    const char *fmt, ...)
{
        struct vsb *vsb;
        struct vcl *vcl;
        struct vcldir *vdir;
        const struct vcltemp *temp;
        va_list ap;
        int i;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
        AN(fmt);
        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        // opportunistic check, re-checked under the lock below
        if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
                return (NULL);

        ALLOC_OBJ(vdir, VCLDIR_MAGIC);
        AN(vdir);
        ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
        AN(vdir->dir);
        vdir->dir->vdir = vdir;

        vdir->methods = m;
        vdir->dir->priv = priv;
        vsb = VSB_new_auto();
        AN(vsb);
        VSB_printf(vsb, "%s.", VCL_Name(vcl));
        i = VSB_len(vsb);
        va_start(ap, fmt);
        VSB_vprintf(vsb, fmt, ap);
        va_end(ap);
        AZ(VSB_finish(vsb));
        REPLACE(vdir->cli_name, VSB_data(vsb));
        VSB_destroy(&vsb);
        vdir->dir->vcl_name = vdir->cli_name + i;

        vdir->vcl = vcl;
        vdir->admin_health = VDI_AH_AUTO;
        vdir->health_changed = VTIM_real();

        vdir->refcnt++;
        Lck_New(&vdir->dlck, lck_director);
        vdir->dir->mtx = &vdir->dlck;

        /* NB: We only look at the VCL temperature now, after having gone
         * through the trouble of creating the director, even though doing
         * so might not have been legal. Because we change the VCL
         * temperature before sending COLD events, we have to tolerate and
         * undo creation attempts in the COOLING case.
         *
         * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
         * if the vcl is busy (ref vcl_set_state()).
         */

        while (1) {
                temp = vcl->temp;
                if (temp == VCL_TEMP_COOLING)
                        return (vcldir_surplus(vdir));
                if (vcl->busy == 0 && vcl->temp->is_warm) {
                        if (! Lck_Trylock(&vcl_mtx))
                                break;
                        usleep(10 * 1000);
                        continue;
                }
                Lck_Lock(&vcl_mtx);
                break;
        }
        Lck_AssertHeld(&vcl_mtx);
        temp = vcl->temp;
        if (temp != VCL_TEMP_COOLING)
                VTAILQ_INSERT_TAIL(&vcl->vdire->directors, vdir, directors_list);
        if (temp->is_warm)
                VDI_Event(vdir->dir, VCL_EVENT_WARM);
        Lck_Unlock(&vcl_mtx);

        if (temp == VCL_TEMP_COOLING)
                return (vcldir_surplus(vdir));

        if (!temp->is_warm && temp != VCL_TEMP_INIT)
                WRONG("Dynamic Backends can only be added to warm VCLs");

        return (vdir->dir);
}

void
VRT_StaticDirector(VCL_BACKEND b)
{
        struct vcldir *vdir;

        CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC);
        vdir = b->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 1);
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);
        vdir->flags |= VDIR_FLG_NOREFCNT;
}

// vcldir is already removed from the directors list
// to be called only from vdire_*
void
vcldir_retire(struct vcldir *vdir, const struct vcltemp *temp)
{

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        assert(vdir->refcnt == 0);
        AN(temp);

        if (temp->is_warm)
                VDI_Event(vdir->dir, VCL_EVENT_COLD);
        if (vdir->methods->destroy != NULL)
                vdir->methods->destroy(vdir->dir);
        vcldir_free(vdir);
}

static int
vcldir_deref(struct vcldir *vdir)
{
        int busy;

        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);

        Lck_Lock(&vdir->dlck);
        assert(vdir->refcnt > 0);
        busy = --vdir->refcnt;
        Lck_Unlock(&vdir->dlck);

        if (!busy)
                vdire_resign(vdir->vcl->vdire, vdir);
        return (busy);
}

void
VRT_DelDirector(VCL_BACKEND *dirp)
{
        VCL_BACKEND dir;
        struct vcldir *vdir;

        TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC);

        vdir = dir->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

        if (vdir->methods->release != NULL)
                vdir->methods->release(vdir->dir);

        if (vdir->flags & VDIR_FLG_NOREFCNT) {
                vdir->flags &= ~VDIR_FLG_NOREFCNT;
                AZ(vcldir_deref(vdir));
        } else {
                (void) vcldir_deref(vdir);
        }
}
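
/*
 * Illustrative sketch (not compiled into varnishd): how a VMOD would
 * typically use the functions above to create and later delete a dynamic
 * backend.  The function names, the "example" type string and the argument
 * layout are assumptions made for the example; the method table only fills
 * in callbacks referenced in this file, a real VMOD usually provides more.
 */
#if 0
static void
example_destroy(VCL_BACKEND d)
{
        /* tear down whatever was passed as priv to VRT_AddDirector() */
        (void)d;
}

static const struct vdi_methods example_methods[1] = {{
        .magic =        VDI_METHODS_MAGIC,
        .type =         "example",
        .destroy =      example_destroy,
}};

static VCL_BACKEND
example_backend_new(VRT_CTX, void *priv, const char *name)
{

        /* cli_name becomes "<vcl>.<name>"; NULL means the VCL is cooling */
        return (VRT_AddDirector(ctx, example_methods, priv, "%s", name));
}

static void
example_backend_delete(VCL_BACKEND *dp)
{

        /* drops the creation reference taken by VRT_AddDirector(); the
         * destroy callback runs once the last reference is gone */
        VRT_DelDirector(dp);
}
#endif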

void
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
{
        struct vcldir *vdir;
        VCL_BACKEND tmp;

        AN(dst);
        CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
        CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
        if (*dst == src)
                return;
        tmp = *dst;
        *dst = src;
        if (src != NULL) {
                vdir = src->vdir;
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
                if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
                        Lck_Lock(&vdir->dlck);
                        assert(vdir->refcnt > 0);
                        vdir->refcnt++;
                        Lck_Unlock(&vdir->dlck);
                }
        }
        if (tmp != NULL) {
                vdir = tmp->vdir;
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
                if (!(vdir->flags & VDIR_FLG_NOREFCNT))
                        (void)vcldir_deref(vdir);
        }
}
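
/*
 * Illustrative sketch (not compiled into varnishd): VRT_Assign_Backend() is
 * the reference-counted way to store a director in a local variable or a
 * VMOD object; assigning NULL releases the reference again.  The function
 * name below is hypothetical.
 */
#if 0
static void
example_hold_backend(VCL_BACKEND b)
{
        VCL_BACKEND mine = NULL;

        VRT_Assign_Backend(&mine, b);           /* takes a reference on b */
        /* ... use mine ... */
        VRT_Assign_Backend(&mine, NULL);        /* releases it again */
}
#endif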

void
VRT_DisableDirector(VCL_BACKEND d)
{
        struct vcldir *vdir;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        vdir = d->vdir;
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);

        vdir->admin_health = VDI_AH_DELETED;
        vdir->health_changed = VTIM_real();
}

VCL_BACKEND
VRT_LookupDirector(VRT_CTX, VCL_STRING name)
{
        struct vcl *vcl;
        struct vcldir *vdir;
        VCL_BACKEND dd, d = NULL;
        struct vdire *vdire;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(name);

        assert(ctx->method & VCL_MET_TASK_H);
        ASSERT_CLI();

        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        vdire = vcl->vdire;

        vdire_start_iter(vdire);
        VTAILQ_FOREACH(vdir, &vdire->directors, directors_list) {
                dd = vdir->dir;
                if (strcmp(dd->vcl_name, name))
                        continue;
                d = dd;
                break;
        }
        vdire_end_iter(vdire);

        return (d);
}

/*--------------------------------------------------------------------*/

VCL_BACKEND
VCL_DefaultDirector(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
        return (*vcl->conf->default_director);
}

const char *
VCL_Name(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        return (vcl->loaded_name);
}

VCL_PROBE
VCL_DefaultProbe(const struct vcl *vcl)
{

        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
        return (vcl->conf->default_probe);
}

/*--------------------------------------------------------------------*/

void
VRT_CTX_Assert(VRT_CTX)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

        if (ctx->msg != NULL)
                CHECK_OBJ(ctx->msg, VSB_MAGIC);
        else
                AN(ctx->vsl);
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
        WS_Assert(ctx->ws);

        CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);

        CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);

        CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
        CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
}

struct vclref *
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
{
        struct vclref* ref;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);

        ALLOC_OBJ(ref, VCLREF_MAGIC);
        AN(ref);
        ref->vcl = ctx->vcl;
        REPLACE(ref->desc, desc);

        VCL_Ref(ctx->vcl);

        Lck_Lock(&vcl_mtx);
        VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
        Lck_Unlock(&vcl_mtx);

        return (ref);
}

void
VRT_VCL_Allow_Cold(struct vclref **refp)
{
        struct vcl *vcl;
        struct vclref *ref;

        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
        vcl = ref->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        Lck_Lock(&vcl_mtx);
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
        Lck_Unlock(&vcl_mtx);

        VCL_Rel(&vcl);

        REPLACE(ref->desc, NULL);
        FREE_OBJ(ref);
}

struct vclref *
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
{
        struct vcl *vcl;
        struct vclref* ref;

        ASSERT_CLI();
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(desc);
        AN(*desc);

        vcl = ctx->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
        assert(vcl->temp->is_warm);

        ALLOC_OBJ(ref, VCLREF_MAGIC);
        AN(ref);
        ref->vcl = vcl;
        REPLACE(ref->desc, desc);

        Lck_Lock(&vcl_mtx);
        VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
        vcl->nrefs++;
        Lck_Unlock(&vcl_mtx);

        return (ref);
}

void
VRT_VCL_Allow_Discard(struct vclref **refp)
{
        struct vcl *vcl;
        struct vclref *ref;

        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
        vcl = ref->vcl;
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);

        /* NB: A VCL may be released by a VMOD at any time, but it must happen
         * after a warmup and before the end of a cooldown. The release may or
         * may not happen while the same thread holds the temperature lock, so
         * instead we check that all references are gone in VCL_Nuke.
         */

        Lck_Lock(&vcl_mtx);
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
        vcl->nrefs--;
        /* No garbage collection here, for the same reasons as in VCL_Rel. */
        Lck_Unlock(&vcl_mtx);

        REPLACE(ref->desc, NULL);
        FREE_OBJ(ref);
}
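
/*
 * Illustrative sketch (not compiled into varnishd): a VMOD that keeps
 * per-VCL state alive, for instance for a background thread, pairs the
 * two calls above.  The struct and function names are hypothetical.
 */
#if 0
struct example_state {
        struct vclref           *ref;
};

static void
example_state_start(VRT_CTX, struct example_state *st)
{

        /* typically called from vcl_init{}: CLI context, warm VCL */
        st->ref = VRT_VCL_Prevent_Discard(ctx, "example state");
}

static void
example_state_stop(struct example_state *st)
{

        /* after this, the VCL can be discarded again */
        VRT_VCL_Allow_Discard(&st->ref);
}
#endif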

/*--------------------------------------------------------------------
 */

static int
req_poll(struct worker *wrk, struct req *req)
{
        struct req *top;

        /* NB: Since a fail transition leads to vcl_synth, the request may be
         * short-circuited twice.
         */
        if (req->req_reset) {
                wrk->vpi->handling = VCL_RET_FAIL;
                return (-1);
        }

        top = req->top->topreq;
        CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);

        if (!FEATURE(FEATURE_VCL_REQ_RESET))
                return (0);
        if (top->transport->poll == NULL)
                return (0);
        if (top->transport->poll(top) >= 0)
                return (0);

        VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
        wrk->stats->req_reset++;
        wrk->vpi->handling = VCL_RET_FAIL;
        req->req_reset = 1;
        return (-1);
}

/*--------------------------------------------------------------------
 * Method functions to call into VCL programs.
 *
 * Either the request or the busyobject must be specified, but not both,
 * except for vcl_pipe{} which gets both.
 * The workspace argument is where random VCL stuff gets space from.
 */

static void
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
{
        uintptr_t rws = 0, aws;
        struct vrt_ctx ctx;
        struct vbitmap *vbm;
        void *p;
        size_t sz;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        INIT_OBJ(&ctx, VRT_CTX_MAGIC);
        if (bo != NULL) {
                CHECK_OBJ(bo, BUSYOBJ_MAGIC);
                CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
                VCL_Bo2Ctx(&ctx, bo);
        }
        if (req != NULL) {
                if (bo != NULL)
                        assert(method == VCL_MET_PIPE);
                CHECK_OBJ(req, REQ_MAGIC);
                CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
                CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
                CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
                if (req_poll(wrk, req))
                        return;
                VCL_Req2Ctx(&ctx, req);
        }
        assert(ctx.now != 0);
        ctx.specific = specific;
        ctx.method = method;
        if (track_call > 0) {
                rws = WS_Snapshot(wrk->aws);
                sz = VBITMAP_SZ(track_call);
                p = WS_Alloc(wrk->aws, sz);
                // No use to attempt graceful failure, all VCL calls will fail
                AN(p);
                vbm = vbit_init(p, sz);
                ctx.called = vbm;
        }
        aws = WS_Snapshot(wrk->aws);
        wrk->cur_method = method;
        wrk->seen_methods |= method;
        AN(ctx.vsl);
        VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
        func(&ctx, VSUB_STATIC, NULL);
        VSLbs(ctx.vsl, SLT_VCL_return,
            TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
        wrk->cur_method |= 1;           // Magic marker
        if (wrk->vpi->handling == VCL_RET_FAIL)
                wrk->stats->vcl_fail++;

        /*
         * VCL/Vmods are not allowed to make permanent allocations from
         * wrk->aws, but they can reserve and return from it.
         */
        assert(aws == WS_Snapshot(wrk->aws));
        if (rws != 0)
                WS_Reset(wrk->aws, rws);
}

#define VCL_MET_MAC(func, upper, typ, bitmap)                           \
void                                                                    \
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,                \
     struct req *req, struct busyobj *bo, void *specific)               \
{                                                                       \
                                                                        \
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);                              \
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);                   \
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);                           \
        vcl_call_method(wrk, req, bo, specific,                         \
            VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
        AN((1U << wrk->vpi->handling) & bitmap);                        \
}

#include "tbl/vcl_returns.h"

/*--------------------------------------------------------------------
 */

VCL_STRING
VRT_check_call(VRT_CTX, VCL_SUB sub)
{
        VCL_STRING err = NULL;
        enum vcl_func_fail_e fail;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

        AN(sub->func);
        sub->func(ctx, VSUB_CHECK, &fail);

        switch (fail) {
        case VSUB_E_OK:
                break;
        case VSUB_E_METHOD:
                err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
                    " not allowed from here", sub->name);
                if (err == NULL)
                        err = "Dynamic call not allowed and workspace overflow";
                break;
        case VSUB_E_RECURSE:
                err = WS_Printf(ctx->ws, "Recursive dynamic call to"
                    " \"sub %s{}\"", sub->name);
                if (err == NULL)
                        err = "Recursive dynamic call and workspace overflow";
                break;
        default:
                INCOMPL();
        }

        return (err);
}

VCL_VOID
VRT_call(VRT_CTX, VCL_SUB sub)
{

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);

        AZ(VRT_handled(ctx));
        AN(sub->func);
        sub->func(ctx, VSUB_DYNAMIC, NULL);
}
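
/*
 * Illustrative sketch (not compiled into varnishd): the usual pattern for a
 * VMOD that calls a SUB argument dynamically is to probe with
 * VRT_check_call() first, fail the VCL with the returned message if the
 * call is not allowed, and otherwise go through VRT_call().  The function
 * name below is hypothetical.
 */
#if 0
static void
example_call_sub(VRT_CTX, VCL_SUB sub)
{
        VCL_STRING err;

        err = VRT_check_call(ctx, sub);
        if (err != NULL) {
                VRT_fail(ctx, "%s", err);
                return;
        }
        VRT_call(ctx, sub);
}
#endif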