varnish-cache/bin/varnishd/cache/cache_vrt_vcl.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2016 Varnish Software AS
3
 * All rights reserved.
4
 *
5
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
6
 *
7
 * SPDX-License-Identifier: BSD-2-Clause
8
 *
9
 * Redistribution and use in source and binary forms, with or without
10
 * modification, are permitted provided that the following conditions
11
 * are met:
12
 * 1. Redistributions of source code must retain the above copyright
13
 *    notice, this list of conditions and the following disclaimer.
14
 * 2. Redistributions in binary form must reproduce the above copyright
15
 *    notice, this list of conditions and the following disclaimer in the
16
 *    documentation and/or other materials provided with the distribution.
17
 *
18
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
22
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28
 * SUCH DAMAGE.
29
 *
30
 */
31
32
#include "config.h"
33
34
#include <stdio.h>
35
#include <stdlib.h>
36
37
#include "cache_varnishd.h"
38
39
#include "vcl.h"
40
#include "vtim.h"
41
#include "vbm.h"
42
43
#include "cache_director.h"
44
#include "cache_transport.h"
45
#include "cache_vcl.h"
46
#include "vcc_interface.h"
47
48
/*--------------------------------------------------------------------*/
49
50
/*
 * Map a VCL_RET_* return-action code to its lower-case name
 * (e.g. "deliver"), generated from tbl/vcl_returns.h.
 * Returns NULL for an unknown value.
 */
const char *
51 39108
VCL_Return_Name(unsigned r)
52
{
53
54 39108
        switch (r) {
55
#define VCL_RET_MAC(l, U, B)    \
56
        case VCL_RET_##U:       \
57
                return(#l);
58
#include "tbl/vcl_returns.h"
59
        default:
60
                return (NULL);
61
        }
62 39108
}
63
64
/*
 * Map a VCL_MET_* method bit to its upper-case name
 * (e.g. "RECV"), generated from tbl/vcl_returns.h.
 * Returns NULL for an unknown value.
 */
const char *
65 39148
VCL_Method_Name(unsigned m)
66
{
67
68 39148
        switch (m) {
69
#define VCL_MET_MAC(func, upper, typ, bitmap)   \
70
        case VCL_MET_##upper:                   \
71
                return (#upper);
72
#include "tbl/vcl_returns.h"
73
        default:
74
                return (NULL);
75
        }
76 39148
}
77
78
/*--------------------------------------------------------------------*/
79
80
/*
 * Make *vcc reference the currently active VCL, polling in 100ms
 * steps until an active VCL exists.  No-op when *vcc is already
 * the active VCL; otherwise the switch is done by VCL_Update().
 */
void
81 6782
VCL_Refresh(struct vcl **vcc)
82
{
83
84 6782
        while (vcl_active == NULL)
85 0
                VTIM_sleep(0.1);
86
87 6782
        ASSERT_VCL_ACTIVE();
88 6782
        if (*vcc == vcl_active)
89 3916
                return;
90
91 2866
        VCL_Update(vcc, NULL);
92 6782
}
93
94
/*
 * Consume the caller's VCL reference in *vclp: if it is the
 * active VCL and the worker does not already cache the active
 * one, park it in wrk->wpriv->vcl (releasing any stale cached
 * reference first); otherwise just release it.  *vclp is NULL
 * on return either way.
 */
void
95 6792
VCL_Recache(const struct worker *wrk, struct vcl **vclp)
96
{
97
98 6792
        AN(wrk);
99 6792
        AN(vclp);
100 6792
        CHECK_OBJ_NOTNULL(*vclp, VCL_MAGIC);
101 6792
        ASSERT_VCL_ACTIVE();
102
103 6792
        if (*vclp != vcl_active || wrk->wpriv->vcl == vcl_active) {
104 126
                VCL_Rel(vclp);
105 126
                return;
106
        }
107 6666
        if (wrk->wpriv->vcl != NULL)
108 0
                VCL_Rel(&wrk->wpriv->vcl);
109 6666
        wrk->wpriv->vcl = *vclp;
110 6666
        *vclp = NULL;
111 6792
}
112
113
/*
 * Take an additional busy reference on a VCL.  Only legal while
 * the VCL is not cold and already holds at least one reference
 * (both asserted).
 */
void
114 5342
VCL_Ref(struct vcl *vcl)
115
{
116
117 5342
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
118 5342
        assert(!vcl->temp->is_cold);
119 5342
        Lck_Lock(&vcl_mtx);
120 5342
        assert(vcl->busy > 0);
121 5342
        vcl->busy++;
122 5342
        Lck_Unlock(&vcl_mtx);
123 5342
}
124
125
/*
 * Drop one busy reference and clear the caller's pointer
 * (TAKE_OBJ_NOTNULL NULLs *vcc).
 */
void
126 8065
VCL_Rel(struct vcl **vcc)
127
{
128
        struct vcl *vcl;
129
130 8065
        TAKE_OBJ_NOTNULL(vcl, vcc, VCL_MAGIC);
131 8065
        Lck_Lock(&vcl_mtx);
132 8065
        assert(vcl->busy > 0);
133 8065
        vcl->busy--;
134
        /*
135
         * We do not garbage collect discarded VCL's here, that happens
136
         * in VCL_Poll() which is called from the CLI thread.
137
         */
138 8065
        Lck_Unlock(&vcl_mtx);
139 8065
}
140
141
/*--------------------------------------------------------------------*/
142
143
/*
 * Free a vcldir whose refcount has reached zero (asserted),
 * together with its lock, CLI name string and director object.
 */
static void
144 217
vcldir_free(struct vcldir *vdir)
145
{
146
147 217
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
148 217
        CHECK_OBJ_NOTNULL(vdir->dir, DIRECTOR_MAGIC);
149 217
        AZ(vdir->refcnt);
150 217
        Lck_Delete(&vdir->dlck);
151 217
        free(vdir->cli_name);
152 217
        FREE_OBJ(vdir->dir);
153 217
        FREE_OBJ(vdir);
154 217
}
155
156
/*
 * Tear down a just-created director (holding its sole creation
 * reference) again because the VCL turned out to be COOLING.
 * Always returns NULL so callers can propagate the failure.
 */
static VCL_BACKEND
157 0
vcldir_surplus(struct vcldir *vdir)
158
{
159
160 0
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
161 0
        assert(vdir->refcnt == 1);
162 0
        vdir->refcnt = 0;
163 0
        vcldir_free(vdir);
164 0
        return (NULL);
165
}
166
167
/*
 * Create and register a dynamic director on the VCL of ctx.
 *
 * m/priv are the director's method table and private state; the
 * printf-style fmt/... arguments name it, and its CLI name
 * becomes "<vclname>.<name>" (vcl_name points past the dot).
 * Returns the new director, or NULL if the VCL is COOLING (the
 * partially built director is then undone via vcldir_surplus()).
 * The director starts with one reference held by its creator.
 */
VCL_BACKEND
168 2988
VRT_AddDirector(VRT_CTX, const struct vdi_methods *m, void *priv,
169
    const char *fmt, ...)
170
{
171
        struct vsb *vsb;
172
        struct vcl *vcl;
173
        struct vcldir *vdir;
174
        const struct vcltemp *temp;
175
        va_list ap;
176
        int i;
177
178 2988
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
179 2988
        CHECK_OBJ_NOTNULL(m, VDI_METHODS_MAGIC);
180 2988
        AN(fmt);
181 2988
        vcl = ctx->vcl;
182 2988
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
183
184
        // opportunistic, re-checked again under lock
185 2988
        if (vcl->temp == VCL_TEMP_COOLING && !DO_DEBUG(DBG_VTC_MODE))
186 0
                return (NULL);
187
188 2988
        ALLOC_OBJ(vdir, VCLDIR_MAGIC);
189 2988
        AN(vdir);
190 2988
        ALLOC_OBJ(vdir->dir, DIRECTOR_MAGIC);
191 2988
        AN(vdir->dir);
192 2988
        vdir->dir->vdir = vdir;
193
194 2988
        vdir->methods = m;
195 2988
        vdir->dir->priv = priv;
196 2988
        vsb = VSB_new_auto();
197 2988
        AN(vsb);
198 2988
        VSB_printf(vsb, "%s.", VCL_Name(vcl));
199 2988
        i = VSB_len(vsb);
200 2988
        va_start(ap, fmt);
201 2988
        VSB_vprintf(vsb, fmt, ap);
202 2988
        va_end(ap);
203 2988
        AZ(VSB_finish(vsb));
204 2988
        REPLACE(vdir->cli_name, VSB_data(vsb));
205 2988
        VSB_destroy(&vsb);
206 2988
        vdir->dir->vcl_name = vdir->cli_name + i;
207
208 2988
        vdir->vcl = vcl;
209 2988
        vdir->admin_health = VDI_AH_AUTO;
210 2988
        vdir->health_changed = VTIM_real();
211
212 2988
        vdir->refcnt++;
213 2988
        Lck_New(&vdir->dlck, lck_director);
214 2988
        vdir->dir->mtx = &vdir->dlck;
215
216
        /* NB: at this point we look at the VCL temperature after getting
217
         * through the trouble of creating the director even though it might
218
         * not be legal to do so. Because we change the VCL temperature before
219
         * sending COLD events we have to tolerate and undo attempts for the
220
         * COOLING case.
221
         *
222
         * To avoid deadlocks during vcl_BackendEvent, we only wait for vcl_mtx
223
         * if the vcl is busy (ref vcl_set_state())
224
         */
225
226 2988
        while (1) {
227 2988
                temp = vcl->temp;
228 2988
                if (temp == VCL_TEMP_COOLING)
229 0
                        return (vcldir_surplus(vdir));
230 2988
                if (vcl->busy == 0 && vcl->temp->is_warm) {
231 190
                        if (! Lck_Trylock(&vcl_mtx))
232 190
                                break;
233 0
                        usleep(10 * 1000);
234 0
                        continue;
235
                }
236 2798
                Lck_Lock(&vcl_mtx);
237 2798
                break;
238
        }
239 2988
        Lck_AssertHeld(&vcl_mtx);
240 2988
        temp = vcl->temp;
241 2988
        if (temp != VCL_TEMP_COOLING)
242 2988
                VTAILQ_INSERT_TAIL(&vcl->vdire->directors, vdir, directors_list);
243 2988
        if (temp->is_warm)
244 218
                VDI_Event(vdir->dir, VCL_EVENT_WARM);
245 2988
        Lck_Unlock(&vcl_mtx);
246
247 2988
        if (temp == VCL_TEMP_COOLING)
248 0
                return (vcldir_surplus(vdir));
249
250 2988
        if (!temp->is_warm && temp != VCL_TEMP_INIT)
251 2
                WRONG("Dynamic Backends can only be added to warm VCLs");
252
253 2986
        return (vdir->dir);
254 2986
}
255
256
/*
 * Declare a director static: it must hold exactly one reference
 * (asserted), and reference counting is disabled for it from now
 * on via VDIR_FLG_NOREFCNT.
 */
void
257 2610
VRT_StaticDirector(VCL_BACKEND b)
258
{
259
        struct vcldir *vdir;
260
261 2610
        CHECK_OBJ_NOTNULL(b, DIRECTOR_MAGIC);
262 2610
        vdir = b->vdir;
263 2610
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
264 2610
        assert(vdir->refcnt == 1);
265 2610
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);
266 2610
        vdir->flags |= VDIR_FLG_NOREFCNT;
267 2610
}
268
269
// vcldir is already removed from the directors list
270
// to be called only from vdire_*
271
/*
 * Finally retire a refcount-zero director: send a COLD event if
 * the VCL temperature is warm, run the optional destroy callback
 * and free the vcldir.
 */
void
272 217
vcldir_retire(struct vcldir *vdir, const struct vcltemp *temp)
273
{
274
275 217
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
276 217
        assert(vdir->refcnt == 0);
277 217
        AN(temp);
278
279 217
        if (temp->is_warm)
280 28
                VDI_Event(vdir->dir, VCL_EVENT_COLD);
281 217
        if (vdir->methods->destroy != NULL)
282 188
                vdir->methods->destroy(vdir->dir);
283 217
        vcldir_free(vdir);
284 217
}
285
286
/*
 * Drop one reference (illegal for NOREFCNT directors, asserted)
 * and, when the count hits zero, resign the director from its
 * vdire.  Returns the remaining reference count.
 */
static int
287 633
vcldir_deref(struct vcldir *vdir)
288
{
289
        int busy;
290
291 633
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
292 633
        AZ(vdir->flags & VDIR_FLG_NOREFCNT);
293
294 633
        Lck_Lock(&vdir->dlck);
295 633
        assert(vdir->refcnt > 0);
296 633
        busy = --vdir->refcnt;
297 633
        Lck_Unlock(&vdir->dlck);
298
299 633
        if (!busy)
300 217
                vdire_resign(vdir->vcl->vdire, vdir);
301 633
        return (busy);
302
}
303
304
/*
 * Delete a dynamic director: run the optional release callback,
 * re-enable refcounting if it was declared static (in which case
 * the dropped reference must have been the last one, asserted),
 * then drop the creation reference.  *dirp is cleared.
 */
void
305 189
VRT_DelDirector(VCL_BACKEND *dirp)
306
{
307
        VCL_BACKEND dir;
308
        struct vcldir *vdir;
309
310 189
        TAKE_OBJ_NOTNULL(dir, dirp, DIRECTOR_MAGIC);
311
312 189
        vdir = dir->vdir;
313 189
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
314
315 189
        if (vdir->methods->release != NULL)
316 26
                vdir->methods->release(vdir->dir);
317
318 189
        if (vdir->flags & VDIR_FLG_NOREFCNT) {
319 130
                vdir->flags &= ~VDIR_FLG_NOREFCNT;
320 130
                AZ(vcldir_deref(vdir));
321 130
        } else {
322 59
                (void) vcldir_deref(vdir);
323
        }
324 189
}
325
326
/*
 * Reference-counted assignment *dst = src for VCL_BACKENDs.
 * The new value is referenced before the old one is dropped
 * (making self-assignment a safe no-op); NOREFCNT (static)
 * directors skip the counting on either side.
 */
void
327 34943
VRT_Assign_Backend(VCL_BACKEND *dst, VCL_BACKEND src)
328
{
329
        struct vcldir *vdir;
330
        VCL_BACKEND tmp;
331
332 34943
        AN(dst);
333 34943
        CHECK_OBJ_ORNULL((*dst), DIRECTOR_MAGIC);
334 34943
        CHECK_OBJ_ORNULL(src, DIRECTOR_MAGIC);
335 34943
        if (*dst == src)
336 2360
                return;
337 32583
        tmp = *dst;
338 32583
        *dst = src;
339 32583
        if (src != NULL) {
340 16568
                vdir = src->vdir;
341 16568
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
342 16568
                if (!(vdir->flags & VDIR_FLG_NOREFCNT)) {
343 426
                        Lck_Lock(&vdir->dlck);
344 426
                        assert(vdir->refcnt > 0);
345 426
                        vdir->refcnt++;
346 426
                        Lck_Unlock(&vdir->dlck);
347 426
                }
348 16568
        }
349 32583
        if (tmp != NULL) {
350 16450
                vdir = tmp->vdir;
351 16450
                CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
352 16450
                if (!(vdir->flags & VDIR_FLG_NOREFCNT))
353 444
                        (void)vcldir_deref(vdir);
354 16450
        }
355 34943
}
356
357
/*
 * Administratively mark a director as deleted (VDI_AH_DELETED)
 * and timestamp the health change.
 */
void
358 158
VRT_DisableDirector(VCL_BACKEND d)
359
{
360
        struct vcldir *vdir;
361
362 158
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
363 158
        vdir = d->vdir;
364 158
        CHECK_OBJ_NOTNULL(vdir, VCLDIR_MAGIC);
365
366 158
        vdir->admin_health = VDI_AH_DELETED;
367 158
        vdir->health_changed = VTIM_real();
368 158
}
369
370
/*
 * Find a director by its vcl_name in the VCL of ctx, walking the
 * vdire directors list under its iteration protection.  Only
 * legal from CLI/housekeeping methods (VCL_MET_TASK_H, asserted).
 * Returns NULL when no director matches; NB: no reference is
 * taken on the returned director.
 */
VCL_BACKEND
371 2
VRT_LookupDirector(VRT_CTX, VCL_STRING name)
372
{
373
        struct vcl *vcl;
374
        struct vcldir *vdir;
375 2
        VCL_BACKEND dd, d = NULL;
376
        struct vdire *vdire;
377
378 2
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
379 2
        AN(name);
380
381 2
        assert(ctx->method & VCL_MET_TASK_H);
382 2
        ASSERT_CLI();
383
384 2
        vcl = ctx->vcl;
385 2
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
386
387 2
        vdire = vcl->vdire;
388
389 2
        vdire_start_iter(vdire);
390 2
        VTAILQ_FOREACH(vdir, &vdire->directors, directors_list) {
391 2
                dd = vdir->dir;
392 2
                if (strcmp(dd->vcl_name, name))
393 0
                        continue;
394 2
                d = dd;
395 2
                break;
396
        }
397 2
        vdire_end_iter(vdire);
398
399 2
        return (d);
400
}
401
402
/*--------------------------------------------------------------------*/
403
404
/*
 * Return the VCL's default director (dereferenced from the
 * compiled configuration).
 */
VCL_BACKEND
405 7524
VCL_DefaultDirector(const struct vcl *vcl)
406
{
407
408 7524
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
409 7524
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
410 7524
        return (*vcl->conf->default_director);
411
}
412
413
/*
 * Return the name the VCL was loaded under.
 */
const char *
414 19341
VCL_Name(const struct vcl *vcl)
415
{
416
417 19341
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
418 19341
        return (vcl->loaded_name);
419
}
420
421
/*
 * Return the VCL's default backend probe from the compiled
 * configuration.
 */
VCL_PROBE
422 2624
VCL_DefaultProbe(const struct vcl *vcl)
423
{
424
425 2624
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
426 2624
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);
427 2624
        return (vcl->conf->default_probe);
428
}
429
430
/*--------------------------------------------------------------------*/
431
432
/*
 * Sanity-check a VRT context: either a message VSB (during
 * load/discard) or a VSL log must be present, VCL and workspace
 * must be valid, and all optional request/backend members must
 * be either NULL or well-formed.
 */
void
433 682
VRT_CTX_Assert(VRT_CTX)
434
{
435 682
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
436
437 682
        if (ctx->msg != NULL)
438 44
                CHECK_OBJ(ctx->msg, VSB_MAGIC);
439
        else
440 638
                AN(ctx->vsl);
441 682
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
442 682
        WS_Assert(ctx->ws);
443
444 682
        CHECK_OBJ_ORNULL(ctx->sp, SESS_MAGIC);
445
446 682
        CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
447 682
        CHECK_OBJ_ORNULL(ctx->http_req, HTTP_MAGIC);
448 682
        CHECK_OBJ_ORNULL(ctx->http_req_top, HTTP_MAGIC);
449 682
        CHECK_OBJ_ORNULL(ctx->http_resp, HTTP_MAGIC);
450
451 682
        CHECK_OBJ_ORNULL(ctx->bo, BUSYOBJ_MAGIC);
452 682
        CHECK_OBJ_ORNULL(ctx->http_bereq, HTTP_MAGIC);
453 682
        CHECK_OBJ_ORNULL(ctx->http_beresp, HTTP_MAGIC);
454
}
455
456
/*
 * Take a busy reference (VCL_Ref) on the VCL of ctx, preventing
 * it from going cold, and record it on the VCL's ref_list with a
 * copy of the description.  Undone by VRT_VCL_Allow_Cold().
 */
struct vclref *
457 2
VRT_VCL_Prevent_Cold(VRT_CTX, const char *desc)
458
{
459
        struct vclref* ref;
460
461 2
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
462 2
        CHECK_OBJ_NOTNULL(ctx->vcl, VCL_MAGIC);
463
464 2
        ALLOC_OBJ(ref, VCLREF_MAGIC);
465 2
        AN(ref);
466 2
        ref->vcl = ctx->vcl;
467 2
        REPLACE(ref->desc, desc);
468
469 2
        VCL_Ref(ctx->vcl);
470
471 2
        Lck_Lock(&vcl_mtx);
472 2
        VTAILQ_INSERT_TAIL(&ctx->vcl->ref_list, ref, list);
473 2
        Lck_Unlock(&vcl_mtx);
474
475 2
        return (ref);
476
}
477
478
/*
 * Release a reference taken with VRT_VCL_Prevent_Cold():
 * unlink it from the ref_list, drop the busy reference and free
 * the vclref.  *refp is cleared.
 */
void
479 2
VRT_VCL_Allow_Cold(struct vclref **refp)
480
{
481
        struct vcl *vcl;
482
        struct vclref *ref;
483
484 2
        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
485 2
        vcl = ref->vcl;
486 2
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
487
488 2
        Lck_Lock(&vcl_mtx);
489 2
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
490 2
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
491 2
        Lck_Unlock(&vcl_mtx);
492
493 2
        VCL_Rel(&vcl);
494
495 2
        REPLACE(ref->desc, NULL);
496 2
        FREE_OBJ(ref);
497 2
}
498
499
/*
 * Take a discard-prevention reference (nrefs) on the VCL of ctx,
 * recorded on the ref_list with a mandatory, non-empty
 * description.  Only legal from the CLI thread on a warm VCL
 * (both asserted).  Undone by VRT_VCL_Allow_Discard().
 */
struct vclref *
500 188
VRT_VCL_Prevent_Discard(VRT_CTX, const char *desc)
501
{
502
        struct vcl *vcl;
503
        struct vclref* ref;
504
505 188
        ASSERT_CLI();
506 188
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
507 188
        AN(desc);
508 188
        AN(*desc);
509
510 188
        vcl = ctx->vcl;
511 188
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
512 188
        assert(vcl->temp->is_warm);
513
514 188
        ALLOC_OBJ(ref, VCLREF_MAGIC);
515 188
        AN(ref);
516 188
        ref->vcl = vcl;
517 188
        REPLACE(ref->desc, desc);
518
519 188
        Lck_Lock(&vcl_mtx);
520 188
        VTAILQ_INSERT_TAIL(&vcl->ref_list, ref, list);
521 188
        vcl->nrefs++;
522 188
        Lck_Unlock(&vcl_mtx);
523
524 188
        return (ref);
525
}
526
527
/*
 * Release a reference taken with VRT_VCL_Prevent_Discard():
 * unlink it from the ref_list, decrement nrefs and free the
 * vclref.  *refp is cleared.
 */
void
528 27
VRT_VCL_Allow_Discard(struct vclref **refp)
529
{
530
        struct vcl *vcl;
531
        struct vclref *ref;
532
533 27
        TAKE_OBJ_NOTNULL(ref, refp, VCLREF_MAGIC);
534 27
        vcl = ref->vcl;
535 27
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);
536
537
        /* NB: A VCL may be released by a VMOD at any time, but it must happen
538
         * after a warmup and before the end of a cooldown. The release may or
539
         * may not happen while the same thread holds the temperature lock, so
540
         * instead we check that all references are gone in VCL_Nuke.
541
         */
542
543 27
        Lck_Lock(&vcl_mtx);
544 27
        assert(!VTAILQ_EMPTY(&vcl->ref_list));
545 27
        VTAILQ_REMOVE(&vcl->ref_list, ref, list);
546 27
        vcl->nrefs--;
547
        /* No garbage collection here, for the same reasons as in VCL_Rel. */
548 27
        Lck_Unlock(&vcl_mtx);
549
550 27
        REPLACE(ref->desc, NULL);
551 27
        FREE_OBJ(ref);
552 27
}
553
554
/*--------------------------------------------------------------------
555
 */
556
557
/*
 * Check whether the top request's client connection is still
 * usable before running a VCL method.  Returns 0 when the call
 * may proceed; returns -1 (after forcing handling to FAIL, and
 * on first detection logging a "Reset" timestamp and bumping
 * the req_reset counter) when the request has been reset.
 * The transport poll only runs with feature vcl_req_reset set
 * and a transport that implements poll.
 */
static int
558 29699
req_poll(struct worker *wrk, struct req *req)
559
{
560
        struct req *top;
561
562
        /* NB: Since a fail transition leads to vcl_synth, the request may be
563
         * short-circuited twice.
564
         */
565 29699
        if (req->req_reset) {
566 54
                wrk->vpi->handling = VCL_RET_FAIL;
567 54
                return (-1);
568
        }
569
570 29645
        top = req->top->topreq;
571 29645
        CHECK_OBJ_NOTNULL(top, REQ_MAGIC);
572 29645
        CHECK_OBJ_NOTNULL(top->transport, TRANSPORT_MAGIC);
573
574 29645
        if (!FEATURE(FEATURE_VCL_REQ_RESET))
575 40
                return (0);
576 29605
        if (top->transport->poll == NULL)
577 28668
                return (0);
578 937
        if (top->transport->poll(top) >= 0)
579 883
                return (0);
580
581 54
        VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
582 54
        wrk->stats->req_reset++;
583 54
        wrk->vpi->handling = VCL_RET_FAIL;
584 54
        req->req_reset = 1;
585 54
        return (-1);
586 29699
}
587
588
/*--------------------------------------------------------------------
589
 * Method functions to call into VCL programs.
590
 *
591
 * Either the request or busyobject must be specified, but not both.
592
 * The workspace argument is where random VCL stuff gets space from.
593
 */
594
595
/*
 * Invoke one compiled VCL method function.
 *
 * The VRT context is built from bo and/or req (both are only
 * legal together for vcl_pipe, asserted); a reset request
 * short-circuits the call via req_poll().  When track_call > 0 a
 * call-tracking bitmap of that many bits is allocated on
 * wrk->aws and released again afterwards.  VCL_call/VCL_return
 * are logged around the call, a FAIL handling bumps vcl_fail,
 * and the workspace snapshot assert enforces that the VCL code
 * left wrk->aws balanced.
 */
static void
596 39198
vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
597
    void *specific, unsigned method, vcl_func_f *func, unsigned track_call)
598
{
599 39198
        uintptr_t rws = 0, aws;
600
        struct vrt_ctx ctx;
601
        struct vbitmap *vbm;
602
        void *p;
603
        size_t sz;
604
605 39198
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
606 39198
        INIT_OBJ(&ctx, VRT_CTX_MAGIC);
607 39198
        if (bo != NULL) {
608 9578
                CHECK_OBJ(bo, BUSYOBJ_MAGIC);
609 9578
                CHECK_OBJ_NOTNULL(bo->vcl, VCL_MAGIC);
610 9578
                VCL_Bo2Ctx(&ctx, bo);
611 9578
        }
612 39198
        if (req != NULL) {
613 29699
                if (bo != NULL)
614 58
                        assert(method == VCL_MET_PIPE);
615 29699
                CHECK_OBJ(req, REQ_MAGIC);
616 29699
                CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
617 29699
                CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
618 29699
                CHECK_OBJ_NOTNULL(req->top, REQTOP_MAGIC);
619 29699
                if (req_poll(wrk, req))
620 108
                        return;
621 29591
                VCL_Req2Ctx(&ctx, req);
622 29591
        }
623 39090
        assert(ctx.now != 0);
624 39090
        ctx.specific = specific;
625 39090
        ctx.method = method;
626 39090
        if (track_call > 0) {
627 81
                rws = WS_Snapshot(wrk->aws);
628 81
                sz = VBITMAP_SZ(track_call);
629 81
                p = WS_Alloc(wrk->aws, sz);
630
                // No use to attempt graceful failure, all VCL calls will fail
631 81
                AN(p);
632 81
                vbm = vbit_init(p, sz);
633 81
                ctx.called = vbm;
634 81
        }
635 39090
        aws = WS_Snapshot(wrk->aws);
636 39090
        wrk->cur_method = method;
637 39090
        wrk->seen_methods |= method;
638 39090
        AN(ctx.vsl);
639 39090
        VSLbs(ctx.vsl, SLT_VCL_call, TOSTRAND(VCL_Method_Name(method)));
640 39090
        func(&ctx, VSUB_STATIC, NULL);
641 78180
        VSLbs(ctx.vsl, SLT_VCL_return,
642 39090
            TOSTRAND(VCL_Return_Name(wrk->vpi->handling)));
643 39090
        wrk->cur_method |= 1;           // Magic marker
644 39090
        if (wrk->vpi->handling == VCL_RET_FAIL)
645 290
                wrk->stats->vcl_fail++;
646
647
        /*
648
         * VCL/Vmods are not allowed to make permanent allocations from
649
         * wrk->aws, but they can reserve and return from it.
650
         */
651 39090
        assert(aws == WS_Snapshot(wrk->aws));
652 39090
        if (rws != 0)
653 82
                WS_Reset(wrk->aws, rws);
654 39198
}
655
656
/*
 * Instantiate one VCL_<method>_method() wrapper per VCL method
 * via tbl/vcl_returns.h.  Each wrapper validates its arguments,
 * dispatches through vcl_call_method() and asserts that the
 * resulting handling is in the method's legal-return bitmap.
 */
#define VCL_MET_MAC(func, upper, typ, bitmap)                           \
657
void                                                                    \
658
VCL_##func##_method(struct vcl *vcl, struct worker *wrk,                \
659
     struct req *req, struct busyobj *bo, void *specific)               \
660
{                                                                       \
661
                                                                        \
662
        CHECK_OBJ_NOTNULL(vcl, VCL_MAGIC);                              \
663
        CHECK_OBJ_NOTNULL(vcl->conf, VCL_CONF_MAGIC);                   \
664
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);                           \
665
        vcl_call_method(wrk, req, bo, specific,                         \
666
            VCL_MET_ ## upper, vcl->conf->func##_func, vcl->conf->nsub);\
667
        AN((1U << wrk->vpi->handling) & bitmap);                        \
668
}
669
670
#include "tbl/vcl_returns.h"
671
672
/*--------------------------------------------------------------------
673
 */
674
675
/*
 * Dry-run a dynamic sub call: ask the sub's compiled function
 * (VSUB_CHECK mode) whether calling it from the current context
 * would be legal.  Returns NULL when the call is allowed,
 * otherwise an error message allocated on ctx->ws (with a static
 * fallback string if the workspace overflows).
 */
VCL_STRING
676 2
VRT_check_call(VRT_CTX, VCL_SUB sub)
677
{
678 2
        VCL_STRING err = NULL;
679
        enum vcl_func_fail_e fail;
680
681 2
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
682 2
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);
683
684 2
        AN(sub->func);
685 2
        sub->func(ctx, VSUB_CHECK, &fail);
686
687 2
        switch (fail) {
688
        case VSUB_E_OK:
689 0
                break;
690
        case VSUB_E_METHOD:
691 4
                err = WS_Printf(ctx->ws, "Dynamic call to \"sub %s{}\""
692 2
                    " not allowed from here", sub->name);
693 2
                if (err == NULL)
694 0
                        err = "Dynamic call not allowed and workspace overflow";
695 2
                break;
696
        case VSUB_E_RECURSE:
697 0
                err = WS_Printf(ctx->ws, "Recursive dynamic call to"
698 0
                    " \"sub %s{}\"", sub->name);
699 0
                if (err == NULL)
700 0
                        err = "Recursive dynamic call and workspace overflow";
701 0
                break;
702
        default:
703 0
                INCOMPL();
704 0
        }
705
706 2
        return (err);
707
}
708
709
/*
 * Dynamically call a VCL sub (VSUB_DYNAMIC mode).  Only legal
 * while no handling has been decided yet (asserted via
 * VRT_handled()).
 */
VCL_VOID
710 28
VRT_call(VRT_CTX, VCL_SUB sub)
711
{
712
713 28
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
714 28
        CHECK_OBJ_NOTNULL(sub, VCL_SUB_MAGIC);
715
716 28
        AZ(VRT_handled(ctx));
717 28
        AN(sub->func);
718 28
        sub->func(ctx, VSUB_DYNAMIC, NULL);
719 28
}