/* varnish-cache/bin/varnishd/cache/cache_vrt_priv.c */

/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2021 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Runtime support for compiled VCL programs: private variables
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "vcl.h"
#include "vcc_interface.h"

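/*
 * Task-local PRIV state: each vrt_priv node wraps one struct vmod_priv,
 * is keyed by the caller-supplied vmod_id, and sits in a red-black tree
 * rooted in the task's struct vrt_privs.  Nodes are carved out of the
 * task workspace when it has room, otherwise they go on the heap.
 */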
enum vrt_priv_storage_e {
	VRT_PRIV_ST_WS = 1,
	VRT_PRIV_ST_HEAP
};

struct vrt_priv {
	unsigned			magic;
#define VRT_PRIV_MAGIC			0x24157a52
	enum vrt_priv_storage_e		storage;
	VRBT_ENTRY(vrt_priv)		entry;
	struct vmod_priv		priv[1];
	uintptr_t			vmod_id;
};

struct vrt_privs cli_task_privs[1];

static inline int vrt_priv_dyncmp(const struct vrt_priv *,
    const struct vrt_priv *);

VRBT_GENERATE_INSERT_COLOR(vrt_privs, vrt_priv, entry, static)
VRBT_GENERATE_FIND(vrt_privs, vrt_priv, entry, vrt_priv_dyncmp, static)
VRBT_GENERATE_INSERT_FINISH(vrt_privs, vrt_priv, entry, static)
VRBT_GENERATE_INSERT(vrt_privs, vrt_priv, entry, vrt_priv_dyncmp, static)
VRBT_GENERATE_MINMAX(vrt_privs, vrt_priv, entry, static)
VRBT_GENERATE_NEXT(vrt_privs, vrt_priv, entry, static)

/*--------------------------------------------------------------------
 */

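/*
 * pan_privs(): dump a privs tree into the panic buffer.  Each node is
 * vetted with PAN_dump_oneline() before it is dereferenced, because the
 * tree may be in an arbitrary state when a panic strikes.
 */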
void
pan_privs(struct vsb *vsb, const struct vrt_privs *privs)
{
	struct vrt_priv *vp;
	const struct vmod_priv *p;
	const struct vmod_priv_methods *m;

	if (privs == NULL) {
		VSB_cat(vsb, "privs = NULL\n");
		return;
	}
	VSB_printf(vsb, "privs = %p {\n", privs);
	VSB_indent(vsb, 2);
	VRBT_FOREACH(vp, vrt_privs, privs) {
		if (PAN_dump_oneline(vsb, vp, VRT_PRIV_MAGIC, "priv"))
			continue;
		p = vp->priv;
		//lint -e{774}
		if (p == NULL) {
			// should never happen
			VSB_printf(vsb, "NULL vmod %jx},\n",
			    (uintmax_t)vp->vmod_id);
			continue;
		}
		m = p->methods;
		VSB_printf(vsb,
		    "{p %p l %ld m %p t \"%s\"} vmod %jx},\n",
		    p->priv, p->len, m,
		    m != NULL ? m->type : "",
		    (uintmax_t)vp->vmod_id
		);
	}
	VSB_indent(vsb, -2);
	VSB_cat(vsb, "},\n");
}

/*--------------------------------------------------------------------
 */

static void
VRTPRIV_init(struct vrt_privs *privs)
{

	VRBT_INIT(privs);
}

static inline int
vrt_priv_dyncmp(const struct vrt_priv *vp1, const struct vrt_priv *vp2)
{
	if (vp1->vmod_id < vp2->vmod_id)
		return (-1);
	if (vp1->vmod_id > vp2->vmod_id)
		return (1);
	return (0);
}

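/*
 * vrt_priv_dynamic_get(): look up the vmod_priv for vmod_id in an
 * existing privs tree.  Read-only; returns NULL if no entry has been
 * created for that id yet.
 */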
static struct vmod_priv *
vrt_priv_dynamic_get(const struct vrt_privs *privs, uintptr_t vmod_id)
{
	struct vrt_priv *vp;

	const struct vrt_priv needle = {.vmod_id = vmod_id};

	vp = VRBT_FIND(vrt_privs, privs, &needle);
	if (vp == NULL)
		return (NULL);

	CHECK_OBJ(vp, VRT_PRIV_MAGIC);
	assert(vp->vmod_id == vmod_id);
	return (vp->priv);
}

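/*
 * vrt_priv_dynamic(): get-or-create the vmod_priv for vmod_id.  The new
 * node is placed on the workspace when WS_ReserveSize() succeeds,
 * otherwise it is malloc()ed.  If the insert finds an existing node for
 * the same id, the reservation is released (or the heap node freed) and
 * the existing priv is returned instead.
 */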
static struct vmod_priv *
vrt_priv_dynamic(struct ws *ws, struct vrt_privs *privs, uintptr_t vmod_id)
{
	//lint --e{593} vp allocated, vp->priv returned
	struct vrt_priv *vp, *ovp;
	enum vrt_priv_storage_e storage;

	AN(vmod_id);

	if (LIKELY(WS_ReserveSize(ws, sizeof *vp) != 0)) {
		vp = WS_Reservation(ws);
		storage = VRT_PRIV_ST_WS;
	}
	else {
		vp = malloc(sizeof *vp);
		storage = VRT_PRIV_ST_HEAP;
	}
	AN(vp);

	INIT_OBJ(vp, VRT_PRIV_MAGIC);
	vp->storage = storage;
	vp->vmod_id = vmod_id;
	ovp = VRBT_INSERT(vrt_privs, privs, vp);
	if (ovp == NULL) {
		if (storage == VRT_PRIV_ST_WS)
			WS_Release(ws, sizeof *vp);
		return (vp->priv);
	}
	if (storage == VRT_PRIV_ST_WS)
		WS_Release(ws, 0);
	else if (storage == VRT_PRIV_ST_HEAP)
		free(vp);
	else
		WRONG("priv storage");
	return (ovp->priv);
}

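/*
 * vrt_priv_task_context(): pick the privs tree for the current task:
 * the client request if there is one, otherwise the fetch task's
 * busyobj, and failing both the CLI task (ASSERT_CLI() guards that
 * path).
 */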
static struct vrt_privs *
vrt_priv_task_context(VRT_CTX)
{

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	/* In pipe mode, both req and bo are set. We use req */

	assert(ctx->req == NULL || ctx->bo == NULL ||
	    ctx->method == VCL_MET_PIPE || ctx->method == 0);

	if (ctx->req) {
		CHECK_OBJ(ctx->req, REQ_MAGIC);
		return (ctx->req->privs);
	}
	if (ctx->bo) {
		CHECK_OBJ(ctx->bo, BUSYOBJ_MAGIC);
		return (ctx->bo->privs);
	}
	ASSERT_CLI();
	return (cli_task_privs);
}

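/*
 * VRT_priv_task_get() and VRT_priv_task() are the PRIV_TASK entry
 * points: the former only looks up an existing priv, the latter creates
 * one on demand using the context's workspace.  The vmod_id is an
 * opaque pointer that merely has to be unique to the caller; typically
 * it is supplied by the VCC-generated glue for PRIV_TASK arguments.
 */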
struct vmod_priv *
VRT_priv_task_get(VRT_CTX, const void *vmod_id)
{
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	return (vrt_priv_dynamic_get(
	    vrt_priv_task_context(ctx),
	    (uintptr_t)vmod_id));
}

struct vmod_priv *
VRT_priv_task(VRT_CTX, const void *vmod_id)
{

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	return (vrt_priv_dynamic(
	    ctx->ws,
	    vrt_priv_task_context(ctx),
	    (uintptr_t)vmod_id));
}

/*
 * XXX #3498 on VRT_fail(): Would be better to move the PRIV_TOP check to VCC
 *
 * This will fail in the preamble of any VCL SUB containing a call to a vmod
 * function with a PRIV_TOP argument, which might not exactly be pola
 */

#define VRT_PRIV_TOP_PREP(ctx, req, sp, top) do {		\
	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);			\
	req = (ctx)->req;					\
	if (req == NULL) {					\
		VRT_fail(ctx, "PRIV_TOP is only accessible "	\
		    "in client VCL context");			\
		return (NULL);					\
	}							\
	CHECK_OBJ(req, REQ_MAGIC);				\
	sp = (ctx)->sp;						\
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);			\
	top = (req)->top;					\
	CHECK_OBJ_NOTNULL(top, REQTOP_MAGIC);			\
	req = (top)->topreq;					\
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);			\
} while(0)

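/*
 * VRT_priv_top_get() and VRT_priv_top() are the PRIV_TOP counterparts:
 * the privs live in the topmost request of an ESI tree and can be
 * reached from several subrequests, so access is serialized with the
 * session mutex.
 */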
struct vmod_priv *
VRT_priv_top_get(VRT_CTX, const void *vmod_id)
{
	struct req *req;
	struct sess *sp;
	struct reqtop *top;
	struct vmod_priv *priv;

	VRT_PRIV_TOP_PREP(ctx, req, sp, top);

	Lck_Lock(&sp->mtx);
	priv = vrt_priv_dynamic_get(top->privs, (uintptr_t)vmod_id);
	Lck_Unlock(&sp->mtx);
	return (priv);
}

struct vmod_priv *
VRT_priv_top(VRT_CTX, const void *vmod_id)
{
	struct req *req;
	struct sess *sp;
	struct reqtop *top;
	struct vmod_priv *priv;

	VRT_PRIV_TOP_PREP(ctx, req, sp, top);

	Lck_Lock(&sp->mtx);
	priv = vrt_priv_dynamic(req->ws, top->privs, (uintptr_t)vmod_id);
	Lck_Unlock(&sp->mtx);
	return (priv);
}

/*--------------------------------------------------------------------
 */

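/*
 * VRT_priv_fini(): invoke the registered fini callback for a single
 * priv, provided the vmod registered one and the priv holds a pointer.
 */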
void
VRT_priv_fini(VRT_CTX, const struct vmod_priv *p)
{
	const struct vmod_priv_methods *m;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);

	m = p->methods;
	if (m == NULL)
		return;

	CHECK_OBJ(m, VMOD_PRIV_METHODS_MAGIC);
	if (p->priv == NULL || m->fini == NULL)
		return;

	// XXX remove me after soak in
	VRT_CTX_Assert(ctx);

	m->fini(ctx, p->priv);
	assert(ctx->vpi->handling == 0 || ctx->vpi->handling == VCL_RET_FAIL);
}

/*--------------------------------------------------------------------*/

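/*
 * VCL_TaskEnter() and VCL_TaskLeave() bracket a VCL task: the former
 * initializes an empty privs tree, the latter runs the fini callbacks
 * for every entry, frees heap-allocated nodes (workspace nodes go away
 * with the workspace) and clears the tree head.
 */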
void
VCL_TaskEnter(struct vrt_privs *privs)
{

	VRTPRIV_init(privs);
}

void
VCL_TaskLeave(VRT_CTX, struct vrt_privs *privs)
{
	struct vrt_priv *vp, *vp1;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	AN(ctx->vpi);
	assert(ctx->vpi->handling == 0 || ctx->vpi->handling == VCL_RET_FAIL);

	/*
	 * NB: We don't bother removing entries as we finish them because it's
	 * a costly operation. Instead we safely walk the whole tree and clear
	 * the head at the very end.
	 */
	VRBT_FOREACH_SAFE(vp, vrt_privs, privs, vp1) {
		CHECK_OBJ(vp, VRT_PRIV_MAGIC);
		VRT_priv_fini(ctx, vp->priv);
		if (vp->storage == VRT_PRIV_ST_HEAP)
			free(vp);
	}
	ZERO_OBJ(privs, sizeof *privs);
}