| | varnish-cache/bin/varnishd/cache/cache_esi_deliver.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2011 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* VED - Varnish Esi Delivery |
30 |
|
*/ |
31 |
|
|
32 |
|
#include "config.h" |
33 |
|
|
34 |
|
#include "cache_varnishd.h" |
35 |
|
|
36 |
|
#include <stdlib.h> |
37 |
|
|
38 |
|
#include "cache_transport.h" |
39 |
|
#include "cache_filter.h" |
40 |
|
#include "cache_vgz.h" |
41 |
|
|
42 |
|
#include "vct.h" |
43 |
|
#include "vtim.h" |
44 |
|
#include "cache_esi.h" |
45 |
|
#include "vend.h" |
46 |
|
#include "vgz.h" |
47 |
|
|
48 |
|
static vtr_deliver_f ved_deliver; |
49 |
|
static vtr_reembark_f ved_reembark; |
50 |
|
|
51 |
|
/*
 * Canonical 10-byte gzip member header (RFC 1952) emitted in front of a
 * fabricated gzip stream: magic 0x1f 0x8b, CM=8 (deflate), FLG=0,
 * MTIME=0, XFL=0x02, OS=0x03 (Unix).
 */
static const uint8_t gzip_hdr[] = {
	0x1f, 0x8b, 0x08,
	0x00, 0x00, 0x00, 0x00,
	0x00,
	0x02, 0x03
};
57 |
|
|
58 |
|
/*
 * Per-delivery ESI context, created in ved_vdp_esi_init() and handed to
 * included sub-requests via req->transport_priv.
 */
struct ecx {
	unsigned	magic;
#define ECX_MAGIC	0x0b0f9163
	const uint8_t	*p;		/* cursor into the OA_ESIDATA byte-code */
	const uint8_t	*e;		/* end of the OA_ESIDATA byte-code */
	int		state;		/* ved_vdp_esi_bytes() state machine */
	ssize_t		l;		/* bytes left in current verbatim/skip chunk */
	int		isgzip;		/* object carries gzipped ESI (VEC_GZ seen) */
	int		woken;		/* set by ved_reembark() under sp->mtx */
	int		abrt;		/* abort topreq delivery on include error */

	struct req	*preq;		/* parent (including) request */
	struct ecx	*pecx;		/* parent ecx when nested in a gzip parent */
	ssize_t		l_crc;		/* uncompressed length accumulated for gzip trailer */
	uint32_t	crc;		/* running CRC32 for gzip trailer */
};
74 |
|
|
75 |
|
static int v_matchproto_(vtr_minimal_response_f) |
76 |
0 |
ved_minimal_response(struct req *req, uint16_t status) |
77 |
|
{ |
78 |
0 |
(void)req; |
79 |
0 |
(void)status; |
80 |
0 |
WRONG("esi:includes should not try minimal responses"); |
81 |
0 |
} |
82 |
|
|
83 |
|
/*
 * Pseudo-transport for ESI sub-requests: delivery is intercepted by
 * ved_deliver() (which feeds the parent's VDP chain) and waiting-list
 * wakeups go through ved_reembark().
 */
static const struct transport VED_transport = {
	.magic =		TRANSPORT_MAGIC,
	.name =			"ESI_INCLUDE",
	.deliver =		ved_deliver,
	.reembark =		ved_reembark,
	.minimal_response =	ved_minimal_response,
};
90 |
|
|
91 |
|
/*--------------------------------------------------------------------*/ |
92 |
|
|
93 |
|
static void v_matchproto_(vtr_reembark_f) |
94 |
6 |
ved_reembark(struct worker *wrk, struct req *req) |
95 |
|
{ |
96 |
|
struct ecx *ecx; |
97 |
|
|
98 |
6 |
(void)wrk; |
99 |
6 |
CHECK_OBJ_NOTNULL(req, REQ_MAGIC); |
100 |
6 |
CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC); |
101 |
6 |
Lck_Lock(&req->sp->mtx); |
102 |
6 |
ecx->woken = 1; |
103 |
6 |
PTOK(pthread_cond_signal(&ecx->preq->wrk->cond)); |
104 |
6 |
Lck_Unlock(&req->sp->mtx); |
105 |
6 |
} |
106 |
|
|
107 |
|
/*--------------------------------------------------------------------*/ |
108 |
|
|
109 |
|
/*
 * Run one esi:include as a synchronous sub-request.
 *
 * Builds a child request from the parent's http0, forces a bodyless GET,
 * strips conditionals/Range, sets Accept-Encoding according to whether
 * the parent stream is gzipped, and then drives the request FSM to
 * completion on this thread, sleeping on the worker condvar whenever the
 * child disembarks (waiting list); ved_reembark() wakes us up.
 *
 * On ESI depth overflow the include is skipped; if onerror=abort was
 * requested (ecx->abrt), delivery of the topreq is flagged as failed.
 */
static void
ved_include(struct req *preq, const char *src, const char *host,
    struct ecx *ecx)
{
	struct worker *wrk;
	struct sess *sp;
	struct req *req;
	enum req_fsm_nxt s;

	CHECK_OBJ_NOTNULL(preq, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(preq->top, REQTOP_MAGIC);
	sp = preq->sp;
	CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
	CHECK_OBJ_NOTNULL(ecx, ECX_MAGIC);
	wrk = preq->wrk;

	if (preq->esi_level >= cache_param->max_esi_depth) {
		VSLb(preq->vsl, SLT_VCL_Error,
		    "ESI depth limit reached (param max_esi_depth = %u)",
		    cache_param->max_esi_depth);
		if (ecx->abrt)
			preq->top->topreq->vdc->retval = -1;
		return;
	}

	req = Req_New(sp, preq);
	AN(req);
	THR_SetRequest(req);
	assert(IS_NO_VXID(req->vsl->wid));
	req->vsl->wid = VXID_Get(wrk, VSL_CLIENTMARKER);

	wrk->stats->esi_req++;
	req->esi_level = preq->esi_level + 1;

	/* Cross-link parent and child in the log */
	VSLb(req->vsl, SLT_Begin, "req %ju esi %u",
	    (uintmax_t)VXID(preq->vsl->wid), req->esi_level);
	VSLb(preq->vsl, SLT_Link, "req %ju esi %u",
	    (uintmax_t)VXID(req->vsl->wid), req->esi_level);

	VSLb_ts_req(req, "Start", W_TIM_real(wrk));

	HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod);
	HTTP_Dup(req->http, preq->http0);

	http_SetH(req->http, HTTP_HDR_URL, src);
	/* Optional Host: override from the <esi:include> attributes */
	if (host != NULL && *host != '\0') {
		http_Unset(req->http, H_Host);
		http_SetHeader(req->http, host);
	}

	http_ForceField(req->http, HTTP_HDR_METHOD, "GET");
	http_ForceField(req->http, HTTP_HDR_PROTO, "HTTP/1.1");

	/* Don't allow conditionals, we can't use a 304 */
	http_Unset(req->http, H_If_Modified_Since);
	http_Unset(req->http, H_If_None_Match);

	/* Don't allow Range */
	http_Unset(req->http, H_Range);

	/* Set Accept-Encoding according to what we want */
	if (ecx->isgzip)
		http_ForceHeader(req->http, H_Accept_Encoding, "gzip");
	else
		http_Unset(req->http, H_Accept_Encoding);

	/* Client content already taken care of */
	http_Unset(req->http, H_Content_Length);
	http_Unset(req->http, H_Transfer_Encoding);
	req->req_body_status = BS_NONE;

	AZ(req->vcl);
	assert(req->top == preq->top);
	/* Includes run under the top request's original VCL if one is pinned */
	if (req->top->vcl0)
		req->vcl = req->top->vcl0;
	else
		req->vcl = preq->vcl;
	VCL_Ref(req->vcl);

	assert(req->req_step == R_STP_TRANSPORT);
	req->t_req = preq->t_req;

	req->transport = &VED_transport;
	req->transport_priv = ecx;

	VCL_TaskEnter(req->privs);

	/*
	 * Drive the child FSM on this thread; on REQ_FSM_DISEMBARK the
	 * child sits on a waiting list and we sleep until ved_reembark()
	 * signals us (woken guards against a lost wakeup).
	 */
	while (1) {
		CNT_Embark(wrk, req);
		ecx->woken = 0;
		s = CNT_Request(req);
		if (s == REQ_FSM_DONE)
			break;
		DSL(DBG_WAITINGLIST, req->vsl->wid,
		    "waiting for ESI (%d)", (int)s);
		assert(s == REQ_FSM_DISEMBARK);
		Lck_Lock(&sp->mtx);
		if (!ecx->woken)
			(void)Lck_CondWait(&ecx->preq->wrk->cond, &sp->mtx);
		Lck_Unlock(&sp->mtx);
		AZ(req->wrk);
	}

	VCL_Rel(&req->vcl);

	req->wrk = NULL;
	THR_SetRequest(preq);

	Req_Cleanup(sp, wrk, req);
	Req_Release(req);
}
220 |
|
|
221 |
|
/*--------------------------------------------------------------------*/ |
222 |
|
|
223 |
|
//#define Debug(fmt, ...) printf(fmt, __VA_ARGS__)
/* Compiled-out trace macro; switch to the printf variant above to debug. */
#define Debug(fmt, ...) /**/
225 |
|
|
226 |
|
/*
 * Decode a length field from the ESI byte-code stream and advance *pp
 * past it.  The low nibble of the lead byte selects the width of the
 * big-endian length that follows (1, 2 or 8 bytes).  Corrupt byte-code
 * is a panic (WRONG), so a negative return cannot actually happen.
 */
static ssize_t
ved_decode_len(struct vsl_log *vsl, const uint8_t **pp)
{
	const uint8_t *p;
	ssize_t l;

	p = *pp;
	switch (*p & 15) {
	case 1:
		l = p[1];
		p += 2;
		break;
	case 2:
		l = vbe16dec(p + 1);
		p += 3;
		break;
	case 8:
		l = vbe64dec(p + 1);
		p += 9;
		break;
	default:
		VSLb(vsl, SLT_Error,
		    "ESI-corruption: Illegal Length %d %d\n", *p, (*p & 15));
		WRONG("ESI-codes: illegal length");
	}
	*pp = p;
	assert(l > 0);
	return (l);
}
255 |
|
|
256 |
|
/*--------------------------------------------------------------------- |
257 |
|
*/ |
258 |
|
|
259 |
|
/*
 * Set up the ESI delivery processor.
 *
 * Declines (returns 1) when the object carries no OA_ESIDATA or when
 * there is no client request (ESI is client-side only).  Otherwise
 * allocates the ecx, weakens the ETag (content will be rewritten),
 * marks the content length unknown, and - for nested includes - links
 * to the parent's ecx so CRC accounting can be delegated upward when
 * the parent stream is gzipped.
 */
static int v_matchproto_(vdp_init_f)
ved_vdp_esi_init(VRT_CTX, struct vdp_ctx *vdc, void **priv)
{
	struct ecx *ecx;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_ORNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	AN(priv);

	AZ(*priv);
	if (vdc->oc == NULL || !ObjHasAttr(vdc->wrk, vdc->oc, OA_ESIDATA))
		return (1);

	if (ctx->req == NULL) {
		VSLb(vdc->vsl, SLT_Error,
		    "esi can only be used on the client side");
		return (1);
	}

	ALLOC_OBJ(ecx, ECX_MAGIC);
	AN(ecx);
	/* ved_vdp_esi_bytes() hard-codes the 10-byte header below */
	assert(sizeof gzip_hdr == 10);
	ecx->preq = ctx->req;
	*priv = ecx;
	RFC2616_Weaken_Etag(vdc->hp);

	ctx->req->res_esi = 1;
	/* Expansion makes the final length unknowable up front */
	if (*vdc->clen != 0)
		*vdc->clen = -1;
	if (ctx->req->esi_level > 0) {
		assert(ctx->req->transport == &VED_transport);
		CAST_OBJ_NOTNULL(ecx->pecx, ctx->req->transport_priv, ECX_MAGIC);
		/* Only keep the parent link when it is a gzip parent */
		if (!ecx->pecx->isgzip)
			ecx->pecx = NULL;
	}

	return (0);
}
301 |
|
|
302 |
|
static int v_matchproto_(vdp_fini_f) |
303 |
832 |
ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv) |
304 |
|
{ |
305 |
|
struct ecx *ecx; |
306 |
|
|
307 |
832 |
(void)vdc; |
308 |
832 |
TAKE_OBJ_NOTNULL(ecx, priv, ECX_MAGIC); |
309 |
832 |
FREE_OBJ(ecx); |
310 |
832 |
return (0); |
311 |
|
} |
312 |
|
|
313 |
|
/*
 * ESI delivery state machine.
 *
 * Interprets the OA_ESIDATA byte-code against the object body bytes
 * handed to us by the storage iterator:
 *
 *  state 0 - fetch the byte-code, detect VEC_GZ (gzipped ESI) and, for
 *            a non-nested gzip delivery, emit the gzip header;
 *  state 1 - dispatch on the next byte-code instruction: verbatim
 *            (VEC_V*), skip (VEC_S*) or include (VEC_IA/VEC_IC);
 *  state 2 - end of byte-code: emit the gzip close (empty literal block
 *            with finish bit, CRC32, ISIZE) or fold our CRC into the
 *            gzip parent, then issue VDP_END;
 *  state 3 - pass ecx->l verbatim body bytes downstream;
 *  state 4 - skip ecx->l body bytes;
 *  state 99- drain trailing bytes VEP did not account for.
 *
 * States 3/4 loop because the l bytes may span storage segments.
 */
static int v_matchproto_(vdp_bytes_f)
ved_vdp_esi_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	const uint8_t *q, *r;
	ssize_t l = 0;
	uint32_t icrc = 0;
	uint8_t tailbuf[8 + 5];
	const uint8_t *pp;
	struct ecx *ecx;
	int retval = 0;

	/* The single VDP_END is issued from state 2 instead */
	if (act == VDP_END)
		act = VDP_FLUSH;

	AN(priv);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
	pp = ptr;

	while (1) {
		switch (ecx->state) {
		case 0:
			ecx->p = ObjGetAttr(vdc->wrk, ecx->preq->objcore,
			    OA_ESIDATA, &l);
			AN(ecx->p);
			assert(l > 0);
			ecx->e = ecx->p + l;

			if (*ecx->p == VEC_GZ) {
				/* Nested gzip includes reuse the parent's header */
				if (ecx->pecx == NULL)
					retval = VDP_bytes(vdc, VDP_NULL,
					    gzip_hdr, 10);
				ecx->l_crc = 0;
				ecx->crc = crc32(0L, Z_NULL, 0);
				ecx->isgzip = 1;
				ecx->p++;
			}
			ecx->state = 1;
			break;
		case 1:
			if (ecx->p >= ecx->e) {
				ecx->state = 2;
				break;
			}
			switch (*ecx->p) {
			case VEC_V1:
			case VEC_V2:
			case VEC_V8:
				ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				if (ecx->isgzip) {
					/* Gzip verbatim chunks carry len+CRC */
					assert(*ecx->p == VEC_C1 ||
					    *ecx->p == VEC_C2 ||
					    *ecx->p == VEC_C8);
					l = ved_decode_len(vdc->vsl, &ecx->p);
					if (l < 0)
						return (-1);
					icrc = vbe32dec(ecx->p);
					ecx->p += 4;
					ecx->crc = crc32_combine(
					    ecx->crc, icrc, l);
					ecx->l_crc += l;
				}
				ecx->state = 3;
				break;
			case VEC_S1:
			case VEC_S2:
			case VEC_S8:
				ecx->l = ved_decode_len(vdc->vsl, &ecx->p);
				if (ecx->l < 0)
					return (-1);
				Debug("SKIP1(%d)\n", (int)ecx->l);
				ecx->state = 4;
				break;
			case VEC_IA:
				/* include with onerror=abort semantics */
				ecx->abrt =
				    FEATURE(FEATURE_ESI_INCLUDE_ONERROR);
				/* FALLTHROUGH */
			case VEC_IC:
				/* Two NUL-terminated strings: host, then URL */
				ecx->p++;
				q = (void*)strchr((const char*)ecx->p, '\0');
				AN(q);
				q++;
				r = (void*)strchr((const char*)q, '\0');
				AN(r);
				/* Flush pending output before the sub-request */
				if (VDP_bytes(vdc, VDP_FLUSH, NULL, 0)) {
					ecx->p = ecx->e;
					break;
				}
				Debug("INCL [%s][%s] BEGIN\n", q, ecx->p);
				ved_include(ecx->preq,
				    (const char*)q, (const char*)ecx->p, ecx);
				Debug("INCL [%s][%s] END\n", q, ecx->p);
				ecx->p = r + 1;
				break;
			default:
				VSLb(vdc->vsl, SLT_Error,
				    "ESI corruption line %d 0x%02x [%s]\n",
				    __LINE__, *ecx->p, ecx->p);
				WRONG("ESI-codes: Illegal code");
			}
			break;
		case 2:
			ptr = NULL;
			len = 0;
			if (ecx->isgzip && ecx->pecx == NULL) {
				/*
				 * We are bytealigned here, so simply emit
				 * a gzip literal block with finish bit set.
				 */
				tailbuf[0] = 0x01;
				tailbuf[1] = 0x00;
				tailbuf[2] = 0x00;
				tailbuf[3] = 0xff;
				tailbuf[4] = 0xff;

				/* Emit CRC32 */
				vle32enc(tailbuf + 5, ecx->crc);

				/* MOD(2^32) length */
				vle32enc(tailbuf + 9, ecx->l_crc);

				ptr = tailbuf;
				len = 13;
			} else if (ecx->pecx != NULL) {
				/* Delegate trailer accounting to gzip parent */
				ecx->pecx->crc = crc32_combine(ecx->pecx->crc,
				    ecx->crc, ecx->l_crc);
				ecx->pecx->l_crc += ecx->l_crc;
			}
			retval = VDP_bytes(vdc, VDP_END, ptr, len);
			ecx->state = 99;
			return (retval);
		case 3:
		case 4:
			/*
			 * There is no guarantee that the 'l' bytes are all
			 * in the same storage segment, so loop over storage
			 * until we have processed them all.
			 */
			if (ecx->l <= len) {
				if (ecx->state == 3)
					retval = VDP_bytes(vdc, act,
					    pp, ecx->l);
				len -= ecx->l;
				pp += ecx->l;
				ecx->state = 1;
				break;
			}
			if (ecx->state == 3 && len > 0)
				retval = VDP_bytes(vdc, act, pp, len);
			ecx->l -= len;
			return (retval);
		case 99:
			/*
			 * VEP does not account for the PAD+CRC+LEN
			 * so we can see up to approx 15 bytes here.
			 */
			return (retval);
		default:
			WRONG("FOO");
			break;
		}
		if (retval)
			return (retval);
	}
}
481 |
|
|
482 |
|
/* Public ESI delivery processor, pushed onto the VDP chain by deliver code */
const struct vdp VDP_esi = {
	.name =		"esi",
	.init =		ved_vdp_esi_init,
	.bytes =	ved_vdp_esi_bytes,
	.fini =		ved_vdp_esi_fini,
};
488 |
|
|
489 |
|
/* |
490 |
|
* Account body bytes on req |
491 |
|
* Push bytes to preq |
492 |
|
*/ |
493 |
|
static inline int |
494 |
8826 |
ved_bytes(struct ecx *ecx, enum vdp_action act, |
495 |
|
const void *ptr, ssize_t len) |
496 |
|
{ |
497 |
8826 |
if (act == VDP_END) |
498 |
1479 |
act = VDP_FLUSH; |
499 |
8826 |
return (VDP_bytes(ecx->preq->vdc, act, ptr, len)); |
500 |
|
} |
501 |
|
|
502 |
|
/*--------------------------------------------------------------------- |
503 |
|
* If a gzipped ESI object includes a ungzipped object, we need to make |
504 |
|
* it looked like a gzipped data stream. The official way to do so would |
505 |
|
* be to fire up libvgz and gzip it, but we don't, we fake it. |
506 |
|
* |
507 |
|
* First, we cannot know if it is ungzipped on purpose, the admin may |
508 |
|
* know something we don't. |
509 |
|
* |
510 |
|
* What do you mean "BS ?" |
511 |
|
* |
512 |
|
* All right then... |
513 |
|
* |
514 |
|
* The matter of the fact is that we simply will not fire up a gzip in |
515 |
|
* the output path because it costs too much memory and CPU, so we simply |
516 |
|
* wrap the data in very convenient "gzip copy-blocks" and send it down |
517 |
|
* the stream with a bit more overhead. |
518 |
|
*/ |
519 |
|
|
520 |
|
static int v_matchproto_(vdp_fini_f) |
521 |
72 |
ved_pretend_gzip_fini(struct vdp_ctx *vdc, void **priv) |
522 |
|
{ |
523 |
72 |
(void)vdc; |
524 |
72 |
*priv = NULL; |
525 |
72 |
return (0); |
526 |
|
} |
527 |
|
|
528 |
|
/*
 * Wrap plain (uncompressed) include bytes in deflate "stored" blocks so
 * they can be spliced into the gzipped parent stream, and account them
 * into the parent's CRC32/length for the eventual gzip trailer.
 *
 * Each stored block is a 5-byte header (BTYPE=00, LEN, ~LEN, little
 * endian) followed by up to 65535 literal bytes.
 */
static int v_matchproto_(vdp_bytes_f)
ved_pretend_gzip_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *pv, ssize_t l)
{
	uint8_t buf1[5], buf2[5];
	const uint8_t *p;
	uint16_t lx;
	struct ecx *ecx;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);

	(void)priv;
	/* Nothing to frame; just forward the action (e.g. a flush) */
	if (l == 0)
		return (ved_bytes(ecx, act, pv, l));

	p = pv;

	AN (ecx->isgzip);
	ecx->crc = crc32(ecx->crc, p, l);
	ecx->l_crc += l;

	/*
	 * buf1 can safely be emitted multiple times for objects longer
	 * than 64K-1 bytes.
	 */
	lx = 65535;
	buf1[0] = 0;
	vle16enc(buf1 + 1, lx);
	vle16enc(buf1 + 3, ~lx);

	while (l > 0) {
		if (l >= 65535) {
			/* Full-size stored block, reuse precomputed header */
			lx = 65535;
			if (ved_bytes(ecx, VDP_NULL, buf1, sizeof buf1))
				return (-1);
		} else {
			/* Final, shorter stored block */
			lx = (uint16_t)l;
			buf2[0] = 0;
			vle16enc(buf2 + 1, lx);
			vle16enc(buf2 + 3, ~lx);
			if (ved_bytes(ecx, VDP_NULL, buf2, sizeof buf2))
				return (-1);
		}
		if (ved_bytes(ecx, VDP_NULL, p, lx))
			return (-1);
		l -= lx;
		p += lx;
	}
	/* buf1 & buf2 are local, so we have to flush */
	return (ved_bytes(ecx, VDP_FLUSH, NULL, 0));
}
580 |
|
|
581 |
|
/* VDP for an ungzipped include delivered into a gzipped parent */
static const struct vdp ved_pretend_gz = {
	.name =		"PGZ",
	.bytes =	ved_pretend_gzip_bytes,
	.fini =		ved_pretend_gzip_fini,
};
586 |
|
|
587 |
|
/*--------------------------------------------------------------------- |
588 |
|
* Include a gzipped object in a gzipped ESI object delivery |
589 |
|
* |
590 |
|
* This is the interesting case: Deliver all the deflate blocks, stripping |
591 |
|
* the "LAST" bit of the last one and padding it, as necessary, to a byte |
592 |
|
* boundary. |
593 |
|
* |
594 |
|
*/ |
595 |
|
|
596 |
|
/* State for splicing a gzipped include into a gzipped parent (ved_gzgz_*) */
struct ved_foo {
	unsigned		magic;
#define VED_FOO_MAGIC		0x6a5a262d
	struct ecx		*ecx;		/* parent ESI context (CRC accounting) */
	struct objcore		*objcore;	/* the gzipped include object */
	uint64_t		start, last, stop, lpad;
						/* OA_GZIPBITS bit offsets: first
						 * deflate bit, last-block bit,
						 * end of deflate data; lpad is
						 * the emitted pad length */
	ssize_t			ll;		/* bytes of the object consumed so far */
	uint64_t		olen;		/* total object length */
	uint8_t			dbits[8];	/* patched bytes + alignment padding */
	uint8_t			tailbuf[8];	/* captured gzip trailer (CRC32+ISIZE) */
};
607 |
|
|
608 |
|
/*
 * Prepare for gzip-in-gzip splicing: pull the three bit positions that
 * the gzip code recorded in OA_GZIPBITS (start of deflate data, position
 * of the last-block bit, end of deflate data) and sanity-check them
 * against the object length.
 */
static int v_matchproto_(vdp_init_f)
ved_gzgz_init(VRT_CTX, struct vdp_ctx *vdc, void **priv)
{
	ssize_t l;
	const char *p;
	struct ved_foo *foo;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	AN(priv);

	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	CHECK_OBJ_NOTNULL(foo->objcore, OBJCORE_MAGIC);

	/* Poison so a partially-filled trailer is recognizable */
	memset(foo->tailbuf, 0xdd, sizeof foo->tailbuf);

	AN(ObjCheckFlag(vdc->wrk, foo->objcore, OF_GZIPED));

	p = ObjGetAttr(vdc->wrk, foo->objcore, OA_GZIPBITS, &l);
	AN(p);
	assert(l == 32);
	foo->start = vbe64dec(p);
	foo->last = vbe64dec(p + 8);
	foo->stop = vbe64dec(p + 16);
	foo->olen = ObjGetLen(vdc->wrk, foo->objcore);
	/* All three are bit offsets and must fall inside the object */
	assert(foo->start > 0 && foo->start < foo->olen * 8);
	assert(foo->last > 0 && foo->last < foo->olen * 8);
	assert(foo->stop > 0 && foo->stop < foo->olen * 8);
	assert(foo->last >= foo->start);
	assert(foo->last < foo->stop);

	/* The start bit must be byte aligned. */
	AZ(foo->start & 7);
	return (0);
}
643 |
|
|
644 |
|
/* |
645 |
|
* XXX: for act == VDP_END || act == VDP_FLUSH, we send a flush more often than |
646 |
|
* we need. The VDP_END case would trip our "at most one VDP_END call" assertion |
647 |
|
* in VDP_bytes(), but ved_bytes() covers it. |
648 |
|
* |
649 |
|
* To avoid unnecessary chunks downstream, it would be nice to re-structure the |
650 |
|
* code to identify the last block, send VDP_END/VDP_FLUSH for that one and |
651 |
|
* VDP_NULL for anything before it. |
652 |
|
*/ |
653 |
|
|
654 |
|
/*
 * Splice the deflate payload of a gzipped include into the parent's
 * gzip stream.  Processes the object bytes as a pipeline of phases
 * keyed on the running offset foo->ll:
 *
 *  1. skip the gzip header (everything before foo->start/8);
 *  2. pass the deflate body through up to the byte holding the
 *     last-block bit;
 *  3. patch that byte: clear the BFINAL bit so the parent stream
 *     continues after us;
 *  4. pass the remainder of the deflate data;
 *  5. if the deflate data does not end on a byte boundary, emit a
 *     padding sequence (empty stored/fixed blocks, precomputed per
 *     bit phase) to realign;
 *  6. capture the 8-byte gzip trailer (CRC32+ISIZE) into tailbuf for
 *     ved_gzgz_fini() instead of forwarding it.
 *
 * Several phases can trigger for the same input segment; each consumes
 * its share of len.
 */
static int v_matchproto_(vdp_bytes_f)
ved_gzgz_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv,
    const void *ptr, ssize_t len)
{
	struct ved_foo *foo;
	const uint8_t *pp;
	ssize_t dl;
	ssize_t l;

	(void)vdc;
	CAST_OBJ_NOTNULL(foo, *priv, VED_FOO_MAGIC);
	pp = ptr;
	if (len > 0) {
		/* Skip over the GZIP header */
		dl = foo->start / 8 - foo->ll;
		if (dl > 0) {
			/* Before foo.start, skip */
			if (dl > len)
				dl = len;
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0) {
		/* The main body of the object */
		dl = foo->last / 8 - foo->ll;
		if (dl > 0) {
			dl = vmin(dl, len);
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && foo->ll == foo->last / 8) {
		/* Remove the "LAST" bit */
		foo->dbits[0] = *pp;
		foo->dbits[0] &= ~(1U << (foo->last & 7));
		if (ved_bytes(foo->ecx, act, foo->dbits, 1))
			return (-1);
		foo->ll++;
		len--;
		pp++;
	}
	if (len > 0) {
		/* Last block */
		dl = foo->stop / 8 - foo->ll;
		if (dl > 0) {
			dl = vmin(dl, len);
			if (ved_bytes(foo->ecx, act, pp, dl))
				return (-1);
			foo->ll += dl;
			len -= dl;
			pp += dl;
		}
	}
	if (len > 0 && (foo->stop & 7) && foo->ll == foo->stop / 8) {
		/* Add alignment to byte boundary */
		foo->dbits[1] = *pp;
		foo->ll++;
		len--;
		pp++;
		switch ((int)(foo->stop & 7)) {
		case 1: /*
			 * x000....
			 * 00000000 00000000 11111111 11111111
			 */
		case 3: /*
			 * xxx000..
			 * 00000000 00000000 11111111 11111111
			 */
		case 5: /*
			 * xxxxx000
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00; foo->dbits[3] = 0x00;
			foo->dbits[4] = 0xff; foo->dbits[5] = 0xff;
			foo->lpad = 5;
			break;
		case 2: /* xx010000 00000100 00000001 00000000 */
			foo->dbits[1] |= 0x08;
			foo->dbits[2] = 0x20;
			foo->dbits[3] = 0x80;
			foo->dbits[4] = 0x00;
			foo->lpad = 4;
			break;
		case 4: /* xxxx0100 00000001 00000000 */
			foo->dbits[1] |= 0x20;
			foo->dbits[2] = 0x80;
			foo->dbits[3] = 0x00;
			foo->lpad = 3;
			break;
		case 6: /* xxxxxx01 00000000 */
			foo->dbits[1] |= 0x80;
			foo->dbits[2] = 0x00;
			foo->lpad = 2;
			break;
		case 7: /*
			 * xxxxxxx0
			 * 00......
			 * 00000000 00000000 11111111 11111111
			 */
			foo->dbits[2] = 0x00;
			foo->dbits[3] = 0x00; foo->dbits[4] = 0x00;
			foo->dbits[5] = 0xff; foo->dbits[6] = 0xff;
			foo->lpad = 6;
			break;
		case 0: /* xxxxxxxx */
		default:
			WRONG("compiler must be broken");
		}
		if (ved_bytes(foo->ecx, act, foo->dbits + 1, foo->lpad))
			return (-1);
	}
	if (len > 0) {
		/* Recover GZIP tail */
		dl = foo->olen - foo->ll;
		assert(dl >= 0);
		if (dl > len)
			dl = len;
		if (dl > 0) {
			assert(dl <= 8);
			l = foo->ll - (foo->olen - 8);
			assert(l >= 0);
			assert(l <= 8);
			assert(l + dl <= 8);
			memcpy(foo->tailbuf + l, pp, dl);
			foo->ll += dl;
			len -= dl;
		}
	}
	assert(len == 0);
	return (0);
}
790 |
|
|
791 |
|
/*
 * Finish a gzip-in-gzip splice: flush anything still buffered, then
 * fold the CRC32 and length recovered from the include's gzip trailer
 * into the parent's running trailer accounting.
 */
static int v_matchproto_(vdp_fini_f)
ved_gzgz_fini(struct vdp_ctx *vdc, void **priv)
{
	uint32_t icrc;
	uint32_t ilen;
	struct ved_foo *foo;

	(void)vdc;
	TAKE_OBJ_NOTNULL(foo, priv, VED_FOO_MAGIC);

	/* XXX
	 * this works due to the esi layering, a VDP pushing bytes from _fini
	 * will otherwise have its own _bytes method called.
	 *
	 * Could rewrite use VDP_END
	 */
	(void)ved_bytes(foo->ecx, VDP_FLUSH, NULL, 0);

	/* Gzip trailer is little-endian CRC32 followed by ISIZE */
	icrc = vle32dec(foo->tailbuf);
	ilen = vle32dec(foo->tailbuf + 4);
	foo->ecx->crc = crc32_combine(foo->ecx->crc, icrc, ilen);
	foo->ecx->l_crc += ilen;

	return (0);
}
816 |
|
|
817 |
|
/* VDP for a gzipped include spliced into a gzipped parent */
static const struct vdp ved_gzgz = {
	.name =		"VZZ",
	.init =		ved_gzgz_init,
	.bytes =	ved_gzgz_bytes,
	.fini =		ved_gzgz_fini,
};
823 |
|
|
824 |
|
/*-------------------------------------------------------------------- |
825 |
|
* Straight through without processing. |
826 |
|
*/ |
827 |
|
|
828 |
|
static int v_matchproto_(vdp_fini_f) |
829 |
1152 |
ved_vdp_fini(struct vdp_ctx *vdc, void **priv) |
830 |
|
{ |
831 |
1152 |
(void)vdc; |
832 |
1152 |
*priv = NULL; |
833 |
1152 |
return (0); |
834 |
|
} |
835 |
|
|
836 |
|
static int v_matchproto_(vdp_bytes_f) |
837 |
7667 |
ved_vdp_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, |
838 |
|
const void *ptr, ssize_t len) |
839 |
|
{ |
840 |
|
struct ecx *ecx; |
841 |
|
|
842 |
7667 |
(void)vdc; |
843 |
7667 |
CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC); |
844 |
7667 |
return (ved_bytes(ecx, act, ptr, len)); |
845 |
|
} |
846 |
|
|
847 |
|
/* VDP for includes needing no gzip adaptation: pure pass-through */
static const struct vdp ved_ved = {
	.name =		"VED",
	.bytes =	ved_vdp_bytes,
	.fini =		ved_vdp_fini,
};
852 |
|
|
853 |
|
static void |
854 |
1464 |
ved_close(struct req *req, int error) |
855 |
|
{ |
856 |
1464 |
req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc); |
857 |
|
|
858 |
1464 |
if (! error) |
859 |
1452 |
return; |
860 |
12 |
req->top->topreq->vdc->retval = -1; |
861 |
12 |
req->top->topreq->doclose = req->doclose; |
862 |
1464 |
} |
863 |
|
|
864 |
|
/*--------------------------------------------------------------------*/ |
865 |
|
|
866 |
|
/*
 * Deliver an ESI include into the parent's output stream.
 *
 * Skips delivery (closing the chain) for non-200/204 status when
 * esi_include_onerror is enabled, for bodyless responses, and for
 * completed zero-length objects.  Otherwise pushes exactly one adapter
 * VDP chosen from the include/parent gzip combination:
 *
 *   gzipped object, not ESI-processed  -> ved_gzgz (deflate splice)
 *   plain object in gzipped parent     -> ved_pretend_gz (stored blocks)
 *   anything else                      -> ved_ved (pass-through)
 *
 * and then runs the object through it.  Errors are signalled to the
 * top request via ved_close() when onerror=abort applies.
 */
static enum vtr_deliver_e v_matchproto_(vtr_deliver_f)
ved_deliver(struct req *req, int wantbody)
{
	int i = 0;
	const char *p;
	uint16_t status;
	struct ecx *ecx;
	struct ved_foo foo[1];
	struct vrt_ctx ctx[1];

	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC);

	CAST_OBJ_NOTNULL(ecx, req->transport_priv, ECX_MAGIC);

	status = req->resp->status % 1000;

	if (FEATURE(FEATURE_ESI_INCLUDE_ONERROR) &&
	    status != 200 && status != 204) {
		/* Error fragment: omit it; abort topreq if so requested */
		ved_close(req, ecx->abrt);
		return (VTR_D_DONE);
	}

	if (wantbody == 0) {
		ved_close(req, 0);
		return (VTR_D_DONE);
	}

	if (req->boc == NULL && ObjGetLen(req->wrk, req->objcore) == 0) {
		ved_close(req, 0);
		return (VTR_D_DONE);
	}

	/* i: object is genuinely gzip-encoded */
	if (http_GetHdr(req->resp, H_Content_Encoding, &p))
		i = http_coding_eq(p, gzip);
	if (i)
		i = ObjCheckFlag(req->wrk, req->objcore, OF_GZIPED);

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Req2Ctx(ctx, req);

	if (ecx->isgzip && i && !req->res_esi) {
		/* A gzipped include which is not ESI processed */

		/* OA_GZIPBITS are not valid until BOS_FINISHED */
		if (req->boc != NULL)
			ObjWaitState(req->objcore, BOS_FINISHED);

		if (req->objcore->flags & OC_F_FAILED) {
			/* No way of signalling errors in the middle of
			 * the ESI body. Omit this ESI fragment.
			 * XXX change error argument to 1
			 */
			ved_close(req, 0);
			return (VTR_D_DONE);
		}

		INIT_OBJ(foo, VED_FOO_MAGIC);
		foo->ecx = ecx;
		foo->objcore = req->objcore;
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_gzgz, foo);
	} else if (ecx->isgzip && !i) {
		/* Non-Gzip'ed include in gzipped parent */
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_pretend_gz, ecx);
	} else {
		/* Anything else goes straight through */
		i = VDP_Push(ctx, req->vdc, req->ws, &ved_ved, ecx);
	}

	if (i == 0) {
		i = VDP_DeliverObj(req->vdc, req->objcore);
	} else {
		VSLb(req->vsl, SLT_Error, "Failure to push ESI processors");
		req->doclose = SC_OVERLOAD;
	}

	if (i && req->doclose == SC_NULL)
		req->doclose = SC_REM_CLOSE;

	ved_close(req, i && ecx->abrt ? 1 : 0);
	return (VTR_D_DONE);
}