| | varnish-cache/bin/varnishd/cache/cache_fetch.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2006 Verdens Gang AS |
2 |
|
* Copyright (c) 2006-2015 Varnish Software AS |
3 |
|
* All rights reserved. |
4 |
|
* |
5 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
6 |
|
* |
7 |
|
* SPDX-License-Identifier: BSD-2-Clause |
8 |
|
* |
9 |
|
* Redistribution and use in source and binary forms, with or without |
10 |
|
* modification, are permitted provided that the following conditions |
11 |
|
* are met: |
12 |
|
* 1. Redistributions of source code must retain the above copyright |
13 |
|
* notice, this list of conditions and the following disclaimer. |
14 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
15 |
|
* notice, this list of conditions and the following disclaimer in the |
16 |
|
* documentation and/or other materials provided with the distribution. |
17 |
|
* |
18 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
19 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
22 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 |
|
* SUCH DAMAGE. |
29 |
|
*/ |
30 |
|
|
31 |
|
#include "config.h" |
32 |
|
|
33 |
|
#include "cache_varnishd.h" |
34 |
|
#include "cache_filter.h" |
35 |
|
#include "cache_objhead.h" |
36 |
|
#include "storage/storage.h" |
37 |
|
#include "vcl.h" |
38 |
|
#include "vtim.h" |
39 |
|
#include "vcc_interface.h" |
40 |
|
|
41 |
|
#define FETCH_STEPS \ |
42 |
|
FETCH_STEP(mkbereq, MKBEREQ) \ |
43 |
|
FETCH_STEP(retry, RETRY) \ |
44 |
|
FETCH_STEP(startfetch, STARTFETCH) \ |
45 |
|
FETCH_STEP(condfetch, CONDFETCH) \ |
46 |
|
FETCH_STEP(fetch, FETCH) \ |
47 |
|
FETCH_STEP(fetchbody, FETCHBODY) \ |
48 |
|
FETCH_STEP(fetchend, FETCHEND) \ |
49 |
|
FETCH_STEP(error, ERROR) \ |
50 |
|
FETCH_STEP(fail, FAIL) \ |
51 |
|
FETCH_STEP(done, DONE) |
52 |
|
|
53 |
|
typedef const struct fetch_step *vbf_state_f(struct worker *, struct busyobj *); |
54 |
|
|
55 |
|
struct fetch_step { |
56 |
|
const char *name; |
57 |
|
vbf_state_f *func; |
58 |
7320 |
}; |
59 |
7320 |
|
60 |
7320 |
#define FETCH_STEP(l, U) \ |
61 |
7320 |
static vbf_state_f vbf_stp_##l; \ |
62 |
|
static const struct fetch_step F_STP_##U[1] = {{ .name = "Fetch Step " #l, .func = vbf_stp_##l, }}; |
63 |
|
FETCH_STEPS |
64 |
|
#undef FETCH_STEP |
65 |
|
|
66 |
|
/*-------------------------------------------------------------------- |
67 |
|
* Allocate an object, with fall-back to Transient. |
68 |
|
* XXX: This somewhat overlaps the stuff in stevedore.c |
69 |
|
* XXX: Should this be merged over there ? |
70 |
|
*/ |
71 |
|
|
72 |
|
/*
 * Allocate the storage object for this fetch, choosing a stevedore.
 *
 * Uncacheable and shortlived responses go to Transient storage; everything
 * else uses the stevedore previously picked into bo->storage.  If the chosen
 * stevedore fails (and was not already Transient), retry on Transient with
 * the object demoted to shortlived.
 *
 * Returns non-zero on success, 0 when no storage could be obtained.
 */
static int
vbf_allocobj(struct busyobj *bo, unsigned l)
{
	struct objcore *oc;
	const struct stevedore *stv;
	vtim_dur lifetime;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	/* Total useful lifetime decides whether the object is shortlived */
	lifetime = oc->ttl + oc->grace + oc->keep;

	if (bo->uncacheable) {
		stv = stv_transient;
		bo->wrk->stats->beresp_uncacheable++;
	}
	else if (lifetime < cache_param->shortlived) {
		stv = stv_transient;
		bo->wrk->stats->beresp_shortlived++;
	}
	else
		stv = bo->storage;

	/* bo->storage is consumed exactly once per fetch attempt */
	bo->storage = NULL;

	if (stv == NULL)
		return (0);

	if (STV_NewObject(bo->wrk, oc, stv, l))
		return (1);

	/* Transient already failed: no fallback left */
	if (stv == stv_transient)
		return (0);

	/*
	 * Try to salvage the transaction by allocating a shortlived object
	 * on Transient storage.
	 */

	oc->ttl = vmin_t(float, oc->ttl, cache_param->shortlived);
	oc->grace = 0.0;
	oc->keep = 0.0;
	return (STV_NewObject(bo->wrk, oc, stv_transient, l));
}
117 |
|
|
118 |
|
static void |
119 |
7972 |
vbf_cleanup(struct busyobj *bo) |
120 |
|
{ |
121 |
|
struct vfp_ctx *vfc; |
122 |
|
|
123 |
7972 |
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); |
124 |
7972 |
vfc = bo->vfc; |
125 |
7972 |
CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC); |
126 |
|
|
127 |
7972 |
bo->acct.beresp_bodybytes += VFP_Close(vfc); |
128 |
7972 |
bo->vfp_filter_list = NULL; |
129 |
|
|
130 |
7972 |
if (bo->director_state != DIR_S_NULL) |
131 |
7939 |
VDI_Finish(bo); |
132 |
7972 |
} |
133 |
|
|
134 |
|
/*
 * Roll the backend request back to its pristine (bereq0) state so VCL can
 * retry or re-drive the fetch: close down the fetch pipeline, cycle the VCL
 * task privs, re-clone bereq from bereq0 and rewind the workspace to the
 * snapshot taken when bereq was first set up.
 */
void
Bereq_Rollback(VRT_CTX)
{
	struct busyobj *bo;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	if (bo->htc != NULL) {
		assert(bo->htc->body_status != BS_TAKEN);
		/* A body we will not read makes the connection unreusable */
		if (bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
	}

	vbf_cleanup(bo);
	/* Leave and re-enter the VCL task so priv state is reset */
	VCL_TaskLeave(ctx, bo->privs);
	VCL_TaskEnter(bo->privs);
	HTTP_Clone(bo->bereq, bo->bereq0);
	bo->vfp_filter_list = NULL;
	bo->err_reason = NULL;
	AN(bo->ws_bo);
	WS_Rollback(bo->ws, bo->ws_bo);
}
158 |
|
|
159 |
|
/*-------------------------------------------------------------------- |
160 |
|
* Turn the beresp into a obj |
161 |
|
*/ |
162 |
|
|
163 |
|
/*
 * Turn the beresp into an object: build the Vary matching instructions,
 * estimate the header workspace needed, allocate storage via vbf_allocobj()
 * and persist Vary, XID, encoded headers and Last-Modified on the objcore.
 *
 * Returns 0 on success; on storage failure returns the (non-zero) result of
 * VFP_Error(), failing the fetch pipeline.
 */
static int
vbf_beresp2obj(struct busyobj *bo)
{
	unsigned l, l2;
	const char *b;
	uint8_t *bp;
	struct vsb *vary = NULL;
	int varyl = 0;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	/* l accumulates the total object allocation estimate */
	l = 0;

	/* Create Vary instructions (pointless for private/pass objects) */
	if (!(oc->flags & OC_F_PRIVATE)) {
		varyl = VRY_Create(bo, &vary);
		if (varyl > 0) {
			AN(vary);
			assert(varyl == VSB_len(vary));
			l += PRNDUP((intptr_t)varyl);
		} else if (varyl < 0) {
			/*
			 * Vary parse error
			 * Complain about it, and make this a pass.
			 */
			VSLb(bo->vsl, SLT_Error,
			    "Illegal 'Vary' header from backend, "
			    "making this a pass.");
			bo->uncacheable = 1;
			AZ(vary);
		} else
			/* No vary */
			AZ(vary);
	}

	l2 = http_EstimateWS(bo->beresp,
	    bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS);
	l += l2;

	if (bo->uncacheable)
		oc->flags |= OC_F_HFM;

	if (!vbf_allocobj(bo, l)) {
		/* No storage: free the vary buffer before failing */
		if (vary != NULL)
			VSB_destroy(&vary);
		AZ(vary);
		return (VFP_Error(bo->vfc, "Could not get storage"));
	}

	if (vary != NULL) {
		AN(ObjSetAttr(bo->wrk, oc, OA_VARY, varyl, VSB_data(vary)));
		VSB_destroy(&vary);
	}

	AZ(ObjSetXID(bo->wrk, oc, bo->vsl->wid));

	/* for HTTP_Encode() VSLH call */
	bo->beresp->logtag = SLT_ObjMethod;

	/* Filter into object */
	bp = ObjSetAttr(bo->wrk, oc, OA_HEADERS, l2, NULL);
	AN(bp);
	HTTP_Encode(bo->beresp, bp, l2,
	    bo->uncacheable ? HTTPH_A_PASS : HTTPH_A_INS);

	/* Record Last-Modified; fall back to origin timestamp if absent */
	if (http_GetHdr(bo->beresp, H_Last_Modified, &b))
		AZ(ObjSetDouble(bo->wrk, oc, OA_LASTMODIFIED, VTIM_parse(b)));
	else
		AZ(ObjSetDouble(bo->wrk, oc, OA_LASTMODIFIED,
		    floor(oc->t_origin)));

	return (0);
}
239 |
|
|
240 |
|
/*-------------------------------------------------------------------- |
241 |
|
* Copy req->bereq and release req if no body |
242 |
|
*/ |
243 |
|
|
244 |
|
/*
 * Fetch state MKBEREQ: build bereq0 from the client request, add
 * conditional (IMS/INM) headers when a revalidation candidate exists,
 * clone it into the working bereq, and release the client request as soon
 * as no request body has to be kept around.
 */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_mkbereq(struct worker *wrk, struct busyobj *bo)
{
	const char *q;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(oc->boc->state == BOS_INVALID);
	AZ(bo->storage);

	HTTP_Setup(bo->bereq0, bo->ws, bo->vsl, SLT_BereqMethod);
	/* Pass and fetch filter different sets of request headers */
	http_FilterReq(bo->bereq0, bo->req->http,
	    bo->uncacheable ? HTTPH_R_PASS : HTTPH_R_FETCH);

	if (bo->uncacheable)
		AZ(bo->stale_oc);
	else {
		/* Cacheable fetches are normalized to GET, gzip-accepting */
		http_ForceField(bo->bereq0, HTTP_HDR_METHOD, "GET");
		if (cache_param->http_gzip_support)
			http_ForceHeader(bo->bereq0, H_Accept_Encoding, "gzip");
	}
	http_ForceField(bo->bereq0, HTTP_HDR_PROTO, "HTTP/1.1");

	/*
	 * If we hold a stale object eligible for revalidation (and it has,
	 * or will have, a body), turn this into a conditional fetch.
	 */
	if (bo->stale_oc != NULL &&
	    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND) &&
	    (bo->stale_oc->boc != NULL || ObjGetLen(wrk, bo->stale_oc) != 0)) {
		AZ(bo->stale_oc->flags & (OC_F_HFM|OC_F_PRIVATE));
		q = RFC2616_Strong_LM(NULL, wrk, bo->stale_oc);
		if (q != NULL)
			http_PrintfHeader(bo->bereq0,
			    "If-Modified-Since: %s", q);
		q = HTTP_GetHdrPack(bo->wrk, bo->stale_oc, H_ETag);
		if (q != NULL)
			http_PrintfHeader(bo->bereq0,
			    "If-None-Match: %s", q);
	}

	http_CopyHome(bo->bereq0);
	HTTP_Setup(bo->bereq, bo->ws, bo->vsl, SLT_BereqMethod);
	/* Snapshot so retries can rewind the workspace to this point */
	bo->ws_bo = WS_Snapshot(bo->ws);
	HTTP_Clone(bo->bereq, bo->bereq0);

	/* Detach from the client request unless its body is still needed */
	if (bo->req->req_body_status->avail == 0) {
		bo->req = NULL;
		ObjSetState(bo->wrk, oc, BOS_REQ_DONE);
	} else if (bo->req->req_body_status == BS_CACHED) {
		/* Cached body: take a reference and drop the request */
		AN(bo->req->body_oc);
		bo->bereq_body = bo->req->body_oc;
		HSH_Ref(bo->bereq_body);
		bo->req = NULL;
		ObjSetState(bo->wrk, oc, BOS_REQ_DONE);
	}
	return (F_STP_STARTFETCH);
}
303 |
|
|
304 |
|
/*-------------------------------------------------------------------- |
305 |
|
* Start a new VSL transaction and try again |
306 |
|
* Prepare the busyobj and fetch processors |
307 |
|
*/ |
308 |
|
|
309 |
|
/*
 * Fetch state RETRY: start a new VSL transaction and try the fetch again.
 * Resets the per-attempt busyobj fields (see VBO_GetBusyObj) before
 * returning to STARTFETCH.  Fails instead when retrying has been ruled out
 * (bo->no_retry set, e.g. after the request body was consumed).
 */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_retry(struct worker *wrk, struct busyobj *bo)
{
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

	assert(bo->fetch_objcore->boc->state <= BOS_REQ_DONE);

	if (bo->no_retry != NULL) {
		VSLb(bo->vsl, SLT_Error,
		    "Retry not possible, %s", bo->no_retry);
		return (F_STP_FAIL);
	}

	VSLb_ts_busyobj(bo, "Retry", W_TIM_real(wrk));

	/* VDI_Finish (via vbf_cleanup) must have been called before */
	assert(bo->director_state == DIR_S_NULL);

	/* reset other bo attributes - See VBO_GetBusyObj */
	bo->storage = NULL;
	bo->do_esi = 0;
	bo->do_stream = 1;
	bo->was_304 = 0;
	bo->err_code = 0;
	bo->err_reason = NULL;
	bo->connect_timeout = NAN;
	bo->first_byte_timeout = NAN;
	bo->between_bytes_timeout = NAN;
	if (bo->htc != NULL)
		bo->htc->doclose = SC_NULL;

	// XXX: BereqEnd + BereqAcct ?
	/* Re-key the log transaction under a fresh backend XID */
	VSL_ChgId(bo->vsl, "bereq", "retry", VXID_Get(wrk, VSL_BACKENDMARKER));
	VSLb_ts_busyobj(bo, "Start", bo->t_prev);
	http_VSL_log(bo->bereq);

	return (F_STP_STARTFETCH);
}
348 |
|
|
349 |
|
/*-------------------------------------------------------------------- |
350 |
|
* 304 setup logic |
351 |
|
*/ |
352 |
|
|
353 |
|
/*
 * Handle a 304 Not Modified response from the backend.
 *
 * When a revalidation candidate (stale_oc with OF_IMSCAND) exists, merge
 * its stored headers into beresp and mark the fetch as a 304-refresh
 * (bo->was_304).  A 304 we never asked for on a cacheable fetch is a
 * protocol error: the fetch is torn down and -1 returned so the caller can
 * error out.  Returns 1 otherwise.
 */
static int
vbf_304_logic(struct busyobj *bo)
{
	if (bo->stale_oc != NULL &&
	    ObjCheckFlag(bo->wrk, bo->stale_oc, OF_IMSCAND)) {
		AZ(bo->stale_oc->flags & (OC_F_HFM|OC_F_PRIVATE));
		if (ObjCheckFlag(bo->wrk, bo->stale_oc, OF_CHGCE)) {
			/*
			 * If a VFP changed C-E in the stored
			 * object, then don't overwrite C-E from
			 * the IMS fetch, and we must weaken any
			 * new ETag we get.
			 */
			RFC2616_Weaken_Etag(bo->beresp);
		}
		http_Unset(bo->beresp, H_Content_Encoding);
		http_Unset(bo->beresp, H_Content_Length);
		/* Merge stored headers; this also rewrites status to 200 */
		HTTP_Merge(bo->wrk, bo->stale_oc, bo->beresp);
		assert(http_IsStatus(bo->beresp, 200));
		bo->was_304 = 1;
	} else if (!bo->uncacheable) {
		/*
		 * Backend sent unallowed 304
		 */
		VSLb(bo->vsl, SLT_Error,
		    "304 response but not conditional fetch");
		bo->htc->doclose = SC_RX_BAD;
		vbf_cleanup(bo);
		return (-1);
	}
	return (1);
}
385 |
|
|
386 |
|
/*-------------------------------------------------------------------- |
387 |
|
* Setup bereq from bereq0, run vcl_backend_fetch |
388 |
|
*/ |
389 |
|
|
390 |
|
/*
 * Fetch state STARTFETCH: run vcl_backend_fetch{}, send the request via
 * the director, receive and post-process the response headers, run
 * vcl_backend_response{}, and dispatch to the next state based on the VCL
 * verdict (fetch/condfetch, error, fail or retry).
 */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_startfetch(struct worker *wrk, struct busyobj *bo)
{
	int i;
	vtim_real now;
	unsigned handling;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AZ(bo->storage);
	/* Pick the stevedore this attempt will use (consumed later) */
	bo->storage = bo->uncacheable ? stv_transient : STV_next();

	/* On retry, drop the X-Varnish header from the previous attempt */
	if (bo->retries > 0)
		http_Unset(bo->bereq, "\012X-Varnish:");

	http_PrintfHeader(bo->bereq, "X-Varnish: %ju", VXID(bo->vsl->wid));

	/* Without a body there must not be a Content-Length either */
	if (bo->bereq_body == NULL && bo->req == NULL)
		http_Unset(bo->bereq, H_Content_Length);

	VCL_backend_fetch_method(bo->vcl, wrk, NULL, bo, NULL);

	if (wrk->vpi->handling == VCL_RET_ABANDON ||
	    wrk->vpi->handling == VCL_RET_FAIL)
		return (F_STP_FAIL);

	assert (wrk->vpi->handling == VCL_RET_FETCH ||
	    wrk->vpi->handling == VCL_RET_ERROR);

	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);

	assert(oc->boc->state <= BOS_REQ_DONE);

	AZ(bo->htc);

	VFP_Setup(bo->vfc, wrk);
	bo->vfc->oc = oc;
	bo->vfc->resp = bo->beresp;
	bo->vfc->req = bo->bereq;

	/* vcl_backend_fetch{} returned error: synthesize, don't contact
	 * the backend */
	if (wrk->vpi->handling == VCL_RET_ERROR)
		return (F_STP_ERROR);

	VSLb_ts_busyobj(bo, "Fetch", W_TIM_real(wrk));
	i = VDI_GetHdr(bo);
	if (bo->htc != NULL)
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	bo->t_resp = now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Beresp", now);

	/* Director failed to produce response headers */
	if (i) {
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (bo->htc != NULL && bo->htc->body_status == BS_ERROR) {
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		VSLb(bo->vsl, SLT_Error, "Body cannot be fetched");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	if (!http_GetHdr(bo->beresp, H_Date, NULL)) {
		/*
		 * RFC 2616 14.18 Date: The Date general-header field
		 * represents the date and time at which the message was
		 * originated, having the same semantics as orig-date in
		 * RFC 822. ... A received message that does not have a
		 * Date header field MUST be assigned one by the recipient
		 * if the message will be cached by that recipient or
		 * gatewayed via a protocol which requires a Date.
		 *
		 * If we didn't get a Date header, we assign one here.
		 */
		http_TimeHeader(bo->beresp, "Date: ", now);
	}

	/*
	 * These two headers can be spread over multiple actual headers
	 * and we rely on their content outside of VCL, so collect them
	 * into one line here.
	 */
	http_CollectHdr(bo->beresp, H_Cache_Control);
	http_CollectHdr(bo->beresp, H_Vary);

	/* What does RFC2616 think about TTL ? */
	RFC2616_Ttl(bo, now,
	    &oc->t_origin,
	    &oc->ttl,
	    &oc->grace,
	    &oc->keep);

	AZ(bo->do_esi);
	AZ(bo->was_304);

	if (http_IsStatus(bo->beresp, 304) && vbf_304_logic(bo) < 0)
		return (F_STP_ERROR);

	/* Honor "Connection: close" we sent in the request */
	if (bo->htc != NULL && bo->htc->doclose == SC_NULL &&
	    http_GetHdrField(bo->bereq, H_Connection, "close", NULL))
		bo->htc->doclose = SC_REQ_CLOSE;

	VCL_backend_response_method(bo->vcl, wrk, NULL, bo, NULL);

	/* Honor "Connection: close" the backend sent in the response */
	if (bo->htc != NULL && bo->htc->doclose == SC_NULL &&
	    http_GetHdrField(bo->beresp, H_Connection, "close", NULL))
		bo->htc->doclose = SC_RESP_CLOSE;

	if (VRG_CheckBo(bo) < 0) {
		if (bo->director_state != DIR_S_NULL)
			VDI_Finish(bo);
		return (F_STP_ERROR);
	}

	if (wrk->vpi->handling == VCL_RET_ABANDON ||
	    wrk->vpi->handling == VCL_RET_FAIL ||
	    wrk->vpi->handling == VCL_RET_ERROR) {
		/* do not count deliberately ending the backend connection as
		 * fetch failure
		 */
		handling = wrk->vpi->handling;
		if (bo->htc)
			bo->htc->doclose = SC_RESP_CLOSE;
		/* vbf_cleanup() may clobber vpi->handling; restore it */
		vbf_cleanup(bo);
		wrk->vpi->handling = handling;

		if (wrk->vpi->handling == VCL_RET_ERROR)
			return (F_STP_ERROR);
		else
			return (F_STP_FAIL);
	}

	if (wrk->vpi->handling == VCL_RET_RETRY) {
		if (bo->htc && bo->htc->body_status != BS_NONE)
			bo->htc->doclose = SC_RESP_CLOSE;
		vbf_cleanup(bo);

		if (bo->retries++ < bo->max_retries)
			return (F_STP_RETRY);

		VSLb(bo->vsl, SLT_VCL_Error,
		    "Too many retries, delivering 503");
		assert(bo->director_state == DIR_S_NULL);
		return (F_STP_ERROR);
	}

	VSLb_ts_busyobj(bo, "Process", W_TIM_real(wrk));
	assert(oc->boc->state <= BOS_REQ_DONE);
	if (oc->boc->state != BOS_REQ_DONE) {
		bo->req = NULL;
		ObjSetState(wrk, oc, BOS_REQ_DONE);
	}

	/* ESI processing requires the complete body before delivery */
	if (bo->do_esi)
		bo->do_stream = 0;
	if (wrk->vpi->handling == VCL_RET_PASS) {
		oc->flags |= OC_F_HFP;
		bo->uncacheable = 1;
		wrk->vpi->handling = VCL_RET_DELIVER;
	}
	if (!bo->uncacheable || !bo->do_stream)
		oc->boc->transit_buffer = 0;
	if (bo->uncacheable)
		oc->flags |= OC_F_HFM;

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	return (bo->was_304 ? F_STP_CONDFETCH : F_STP_FETCH);
}
565 |
|
|
566 |
|
/*-------------------------------------------------------------------- |
567 |
|
*/ |
568 |
|
|
569 |
|
/*
 * Fetch state FETCHBODY: pull the response body through the VFP pipeline
 * into object storage, chunk by chunk, until the pipeline signals end or
 * error.  Content-Length (when known) is used as a storage-size hint only;
 * it shrinks as data arrives.
 */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetchbody(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l;
	uint8_t *ptr;
	enum vfp_status vfps = VFP_ERROR;
	ssize_t est;
	struct vfp_ctx *vfc;
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	vfc = bo->vfc;
	CHECK_OBJ_NOTNULL(vfc, VFP_CTX_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AN(vfc->vfp_nxt);

	/* est: remaining expected body bytes; 0 when length is unknown */
	est = bo->htc->content_length;
	if (est < 0)
		est = 0;

	do {
		if (oc->flags & OC_F_CANCEL) {
			/*
			 * A pass object and delivery was terminated
			 * We don't fail the fetch, in order for HitMiss
			 * objects to be created.
			 */
			AN(oc->flags & OC_F_HFM);
			VSLb(wrk->vsl, SLT_Debug,
			    "Fetch: Pass delivery abandoned");
			bo->htc->doclose = SC_RX_BODY;
			break;
		}
		AZ(vfc->failed);
		l = est;
		assert(l >= 0);
		/* Get a storage segment; l is updated to its actual size */
		if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK) {
			bo->htc->doclose = SC_RX_BODY;
			break;
		}

		AZ(vfc->failed);
		vfps = VFP_Suck(vfc, ptr, &l);
		if (l >= 0 && vfps != VFP_ERROR) {
			/* Commit the bytes written and shrink the estimate */
			VFP_Extend(vfc, l, vfps);
			if (est >= l)
				est -= l;
			else
				est = 0;
		}
	} while (vfps == VFP_OK);

	if (vfc->failed) {
		(void)VFP_Error(vfc, "Fetch pipeline failed to process");
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		if (!bo->do_stream) {
			/* Nobody saw the object yet: we can still 503 */
			assert(oc->boc->state < BOS_STREAM);
			// XXX: doclose = ?
			return (F_STP_ERROR);
		} else {
			/* Clients are already streaming: hard failure */
			wrk->stats->fetch_failed++;
			return (F_STP_FAIL);
		}
	}

	return (F_STP_FETCHEND);
}
639 |
|
|
640 |
|
/*
 * Fetch state FETCH: build the VFP filter chain, open it, turn beresp into
 * the object (headers, attributes, flags), optionally unbusy for streaming,
 * then proceed to FETCHBODY (or straight to FETCHEND when there is no body).
 */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetch(struct worker *wrk, struct busyobj *bo)
{
	struct vrt_ctx ctx[1];
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	/* A VCL rollback may have left us without a backend connection */
	if (bo->htc == NULL) {
		(void)VFP_Error(bo->vfc, "No backend connection (rollback?)");
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	/* No body -> done */
	if (bo->htc->body_status == BS_NONE || bo->htc->content_length == 0) {
		http_Unset(bo->beresp, H_Content_Encoding);
		bo->do_gzip = bo->do_gunzip = 0;
		bo->do_stream = 0;
		bo->vfp_filter_list = "";
	} else if (bo->vfp_filter_list == NULL) {
		bo->vfp_filter_list = VBF_Get_Filter_List(bo);
	}

	/* NULL filter list or failed VFP stacking means workspace trouble */
	if (bo->vfp_filter_list == NULL ||
	    VCL_StackVFP(bo->vfc, bo->vcl, bo->vfp_filter_list)) {
		(bo)->htc->doclose = SC_OVERLOAD;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	if (oc->flags & OC_F_PRIVATE)
		AN(bo->uncacheable);

	oc->boc->fetched_so_far = 0;

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Bo2Ctx(ctx, bo);

	if (VFP_Open(ctx, bo->vfc)) {
		(void)VFP_Error(bo->vfc, "Fetch pipeline failed to open");
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	if (vbf_beresp2obj(bo)) {
		bo->htc->doclose = SC_RX_BODY;
		vbf_cleanup(bo);
		return (F_STP_ERROR);
	}

	/* Copy the object flags the VFPs may have set onto the objcore
	 * (X-macro expansion over tbl/obj_attr.h) */
#define OBJ_FLAG(U, l, v) \
	if (bo->vfc->obj_flags & OF_##U) \
		ObjSetFlag(bo->wrk, oc, OF_##U, 1);
#include "tbl/obj_attr.h"

	/* Mark the object as a revalidation candidate when eligible */
	if (!(oc->flags & OC_F_HFM) &&
	    http_IsStatus(bo->beresp, 200) && (
	    RFC2616_Strong_LM(bo->beresp, NULL, NULL) != NULL ||
	    http_GetHdr(bo->beresp, H_ETag, NULL)))
		ObjSetFlag(bo->wrk, oc, OF_IMSCAND, 1);

	assert(oc->boc->refcount >= 1);

	assert(oc->boc->state == BOS_REQ_DONE);

	if (bo->do_stream) {
		/* Make the object visible to waiting clients now */
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
		ObjSetState(wrk, oc, BOS_STREAM);
	}

	VSLb(bo->vsl, SLT_Fetch_Body, "%u %s %s",
	    bo->htc->body_status->nbr, bo->htc->body_status->name,
	    bo->do_stream ? "stream" : "-");

	if (bo->htc->body_status != BS_NONE) {
		assert(bo->htc->body_status != BS_ERROR);
		return (F_STP_FETCHBODY);
	}
	AZ(bo->vfc->failed);
	return (F_STP_FETCHEND);
}
729 |
|
|
730 |
|
/*
 * Fetch state FETCHEND: the body is complete.  Close down the fetch,
 * record the final object length, unbusy the object if it was not already
 * streamed, mark it BOS_FINISHED, and retire any stale object this fetch
 * supersedes.
 */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fetchend(struct worker *wrk, struct busyobj *bo)
{

	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AZ(bo->vfc->failed);

	/* Recycle the backend connection before setting BOS_FINISHED to
	   give predictable backend reuse behavior for varnishtest */
	vbf_cleanup(bo);

	AZ(ObjSetU64(wrk, oc, OA_LEN, oc->boc->fetched_so_far));

	if (bo->do_stream)
		assert(oc->boc->state == BOS_STREAM);
	else {
		/* Not streamed: object becomes visible only now */
		assert(oc->boc->state == BOS_REQ_DONE);
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
	}

	ObjSetState(wrk, oc, BOS_FINISHED);
	VSLb_ts_busyobj(bo, "BerespBody", W_TIM_real(wrk));
	if (bo->stale_oc != NULL) {
		VSL(SLT_ExpKill, NO_VXID, "VBF_Superseded x=%ju n=%ju",
		    VXID(ObjGetXID(wrk, bo->stale_oc)),
		    VXID(ObjGetXID(wrk, bo->fetch_objcore)));
		HSH_Replace(bo->stale_oc, bo->fetch_objcore);
	}
	return (F_STP_DONE);
}
767 |
|
|
768 |
|
/*-------------------------------------------------------------------- |
769 |
|
*/ |
770 |
|
|
771 |
|
/*
 * Private state for vbf_objiterate(): tracks the current destination
 * storage segment while copying a stale object's body into the new one.
 */
struct vbf_objiter_priv {
	unsigned		magic;
#define VBF_OBITER_PRIV_MAGIC	0x3c272a17
	struct busyobj		*bo;
	/* Bytes of the body not yet allocated in the destination object */
	ssize_t			l;
	/* Current destination segment: write cursor and remaining space */
	uint8_t			*p;
	ssize_t			pl;
};
781 |
|
|
782 |
|
/*
 * objiterate_f callback used by the conditional fetch: copy each chunk of
 * the stale object's body into storage segments of the new object,
 * extending the VFP context as we go.  Returns non-zero to abort iteration
 * when storage cannot be obtained.
 */
static int v_matchproto_(objiterate_f)
vbf_objiterate(void *priv, unsigned flush, const void *ptr, ssize_t len)
{
	struct vbf_objiter_priv *vop;
	ssize_t l;
	const uint8_t *ps = ptr;

	CAST_OBJ_NOTNULL(vop, priv, VBF_OBITER_PRIV_MAGIC);
	CHECK_OBJ_NOTNULL(vop->bo, BUSYOBJ_MAGIC);

	/* Only the END flag matters here */
	flush &= OBJ_ITER_END;

	while (len > 0) {
		/* Current segment exhausted: allocate the next one */
		if (vop->pl == 0) {
			vop->p = NULL;
			AN(vop->l);
			vop->pl = vop->l;
			if (VFP_GetStorage(vop->bo->vfc, &vop->pl, &vop->p)
			    != VFP_OK)
				return (1);
			/* Deduct what we actually got from the remainder */
			if (vop->pl < vop->l)
				vop->l -= vop->pl;
			else
				vop->l = 0;
		}
		AN(vop->pl);
		AN(vop->p);

		l = vmin(vop->pl, len);
		memcpy(vop->p, ps, l);
		/* Signal VFP_END only on the very last byte of the body */
		VFP_Extend(vop->bo->vfc, l,
		    flush && l == len ? VFP_END : VFP_OK);
		ps += l;
		vop->p += l;
		len -= l;
		vop->pl -= l;
	}
	if (flush)
		AZ(vop->l);
	return (0);
}
823 |
|
|
824 |
|
/* Satisfy the fetch by building the new object from the current beresp
 * and copying the body and selected attributes from the stale
 * "template" object (bo->stale_oc) instead of receiving a body from
 * the backend.
 *
 * Returns F_STP_FETCHEND on success, F_STP_FAIL on any failure. */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo)
{
	struct boc *stale_boc;
	enum boc_state_e stale_state;
	struct objcore *oc, *stale_oc;
	struct vbf_objiter_priv vop[1];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stale_oc = bo->stale_oc;
	CHECK_OBJ_NOTNULL(stale_oc, OBJCORE_MAGIC);

	/* A non-NULL boc means the template object is itself still being
	 * fetched. */
	stale_boc = HSH_RefBoc(stale_oc);
	CHECK_OBJ_ORNULL(stale_boc, BOC_MAGIC);
	if (stale_boc) {
		/* Wait for the stale object to become fully fetched, so
		 * that we can catch fetch errors, before we unbusy the
		 * new object. This serves two purposes. First it helps
		 * with request coalescing, and stops long chains of
		 * IMS-updated short-TTL objects all streaming from a
		 * single slow body fetch. Second it makes sure that all
		 * the object attributes are complete when we copy them
		 * (this would be an issue for ie OA_GZIPBITS). */
		VSLb(bo->vsl, SLT_Notice,
		    "vsl: Conditional fetch wait for streaming object");
		ObjWaitState(stale_oc, BOS_FINISHED);
		stale_state = stale_boc->state;
		HSH_DerefBoc(bo->wrk, stale_oc);
		stale_boc = NULL;
		if (stale_state != BOS_FINISHED) {
			assert(stale_state == BOS_FAILED);
			AN(stale_oc->flags & OC_F_FAILED);
		}
	}

	AZ(stale_boc);
	if (stale_oc->flags & OC_F_FAILED) {
		/* The template object's own fetch failed; we cannot copy
		 * from it. */
		(void)VFP_Error(bo->vfc, "Template object failed");
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}

	if (vbf_beresp2obj(bo)) {
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}

	/* Carry selected attributes over from the template object. */
	if (ObjHasAttr(bo->wrk, stale_oc, OA_ESIDATA))
		AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_ESIDATA));

	AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_FLAGS));
	/* NOTE(review): for hit-for-miss objects the copied IMSCAND
	 * object flag is cleared again. */
	if (oc->flags & OC_F_HFM)
		ObjSetFlag(bo->wrk, oc, OF_IMSCAND, 0);
	AZ(ObjCopyAttr(bo->wrk, oc, stale_oc, OA_GZIPBITS));

	if (bo->do_stream) {
		/* Expose the new object to waiting clients before the
		 * body copy starts. */
		ObjSetState(wrk, oc, BOS_PREP_STREAM);
		HSH_Unbusy(wrk, oc);
		ObjSetState(wrk, oc, BOS_STREAM);
	}

	/* Copy the template body through vbf_objiterate(). */
	INIT_OBJ(vop, VBF_OBITER_PRIV_MAGIC);
	vop->bo = bo;
	vop->l = ObjGetLen(bo->wrk, stale_oc);
	if (ObjIterate(wrk, stale_oc, vop, vbf_objiterate, 0))
		(void)VFP_Error(bo->vfc, "Template object failed");

	if (bo->vfc->failed) {
		vbf_cleanup(bo);
		wrk->stats->fetch_failed++;
		return (F_STP_FAIL);
	}
	return (F_STP_FETCHEND);
}
903 |
|
|
904 |
|
/*-------------------------------------------------------------------- |
905 |
|
* Create synth object |
906 |
|
* |
907 |
|
* replaces a stale object unless |
908 |
|
* - abandoning the bereq or |
909 |
|
* - leaving vcl_backend_error with return (deliver) and beresp.ttl == 0s or |
910 |
|
* - there is a waitinglist on this object because in this case the default ttl |
911 |
|
* would be 1s, so we might be looking at the same case as the previous |
912 |
|
* |
913 |
|
* We do want the stale replacement to avoid an object pileup with short ttl and |
914 |
|
* long grace/keep, yet there could exist cases where a cache object is |
915 |
|
* deliberately created to momentarily override a stale object. |
916 |
|
* |
917 |
|
* If this case exists, we should add a vcl veto (e.g. beresp.replace_stale with |
918 |
|
* default true) |
919 |
|
*/ |
920 |
|
|
921 |
|
/* Build a synthetic error object via vcl_backend_error{}, honoring the
 * VCL return (abandon/fail, retry, deliver).  See the comment block
 * above for the stale-object replacement policy. */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_error(struct worker *wrk, struct busyobj *bo)
{
	ssize_t l, ll, o;
	vtim_real now;
	uint8_t *ptr;
	struct vsb *synth_body;
	struct objcore *stale, *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	assert(bo->director_state == DIR_S_NULL);

	/* Only count as a failed fetch when we did not get here through
	 * an explicit return(error) from VCL. */
	if (wrk->vpi->handling != VCL_RET_ERROR)
		wrk->stats->fetch_failed++;

	now = W_TIM_real(wrk);
	VSLb_ts_busyobj(bo, "Error", now);

	/* Discard any partially fetched body before synthesizing. */
	if (oc->stobj->stevedore != NULL) {
		oc->boc->fetched_so_far = 0;
		ObjFreeObj(bo->wrk, oc);
	}

	if (bo->storage == NULL)
		bo->storage = STV_next();

	// XXX: reset all beresp flags ?

	/* Start a fresh beresp for the synthetic response. */
	HTTP_Setup(bo->beresp, bo->ws, bo->vsl, SLT_BerespMethod);
	if (bo->err_code > 0)
		http_PutResponse(bo->beresp, "HTTP/1.1", bo->err_code,
		    bo->err_reason);
	else
		http_PutResponse(bo->beresp, "HTTP/1.1", 503,
		    "Backend fetch failed");

	http_TimeHeader(bo->beresp, "Date: ", now);
	http_SetHeader(bo->beresp, "Server: Varnish");

	stale = bo->stale_oc;
	oc->t_origin = now;
	if (!VTAILQ_EMPTY(&oc->objhead->waitinglist)) {
		/*
		 * If there is a waitinglist, it means that there is no
		 * grace-able object, so cache the error return for a
		 * short time, so the waiting list can drain, rather than
		 * each objcore on the waiting list sequentially attempt
		 * to fetch from the backend.
		 */
		oc->ttl = 1;
		oc->grace = 5;
		oc->keep = 5;
		/* Do not replace the stale object in this case. */
		stale = NULL;
	} else {
		oc->ttl = 0;
		oc->grace = 0;
		oc->keep = 0;
	}

	synth_body = VSB_new_auto();
	AN(synth_body);

	/* Run vcl_backend_error{}; the VCL may fill synth_body. */
	VCL_backend_error_method(bo->vcl, wrk, NULL, bo, synth_body);

	AZ(VSB_finish(synth_body));

	if (wrk->vpi->handling == VCL_RET_ABANDON || wrk->vpi->handling == VCL_RET_FAIL) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	if (wrk->vpi->handling == VCL_RET_RETRY) {
		VSB_destroy(&synth_body);
		if (bo->retries++ < bo->max_retries)
			return (F_STP_RETRY);
		VSLb(bo->vsl, SLT_VCL_Error, "Too many retries, failing");
		return (F_STP_FAIL);
	}

	assert(wrk->vpi->handling == VCL_RET_DELIVER);

	assert(bo->vfc->wrk == bo->wrk);
	assert(bo->vfc->oc == oc);
	assert(bo->vfc->resp == bo->beresp);
	assert(bo->vfc->req == bo->bereq);

	if (vbf_beresp2obj(bo)) {
		VSB_destroy(&synth_body);
		return (F_STP_FAIL);
	}

	oc->boc->transit_buffer = 0;

	/* Copy the synthetic body into object storage, segment by
	 * segment; o accumulates the total number of bytes written. */
	ll = VSB_len(synth_body);
	o = 0;
	while (ll > 0) {
		l = ll;
		if (VFP_GetStorage(bo->vfc, &l, &ptr) != VFP_OK)
			break;
		l = vmin(l, ll);
		memcpy(ptr, VSB_data(synth_body) + o, l);
		VFP_Extend(bo->vfc, l, l == ll ? VFP_END : VFP_OK);
		ll -= l;
		o += l;
	}
	AZ(ObjSetU64(wrk, oc, OA_LEN, o));
	VSB_destroy(&synth_body);
	ObjSetState(wrk, oc, BOS_PREP_STREAM);
	HSH_Unbusy(wrk, oc);
	/* Replace the stale object with the synth object, but only when
	 * the synth object is cacheable (see policy comment above). */
	if (stale != NULL && oc->ttl > 0)
		HSH_Kill(stale);
	ObjSetState(wrk, oc, BOS_FINISHED);
	return (F_STP_DONE);
}
1039 |
|
|
1040 |
|
/*-------------------------------------------------------------------- |
1041 |
|
*/ |
1042 |
|
|
1043 |
|
/*--------------------------------------------------------------------
 */

/* Terminal failure state: mark the fetch objcore as failed so waiters
 * and future lookups see the failure, then end the state machine. */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_fail(struct worker *wrk, struct busyobj *bo)
{
	struct objcore *oc;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	/* A finished object must never be failed after the fact. */
	assert(oc->boc->state < BOS_FINISHED);
	HSH_Fail(oc);
	/* If the object was already unbusied (e.g. streaming had begun),
	 * also kill it so it cannot be served. */
	if (!(oc->flags & OC_F_BUSY))
		HSH_Kill(oc);
	ObjSetState(wrk, oc, BOS_FAILED);
	return (F_STP_DONE);
}
1060 |
|
|
1061 |
|
/*-------------------------------------------------------------------- |
1062 |
|
*/ |
1063 |
|
|
1064 |
|
/* F_STP_DONE is the loop-exit sentinel in vbf_fetch_thread(); its step
 * function must therefore never actually be invoked. */
static const struct fetch_step * v_matchproto_(vbf_state_f)
vbf_stp_done(struct worker *wrk, struct busyobj *bo)
{

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	WRONG("Just plain wrong");
	NEEDLESS(return (F_STP_DONE));
}
1073 |
|
|
1074 |
|
/* Fetch task body: drive the fetch state machine for one busyobj from
 * F_STP_MKBEREQ until a step returns F_STP_DONE, then tear down the
 * busyobj and drop the references handed over by VBF_Fetch(). */
static void v_matchproto_(task_func_t)
vbf_fetch_thread(struct worker *wrk, void *priv)
{
	struct vrt_ctx ctx[1];
	struct busyobj *bo;
	struct objcore *oc;
	const struct fetch_step *stp;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->req, REQ_MAGIC);
	oc = bo->fetch_objcore;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	THR_SetBusyobj(bo);
	stp = F_STP_MKBEREQ;
	assert(isnan(bo->t_first));
	assert(isnan(bo->t_prev));
	VSLb_ts_busyobj(bo, "Start", W_TIM_real(wrk));

	bo->wrk = wrk;
	wrk->vsl = bo->vsl;

#if 0
	if (bo->stale_oc != NULL) {
		CHECK_OBJ_NOTNULL(bo->stale_oc, OBJCORE_MAGIC);
		/* We don't want the oc/stevedore ops in fetching thread */
		if (!ObjCheckFlag(wrk, bo->stale_oc, OF_IMSCAND))
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
	}
#endif

	VCL_TaskEnter(bo->privs);
	/* State machine main loop: each step returns the next step. */
	while (stp != F_STP_DONE) {
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
		assert(oc->boc->refcount >= 1);
		/* bo->req must be released exactly when BOS_REQ_DONE is
		 * reached. */
		if (oc->boc->state < BOS_REQ_DONE)
			AN(bo->req);
		else
			AZ(bo->req);
		AN(stp);
		AN(stp->name);
		AN(stp->func);
		stp = stp->func(wrk, bo);
	}

	assert(bo->director_state == DIR_S_NULL);

	INIT_OBJ(ctx, VRT_CTX_MAGIC);
	VCL_Bo2Ctx(ctx, bo);
	VCL_TaskLeave(ctx, bo->privs);
	http_Teardown(bo->bereq);
	http_Teardown(bo->beresp);
	// cannot make assumptions about the number of references here #3434
	if (bo->bereq_body != NULL)
		(void) HSH_DerefObjCore(bo->wrk, &bo->bereq_body, 0);

	if (oc->boc->state == BOS_FINISHED) {
		AZ(oc->flags & OC_F_FAILED);
		VSLb(bo->vsl, SLT_Length, "%ju",
		    (uintmax_t)ObjGetLen(bo->wrk, oc));
	}
	// AZ(oc->boc);	// XXX

	if (bo->stale_oc != NULL)
		(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);

	wrk->vsl = NULL;
	HSH_DerefBoc(wrk, oc);
	SES_Rel(bo->sp);
	VBO_ReleaseBusyObj(wrk, &bo);
	THR_SetBusyobj(NULL);
}
1147 |
|
|
1148 |
|
/*-------------------------------------------------------------------- |
1149 |
|
*/ |
1150 |
|
|
1151 |
|
/* Start a backend fetch for oc in a separate worker task.
 *
 * wrk/req: the client-side worker and request triggering the fetch.
 * oc:      the (busy) objcore to be fetched into; a reference is taken
 *          and transferred to the fetch task.
 * oldoc:   optional stale object usable as IMS/condfetch template; a
 *          reference is taken and transferred as bo->stale_oc.
 * mode:    VBF_PASS, VBF_NORMAL or VBF_BACKGROUND; selects the task
 *          queue priority and whether this call waits for BOS_STREAM
 *          (pass/normal) or only BOS_REQ_DONE (background). */
void
VBF_Fetch(struct worker *wrk, struct req *req, struct objcore *oc,
    struct objcore *oldoc, enum vbf_fetch_mode_e mode)
{
	struct boc *boc;
	struct busyobj *bo;
	enum task_prio prio;
	const char *how;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(oc->flags & OC_F_BUSY);
	CHECK_OBJ_ORNULL(oldoc, OBJCORE_MAGIC);

	bo = VBO_GetBusyObj(wrk, req);
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	AN(bo->vcl);

	/* Keep a boc ref so we can wait on the fetch state below. */
	boc = HSH_RefBoc(oc);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	switch (mode) {
	case VBF_PASS:
		prio = TASK_QUEUE_BO;
		how = "pass";
		bo->uncacheable = 1;
		break;
	case VBF_NORMAL:
		prio = TASK_QUEUE_BO;
		how = "fetch";
		break;
	case VBF_BACKGROUND:
		prio = TASK_QUEUE_BG;
		how = "bgfetch";
		bo->is_bgfetch = 1;
		break;
	default:
		WRONG("Wrong fetch mode");
	}

	/* Copy the req flags marked for bereq propagation. */
#define REQ_BEREQ_FLAG(l, r, w, d) bo->l = req->l;
#include "tbl/req_bereq_flags.h"

	VSLb(bo->vsl, SLT_Begin, "bereq %ju %s", VXID(req->vsl->wid), how);
	VSLbs(bo->vsl, SLT_VCL_use, TOSTRAND(VCL_Name(bo->vcl)));
	VSLb(req->vsl, SLT_Link, "bereq %ju %s", VXID(bo->vsl->wid), how);

	THR_SetBusyobj(bo);

	bo->sp = req->sp;
	SES_Ref(bo->sp);

	/* Ownership of the vary match state moves to the boc. */
	oc->boc->vary = req->vary_b;
	req->vary_b = NULL;

	HSH_Ref(oc);
	AZ(bo->fetch_objcore);
	bo->fetch_objcore = oc;

	AZ(bo->stale_oc);
	if (oldoc != NULL) {
		assert(oldoc->refcnt > 0);
		HSH_Ref(oldoc);
		bo->stale_oc = oldoc;
	}

	AZ(bo->req);
	bo->req = req;

	bo->fetch_task->priv = bo;
	bo->fetch_task->func = vbf_fetch_thread;

	if (Pool_Task(wrk->pool, bo->fetch_task, prio)) {
		/* No worker thread available: fail the fetch in-line and
		 * undo the references taken above. */
		wrk->stats->bgfetch_no_thread++;
		VSLb(bo->vsl, SLT_FetchError,
		    "No thread available for bgfetch");
		(void)vbf_stp_fail(req->wrk, bo);
		if (bo->stale_oc != NULL)
			(void)HSH_DerefObjCore(wrk, &bo->stale_oc, 0);
		HSH_DerefBoc(wrk, oc);
		SES_Rel(bo->sp);
		THR_SetBusyobj(NULL);
		VBO_ReleaseBusyObj(wrk, &bo);
	} else {
		THR_SetBusyobj(NULL);
		bo = NULL; /* ref transferred to fetch thread */
		if (mode == VBF_BACKGROUND) {
			ObjWaitState(oc, BOS_REQ_DONE);
			(void)VRB_Ignore(req);
		} else {
			/* Wait until the object is ready for delivery
			 * (or the fetch failed). */
			ObjWaitState(oc, BOS_STREAM);
			if (oc->boc->state == BOS_FAILED) {
				AN((oc->flags & OC_F_FAILED));
			} else {
				AZ(oc->flags & OC_F_BUSY);
			}
		}
	}
	AZ(bo);
	VSLb_ts_req(req, "Fetch", W_TIM_real(wrk));
	assert(oc->boc == boc);
	HSH_DerefBoc(wrk, oc);
	/* Background fetches hold the only client-side oc reference. */
	if (mode == VBF_BACKGROUND)
		(void)HSH_DerefObjCore(wrk, &oc, HSH_RUSH_POLICY);
}