| | varnish-cache/bin/varnishd/cache/cache_deliver_proc.c |
0 |
|
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
30 |
|
|
31 |
|
#include "config.h" |
32 |
|
|
33 |
|
#include "cache_varnishd.h" |
34 |
|
#include "cache_filter.h" |
35 |
|
#include "cache_objhead.h" |
36 |
|
|
37 |
|
void |
38 |
6 |
VDP_Panic(struct vsb *vsb, const struct vdp_ctx *vdc) |
39 |
|
{ |
40 |
|
struct vdp_entry *vde; |
41 |
|
|
42 |
6 |
if (PAN_dump_struct(vsb, vdc, VDP_CTX_MAGIC, "vdc")) |
43 |
4 |
return; |
44 |
2 |
VSB_printf(vsb, "nxt = %p,\n", vdc->nxt); |
45 |
2 |
VSB_printf(vsb, "retval = %d,\n", vdc->retval); |
46 |
|
|
47 |
2 |
if (!VTAILQ_EMPTY(&vdc->vdp)) { |
48 |
1 |
VSB_cat(vsb, "filters = {\n"); |
49 |
1 |
VSB_indent(vsb, 2); |
50 |
4 |
VTAILQ_FOREACH(vde, &vdc->vdp, list) |
51 |
6 |
VSB_printf(vsb, "%s = %p { priv = %p }\n", |
52 |
3 |
vde->vdp->name, vde, vde->priv); |
53 |
1 |
VSB_indent(vsb, -2); |
54 |
1 |
VSB_cat(vsb, "},\n"); |
55 |
1 |
} |
56 |
|
|
57 |
2 |
VSB_indent(vsb, -2); |
58 |
2 |
VSB_cat(vsb, "},\n"); |
59 |
6 |
} |
60 |
|
|
61 |
|
/*
 * Ensure that transports have called VDP_Close()
 * to avoid leaks in VDPs
 */
void
VDP_Fini(const struct vdp_ctx *vdc)
{
	/* VDP_Close() removes every entry from the list; anything left
	 * here means a transport skipped the close and would leak. */
	assert(VTAILQ_EMPTY(&vdc->vdp));
}
70 |
|
|
71 |
|
void |
72 |
5779 |
VDP_Init(struct vdp_ctx *vdc, struct worker *wrk, struct vsl_log *vsl, |
73 |
|
const struct req *req, const struct busyobj *bo, intmax_t *clen) |
74 |
|
{ |
75 |
5779 |
AN(vdc); |
76 |
5779 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
77 |
5779 |
AN(vsl); |
78 |
|
|
79 |
5779 |
AN(clen); |
80 |
|
|
81 |
5779 |
assert((req ? 1 : 0) ^ (bo ? 1 : 0)); |
82 |
|
|
83 |
5779 |
AN(clen); |
84 |
5779 |
assert(*clen >= -1); |
85 |
|
|
86 |
5779 |
INIT_OBJ(vdc, VDP_CTX_MAGIC); |
87 |
5779 |
VTAILQ_INIT(&vdc->vdp); |
88 |
5779 |
vdc->wrk = wrk; |
89 |
5779 |
vdc->vsl = vsl; |
90 |
5779 |
vdc->clen = clen; |
91 |
|
|
92 |
5779 |
if (req != NULL) { |
93 |
3649 |
CHECK_OBJ(req, REQ_MAGIC); |
94 |
3649 |
vdc->oc = req->objcore; |
95 |
3649 |
vdc->hp = req->resp; |
96 |
3649 |
} |
97 |
|
else { |
98 |
2130 |
CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); |
99 |
2130 |
vdc->oc = bo->bereq_body; |
100 |
2130 |
vdc->hp = bo->bereq; |
101 |
|
} |
102 |
5779 |
} |
103 |
|
|
104 |
|
/* VDP_bytes
 *
 * Pushes len bytes at ptr down the delivery processor list.
 *
 * This function picks and calls the next delivery processor from the
 * list. The return value is the return value of the delivery
 * processor. Upon seeing a non-zero return value, that lowest value
 * observed is latched in ->retval and all subsequent calls to
 * VDP_bytes will return that value directly without calling the next
 * processor.
 *
 * VDP_END marks the end of successful processing, it is issued by
 * VDP_DeliverObj() and may also be sent downstream by processors ending the
 * stream (for return value != 0)
 *
 * VDP_END must at most be received once per processor, so any VDP sending it
 * downstream must itself not forward it a second time.
 *
 * Valid return values (of VDP_bytes and any VDP function):
 * r < 0:  Error, breaks out early on an error condition
 * r == 0: Continue
 * r > 0:  Stop, breaks out early without error condition
 */

int
VDP_bytes(struct vdp_ctx *vdc, enum vdp_action act,
    const void *ptr, ssize_t len)
{
	int retval;
	struct vdp_entry *vdpe;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	/* short-circuit once a non-zero value has been latched */
	if (vdc->retval)
		return (vdc->retval);
	vdpe = vdc->nxt;
	CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);

	/* at most one VDP_END call */
	assert(vdpe->end == VDP_NULL);

	if (act == VDP_NULL)
		assert(len > 0);
	else if (act == VDP_END)
		vdpe->end = VDP_END;
	else
		assert(act == VDP_FLUSH);

	/* Call the present layer, while pointing to the next layer down */
	vdc->nxt = VTAILQ_NEXT(vdpe, list);
	vdpe->calls++;
	/* bytes_done is set before the callback and read back after it;
	 * NOTE(review): the callee apparently may adjust it — confirm
	 * against the vdp_bytes_f contract. */
	vdc->bytes_done = len;
	retval = vdpe->vdp->bytes(vdc, act, &vdpe->priv, ptr, len);
	vdpe->bytes_in += vdc->bytes_done;
	if (retval && (vdc->retval == 0 || retval < vdc->retval))
		vdc->retval = retval; /* Latch error value */
	/* restore ->nxt so this layer is called again next time */
	vdc->nxt = vdpe;
	return (vdc->retval);
}
162 |
|
|
163 |
|
/*
 * Append a VDP to the filter chain and run its init callback.
 *
 * Returns vdc->retval: 0 on success, -1 on workspace allocation
 * failure, or the (negative) value returned by the VDP's init.
 * A positive init return means "filter opted out": the entry is
 * removed again and 0 is returned.
 */
int
VDP_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp,
    void *priv)
{
	struct vdp_entry *vdpe;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	assert(*vdc->clen >= -1);
	AN(ws);
	AN(vdp);
	AN(vdp->name);

	/* chain already in error state: do not push further filters */
	if (vdc->retval)
		return (vdc->retval);

	if (DO_DEBUG(DBG_PROCESSORS))
		VSLb(vdc->vsl, SLT_Debug, "VDP_push(%s)", vdp->name);

	vdpe = WS_Alloc(ws, sizeof *vdpe);
	if (vdpe == NULL) {
		AZ(vdc->retval);
		vdc->retval = -1;
		return (vdc->retval);
	}
	INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
	vdpe->vdp = vdp;
	vdpe->priv = priv;
	VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
	/* processing (re)starts at the head of the list */
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);

	AZ(vdc->retval);
	if (vdpe->vdp->init != NULL)
		vdc->retval = vdpe->vdp->init(ctx, vdc, &vdpe->priv);
	/* ->oc is only valid for the duration of init; see VDPIO_Push() */
	vdc->oc = NULL;

	if (vdc->retval > 0) {
		/* positive init return: the filter does not want to be
		 * in the chain; undo the insert, not an error */
		VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
		vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
		vdc->retval = 0;
	} else if (vdc->retval == 0)
		AN(vdp->bytes);
	return (vdc->retval);
}
211 |
|
|
212 |
|
/*
 * Tear down the whole VDP chain: log per-filter accounting, call each
 * filter's fini, and unlink every entry.
 *
 * Returns the bytes_in count of the last filter closed (the furthest
 * downstream one, i.e. bytes handed to the transport).
 * If oc is non-NULL, also cancels the object delivery via HSH_Cancel().
 */
uint64_t
VDP_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc)
{
	struct vdp_entry *vdpe;
	uint64_t rv = 0;

	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC);
	CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_ORNULL(boc, BOC_MAGIC);

	while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) {
		CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC);
		/* rv ends up as the last (most downstream) filter's count */
		rv = vdpe->bytes_in;
		VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
		    (uintmax_t)vdpe->calls, (uintmax_t)rv);
		if (vdpe->vdp->fini != NULL)
			AZ(vdpe->vdp->fini(vdc, &vdpe->priv));
		/* fini must release and clear its priv */
		AZ(vdpe->priv);
		VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
		vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
#ifdef VDP_PEDANTIC_ARMED
		// enable when we are confident to get VDP_END right
		if (vdc->nxt == NULL && vdc->retval >= 0)
			assert(vdpe->end == VDP_END);
#endif
	}
	if (oc != NULL)
		HSH_Cancel(vdc->wrk, oc, boc);
	return (rv);
}
243 |
|
|
244 |
|
/*--------------------------------------------------------------------*/ |
245 |
|
|
246 |
|
/*
 * Push a VDPIO vdp. This can only be used with only vdpio-enabled VDPs or
 * after a successful upgrade
 *
 * vdc->retval doubles as the current lease capacity of the chain:
 * 0 means "no VDPIO filter yet", > 0 is the capacity the next filter
 * must accommodate, < 0 is a latched error.
 */
int
VDPIO_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp,
    void *priv)
{
	struct vdp_entry *vdpe;
	int r;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);
	CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC);
	AN(vdc->clen);
	assert(*vdc->clen >= -1);
	AN(ws);
	AN(vdp);
	AN(vdp->name);

	if (vdc->retval < 0)
		return (vdc->retval);

	AN(vdp->io_init);

	// the first VDP (which leases from storage) only gets the minimum
	// capacity requirement of 1
	if (vdc->retval == 0) {
		assert(VTAILQ_EMPTY(&vdc->vdp));
		vdc->retval = 1;
	}

	if (DO_DEBUG(DBG_PROCESSORS))
		VSLb(vdc->vsl, SLT_Debug, "VDPIO_push(%s)", vdp->name);

	vdpe = WS_Alloc(ws, sizeof *vdpe);
	if (vdpe == NULL) {
		vdc->retval = -ENOMEM;
		return (vdc->retval);
	}
	INIT_OBJ(vdpe, VDP_ENTRY_MAGIC);
	vdpe->vdp = vdp;
	vdpe->priv = priv;
	VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list);
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);

	assert(vdc->retval > 0);
	/* NOTE(review): this NULL check is redundant — AN(vdp->io_init)
	 * above already guarantees it is set */
	if (vdpe->vdp->io_init != NULL) {
		r = vdpe->vdp->io_init(ctx, vdc, &vdpe->priv, vdc->retval);
		if (r <= 0) {
			/* filter opted out (or failed): undo the insert */
			VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
			vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
		} else
			AN(vdp->io_lease);
		/* r == 0: keep the previous capacity */
		if (r != 0)
			vdc->retval = r;
	}
	vdc->oc = NULL;
	return (vdc->retval);
}
308 |
|
|
309 |
|
/*
 * upgrade an already initialized VDP filter chain to VDPIO, if possible
 * returns:
 * > 0 cap
 * -ENOTSUP io_upgrade missing for at least one filter
 * vdc->retval if < 0
 */
int
VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc)
{
	struct vdp_entry *vdpe;
	int cap, r;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	/* first pass: refuse if any filter cannot be upgraded */
	VTAILQ_FOREACH(vdpe, &vdc->vdp, list)
		if (vdpe->vdp->io_upgrade == NULL)
			return (-ENOTSUP);

	if (vdc->retval < 0)
		return (vdc->retval);

	// minimum capacity requirement for the first filter (after storage)
	r = cap = 1;
	/* second pass: upgrade each filter, chaining the capacity each
	 * one reports into the next */
	VTAILQ_FOREACH(vdpe, &vdc->vdp, list) {
		r = vdpe->vdp->io_upgrade(ctx, vdc, &vdpe->priv, cap);
		if (DO_DEBUG(DBG_PROCESSORS)) {
			VSLb(vdc->vsl, SLT_Debug, "VDPIO_Upgrade "
			    "%d = %s(cap = %d)",
			    r, vdpe->vdp->name, cap);
		}
		if (r < 0)
			return ((vdc->retval = r));
		// XXX remove if filter does not want to be pushed?
		assert(r != 0);
		cap = r;
	}
	/* latch the final capacity (r stays 1 for an empty chain) */
	return ((vdc->retval = r));
}
349 |
|
|
350 |
|
/*
 * Close a single VDPIO filter entry: log accounting, call io_fini,
 * and unlink it from the chain.
 *
 * Returns the entry's bytes_in count.
 */
uint64_t
VDPIO_Close1(struct vdp_ctx *vdc, struct vdp_entry *vdpe)
{
	uint64_t rv;

	CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC);
	rv = vdpe->bytes_in;
	VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name,
	    (uintmax_t)vdpe->calls, (uintmax_t)rv);
	if (vdpe->vdp->io_fini != NULL)
		vdpe->vdp->io_fini(vdc, &vdpe->priv);
	/* io_fini must release and clear its priv */
	AZ(vdpe->priv);
	VTAILQ_REMOVE(&vdc->vdp, vdpe, list);
	vdc->nxt = VTAILQ_FIRST(&vdc->vdp);
	return (rv);
}
366 |
|
|
367 |
|
uint64_t |
368 |
48 |
VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc) |
369 |
|
{ |
370 |
|
struct vdp_entry *vdpe; |
371 |
48 |
uint64_t rv = 0; |
372 |
|
|
373 |
48 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
374 |
48 |
CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC); |
375 |
48 |
CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC); |
376 |
48 |
CHECK_OBJ_ORNULL(boc, BOC_MAGIC); |
377 |
|
|
378 |
112 |
while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) |
379 |
64 |
rv = VDPIO_Close1(vdc, vdpe); |
380 |
|
|
381 |
48 |
if (oc != NULL) |
382 |
48 |
HSH_Cancel(vdc->wrk, oc, boc); |
383 |
48 |
return (rv); |
384 |
|
} |
385 |
|
|
386 |
|
/*
 * ============================================================
 * VDPIO helpers: VAI management
 *
 * Transports should not need to talk to the VAI Object interface directly,
 * because its state is kept in the vdp_ctx
 *
 * So we wrap init, return and fini
 */
395 |
|
|
396 |
|
// return true if error |
397 |
|
int |
398 |
48 |
VDPIO_Init(struct vdp_ctx *vdc, struct objcore *oc, struct ws *ws, |
399 |
|
vai_notify_cb *notify_cb, void *notify_priv, struct vscaret *scaret) |
400 |
|
{ |
401 |
48 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
402 |
48 |
VSCARET_CHECK_NOTNULL(scaret); |
403 |
48 |
AN(scaret->capacity); |
404 |
48 |
AZ(scaret->used); |
405 |
|
|
406 |
48 |
AZ(vdc->vai_hdl); |
407 |
48 |
vdc->vai_hdl = ObjVAIinit(vdc->wrk, oc, ws, notify_cb, notify_priv); |
408 |
48 |
if (vdc->vai_hdl == NULL) |
409 |
0 |
return (1); |
410 |
48 |
vdc->scaret = scaret; |
411 |
48 |
return (0); |
412 |
48 |
} |
413 |
|
|
414 |
|
// return leases stashed in scaret
void
VDPIO_Return(const struct vdp_ctx *vdc)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	/* hand any leases collected in the scaret back to storage */
	ObjVAIreturn(vdc->wrk, vdc->vai_hdl, vdc->scaret);
}
422 |
|
|
423 |
|
/*
 * Finish VDPIO on this ctx: return outstanding leases, then release
 * the VAI handle (ObjVAIfini also clears vdc->vai_hdl via the pointer).
 */
void
VDPIO_Fini(struct vdp_ctx *vdc)
{
	CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC);

	VDPIO_Return(vdc);
	ObjVAIfini(vdc->wrk, &vdc->vai_hdl);
}
431 |
|
|
432 |
|
/*--------------------------------------------------------------------*/ |
433 |
|
int v_matchproto_(objiterate_f) |
434 |
4138 |
VDP_ObjIterate(void *priv, unsigned flush, const void *ptr, ssize_t len) |
435 |
|
{ |
436 |
|
enum vdp_action act; |
437 |
|
|
438 |
4138 |
if (flush == 0) |
439 |
361 |
act = VDP_NULL; |
440 |
3777 |
else if ((flush & OBJ_ITER_END) != 0) |
441 |
2400 |
act = VDP_END; |
442 |
|
else |
443 |
1377 |
act = VDP_FLUSH; |
444 |
|
|
445 |
4138 |
return (VDP_bytes(priv, act, ptr, len)); |
446 |
|
} |
447 |
|
|
448 |
|
|
449 |
|
int |
450 |
2337 |
VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc) |
451 |
|
{ |
452 |
|
int r, final; |
453 |
|
|
454 |
2337 |
CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); |
455 |
2337 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
456 |
2337 |
CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC); |
457 |
2337 |
AN(vdc->vsl); |
458 |
2337 |
AZ(vdc->oc); |
459 |
2337 |
vdc->hp = NULL; |
460 |
2337 |
vdc->clen = NULL; |
461 |
2337 |
final = oc->flags & OC_F_TRANSIENT ? 1 : 0; |
462 |
2337 |
r = ObjIterate(vdc->wrk, oc, vdc, VDP_ObjIterate, final); |
463 |
2337 |
if (r < 0) |
464 |
40 |
return (r); |
465 |
2297 |
return (0); |
466 |
2337 |
} |