varnish-cache/bin/varnishd/cache/cache_fetch_proc.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_filter.h"
#include "vcli_serve.h"

/* Debug knob, set from the CLI handler below: when non-zero, the size
 * requested for each piece of fetch storage is forced to this value. */
static unsigned fetchfrag;

/*--------------------------------------------------------------------
 * We want to issue the first error we encounter on fetching and
 * suppress the rest.  This function does that.
 *
 * Other code is allowed to look at vc->failed to bail out.
 *
 * For convenience, this function always returns VFP_ERROR, so callers
 * can return its result directly.
 */

enum vfp_status
VFP_Error(struct vfp_ctx *vc, const char *fmt, ...)
{
        va_list ap;

        CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
        if (!vc->failed) {
                va_start(ap, fmt);
                VSLbv(vc->wrk->vsl, SLT_FetchError, fmt, ap);
                va_end(ap);
                vc->failed = 1;
        }
        return (VFP_ERROR);
}
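
/*--------------------------------------------------------------------
 * Illustration only, not part of the original file: a minimal sketch of
 * how a filter's pull callback might report a problem through
 * VFP_Error().  The filter name "sketch" and the sketch_transform()
 * helper are invented; VFP_Suck(), the vfp_pull_f signature and the
 * vfp_status values are the ones used in this file.
 *
 *    static enum vfp_status v_matchproto_(vfp_pull_f)
 *    sketch_pull(struct vfp_ctx *vc, struct vfp_entry *vfe,
 *        void *ptr, ssize_t *lp)
 *    {
 *            enum vfp_status vp;
 *
 *            (void)vfe;
 *            // Pull data from the next lower layer into the buffer.
 *            vp = VFP_Suck(vc, ptr, lp);
 *            if (vp == VFP_ERROR)
 *                    return (vp);
 *            // First error wins; VFP_Error() always returns VFP_ERROR,
 *            // so its result can be handed straight back.
 *            if (*lp > 0 && !sketch_transform(ptr, lp))
 *                    return (VFP_Error(vc, "sketch: transform failed"));
 *            return (vp);
 *    }
 */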

/*--------------------------------------------------------------------
 * Get storage space to put the fetched object body into.
 */

enum vfp_status
VFP_GetStorage(struct vfp_ctx *vc, ssize_t *sz, uint8_t **ptr)
{

        CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
        AN(sz);
        assert(*sz >= 0);
        AN(ptr);

        if (fetchfrag > 0)
                *sz = fetchfrag;

        if (!ObjGetSpace(vc->wrk, vc->oc, sz, ptr)) {
                *sz = 0;
                *ptr = NULL;
                return (VFP_Error(vc, "Could not get storage"));
        }
        assert(*sz > 0);
        AN(*ptr);
        return (VFP_OK);
}

/*--------------------------------------------------------------------
 * Extend the object with the bytes just written into the storage
 * obtained from VFP_GetStorage(); VFP_END marks the extension as final.
 */

void
VFP_Extend(const struct vfp_ctx *vc, ssize_t sz, enum vfp_status flg)
{
        CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);

        ObjExtend(vc->wrk, vc->oc, sz, flg == VFP_END ? 1 : 0);
}

/**********************************************************************
 * Initialize a VFP context and its (initially empty) filter list.
 */

void
VFP_Setup(struct vfp_ctx *vc, struct worker *wrk)
{

        INIT_OBJ(vc, VFP_CTX_MAGIC);
        VTAILQ_INIT(&vc->vfp);
        vc->wrk = wrk;
}

/**********************************************************************
 * Close and remove all VFPs on the stack; returns the number of bytes
 * processed by the lowest VFP in the stack.
 */

uint64_t
VFP_Close(struct vfp_ctx *vc)
{
        struct vfp_entry *vfe, *tmp;
        uint64_t rv = 0;

        VTAILQ_FOREACH_SAFE(vfe, &vc->vfp, list, tmp) {
                if (vfe->vfp->fini != NULL)
                        vfe->vfp->fini(vc, vfe);
                rv = vfe->bytes_out;
                VSLb(vc->wrk->vsl, SLT_VfpAcct, "%s %ju %ju", vfe->vfp->name,
                    (uintmax_t)vfe->calls, (uintmax_t)rv);
                VTAILQ_REMOVE(&vc->vfp, vfe, list);
        }
        return (rv);
}
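
/*
 * Illustration only, not part of the original file: each filter removed
 * above leaves one VfpAcct record in the shared log, built from the
 * "%s %ju %ju" format: filter name, number of pull calls, bytes
 * produced.  With invented numbers, a gunzip entry might appear in
 * varnishlog roughly as:
 *
 *    VfpAcct        gunzip 44 181299
 */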

/*--------------------------------------------------------------------
 * Initialize (open) every VFP on the stack.  If any filter fails to
 * open, the error is logged, the whole stack is closed and -1 is
 * returned.
 */

int
VFP_Open(VRT_CTX, struct vfp_ctx *vc)
{
        struct vfp_entry *vfe;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vc->resp, HTTP_MAGIC);
        CHECK_OBJ_NOTNULL(vc->wrk, WORKER_MAGIC);
        AN(vc->wrk->vsl);

        VTAILQ_FOREACH_REVERSE(vfe, &vc->vfp, vfp_entry_s, list) {
                if (vfe->vfp->init == NULL)
                        continue;
                if (DO_DEBUG(DBG_PROCESSORS))
                        VSLb(vc->wrk->vsl, SLT_Debug, "VFP_Open(%s)",
                             vfe->vfp->name);
                vfe->closed = vfe->vfp->init(ctx, vc, vfe);
                if (vfe->closed != VFP_OK && vfe->closed != VFP_NULL) {
                        (void)VFP_Error(vc, "Fetch filter %s failed to open",
                            vfe->vfp->name);
                        (void)VFP_Close(vc);
                        return (-1);
                }
        }

        return (0);
}

/**********************************************************************
 * Suck data up from lower levels.
 * Once a layer returns something other than VFP_OK, it is closed and
 * the same return value is produced for any subsequent calls.
 */

enum vfp_status
VFP_Suck(struct vfp_ctx *vc, void *p, ssize_t *lp)
{
        enum vfp_status vp;
        struct vfp_entry *vfe, *vfe_prev;
        const char *prev_name = "<storage>";
        ssize_t limit;

        CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
        AN(p);
        AN(lp);
        limit = *lp;
        vfe = vc->vfp_nxt;
        CHECK_OBJ_NOTNULL(vfe, VFP_ENTRY_MAGIC);
        vc->vfp_nxt = VTAILQ_NEXT(vfe, list);

        vfe_prev = VTAILQ_PREV(vfe, vfp_entry_s, list);
        if (vfe_prev != NULL)
                prev_name = vfe_prev->vfp->name;

        if (vfe->closed == VFP_NULL) {
                /* Layer asked to be bypassed when opened */
                vp = VFP_Suck(vc, p, lp);
                VFP_DEBUG(vc, "bypassing %s vp=%d", vfe->vfp->name, vp);
        } else if (vfe->closed == VFP_OK) {
                vp = vfe->vfp->pull(vc, vfe, p, lp);
                VFP_DEBUG(vc, "%s pulled %zdB/%zdB from %s vp=%d",
                    prev_name, *lp, limit, vfe->vfp->name, vp);
                if (vp != VFP_OK && vp != VFP_END && vp != VFP_ERROR)
                        vp = VFP_Error(vc, "Fetch filter %s returned %d",
                            vfe->vfp->name, vp);
                else
                        vfe->bytes_out += *lp;
                vfe->closed = vp;
                vfe->calls++;
        } else {
                /* Already closed filter */
                *lp = 0;
                vp = vfe->closed;
                VFP_DEBUG(vc, "ignoring %s vp=%d", vfe->vfp->name, vp);
        }
        vc->vfp_nxt = vfe;
        assert(vp != VFP_NULL);
        return (vp);
}
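
/*--------------------------------------------------------------------
 * Illustration only, not part of the original file: a simplified sketch
 * of how the functions above fit together on the fetch side.  The
 * variables wrk, oc, resp, ctx, some_vfp and want are assumed to exist;
 * only the VFP_* calls themselves are taken from this file.
 *
 *    struct vfp_ctx vc[1];
 *    enum vfp_status vfps = VFP_ERROR;
 *    ssize_t sz;
 *    uint8_t *ptr;
 *
 *    VFP_Setup(vc, wrk);                     // empty filter stack
 *    vc->oc = oc;                            // object being fetched
 *    vc->resp = resp;                        // response headers + workspace
 *    if (VFP_Push(vc, some_vfp) == NULL)     // each push goes on top
 *            return (-1);
 *    if (VFP_Open(ctx, vc) != 0)             // init every filter
 *            return (-1);
 *    do {
 *            sz = want;                      // estimate of remaining bytes
 *            if (VFP_GetStorage(vc, &sz, &ptr) != VFP_OK)
 *                    break;                  // error already logged
 *            vfps = VFP_Suck(vc, ptr, &sz);  // fill the segment
 *            if (sz > 0 || vfps == VFP_END)
 *                    VFP_Extend(vc, sz, vfps);
 *    } while (vfps == VFP_OK);
 *    (void)VFP_Close(vc);                    // fini hooks + VfpAcct records
 */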

/*--------------------------------------------------------------------
 * Push a filter on top of the VFP stack; returns NULL on workspace
 * overflow (an error has then already been issued via VFP_Error()).
 */

struct vfp_entry *
VFP_Push(struct vfp_ctx *vc, const struct vfp *vfp)
{
        struct vfp_entry *vfe;

        CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vc->resp, HTTP_MAGIC);

        vfe = WS_Alloc(vc->resp->ws, sizeof *vfe);
        if (vfe == NULL) {
                (void)VFP_Error(vc, "Workspace overflow");
                return (NULL);
        }

        INIT_OBJ(vfe, VFP_ENTRY_MAGIC);
        vfe->vfp = vfp;
        vfe->closed = VFP_OK;
        VTAILQ_INSERT_HEAD(&vc->vfp, vfe, list);
        vc->vfp_nxt = vfe;
        return (vfe);
}
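
/*--------------------------------------------------------------------
 * Illustration only, not part of the original file: what a filter handed
 * to VFP_Push() might look like.  The name "sketch", the want_sketch()
 * helper and sketch_pull() (see the sketch after VFP_Error() above) are
 * invented; the struct vfp fields and the VFP_NULL convention are the
 * ones consumed by VFP_Open() and VFP_Suck() above.
 *
 *    static enum vfp_status v_matchproto_(vfp_init_f)
 *    sketch_init(VRT_CTX, struct vfp_ctx *vc, struct vfp_entry *vfe)
 *    {
 *            (void)ctx;
 *            (void)vc;
 *            (void)vfe;
 *            // Returning VFP_NULL makes VFP_Suck() bypass this layer.
 *            return (want_sketch() ? VFP_OK : VFP_NULL);
 *    }
 *
 *    static const struct vfp sketch_vfp = {
 *            .name = "sketch",
 *            .init = sketch_init,    // optional, may be NULL
 *            .pull = sketch_pull,
 *            .fini = NULL,           // optional cleanup hook
 *    };
 *
 *    ...
 *    if (VFP_Push(vc, &sketch_vfp) == NULL)
 *            return (-1);
 */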

/*--------------------------------------------------------------------
 * Debugging aids
 */

static void v_matchproto_(cli_func_t)
debug_fragfetch(struct cli *cli, const char * const *av, void *priv)
{
        (void)priv;
        (void)cli;
        fetchfrag = strtoul(av[2], NULL, 0);
}
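
/*
 * Illustration only, not part of the original file: this handler is
 * registered below, presumably as the CLI command debug.fragfetch (the
 * exact name lives with CLICMD_DEBUG_FRAGFETCH, declared elsewhere).
 * It is typically exercised from test setups roughly as (size value
 * invented here):
 *
 *    varnishadm debug.fragfetch 32
 *
 * which makes VFP_GetStorage() above request each storage segment as
 * 32 bytes, forcing fetches to proceed in many small fragments.
 */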

static struct cli_proto debug_cmds[] = {
        { CLICMD_DEBUG_FRAGFETCH,               "d", debug_fragfetch },
        { NULL }
};

/*--------------------------------------------------------------------
 * Register the debug CLI commands.
 */

void
VFP_Init(void)
{

        CLI_AddFuncs(debug_cmds);
}