varnish-cache/bin/varnishd/cache/cache_backend.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The director implementation for VCL backends.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_director.h"

#include "vtcp.h"
#include "vtim.h"
#include "vsa.h"

#include "cache_backend.h"
#include "cache_conn_pool.h"
#include "cache_transport.h"
#include "cache_vcl.h"
#include "http1/cache_http1.h"
#include "proxy/cache_proxy.h"

#include "VSC_vbe.h"

/*--------------------------------------------------------------------*/

enum connwait_e {
        CW_DO_CONNECT = 1,
        CW_QUEUED,
        CW_DEQUEUED,
        CW_BE_BUSY,
};

struct connwait {
        unsigned                        magic;
#define CONNWAIT_MAGIC                  0x75c7a52b
        enum connwait_e                 cw_state;
        VTAILQ_ENTRY(connwait)          cw_list;
        pthread_cond_t                  cw_cond;
};

static const char * const vbe_proto_ident = "HTTP Backend";

static struct lock backends_mtx;

/*--------------------------------------------------------------------*/

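/*
 * Map a connect error to the matching per-backend failure counter;
 * err 0 (no errno) is counted as helddown, unknown values as fail_other.
 */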
void
VBE_Connect_Error(struct VSC_vbe *vsc, int err)
{

        switch(err) {
        case 0:
                /*
                 * This is kind of brittle, but zero is the only
                 * value of errno we can trust to have no meaning.
                 */
                vsc->helddown++;
                break;
        case EACCES:
        case EPERM:
                vsc->fail_eacces++;
                break;
        case EADDRNOTAVAIL:
                vsc->fail_eaddrnotavail++;
                break;
        case ECONNREFUSED:
                vsc->fail_econnrefused++;
                break;
        case ENETUNREACH:
                vsc->fail_enetunreach++;
                break;
        case ETIMEDOUT:
                vsc->fail_etimedout++;
                break;
        default:
                vsc->fail_other++;
        }
}

/*--------------------------------------------------------------------*/

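/*
 * Timeout resolution helpers: FIND_TMO prefers the busyobj (per-request)
 * value, then a non-negative backend value, then the cache_param default.
 * FIND_BE_PARAM/FIND_BE_TMO use the backend value unless it is unset
 * (0 resp. -1.0).  BE_BUSY is true once the backend is at max_connections.
 */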
#define FIND_TMO(tmx, dst, bo, be)                                      \
        do {                                                            \
                CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);                   \
                dst = bo->tmx;                                          \
                if (isnan(dst) && be->tmx >= 0.0)                       \
                        dst = be->tmx;                                  \
                if (isnan(dst))                                         \
                        dst = cache_param->tmx;                         \
        } while (0)

#define FIND_BE_SPEC(tmx, dst, be, def)                                 \
        do {                                                            \
                CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);                   \
                dst = be->tmx;                                          \
                if (dst == def)                                         \
                        dst = cache_param->tmx;                         \
        } while (0)

#define FIND_BE_PARAM(tmx, dst, be)                                     \
        FIND_BE_SPEC(tmx, dst, be, 0)

#define FIND_BE_TMO(tmx, dst, be)                                       \
        FIND_BE_SPEC(tmx, dst, be, -1.0)

#define BE_BUSY(be)     \
        (be->max_connections > 0 && be->n_conn >= be->max_connections)

/*--------------------------------------------------------------------*/

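/*
 * Wake every thread queued on the connection-wait list, e.g. when the
 * backend is marked sick, so the waiters can re-evaluate their situation.
 */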
static void
vbe_connwait_broadcast(const struct backend *bp)
{
        struct connwait *cw;

        CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);

        Lck_Lock(bp->director->mtx);
        VTAILQ_FOREACH(cw, &bp->cw_head, cw_list) {
                CHECK_OBJ(cw, CONNWAIT_MAGIC);
                assert(cw->cw_state == CW_QUEUED);
                PTOK(pthread_cond_signal(&cw->cw_cond));
        }
        Lck_Unlock(bp->director->mtx);
}

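/*
 * With the director mutex held, wake the first queued waiter if a
 * connection slot has become available.
 */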
static void
vbe_connwait_signal_locked(const struct backend *bp)
{
        struct connwait *cw;

        Lck_AssertHeld(bp->director->mtx);

        if (bp->n_conn < bp->max_connections) {
                cw = VTAILQ_FIRST(&bp->cw_head);
                if (cw != NULL) {
                        CHECK_OBJ(cw, CONNWAIT_MAGIC);
                        assert(cw->cw_state == CW_QUEUED);
                        PTOK(pthread_cond_signal(&cw->cw_cond));
                }
        }
}

static void
vbe_connwait_fini(struct connwait *cw)
{
        CHECK_OBJ_NOTNULL(cw, CONNWAIT_MAGIC);
        assert(cw->cw_state != CW_QUEUED);
        PTOK(pthread_cond_destroy(&cw->cw_cond));
        FINI_OBJ(cw);
}

/*--------------------------------------------------------------------
 * Get a connection to the backend
 *
 * note: wrk is a separate argument because it differs for pipe vs. fetch
 */

static struct pfd *
vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
    unsigned force_fresh)
{
        struct busyobj *bo;
        struct pfd *pfd;
        int *fdp, err;
        vtim_dur tmod;
        char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
        char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];
        unsigned wait_limit;
        vtim_dur wait_tmod;
        vtim_dur wait_end;
        struct connwait cw[1];

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
        AN(bp->vsc);

        if (!VRT_Healthy(ctx, dir, NULL)) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: unhealthy", VRT_BACKEND_string(dir));
                bp->vsc->unhealthy++;
                VSC_C_main->backend_unhealthy++;
                return (NULL);
        }
        INIT_OBJ(cw, CONNWAIT_MAGIC);
        PTOK(pthread_cond_init(&cw->cw_cond, NULL));
        Lck_Lock(bp->director->mtx);
        FIND_BE_PARAM(backend_wait_limit, wait_limit, bp);
        FIND_BE_TMO(backend_wait_timeout, wait_tmod, bp);
        cw->cw_state = CW_DO_CONNECT;
        if (!VTAILQ_EMPTY(&bp->cw_head) || BE_BUSY(bp))
                cw->cw_state = CW_BE_BUSY;

        if (cw->cw_state == CW_BE_BUSY && wait_limit > 0 &&
            wait_tmod > 0.0 && bp->cw_count < wait_limit) {
                VTAILQ_INSERT_TAIL(&bp->cw_head, cw, cw_list);
                bp->cw_count++;
                VSC_C_main->backend_wait++;
                cw->cw_state = CW_QUEUED;
                wait_end = VTIM_real() + wait_tmod;
                do {
                        err = Lck_CondWaitUntil(&cw->cw_cond, bp->director->mtx,
                            wait_end);
                } while (err == EINTR);
                assert(cw->cw_state == CW_QUEUED);
                VTAILQ_REMOVE(&bp->cw_head, cw, cw_list);
                cw->cw_state = CW_DEQUEUED;
                bp->cw_count--;
                if ((err != 0 && BE_BUSY(bp)) || !VRT_Healthy(ctx, dir, NULL)) {
                        VSC_C_main->backend_wait_fail++;
                        cw->cw_state = CW_BE_BUSY;
                }
        }
        if (cw->cw_state != CW_BE_BUSY)
                bp->n_conn++;

        if (!VTAILQ_EMPTY(&bp->cw_head) && !BE_BUSY(bp)) {
                /* Signal the new head of the waiting queue */
                vbe_connwait_signal_locked(bp);
        }

        Lck_Unlock(bp->director->mtx);

        if (cw->cw_state == CW_BE_BUSY) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: busy", VRT_BACKEND_string(dir));
                bp->vsc->busy++;
                VSC_C_main->backend_busy++;
                vbe_connwait_fini(cw);
                return (NULL);
        }

        AZ(bo->htc);
        bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
        /* XXX: we may want to detect the ws overflow sooner */
        if (bo->htc == NULL) {
                VSLb(bo->vsl, SLT_FetchError, "out of workspace");
                /* XXX: counter ? */
                Lck_Lock(bp->director->mtx);
                bp->n_conn--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                vbe_connwait_fini(cw);
                return (NULL);
        }
        bo->htc->doclose = SC_NULL;
        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        FIND_TMO(connect_timeout, tmod, bo, bp);
        pfd = VCP_Get(bp->conn_pool, tmod, wrk, force_fresh, &err);
        if (pfd == NULL) {
                Lck_Lock(bp->director->mtx);
                VBE_Connect_Error(bp->vsc, err);
                bp->n_conn--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: fail errno %d (%s)",
                     VRT_BACKEND_string(dir), err, VAS_errtxt(err));
                VSC_C_main->backend_fail++;
                bo->htc = NULL;
                vbe_connwait_fini(cw);
                return (NULL);
        }

        VSLb_ts_busyobj(bo, "Connected", W_TIM_real(wrk));
        fdp = PFD_Fd(pfd);
        AN(fdp);
        assert(*fdp >= 0);

        Lck_Lock(bp->director->mtx);
        bp->vsc->conn++;
        bp->vsc->req++;
        Lck_Unlock(bp->director->mtx);

        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        err = 0;
        if (bp->proxy_header != 0)
                err += VPX_Send_Proxy(*fdp, bp->proxy_header, bo->sp);
        if (err < 0) {
                VSLb(bo->vsl, SLT_FetchError,
                     "backend %s: proxy write errno %d (%s)",
                     VRT_BACKEND_string(dir),
                     errno, VAS_errtxt(errno));
                // account as if connect failed - good idea?
                VSC_C_main->backend_fail++;
                bo->htc = NULL;
                VCP_Close(&pfd);
                AZ(pfd);
                Lck_Lock(bp->director->mtx);
                bp->n_conn--;
                bp->vsc->conn--;
                bp->vsc->req--;
                vbe_connwait_signal_locked(bp);
                Lck_Unlock(bp->director->mtx);
                vbe_connwait_fini(cw);
                return (NULL);
        }
        bo->acct.bereq_hdrbytes += err;

        PFD_LocalName(pfd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
        PFD_RemoteName(pfd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
        VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s %s",
            *fdp, VRT_BACKEND_string(dir), abuf2, pbuf2, abuf1, pbuf1,
            PFD_State(pfd) == PFD_STATE_STOLEN ? "reuse" : "connect");

        INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
        bo->htc->priv = pfd;
        bo->htc->rfd = fdp;
        bo->htc->doclose = SC_NULL;
        FIND_TMO(first_byte_timeout,
            bo->htc->first_byte_timeout, bo, bp);
        FIND_TMO(between_bytes_timeout,
            bo->htc->between_bytes_timeout, bo, bp);
        vbe_connwait_fini(cw);
        return (pfd);
}

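/*
 * Release the backend connection once a fetch or pipe task is done: close
 * it if the transaction requires it (or a proxy preamble was sent),
 * otherwise recycle it into the connection pool; then update the counters
 * and wake a queued waiter, if any.
 */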
static void v_matchproto_(vdi_finish_f)
vbe_dir_finish(VRT_CTX, VCL_BACKEND d)
{
        struct backend *bp;
        struct busyobj *bo;
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

        pfd = bo->htc->priv;
        bo->htc->priv = NULL;
        if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
                VSLb(bo->vsl, SLT_BackendClose, "%d %s close %s", *PFD_Fd(pfd),
                    VRT_BACKEND_string(d), bo->htc->doclose->name);
                VCP_Close(&pfd);
                AZ(pfd);
                Lck_Lock(bp->director->mtx);
        } else {
                assert (PFD_State(pfd) == PFD_STATE_USED);
                VSLb(bo->vsl, SLT_BackendClose, "%d %s recycle", *PFD_Fd(pfd),
                    VRT_BACKEND_string(d));
                Lck_Lock(bp->director->mtx);
                VSC_C_main->backend_recycle++;
                VCP_Recycle(bo->wrk, &pfd);
        }
        assert(bp->n_conn > 0);
        bp->n_conn--;
        AN(bp->vsc);
        bp->vsc->conn--;
#define ACCT(foo)       bp->vsc->foo += bo->acct.foo;
#include "tbl/acct_fields_bereq.h"
        vbe_connwait_signal_locked(bp);
        Lck_Unlock(bp->director->mtx);
        bo->htc = NULL;
}

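/*
 * Send the backend request and fetch the response headers.  A reused
 * connection may have been closed by the backend before our bereq reached
 * it; "extrachance" allows one automatic retry in that case.
 */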
static int v_matchproto_(vdi_gethdrs_f)
vbe_dir_gethdrs(VRT_CTX, VCL_BACKEND d)
{
        int i, extrachance = 1;
        struct backend *bp;
        struct pfd *pfd;
        struct busyobj *bo;
        struct worker *wrk;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        bo = ctx->bo;
        CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_NOTNULL(bo->bereq, HTTP_MAGIC);
        if (bo->htc != NULL)
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
        wrk = ctx->bo->wrk;
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        /*
         * Now that we know our backend, we can set a default Host:
         * header if one is necessary.  This cannot be done in the VCL
         * because the backend may be chosen by a director.
         */
        if (!http_GetHdr(bo->bereq, H_Host, NULL) && bp->hosthdr != NULL)
                http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);

        do {
                if (bo->htc != NULL)
                        CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
                pfd = vbe_dir_getfd(ctx, wrk, d, bp, extrachance == 0 ? 1 : 0);
                if (pfd == NULL)
                        return (-1);
                AN(bo->htc);
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
                if (PFD_State(pfd) != PFD_STATE_STOLEN)
                        extrachance = 0;

                i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes,
                    &bo->acct.bereq_bodybytes);

                if (i == 0 && PFD_State(pfd) != PFD_STATE_USED) {
                        if (VCP_Wait(wrk, pfd, VTIM_real() +
                            bo->htc->first_byte_timeout) != 0) {
                                bo->htc->doclose = SC_RX_TIMEOUT;
                                VSLb(bo->vsl, SLT_FetchError,
                                     "first byte timeout (reused connection)");
                                extrachance = 0;
                        }
                }

                if (bo->htc->doclose == SC_NULL) {
                        assert(PFD_State(pfd) == PFD_STATE_USED);
                        if (i == 0)
                                i = V1F_FetchRespHdr(bo);
                        if (i == 0) {
                                AN(bo->htc->priv);
                                http_VSL_log(bo->beresp);
                                return (0);
                        }
                }
                CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

                /*
                 * If we recycled a backend connection, there is a finite chance
                 * that the backend closed it before we got the bereq to it.
                 * In that case do a single automatic retry if req.body allows.
                 */
                vbe_dir_finish(ctx, d);
                AZ(bo->htc);
                if (i < 0 || extrachance == 0)
                        break;
                if (bo->no_retry != NULL)
                        break;
                VSC_C_main->backend_retry++;
        } while (extrachance--);
        return (-1);
}

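/*
 * Report the IP address of the currently open backend connection.
 */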
static VCL_IP v_matchproto_(vdi_getip_f)
vbe_dir_getip(VRT_CTX, VCL_BACKEND d)
{
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
        pfd = ctx->bo->htc->priv;

        return (VCP_GetIp(pfd));
}

/*--------------------------------------------------------------------*/

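/*
 * Pipe mode: get a backend connection, forward the client's request and
 * let V1P_Process() shuffle bytes in both directions until the deadline
 * is reached or either side closes.
 */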
static stream_close_t v_matchproto_(vdi_http1pipe_f)
vbe_dir_http1pipe(VRT_CTX, VCL_BACKEND d)
{
        int i;
        stream_close_t retval;
        struct backend *bp;
        struct v1p_acct v1a;
        struct pfd *pfd;
        vtim_real deadline;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);
        CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        memset(&v1a, 0, sizeof v1a);

        /* This is hackish... */
        v1a.req = ctx->req->acct.req_hdrbytes;
        ctx->req->acct.req_hdrbytes = 0;

        ctx->req->res_pipe = 1;

        retval = SC_TX_ERROR;
        pfd = vbe_dir_getfd(ctx, ctx->req->wrk, d, bp, 0);

        if (pfd != NULL) {
                CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
                i = V1F_SendReq(ctx->req->wrk, ctx->bo,
                    &v1a.bereq, &v1a.out);
                VSLb_ts_req(ctx->req, "Pipe", W_TIM_real(ctx->req->wrk));
                if (i == 0) {
                        deadline = ctx->bo->task_deadline;
                        if (isnan(deadline))
                                deadline = cache_param->pipe_task_deadline;
                        if (deadline > 0.)
                                deadline += ctx->req->sp->t_idle;
                        retval = V1P_Process(ctx->req, *PFD_Fd(pfd), &v1a,
                            deadline);
                }
                VSLb_ts_req(ctx->req, "PipeSess", W_TIM_real(ctx->req->wrk));
                ctx->bo->htc->doclose = retval;
                vbe_dir_finish(ctx, d);
        }
        V1P_Charge(ctx->req, &v1a, bp->vsc);
        CHECK_OBJ_NOTNULL(retval, STREAM_CLOSE_MAGIC);
        return (retval);
}

/*--------------------------------------------------------------------*/

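/*
 * React to VCL temperature and admin events: reveal/hide the VSC counters,
 * start/stop the health probe, delete the director on discard, and wake
 * connection-waiters when the backend is marked sick.
 */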
static void
vbe_dir_event(const struct director *d, enum vcl_event_e ev)
{
        struct backend *bp;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        if (ev == VCL_EVENT_WARM) {
                VRT_VSC_Reveal(bp->vsc_seg);
                if (bp->probe != NULL)
                        VBP_Control(bp, 1);
        } else if (ev == VCL_EVENT_COLD) {
                if (bp->probe != NULL)
                        VBP_Control(bp, 0);
                VRT_VSC_Hide(bp->vsc_seg);
        } else if (ev == VCL_EVENT_DISCARD) {
                VRT_DelDirector(&bp->director);
        } else if (ev == VDI_EVENT_SICK) {
                const struct vdi_ahealth *ah = d->vdir->admin_health;

                if (ah == VDI_AH_SICK || (ah == VDI_AH_AUTO && bp->sick))
                        vbe_connwait_broadcast(bp);
        }
}

/*---------------------------------------------------------------------*/

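/*
 * Tear down a backend instance: remove its probe, destroy its VSC segment,
 * drop the connection pool reference and free all owned allocations.
 */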
static void
vbe_free(struct backend *be)
{

        CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);

        if (be->probe != NULL)
                VBP_Remove(be);

        VSC_vbe_Destroy(&be->vsc_seg);
        Lck_Lock(&backends_mtx);
        VSC_C_main->n_backend--;
        Lck_Unlock(&backends_mtx);
        VCP_Rel(&be->conn_pool);

#define DA(x)   do { if (be->x != NULL) free(be->x); } while (0)
#define DN(x)   /**/
        VRT_BACKEND_HANDLE();
#undef DA
#undef DN
        free(be->endpoint);

        assert(VTAILQ_EMPTY(&be->cw_head));
        FREE_OBJ(be);
}

static void v_matchproto_(vdi_destroy_f)
vbe_destroy(const struct director *d)
{
        struct backend *be;

        CAST_OBJ_NOTNULL(be, d->priv, BACKEND_MAGIC);
        vbe_free(be);
}

/*--------------------------------------------------------------------*/

static void
vbe_panic(const struct director *d, struct vsb *vsb)
{
        struct backend *bp;

        PAN_CheckMagic(vsb, d, DIRECTOR_MAGIC);
        bp = d->priv;
        PAN_CheckMagic(vsb, bp, BACKEND_MAGIC);

        VCP_Panic(vsb, bp->conn_pool);
        VSB_printf(vsb, "hosthdr = %s,\n", bp->hosthdr);
        VSB_printf(vsb, "n_conn = %u,\n", bp->n_conn);
}

/*--------------------------------------------------------------------
 */

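/*
 * Produce this backend's part of the backend.list CLI output, in plain or
 * JSON form, optionally with probe details and endpoint addresses (pflag).
 */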
static void v_matchproto_(vdi_list_f)
vbe_list(VRT_CTX, const struct director *d, struct vsb *vsb, int pflag,
    int jflag)
{
        char buf[VTCP_ADDRBUFSIZE];
        struct backend *bp;
        struct vrt_endpoint *vep;

        (void)ctx;

        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);
        CHECK_OBJ_NOTNULL(bp->endpoint, VRT_ENDPOINT_MAGIC);

        vep = bp->endpoint;

        if (bp->probe != NULL)
                VBP_Status(vsb, bp, pflag, jflag);
        else if (jflag && pflag)
                VSB_cat(vsb, "{},\n");
        else if (jflag)
                VSB_cat(vsb, "[0, 0, \"healthy\"]");
        else if (pflag)
                return;
        else
                VSB_cat(vsb, "0/0\thealthy");

        if (jflag && pflag) {
                if (vep->ipv4 != NULL) {
                        VTCP_name(vep->ipv4, buf, sizeof buf, NULL, 0);
                        VSB_printf(vsb, "\"ipv4\": \"%s\",\n", buf);
                }
                if (vep->ipv6 != NULL) {
                        VTCP_name(vep->ipv6, buf, sizeof buf, NULL, 0);
                        VSB_printf(vsb, "\"ipv6\": \"%s\",\n", buf);
                }
        }
}

/*--------------------------------------------------------------------
 */

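/*
 * Report whether the backend is healthy and, optionally, when its health
 * state last changed.
 */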
static VCL_BOOL v_matchproto_(vdi_healthy_f)
vbe_healthy(VRT_CTX, VCL_BACKEND d, VCL_TIME *t)
{
        struct backend *bp;

        (void)ctx;
        CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

        if (t != NULL)
                *t = bp->changed;

        return (!bp->sick);
}

/*--------------------------------------------------------------------
 */

static const struct vdi_methods vbe_methods[1] = {{
        .magic =                VDI_METHODS_MAGIC,
        .type =                 "backend",
        .http1pipe =            vbe_dir_http1pipe,
        .gethdrs =              vbe_dir_gethdrs,
        .getip =                vbe_dir_getip,
        .finish =               vbe_dir_finish,
        .event =                vbe_dir_event,
        .destroy =              vbe_destroy,
        .panic =                vbe_panic,
        .list =                 vbe_list,
        .healthy =              vbe_healthy
}};

static const struct vdi_methods vbe_methods_noprobe[1] = {{
        .magic =                VDI_METHODS_MAGIC,
        .type =                 "backend",
        .http1pipe =            vbe_dir_http1pipe,
        .gethdrs =              vbe_dir_gethdrs,
        .getip =                vbe_dir_getip,
        .finish =               vbe_dir_finish,
        .event =                vbe_dir_event,
        .destroy =              vbe_destroy,
        .panic =                vbe_panic,
        .list =                 vbe_list
}};

/*--------------------------------------------------------------------
 * Create a new static or dynamic director::backend instance.
 */

size_t
VRT_backend_vsm_need(VRT_CTX)
{
        (void)ctx;
        return (VRT_VSC_Overhead(VSC_vbe_size));
}

/*
 * The new_backend via parameter is a VCL_BACKEND, but we need a (struct
 * backend)
 *
 * For now, we resolve it when creating the backend, which implies no redundancy
 * / load balancing across the via director if it is more than a simple backend.
 */

static const struct backend *
via_resolve(VRT_CTX, const struct vrt_endpoint *vep, VCL_BACKEND via)
{
        const struct backend *viabe = NULL;

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        CHECK_OBJ_NOTNULL(via, DIRECTOR_MAGIC);

        if (vep->uds_path) {
                VRT_fail(ctx, "Via is only supported for IP addresses");
                return (NULL);
        }

        via = VRT_DirectorResolve(ctx, via);

        if (via == NULL) {
                VRT_fail(ctx, "Via resolution failed");
                return (NULL);
        }

        CHECK_OBJ(via, DIRECTOR_MAGIC);
        CHECK_OBJ_NOTNULL(via->vdir, VCLDIR_MAGIC);

        if (via->vdir->methods == vbe_methods ||
            via->vdir->methods == vbe_methods_noprobe)
                CAST_OBJ_NOTNULL(viabe, via->priv, BACKEND_MAGIC);

        if (viabe == NULL)
                VRT_fail(ctx, "Via does not resolve to a backend");

        return (viabe);
}

/*
 * construct a new endpoint identical to vep with sa in a proxy header
 */
static struct vrt_endpoint *
via_endpoint(const struct vrt_endpoint *vep, const struct suckaddr *sa,
    const char *auth)
{
        struct vsb *preamble;
        struct vrt_blob blob[1];
        struct vrt_endpoint *nvep, *ret;
        const struct suckaddr *client_bogo;

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        AN(sa);

        nvep = VRT_Endpoint_Clone(vep);
        CHECK_OBJ_NOTNULL(nvep, VRT_ENDPOINT_MAGIC);

        if (VSA_Get_Proto(sa) == AF_INET6)
                client_bogo = bogo_ip6;
        else
                client_bogo = bogo_ip;

        preamble = VSB_new_auto();
        AN(preamble);
        VPX_Format_Proxy(preamble, 2, client_bogo, sa, auth);
        blob->blob = VSB_data(preamble);
        blob->len = VSB_len(preamble);
        nvep->preamble = blob;
        ret = VRT_Endpoint_Clone(nvep);
        CHECK_OBJ_NOTNULL(ret, VRT_ENDPOINT_MAGIC);
        VSB_destroy(&preamble);
        FREE_OBJ(nvep);

        return (ret);
}

VCL_BACKEND
VRT_new_backend_clustered(VRT_CTX, struct vsmw_cluster *vc,
    const struct vrt_backend *vrt, VCL_BACKEND via)
{
        struct backend *be;
        struct vcl *vcl;
        const struct vrt_backend_probe *vbp;
        const struct vrt_endpoint *vep;
        const struct vdi_methods *m;
        const struct suckaddr *sa = NULL;
        char abuf[VTCP_ADDRBUFSIZE];
        const struct backend *viabe = NULL;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
        vep = vrt->endpoint;
        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        if (vep->uds_path == NULL) {
                if (vep->ipv4 == NULL && vep->ipv6 == NULL) {
                        VRT_fail(ctx, "%s: Illegal IP", __func__);
                        return (NULL);
                }
        } else {
                assert(vep->ipv4 == NULL && vep->ipv6 == NULL);
        }

        if (via != NULL) {
                viabe = via_resolve(ctx, vep, via);
                if (viabe == NULL)
                        return (NULL);
        }

        vcl = ctx->vcl;
        AN(vcl);
        AN(vrt->vcl_name);

        /* Create new backend */
        ALLOC_OBJ(be, BACKEND_MAGIC);
        if (be == NULL)
                return (NULL);
        VTAILQ_INIT(&be->cw_head);

#define DA(x)   do { if (vrt->x != NULL) REPLACE((be->x), (vrt->x)); } while (0)
#define DN(x)   do { be->x = vrt->x; } while (0)
        VRT_BACKEND_HANDLE();
#undef DA
#undef DN

#define CPTMO(a, b, x) do {                             \
                if ((a)->x < 0.0 || isnan((a)->x))      \
                        (a)->x = (b)->x;                \
        } while(0)

        if (viabe != NULL) {
                CPTMO(be, viabe, connect_timeout);
                CPTMO(be, viabe, first_byte_timeout);
                CPTMO(be, viabe, between_bytes_timeout);
        }
#undef CPTMO

        if (viabe || be->hosthdr == NULL) {
                if (vrt->endpoint->uds_path != NULL)
                        sa = bogo_ip;
                else if (cache_param->prefer_ipv6 && vep->ipv6 != NULL)
                        sa = vep->ipv6;
                else if (vep->ipv4 != NULL)
                        sa = vep->ipv4;
                else
                        sa = vep->ipv6;
                if (be->hosthdr == NULL) {
                        VTCP_name(sa, abuf, sizeof abuf, NULL, 0);
                        REPLACE(be->hosthdr, abuf);
                }
        }

        be->vsc = VSC_vbe_New(vc, &be->vsc_seg,
            "%s.%s", VCL_Name(ctx->vcl), vrt->vcl_name);
        AN(be->vsc);
        if (! vcl->temp->is_warm)
                VRT_VSC_Hide(be->vsc_seg);

        if (viabe)
                vep = be->endpoint = via_endpoint(viabe->endpoint, sa,
                    be->authority);
        else
                vep = be->endpoint = VRT_Endpoint_Clone(vep);

        AN(vep);
        be->conn_pool = VCP_Ref(vep, vbe_proto_ident);
        AN(be->conn_pool);

        vbp = vrt->probe;
        if (vbp == NULL)
                vbp = VCL_DefaultProbe(vcl);

        if (vbp != NULL) {
                VBP_Insert(be, vbp, be->conn_pool);
                m = vbe_methods;
        } else {
                be->sick = 0;
                be->vsc->happy = UINT64_MAX;
                m = vbe_methods_noprobe;
        }

        Lck_Lock(&backends_mtx);
        VSC_C_main->n_backend++;
        Lck_Unlock(&backends_mtx);

        be->director = VRT_AddDirector(ctx, m, be, "%s", vrt->vcl_name);

        if (be->director == NULL) {
                vbe_free(be);
                return (NULL);
        }
        /* for cold VCL, update initial director state */
        if (be->probe != NULL)
                VBP_Update_Backend(be->probe);
        return (be->director);
}

VCL_BACKEND
VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt, VCL_BACKEND via)
{

        CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
        CHECK_OBJ_NOTNULL(vrt->endpoint, VRT_ENDPOINT_MAGIC);
        return (VRT_new_backend_clustered(ctx, NULL, vrt, via));
}

/*--------------------------------------------------------------------
 * Delete a dynamic director::backend instance.  Undeleted dynamic and
 * static instances are GC'ed when the VCL is discarded (in cache_vcl.c)
 */

void
VRT_delete_backend(VRT_CTX, VCL_BACKEND *dp)
{

        (void)ctx;
        CHECK_OBJ_NOTNULL(*dp, DIRECTOR_MAGIC);
        VRT_DisableDirector(*dp);
        VRT_Assign_Backend(dp, NULL);
}

/*---------------------------------------------------------------------*/

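/*
 * Initialize the global backends_mtx lock.
 */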
void
VBE_InitCfg(void)
{

        Lck_New(&backends_mtx, lck_vbe);
}