varnish-cache/bin/varnishd/cache/cache_conn_pool.c
0
/*-
1
 * Copyright (c) 2015 Varnish Software AS
2
 * All rights reserved.
3
 *
4
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
5
 *
6
 * SPDX-License-Identifier: BSD-2-Clause
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 *
29
 * (TCP|UDS) connection pools.
30
 *
31
 */
32
33
#include "config.h"
34
35
#include <stdlib.h>
36
37
#include "cache_varnishd.h"
38
39
#include "vsa.h"
40
#include "vsha256.h"
41
#include "vtcp.h"
42
#include "vus.h"
43
#include "vtim.h"
44
#include "waiter/waiter.h"
45
46
#include "cache_conn_pool.h"
47
#include "cache_pool.h"
48
49
#include "VSC_vcp.h"
50
51
struct conn_pool;
/* Tree comparison function; definition follows struct conn_pool below. */
static inline int vcp_cmp(const struct conn_pool *a, const struct conn_pool *b);
54
/*--------------------------------------------------------------------
 * A pooled file descriptor: one backend connection, owned by exactly
 * one conn_pool.  While idle it is registered with the waiter through
 * the embedded "waited"; a worker that grabs it from the pool marks it
 * STOLEN and waits on "cond" until the waiter hands it over (see
 * vcp_handle() and VCP_Wait()).
 */

struct pfd {
        unsigned                magic;
#define PFD_MAGIC               0x0c5e6593
        int                     fd;
        VTAILQ_ENTRY(pfd)       list;           /* on conn_pool::connlist */
        VCL_IP                  addr;           /* address actually connected to */
        uint8_t                 state;          /* PFD_STATE_* */
        struct waited           waited[1];      /* waiter registration while idle */
        struct conn_pool        *conn_pool;     /* owning pool (back pointer) */

        pthread_cond_t          *cond;          /* worker's cond, set while STOLEN */
};
69
70
/*--------------------------------------------------------------------
 * Transport-specific operations: one vtable for TCP (vtp_methods) and
 * one for unix domain sockets (vus_methods).
 */

/* Open a new connection; returns fd or -1, stores chosen address in *ap. */
typedef int cp_open_f(const struct conn_pool *, vtim_dur tmo, VCL_IP *ap);
/* Close the pfd's file descriptor. */
typedef void cp_close_f(struct pfd *);
/* Render local/remote address and port into the supplied buffers. */
typedef void cp_name_f(const struct pfd *, char *, unsigned, char *, unsigned);

struct cp_methods {
        cp_open_f                               *open;
        cp_close_f                              *close;
        cp_name_f                               *local_name;
        cp_name_f                               *remote_name;
};
83
84
/*
 * A (TCP|UDS) connection pool, shared between all users with the same
 * endpoint+ident hash (see VCP_Ref()).
 */
struct conn_pool {
        unsigned                                magic;
#define CONN_POOL_MAGIC                         0x85099bc3

        const struct cp_methods                 *methods;       /* TCP or UDS vtable */

        struct vrt_endpoint                     *endpoint;      /* owned clone, freed in vcp_destroy() */
        char                                    ident[VSHA256_DIGEST_LENGTH];   /* identity digest, tree key */

        VRBT_ENTRY(conn_pool)                   entry;          /* conn_pools (or dead_pools) tree */
        int                                     refcnt;         /* protected by conn_pools_mtx */
        struct lock                             mtx;            /* protects the fields below */

        VTAILQ_HEAD(, pfd)                      connlist;       /* idle (AVAIL) + STOLEN conns */
        int                                     n_conn;

        int                                     n_kill;         /* conns awaiting waiter cleanup */

        int                                     n_used;         /* conns handed out to workers */

        vtim_mono                               holddown;       /* refuse new conns until then */
        int                                     holddown_errno; /* errno reported during holddown */
};
107
108
static struct lock conn_pools_mtx;
109
static struct lock dead_pools_mtx;
110
static struct VSC_vcp *vsc;
111
112
VRBT_HEAD(vrb, conn_pool);
113 32
VRBT_GENERATE_REMOVE_COLOR(vrb, conn_pool, entry, static)
114 84
VRBT_GENERATE_REMOVE(vrb, conn_pool, entry, static)
115 440
VRBT_GENERATE_INSERT_COLOR(vrb, conn_pool, entry, static)
116 2075
VRBT_GENERATE_INSERT_FINISH(vrb, conn_pool, entry, static)
117 3153
VRBT_GENERATE_INSERT(vrb, conn_pool, entry, vcp_cmp, static)
118 5
VRBT_GENERATE_NEXT(vrb, conn_pool, entry, static)
119 10
VRBT_GENERATE_MINMAX(vrb, conn_pool, entry, static)
120
121
static struct vrb conn_pools = VRBT_INITIALIZER(&conn_pools);
122
static struct vrb dead_pools = VRBT_INITIALIZER(&dying_cps);
123
124
/*--------------------------------------------------------------------
 */

/* Return the current PFD_STATE_* of a pooled file descriptor. */
unsigned
PFD_State(const struct pfd *p)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        return (p->state);
}
133
134
/* Return a pointer to the pfd's file descriptor (callers read/poll it). */
int *
PFD_Fd(struct pfd *p)
{
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
        return (&(p->fd));
}
140
141
void
142 4314
PFD_LocalName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
143
              unsigned plen)
144
{
145 4314
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
146 4314
        CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
147 4314
        p->conn_pool->methods->local_name(p, abuf, alen, pbuf, plen);
148 4314
}
149
150
void
151 4314
PFD_RemoteName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
152
               unsigned plen)
153
{
154 4314
        CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
155 4314
        CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
156 4314
        p->conn_pool->methods->remote_name(p, abuf, alen, pbuf, plen);
157 4314
}
158
159
/*--------------------------------------------------------------------
 * Tree ordering: pools compare by their SHA256 identity digest only.
 */

static inline int
vcp_cmp(const struct conn_pool *a, const struct conn_pool *b)
{
        return (memcmp(a->ident, b->ident, sizeof b->ident));
}
167
168
/*--------------------------------------------------------------------
 * Waiter-handler
 *
 * Runs in the waiter thread when something happens to a connection
 * parked there (activity, EOF, idle timeout).  The outcome depends on
 * the pfd state under the pool lock:
 *
 *  STOLEN:  a worker already took it (VCP_Get); mark it USED, unlink
 *           it and wake the worker blocked in VCP_Wait().
 *  AVAIL:   idle connection went bad or timed out; close and free it.
 *  CLEANUP: a worker gave up on it (VCP_Close/VCP_Rel); close it and
 *           drop the pool's kill-count.
 */

static void  v_matchproto_(waiter_handle_f)
vcp_handle(struct waited *w, enum wait_event ev, vtim_real now)
{
        struct pfd *pfd;
        struct conn_pool *cp;

        CHECK_OBJ_NOTNULL(w, WAITED_MAGIC);
        CAST_OBJ_NOTNULL(pfd, w->priv1, PFD_MAGIC);
        (void)ev;
        (void)now;
        CHECK_OBJ_NOTNULL(pfd->conn_pool, CONN_POOL_MAGIC);
        cp = pfd->conn_pool;

        Lck_Lock(&cp->mtx);

        switch (pfd->state) {
        case PFD_STATE_STOLEN:
                pfd->state = PFD_STATE_USED;
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                AN(pfd->cond);
                /* Wake the worker sleeping in VCP_Wait() */
                PTOK(pthread_cond_signal(pfd->cond));
                break;
        case PFD_STATE_AVAIL:
                cp->methods->close(pfd);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                cp->n_conn--;
                FREE_OBJ(pfd);
                break;
        case PFD_STATE_CLEANUP:
                cp->methods->close(pfd);
                cp->n_kill--;
                /* Poison before free to catch use-after-free */
                memset(pfd, 0x11, sizeof *pfd);
                free(pfd);
                break;
        default:
                WRONG("Wrong pfd state");
        }
        Lck_Unlock(&cp->mtx);
}
211
212
213
/*--------------------------------------------------------------------
 * Take an additional reference on an already-referenced pool.
 */

void
VCP_AddRef(struct conn_pool *cp)
{
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        Lck_Lock(&conn_pools_mtx);
        assert(cp->refcnt > 0);
        cp->refcnt++;
        Lck_Unlock(&conn_pools_mtx);
}
226
227
/*--------------------------------------------------------------------
 * Free a pool.  All connections must already be gone (no idle, no
 * pending kills); the caller owns the last reference.
 */

static void
vcp_destroy(struct conn_pool **cpp)
{
        struct conn_pool *cp;

        TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);
        AZ(cp->n_conn);
        AZ(cp->n_kill);
        Lck_Delete(&cp->mtx);
        FREE_OBJ(cp->endpoint);
        FREE_OBJ(cp);
}
242
243
/*--------------------------------------------------------------------
 * Release Conn pool, destroy or stash for future destruction if last
 * reference.
 */

void
VCP_Rel(struct conn_pool **cpp)
{
        struct conn_pool *cp;
        struct pfd *pfd, *pfd2;
        int n_kill;

        TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);

        Lck_Lock(&conn_pools_mtx);
        assert(cp->refcnt > 0);
        if (--cp->refcnt > 0) {
                Lck_Unlock(&conn_pools_mtx);
                return;
        }
        /* Last reference: no worker may still hold a connection */
        AZ(cp->n_used);
        VRBT_REMOVE(vrb, &conn_pools, cp);
        Lck_Unlock(&conn_pools_mtx);

        /*
         * Hand every idle connection to the waiter for cleanup; the
         * shutdown() makes the fd "active" so vcp_handle() fires.
         */
        Lck_Lock(&cp->mtx);
        VTAILQ_FOREACH_SAFE(pfd, &cp->connlist, list, pfd2) {
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                cp->n_conn--;
                assert(pfd->state == PFD_STATE_AVAIL);
                pfd->state = PFD_STATE_CLEANUP;
                (void)shutdown(pfd->fd, SHUT_RDWR);
                cp->n_kill++;
        }
        n_kill = cp->n_kill;
        Lck_Unlock(&cp->mtx);
        if (n_kill == 0) {
                /* Nothing pending with the waiter, free right away */
                vcp_destroy(&cp);
                return;
        }
        Lck_Lock(&dead_pools_mtx);
        /*
         * Here we reuse cp's entry but it will probably not be correctly
         * indexed because of the hack in VCP_RelPoll
         */
        VRBT_INSERT(vrb, &dead_pools, cp);
        Lck_Unlock(&dead_pools_mtx);
}
290
291
/*
 * CLI-thread poll: destroy dead pools whose waiter cleanup has
 * completed (n_kill reached zero), keep the rest for a later poll.
 */
void
VCP_RelPoll(void)
{
        struct vrb dead;
        struct conn_pool *cp, *cp2;
        int n_kill;

        ASSERT_CLI();

        /* Take ownership of the whole dead_pools tree under the lock */
        Lck_Lock(&dead_pools_mtx);
        if (VRBT_EMPTY(&dead_pools)) {
                Lck_Unlock(&dead_pools_mtx);
                return;
        }
        dead = dead_pools;
        VRBT_INIT(&dead_pools);
        Lck_Unlock(&dead_pools_mtx);

        VRBT_FOREACH_SAFE(cp, vrb, &dead, cp2) {
                CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
                Lck_Lock(&cp->mtx);
                n_kill = cp->n_kill;
                Lck_Unlock(&cp->mtx);
                if (n_kill > 0)
                        continue;       /* waiter not done with it yet */
                VRBT_REMOVE(vrb, &dead, cp);
                vcp_destroy(&cp);
        }

        if (VRBT_EMPTY(&dead))
                return;

        /* Put the survivors back for the next poll */
        Lck_Lock(&dead_pools_mtx);
        /*
         * The following insertion will most likely result in an
         * unordered tree, but in this case it does not matter
         * as we just want to iterate over all the elements
         * in the tree in order to delete them.
         */
        VRBT_INSERT(vrb, &dead_pools, dead.rbh_root);
        Lck_Unlock(&dead_pools_mtx);
}
333
334
/*--------------------------------------------------------------------
 * Recycle a connection.
 *
 * The worker is done with the (USED) connection; park it with the
 * waiter as AVAIL so another worker can reuse it, or close it if the
 * waiter refuses it.  Consumes *pfdp either way.
 */

void
VCP_Recycle(const struct worker *wrk, struct pfd **pfdp)
{
        struct pfd *pfd;
        struct conn_pool *cp;
        int i = 0;      /* set when the pfd actually went back on the list */

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        assert(pfd->state == PFD_STATE_USED);
        assert(pfd->fd > 0);

        Lck_Lock(&cp->mtx);
        cp->n_used--;

        /* Re-arm the waiter registration for the idle period */
        pfd->waited->priv1 = pfd;
        pfd->waited->fd = pfd->fd;
        pfd->waited->idle = VTIM_real();
        pfd->state = PFD_STATE_AVAIL;
        pfd->waited->func = vcp_handle;
        pfd->waited->tmo = cache_param->backend_idle_timeout;
        if (Wait_Enter(wrk->pool->waiter, pfd->waited)) {
                /* Waiter would not take it: close and poison-free */
                cp->methods->close(pfd);
                memset(pfd, 0x33, sizeof *pfd);
                free(pfd);
                // XXX: stats
                pfd = NULL;
        } else {
                VTAILQ_INSERT_HEAD(&cp->connlist, pfd, list);
                i++;
        }

        if (pfd != NULL)
                cp->n_conn++;
        Lck_Unlock(&cp->mtx);

        if (i && DO_DEBUG(DBG_VTC_MODE)) {
                /*
                 * In varnishtest we do not have the luxury of using
                 * multiple backend connections, so whenever we end up
                 * in the "pending" case, take a short nap to let the
                 * waiter catch up and put the pfd back into circulations.
                 *
                 * In particular ESI:include related tests suffer random
                 * failures without this.
                 *
                 * In normal operation, the only effect is that we will
                 * have N+1 backend connections rather than N, which is
                 * entirely harmless.
                 */
                VTIM_sleep(0.01);
        }
}
394
395
/*--------------------------------------------------------------------
396
 * Open a new connection from pool.
397
 */
398
399
int
400 3157
VCP_Open(struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap, int *err)
401
{
402
        int r;
403
        vtim_mono h;
404
405 3157
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
406 3157
        AN(err);
407
408 3157
        while (cp->holddown > 0) {
409 46
                Lck_Lock(&cp->mtx);
410 46
                if (cp->holddown == 0) {
411 0
                        Lck_Unlock(&cp->mtx);
412 0
                        break;
413
                }
414
415 46
                if (VTIM_mono() >= cp->holddown) {
416 5
                        cp->holddown = 0;
417 5
                        Lck_Unlock(&cp->mtx);
418 5
                        break;
419
                }
420
421 41
                *err = 0;
422 41
                errno = cp->holddown_errno;
423 41
                Lck_Unlock(&cp->mtx);
424 41
                return (-1);
425
        }
426
427 3116
        *err = errno = 0;
428 3116
        r = cp->methods->open(cp, tmo, ap);
429
430 3116
        if (r >= 0 && errno == 0 && cp->endpoint->preamble != NULL &&
431 18
             cp->endpoint->preamble->len > 0) {
432 18
                CHECK_OBJ(cp->endpoint->preamble, VRT_BLOB_MAGIC);
433 54
                if (write(r, cp->endpoint->preamble->blob,
434 36
                    cp->endpoint->preamble->len) !=
435 18
                    cp->endpoint->preamble->len) {
436 0
                        *err = errno;
437 0
                        closefd(&r);
438 0
                }
439 18
        } else {
440 3098
                *err = errno;
441
        }
442
443 3116
        if (r >= 0)
444 3064
                return (r);
445
446 52
        h = 0;
447
448 52
        switch (errno) {
449
        case EACCES:
450
        case EPERM:
451 0
                h = cache_param->backend_local_error_holddown;
452 0
                break;
453
        case EADDRNOTAVAIL:
454 0
                h = cache_param->backend_local_error_holddown;
455 0
                break;
456
        case ECONNREFUSED:
457 52
                h = cache_param->backend_remote_error_holddown;
458 52
                break;
459
        case ENETUNREACH:
460 0
                h = cache_param->backend_remote_error_holddown;
461 0
                break;
462
        default:
463 0
                break;
464
        }
465
466 52
        if (h == 0)
467 0
                return (r);
468
469 52
        Lck_Lock(&cp->mtx);
470 52
        h += VTIM_mono();
471 52
        if (cp->holddown == 0 || h < cp->holddown) {
472 48
                cp->holddown = h;
473 48
                cp->holddown_errno = errno;
474 48
        }
475
476 52
        Lck_Unlock(&cp->mtx);
477
478 52
        return (r);
479 3157
}
480
481
/*--------------------------------------------------------------------
 * Close a connection.
 *
 * The connection is not recyclable.  A STOLEN pfd is still owned by
 * the waiter, so it is only marked CLEANUP and shutdown(); the actual
 * close/free happens in vcp_handle().  A USED pfd is closed and freed
 * here.  Consumes *pfdp.
 */

void
VCP_Close(struct pfd **pfdp)
{
        struct pfd *pfd;
        struct conn_pool *cp;

        TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
        cp = pfd->conn_pool;
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

        assert(pfd->fd > 0);

        Lck_Lock(&cp->mtx);
        assert(pfd->state == PFD_STATE_USED || pfd->state == PFD_STATE_STOLEN);
        cp->n_used--;
        if (pfd->state == PFD_STATE_STOLEN) {
                (void)shutdown(pfd->fd, SHUT_RDWR);
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                pfd->state = PFD_STATE_CLEANUP;
                cp->n_kill++;
        } else {
                assert(pfd->state == PFD_STATE_USED);
                cp->methods->close(pfd);
                /* Poison before free to catch use-after-free */
                memset(pfd, 0x44, sizeof *pfd);
                free(pfd);
        }
        Lck_Unlock(&cp->mtx);
}
513
514
/*--------------------------------------------------------------------
 * Get a connection, possibly recycled
 *
 * Steals the first idle (AVAIL) connection if allowed, otherwise opens
 * a fresh one.  A stolen pfd stays on the list (moved to the tail) and
 * remains the waiter's until vcp_handle() confirms it; the caller must
 * follow up with VCP_Wait().  Returns NULL only when a fresh open
 * failed, with *err set.
 */

struct pfd *
VCP_Get(struct conn_pool *cp, vtim_dur tmo, struct worker *wrk,
    unsigned force_fresh, int *err)
{
        struct pfd *pfd;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        AN(err);

        *err = 0;
        Lck_Lock(&cp->mtx);
        pfd = VTAILQ_FIRST(&cp->connlist);
        CHECK_OBJ_ORNULL(pfd, PFD_MAGIC);
        if (force_fresh || pfd == NULL || pfd->state == PFD_STATE_STOLEN) {
                pfd = NULL;
        } else {
                assert(pfd->conn_pool == cp);
                assert(pfd->state == PFD_STATE_AVAIL);
                /* Keep it on the list (at the tail) until vcp_handle()
                 * hands it over; mark it STOLEN so nobody else takes it */
                VTAILQ_REMOVE(&cp->connlist, pfd, list);
                VTAILQ_INSERT_TAIL(&cp->connlist, pfd, list);
                cp->n_conn--;
                VSC_C_main->backend_reuse++;
                pfd->state = PFD_STATE_STOLEN;
                pfd->cond = &wrk->cond;
        }
        cp->n_used++;                   // Opening mostly works
        Lck_Unlock(&cp->mtx);

        if (pfd != NULL)
                return (pfd);

        /* No reusable connection: open a fresh one */
        ALLOC_OBJ(pfd, PFD_MAGIC);
        AN(pfd);
        INIT_OBJ(pfd->waited, WAITED_MAGIC);
        pfd->state = PFD_STATE_USED;
        pfd->conn_pool = cp;
        pfd->fd = VCP_Open(cp, tmo, &pfd->addr, err);
        if (pfd->fd < 0) {
                FREE_OBJ(pfd);
                Lck_Lock(&cp->mtx);
                cp->n_used--;           // Nope, didn't work after all.
                Lck_Unlock(&cp->mtx);
        } else
                VSC_C_main->backend_conn++;

        return (pfd);
}
566
567
/*--------------------------------------------------------------------
568
 */
569
570
int
571 1606
VCP_Wait(struct worker *wrk, struct pfd *pfd, vtim_real when)
572
{
573
        struct conn_pool *cp;
574
        int r;
575
576 1606
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
577 1606
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
578 1606
        cp = pfd->conn_pool;
579 1606
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
580 1606
        assert(pfd->cond == &wrk->cond);
581 1606
        Lck_Lock(&cp->mtx);
582 3210
        while (pfd->state == PFD_STATE_STOLEN) {
583 1606
                r = Lck_CondWaitUntil(&wrk->cond, &cp->mtx, when);
584 1606
                if (r != 0) {
585 2
                        if (r == EINTR)
586 0
                                continue;
587 2
                        assert(r == ETIMEDOUT);
588 2
                        Lck_Unlock(&cp->mtx);
589 2
                        return (1);
590
                }
591
        }
592 1604
        assert(pfd->state == PFD_STATE_USED);
593 1604
        pfd->cond = NULL;
594 1604
        Lck_Unlock(&cp->mtx);
595
596 1604
        return (0);
597 1606
}
598
599
/*--------------------------------------------------------------------
 */

/* Return the address this pfd was connected to (bogo_ip for UDS). */
VCL_IP
VCP_GetIp(struct pfd *pfd)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        return (pfd->addr);
}
609
610
/*--------------------------------------------------------------------*/
611
612
static void
613 12
vcp_panic_endpoint(struct vsb *vsb, const struct vrt_endpoint *vep)
614
{
615
        char h[VTCP_ADDRBUFSIZE];
616
        char p[VTCP_PORTBUFSIZE];
617
618 12
        if (PAN_dump_struct(vsb, vep, VRT_ENDPOINT_MAGIC, "vrt_endpoint"))
619 0
                return;
620 12
        if (vep->uds_path)
621 0
                VSB_printf(vsb, "uds_path = %s,\n", vep->uds_path);
622 12
        if (vep->ipv4 && VSA_Sane(vep->ipv4)) {
623 12
                VTCP_name(vep->ipv4, h, sizeof h, p, sizeof p);
624 12
                VSB_printf(vsb, "ipv4 = %s, ", h);
625 12
                VSB_printf(vsb, "port = %s,\n", p);
626 12
        }
627 12
        if (vep->ipv6 && VSA_Sane(vep->ipv6)) {
628 0
                VTCP_name(vep->ipv6, h, sizeof h, p, sizeof p);
629 0
                VSB_printf(vsb, "ipv6 = %s, ", h);
630 0
                VSB_printf(vsb, "port = %s,\n", p);
631 0
        }
632 12
        VSB_indent(vsb, -2);
633 12
        VSB_cat(vsb, "},\n");
634 12
}
635
636
/* Panic-dump a conn_pool: identity digest plus its endpoint. */
void
VCP_Panic(struct vsb *vsb, struct conn_pool *cp)
{
        if (PAN_dump_struct(vsb, cp, CONN_POOL_MAGIC, "conn_pool"))
                return;
        VSB_cat(vsb, "ident = ");
        VSB_quote(vsb, cp->ident, VSHA256_DIGEST_LENGTH, VSB_QUOTE_HEX);
        VSB_cat(vsb, ",\n");
        vcp_panic_endpoint(vsb, cp->endpoint);
        VSB_indent(vsb, -2);
        VSB_cat(vsb, "},\n");
}
649
650
/*--------------------------------------------------------------------*/

/* One-time initialization: the two global locks and the VSC counters. */
void
VCP_Init(void)
{
        Lck_New(&conn_pools_mtx, lck_conn_pool);
        Lck_New(&dead_pools_mtx, lck_dead_pool);

        AZ(vsc);
        vsc = VSC_vcp_New(NULL, NULL, "");
        AN(vsc);
}
662
663
/**********************************************************************/

/* Convert a timeout in (fractional) seconds to whole milliseconds. */
static inline int
tmo2msec(vtim_dur tmo)
{
        return ((int)floor(tmo * 1000.0));
}
670
671
static int v_matchproto_(cp_open_f)
672 2879
vtp_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
673
{
674
        int s;
675
        int msec;
676
677 2879
        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
678
679 2879
        msec = tmo2msec(tmo);
680 2879
        if (cache_param->prefer_ipv6) {
681 0
                *ap = cp->endpoint->ipv6;
682 0
                s = VTCP_connect(*ap, msec);
683 0
                if (s >= 0)
684 0
                        return (s);
685 0
        }
686 2879
        *ap = cp->endpoint->ipv4;
687 2879
        s = VTCP_connect(*ap, msec);
688 2879
        if (s >= 0)
689 2824
                return (s);
690 55
        if (!cache_param->prefer_ipv6) {
691 55
                *ap = cp->endpoint->ipv6;
692 55
                s = VTCP_connect(*ap, msec);
693 55
        }
694 55
        return (s);
695 2879
}
696
697
698
/*--------------------------------------------------------------------*/

/* Close method shared by TCP and UDS pools. */
static void v_matchproto_(cp_close_f)
vtp_close(struct pfd *pfd)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_close(&pfd->fd);
}
707
708
/* Local-address name for a TCP connection (getsockname side). */
static void v_matchproto_(cp_name_f)
vtp_local_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
               unsigned plen)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_myname(pfd->fd, addr, alen, pbuf, plen);
}
715
716
/* Remote-address name for a TCP connection (getpeername side). */
static void v_matchproto_(cp_name_f)
vtp_remote_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
                unsigned plen)
{
        CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
        VTCP_hisname(pfd->fd, addr, alen, pbuf, plen);
}
723
724
/* Method vtable for TCP pools. */
static const struct cp_methods vtp_methods = {
        .open = vtp_open,
        .close = vtp_close,
        .local_name = vtp_local_name,
        .remote_name = vtp_remote_name,
};
730
731
/*--------------------------------------------------------------------
 * UDS open: connect to the endpoint's socket path; *ap is set to the
 * bogo_ip placeholder since there is no real IP address.
 */

static int v_matchproto_(cp_open_f)
vus_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
{
        int s;
        int msec;

        CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
        AN(cp->endpoint->uds_path);

        msec = tmo2msec(tmo);
        *ap = bogo_ip;
        s = VUS_connect(cp->endpoint->uds_path, msec);
        return (s);
}
748
749
static void v_matchproto_(cp_name_f)
750 444
vus_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
751
         unsigned plen)
752
{
753 444
        (void) pfd;
754 444
        assert(alen > strlen("0.0.0.0"));
755 444
        assert(plen > 1);
756 444
        strcpy(addr, "0.0.0.0");
757 444
        strcpy(pbuf, "0");
758 444
}
759
760
/* Method vtable for UDS pools (close is shared with TCP). */
static const struct cp_methods vus_methods = {
        .open = vus_open,
        .close = vtp_close,
        .local_name = vus_name,
        .remote_name = vus_name,
};
766
767
/*--------------------------------------------------------------------
 * Reference a TCP pool given by {ip4, ip6} pair or a UDS.  Create if
 * it doesn't exist already.
 *
 * The pool identity is the SHA256 of ident plus the endpoint's
 * addresses/path and preamble; pools with equal digests are shared.
 * A candidate pool is built first and discarded if the tree insert
 * finds an existing one, avoiding allocation under the global lock.
 */

struct conn_pool *
VCP_Ref(const struct vrt_endpoint *vep, const char *ident)
{
        struct conn_pool *cp, *cp2;
        struct VSHA256Context cx[1];
        unsigned char digest[VSHA256_DIGEST_LENGTH];

        CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
        AN(ident);
        AN(vsc);

        /*
         * Hash the identity.  The embedded NULs act as field
         * separators so distinct inputs cannot collide by
         * concatenation.
         */
        VSHA256_Init(cx);
        VSHA256_Update(cx, ident, strlen(ident) + 1); // include \0
        if (vep->uds_path != NULL) {
                AZ(vep->ipv4);
                AZ(vep->ipv6);
                VSHA256_Update(cx, "UDS", 4); // include \0
                VSHA256_Update(cx, vep->uds_path, strlen(vep->uds_path));
        } else {
                assert(vep->ipv4 != NULL || vep->ipv6 != NULL);
                if (vep->ipv4 != NULL) {
                        assert(VSA_Sane(vep->ipv4));
                        VSHA256_Update(cx, "IP4", 4); // include \0
                        VSHA256_Update(cx, vep->ipv4, vsa_suckaddr_len);
                }
                if (vep->ipv6 != NULL) {
                        assert(VSA_Sane(vep->ipv6));
                        VSHA256_Update(cx, "IP6", 4); // include \0
                        VSHA256_Update(cx, vep->ipv6, vsa_suckaddr_len);
                }
        }
        CHECK_OBJ_ORNULL(vep->preamble, VRT_BLOB_MAGIC);
        if (vep->preamble != NULL && vep->preamble->len > 0) {
                VSHA256_Update(cx, "PRE", 4); // include \0
                VSHA256_Update(cx, vep->preamble->blob, vep->preamble->len);
        }
        VSHA256_Final(digest, cx);

        /* Build the candidate pool before taking the global lock */
        ALLOC_OBJ(cp, CONN_POOL_MAGIC);
        AN(cp);
        cp->refcnt = 1;
        cp->holddown = 0;
        cp->endpoint = VRT_Endpoint_Clone(vep);
        CHECK_OBJ_NOTNULL(cp->endpoint, VRT_ENDPOINT_MAGIC);
        memcpy(cp->ident, digest, sizeof cp->ident);
        if (vep->uds_path != NULL)
                cp->methods = &vus_methods;
        else
                cp->methods = &vtp_methods;
        Lck_New(&cp->mtx, lck_conn_pool);
        VTAILQ_INIT(&cp->connlist);

        /* Insert returns NULL on success, the existing pool on a hit */
        Lck_Lock(&conn_pools_mtx);
        cp2 = VRBT_INSERT(vrb, &conn_pools, cp);
        if (cp2 == NULL) {
                vsc->ref_miss++;
                Lck_Unlock(&conn_pools_mtx);
                return (cp);
        }

        CHECK_OBJ(cp2, CONN_POOL_MAGIC);
        assert(cp2->refcnt > 0);
        cp2->refcnt++;
        vsc->ref_hit++;
        Lck_Unlock(&conn_pools_mtx);

        /* Lost the race / pool already existed: drop the candidate */
        vcp_destroy(&cp);
        return (cp2);
}