varnish-cache/bin/varnishd/cache/cache_conn_pool.c

/*-
 * Copyright (c) 2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * (TCP|UDS) connection pools.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache_varnishd.h"

#include "vsa.h"
#include "vsha256.h"
#include "vtcp.h"
#include "vus.h"
#include "vtim.h"
#include "waiter/waiter.h"

#include "cache_conn_pool.h"
#include "cache_pool.h"

struct conn_pool;
static inline int vcp_cmp(const struct conn_pool *a, const struct conn_pool *b);

/*--------------------------------------------------------------------
 */

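/*
 * A pooled connection: one file descriptor plus the bookkeeping needed
 * to hand it back and forth between worker threads and the waiter.  The
 * state field holds one of the PFD_STATE_* values (AVAIL, USED, STOLEN,
 * CLEANUP) used throughout this file.
 */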
struct pfd {
	unsigned		magic;
#define PFD_MAGIC		0x0c5e6593
	int			fd;
	VTAILQ_ENTRY(pfd)	list;
	VCL_IP			addr;
	uint8_t			state;
	struct waited		waited[1];
	struct conn_pool	*conn_pool;

	pthread_cond_t		*cond;
};

/*--------------------------------------------------------------------
 */

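/*
 * Transport methods: how to open, close and name a connection.  Two
 * implementations exist below, one for TCP endpoints (vtp_*) and one
 * for Unix domain sockets (vus_*).
 */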
typedef int cp_open_f(const struct conn_pool *, vtim_dur tmo, VCL_IP *ap);
typedef void cp_close_f(struct pfd *);
typedef void cp_name_f(const struct pfd *, char *, unsigned, char *, unsigned);

struct cp_methods {
	cp_open_f				*open;
	cp_close_f				*close;
	cp_name_f				*local_name;
	cp_name_f				*remote_name;
};

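/*
 * A connection pool, keyed by the SHA256 ident computed in VCP_Ref()
 * and kept in the conn_pools red-black tree.  It is reference counted
 * and tracks its idle (connlist/n_conn), in-use (n_used) and
 * to-be-killed (n_kill) connections, plus an error holddown window.
 */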
struct conn_pool {
	unsigned				magic;
#define CONN_POOL_MAGIC			0x85099bc3

	const struct cp_methods			*methods;

	struct vrt_endpoint			*endpoint;
	char					ident[VSHA256_DIGEST_LENGTH];

	VRBT_ENTRY(conn_pool)			entry;
	int					refcnt;
	struct lock				mtx;

	VTAILQ_HEAD(, pfd)			connlist;
	int					n_conn;

	int					n_kill;

	int					n_used;

	vtim_mono				holddown;
	int					holddown_errno;
};

static struct lock conn_pools_mtx;
static struct lock dead_pools_mtx;

VRBT_HEAD(vrb, conn_pool);
VRBT_GENERATE_REMOVE_COLOR(vrb, conn_pool, entry, static)
VRBT_GENERATE_REMOVE(vrb, conn_pool, entry, static)
VRBT_GENERATE_FIND(vrb, conn_pool, entry, vcp_cmp, static)
VRBT_GENERATE_INSERT_COLOR(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT_FINISH(vrb, conn_pool, entry, static)
VRBT_GENERATE_INSERT(vrb, conn_pool, entry, vcp_cmp, static)
VRBT_GENERATE_NEXT(vrb, conn_pool, entry, static)
VRBT_GENERATE_MINMAX(vrb, conn_pool, entry, static)

static struct vrb conn_pools = VRBT_INITIALIZER(&conn_pools);
static struct vrb dead_pools = VRBT_INITIALIZER(&dead_pools);

/*--------------------------------------------------------------------
 */

unsigned
PFD_State(const struct pfd *p)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	return (p->state);
}

int *
PFD_Fd(struct pfd *p)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	return (&(p->fd));
}

void
PFD_LocalName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
	p->conn_pool->methods->local_name(p, abuf, alen, pbuf, plen);
}

void
PFD_RemoteName(const struct pfd *p, char *abuf, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(p, PFD_MAGIC);
	CHECK_OBJ_NOTNULL(p->conn_pool, CONN_POOL_MAGIC);
	p->conn_pool->methods->remote_name(p, abuf, alen, pbuf, plen);
}

/*--------------------------------------------------------------------
 */

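/* Ordering function for the pool trees: compare pools by ident digest. */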
static inline int
vcp_cmp(const struct conn_pool *a, const struct conn_pool *b)
{
	return (memcmp(a->ident, b->ident, sizeof b->ident));
}

/*--------------------------------------------------------------------
 * Waiter-handler
 */

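/*
 * Called by the waiter when something happens to an idle connection:
 * either a worker has stolen it (hand it over and signal the worker),
 * it was still available (the event or idle timeout means it is no
 * longer usable, so close and free it), or it was already marked for
 * cleanup (close it and decrement n_kill).
 */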
static void v_matchproto_(waiter_handle_f)
vcp_handle(struct waited *w, enum wait_event ev, vtim_real now)
{
	struct pfd *pfd;
	struct conn_pool *cp;

	CHECK_OBJ_NOTNULL(w, WAITED_MAGIC);
	CAST_OBJ_NOTNULL(pfd, w->priv1, PFD_MAGIC);
	(void)ev;
	(void)now;
	CHECK_OBJ_NOTNULL(pfd->conn_pool, CONN_POOL_MAGIC);
	cp = pfd->conn_pool;

	Lck_Lock(&cp->mtx);

	switch (pfd->state) {
	case PFD_STATE_STOLEN:
		pfd->state = PFD_STATE_USED;
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		AN(pfd->cond);
		PTOK(pthread_cond_signal(pfd->cond));
		break;
	case PFD_STATE_AVAIL:
		cp->methods->close(pfd);
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		cp->n_conn--;
		FREE_OBJ(pfd);
		break;
	case PFD_STATE_CLEANUP:
		cp->methods->close(pfd);
		cp->n_kill--;
		memset(pfd, 0x11, sizeof *pfd);
		free(pfd);
		break;
	default:
		WRONG("Wrong pfd state");
	}
	Lck_Unlock(&cp->mtx);
}


/*--------------------------------------------------------------------
 */

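/* Take an extra reference on an already-referenced pool. */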
void
VCP_AddRef(struct conn_pool *cp)
{
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

	Lck_Lock(&conn_pools_mtx);
	assert(cp->refcnt > 0);
	cp->refcnt++;
	Lck_Unlock(&conn_pools_mtx);
}

/*--------------------------------------------------------------------
 */

static void
vcp_destroy(struct conn_pool **cpp)
{
	struct conn_pool *cp;

	TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);
	AZ(cp->n_conn);
	AZ(cp->n_kill);
	Lck_Delete(&cp->mtx);
	free(cp->endpoint);
	FREE_OBJ(cp);
}

/*--------------------------------------------------------------------
 * Release Conn pool, destroy if last reference.
 */

void
VCP_Rel(struct conn_pool **cpp)
{
	struct conn_pool *cp;
	struct pfd *pfd, *pfd2;
	int n_kill;

	TAKE_OBJ_NOTNULL(cp, cpp, CONN_POOL_MAGIC);

	Lck_Lock(&conn_pools_mtx);
	assert(cp->refcnt > 0);
	if (--cp->refcnt > 0) {
		Lck_Unlock(&conn_pools_mtx);
		return;
	}
	AZ(cp->n_used);
	VRBT_REMOVE(vrb, &conn_pools, cp);
	Lck_Unlock(&conn_pools_mtx);

	Lck_Lock(&cp->mtx);
	VTAILQ_FOREACH_SAFE(pfd, &cp->connlist, list, pfd2) {
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		cp->n_conn--;
		assert(pfd->state == PFD_STATE_AVAIL);
		pfd->state = PFD_STATE_CLEANUP;
		(void)shutdown(pfd->fd, SHUT_RDWR);
		cp->n_kill++;
	}
	n_kill = cp->n_kill;
	Lck_Unlock(&cp->mtx);
	if (n_kill == 0) {
		vcp_destroy(&cp);
		return;
	}
	Lck_Lock(&dead_pools_mtx);
	/*
	 * Here we reuse cp's entry but it will probably not be correctly
	 * indexed because of the hack in VCP_RelPoll
	 */
	VRBT_INSERT(vrb, &dead_pools, cp);
	Lck_Unlock(&dead_pools_mtx);
}

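/*
 * Reap pools parked in dead_pools by VCP_Rel(): called from the CLI
 * thread, it destroys any pool whose remaining connections have been
 * cleaned up by the waiter (n_kill back to zero) and re-parks the rest
 * for a later poll.
 */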
void
VCP_RelPoll(void)
{
	struct vrb dead;
	struct conn_pool *cp, *cp2;
	int n_kill;

	ASSERT_CLI();

	Lck_Lock(&dead_pools_mtx);
	if (VRBT_EMPTY(&dead_pools)) {
		Lck_Unlock(&dead_pools_mtx);
		return;
	}
	dead = dead_pools;
	VRBT_INIT(&dead_pools);
	Lck_Unlock(&dead_pools_mtx);

	VRBT_FOREACH_SAFE(cp, vrb, &dead, cp2) {
		CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
		Lck_Lock(&cp->mtx);
		n_kill = cp->n_kill;
		Lck_Unlock(&cp->mtx);
		if (n_kill > 0)
			continue;
		VRBT_REMOVE(vrb, &dead, cp);
		vcp_destroy(&cp);
	}

	if (VRBT_EMPTY(&dead))
		return;

	Lck_Lock(&dead_pools_mtx);
	/*
	 * The following insertion will most likely result in an
	 * unordered tree, but in this case it does not matter
	 * as we just want to iterate over all the elements
	 * in the tree in order to delete them.
	 */
	VRBT_INSERT(vrb, &dead_pools, dead.rbh_root);
	Lck_Unlock(&dead_pools_mtx);
}

/*--------------------------------------------------------------------
 * Recycle a connection.
 */

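/*
 * Hand a used connection back to the pool: the fd is registered with
 * the worker pool's waiter with an idle timeout of backend_idle_timeout,
 * so vcp_handle() can dispose of it if the backend hangs up or it sits
 * idle too long before being reused.
 */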
void
VCP_Recycle(const struct worker *wrk, struct pfd **pfdp)
{
	struct pfd *pfd;
	struct conn_pool *cp;
	int i = 0;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
	cp = pfd->conn_pool;
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

	assert(pfd->state == PFD_STATE_USED);
	assert(pfd->fd > 0);

	Lck_Lock(&cp->mtx);
	cp->n_used--;

	pfd->waited->priv1 = pfd;
	pfd->waited->fd = pfd->fd;
	pfd->waited->idle = VTIM_real();
	pfd->state = PFD_STATE_AVAIL;
	pfd->waited->func = vcp_handle;
	pfd->waited->tmo = cache_param->backend_idle_timeout;
	if (Wait_Enter(wrk->pool->waiter, pfd->waited)) {
		cp->methods->close(pfd);
		memset(pfd, 0x33, sizeof *pfd);
		free(pfd);
		// XXX: stats
		pfd = NULL;
	} else {
		VTAILQ_INSERT_HEAD(&cp->connlist, pfd, list);
		i++;
	}

	if (pfd != NULL)
		cp->n_conn++;
	Lck_Unlock(&cp->mtx);

	if (i && DO_DEBUG(DBG_VTC_MODE)) {
		/*
		 * In varnishtest we do not have the luxury of using
		 * multiple backend connections, so whenever we end up
		 * in the "pending" case, take a short nap to let the
		 * waiter catch up and put the pfd back into circulation.
		 *
		 * In particular ESI:include related tests suffer random
		 * failures without this.
		 *
		 * In normal operation, the only effect is that we will
		 * have N+1 backend connections rather than N, which is
		 * entirely harmless.
		 */
		VTIM_sleep(0.01);
	}
}

/*--------------------------------------------------------------------
 * Open a new connection from pool.
 */

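/*
 * While a holddown is in effect (set below after EACCES/EPERM,
 * EADDRNOTAVAIL, ECONNREFUSED or ENETUNREACH), the connect attempt is
 * skipped: the call fails immediately with errno set to the error that
 * started the holddown and *err left at 0.
 */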
int
VCP_Open(struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap, int *err)
{
	int r;
	vtim_mono h;

	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	AN(err);

	while (cp->holddown > 0) {
		Lck_Lock(&cp->mtx);
		if (cp->holddown == 0) {
			Lck_Unlock(&cp->mtx);
			break;
		}

		if (VTIM_mono() >= cp->holddown) {
			cp->holddown = 0;
			Lck_Unlock(&cp->mtx);
			break;
		}

		*err = 0;
		errno = cp->holddown_errno;
		Lck_Unlock(&cp->mtx);
		return (-1);
	}

	*err = errno = 0;
	r = cp->methods->open(cp, tmo, ap);

	if (r >= 0 && errno == 0 && cp->endpoint->preamble != NULL &&
	    cp->endpoint->preamble->len > 0) {
		if (write(r, cp->endpoint->preamble->blob,
		    cp->endpoint->preamble->len) !=
		    cp->endpoint->preamble->len) {
			*err = errno;
			closefd(&r);
		}
	} else {
		*err = errno;
	}

	if (r >= 0)
		return (r);

	h = 0;

	switch (errno) {
	case EACCES:
	case EPERM:
		h = cache_param->backend_local_error_holddown;
		break;
	case EADDRNOTAVAIL:
		h = cache_param->backend_local_error_holddown;
		break;
	case ECONNREFUSED:
		h = cache_param->backend_remote_error_holddown;
		break;
	case ENETUNREACH:
		h = cache_param->backend_remote_error_holddown;
		break;
	default:
		break;
	}

	if (h == 0)
		return (r);

	Lck_Lock(&cp->mtx);
	h += VTIM_mono();
	if (cp->holddown == 0 || h < cp->holddown) {
		cp->holddown = h;
		cp->holddown_errno = errno;
	}

	Lck_Unlock(&cp->mtx);

	return (r);
}

/*--------------------------------------------------------------------
 * Close a connection.
 */

void
VCP_Close(struct pfd **pfdp)
{
	struct pfd *pfd;
	struct conn_pool *cp;

	TAKE_OBJ_NOTNULL(pfd, pfdp, PFD_MAGIC);
	cp = pfd->conn_pool;
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

	assert(pfd->fd > 0);

	Lck_Lock(&cp->mtx);
	assert(pfd->state == PFD_STATE_USED || pfd->state == PFD_STATE_STOLEN);
	cp->n_used--;
	if (pfd->state == PFD_STATE_STOLEN) {
		(void)shutdown(pfd->fd, SHUT_RDWR);
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		pfd->state = PFD_STATE_CLEANUP;
		cp->n_kill++;
	} else {
		assert(pfd->state == PFD_STATE_USED);
		cp->methods->close(pfd);
		memset(pfd, 0x44, sizeof *pfd);
		free(pfd);
	}
	Lck_Unlock(&cp->mtx);
}

/*--------------------------------------------------------------------
 * Get a connection, possibly recycled
 */

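/*
 * A recycled connection is returned in PFD_STATE_STOLEN: it is still
 * registered with the waiter, so the caller must use VCP_Wait() before
 * touching the fd.  With force_fresh set, or when no idle connection is
 * available, a new one is opened via VCP_Open() instead.
 */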
struct pfd *
VCP_Get(struct conn_pool *cp, vtim_dur tmo, struct worker *wrk,
    unsigned force_fresh, int *err)
{
	struct pfd *pfd;

	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AN(err);

	*err = 0;
	Lck_Lock(&cp->mtx);
	pfd = VTAILQ_FIRST(&cp->connlist);
	CHECK_OBJ_ORNULL(pfd, PFD_MAGIC);
	if (force_fresh || pfd == NULL || pfd->state == PFD_STATE_STOLEN) {
		pfd = NULL;
	} else {
		assert(pfd->conn_pool == cp);
		assert(pfd->state == PFD_STATE_AVAIL);
		VTAILQ_REMOVE(&cp->connlist, pfd, list);
		VTAILQ_INSERT_TAIL(&cp->connlist, pfd, list);
		cp->n_conn--;
		VSC_C_main->backend_reuse++;
		pfd->state = PFD_STATE_STOLEN;
		pfd->cond = &wrk->cond;
	}
	cp->n_used++;			// Opening mostly works
	Lck_Unlock(&cp->mtx);

	if (pfd != NULL)
		return (pfd);

	ALLOC_OBJ(pfd, PFD_MAGIC);
	AN(pfd);
	INIT_OBJ(pfd->waited, WAITED_MAGIC);
	pfd->state = PFD_STATE_USED;
	pfd->conn_pool = cp;
	pfd->fd = VCP_Open(cp, tmo, &pfd->addr, err);
	if (pfd->fd < 0) {
		FREE_OBJ(pfd);
		Lck_Lock(&cp->mtx);
		cp->n_used--;		// Nope, didn't work after all.
		Lck_Unlock(&cp->mtx);
	} else
		VSC_C_main->backend_conn++;

	return (pfd);
}

/*--------------------------------------------------------------------
 */

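/*
 * Wait for a stolen connection to be handed over by the waiter.
 * Returns 0 once the pfd is in PFD_STATE_USED, or 1 if the deadline
 * `when` passes first.
 */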
int
VCP_Wait(struct worker *wrk, struct pfd *pfd, vtim_real when)
{
	struct conn_pool *cp;
	int r;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	cp = pfd->conn_pool;
	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	assert(pfd->cond == &wrk->cond);
	Lck_Lock(&cp->mtx);
	while (pfd->state == PFD_STATE_STOLEN) {
		r = Lck_CondWaitUntil(&wrk->cond, &cp->mtx, when);
		if (r != 0) {
			if (r == EINTR)
				continue;
			assert(r == ETIMEDOUT);
			Lck_Unlock(&cp->mtx);
			return (1);
		}
	}
	assert(pfd->state == PFD_STATE_USED);
	pfd->cond = NULL;
	Lck_Unlock(&cp->mtx);

	return (0);
}

/*--------------------------------------------------------------------
 */

VCL_IP
VCP_GetIp(struct pfd *pfd)
{

	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	return (pfd->addr);
}

/*--------------------------------------------------------------------*/

static void
vcp_panic_endpoint(struct vsb *vsb, const struct vrt_endpoint *vep)
{
	char h[VTCP_ADDRBUFSIZE];
	char p[VTCP_PORTBUFSIZE];

	if (PAN_dump_struct(vsb, vep, VRT_ENDPOINT_MAGIC, "vrt_endpoint"))
		return;
	if (vep->uds_path)
		VSB_printf(vsb, "uds_path = %s,\n", vep->uds_path);
	if (vep->ipv4 && VSA_Sane(vep->ipv4)) {
		VTCP_name(vep->ipv4, h, sizeof h, p, sizeof p);
		VSB_printf(vsb, "ipv4 = %s, ", h);
		VSB_printf(vsb, "port = %s,\n", p);
	}
	if (vep->ipv6 && VSA_Sane(vep->ipv6)) {
		VTCP_name(vep->ipv6, h, sizeof h, p, sizeof p);
		VSB_printf(vsb, "ipv6 = %s, ", h);
		VSB_printf(vsb, "port = %s,\n", p);
	}
	VSB_indent(vsb, -2);
	VSB_cat(vsb, "},\n");
}

void
VCP_Panic(struct vsb *vsb, struct conn_pool *cp)
{

	if (PAN_dump_struct(vsb, cp, CONN_POOL_MAGIC, "conn_pool"))
		return;
	VSB_cat(vsb, "ident = ");
	VSB_quote(vsb, cp->ident, VSHA256_DIGEST_LENGTH, VSB_QUOTE_HEX);
	VSB_cat(vsb, ",\n");
	vcp_panic_endpoint(vsb, cp->endpoint);
	VSB_indent(vsb, -2);
	VSB_cat(vsb, "},\n");
}

/*--------------------------------------------------------------------*/

void
VCP_Init(void)
{
	Lck_New(&conn_pools_mtx, lck_conn_pool);
	Lck_New(&dead_pools_mtx, lck_dead_pool);
}

/**********************************************************************/

static inline int
tmo2msec(vtim_dur tmo)
{
	return ((int)floor(tmo * 1000.0));
}

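/*
 * TCP connector: with prefer_ipv6 the IPv6 address is tried first and
 * IPv4 is the fallback, otherwise IPv4 first with IPv6 as fallback.
 * *ap is left pointing at the address of the last attempt.
 */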
static int v_matchproto_(cp_open_f)
vtp_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
{
	int s;
	int msec;

	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);

	msec = tmo2msec(tmo);
	if (cache_param->prefer_ipv6) {
		*ap = cp->endpoint->ipv6;
		s = VTCP_connect(*ap, msec);
		if (s >= 0)
			return (s);
	}
	*ap = cp->endpoint->ipv4;
	s = VTCP_connect(*ap, msec);
	if (s >= 0)
		return (s);
	if (!cache_param->prefer_ipv6) {
		*ap = cp->endpoint->ipv6;
		s = VTCP_connect(*ap, msec);
	}
	return (s);
}


/*--------------------------------------------------------------------*/

static void v_matchproto_(cp_close_f)
vtp_close(struct pfd *pfd)
{

	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	VTCP_close(&pfd->fd);
}

static void v_matchproto_(cp_name_f)
vtp_local_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	VTCP_myname(pfd->fd, addr, alen, pbuf, plen);
}

static void v_matchproto_(cp_name_f)
vtp_remote_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
    unsigned plen)
{
	CHECK_OBJ_NOTNULL(pfd, PFD_MAGIC);
	VTCP_hisname(pfd->fd, addr, alen, pbuf, plen);
}

static const struct cp_methods vtp_methods = {
	.open = vtp_open,
	.close = vtp_close,
	.local_name = vtp_local_name,
	.remote_name = vtp_remote_name,
};

/*--------------------------------------------------------------------
 */

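/*
 * UDS connector: connects to the endpoint's uds_path and reports the
 * placeholder bogo_ip as the address; local and remote names are given
 * as "0.0.0.0" port "0", since a Unix domain socket has no IP endpoint.
 */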
static int v_matchproto_(cp_open_f)
vus_open(const struct conn_pool *cp, vtim_dur tmo, VCL_IP *ap)
{
	int s;
	int msec;

	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	AN(cp->endpoint->uds_path);

	msec = tmo2msec(tmo);
	*ap = bogo_ip;
	s = VUS_connect(cp->endpoint->uds_path, msec);
	return (s);
}

static void v_matchproto_(cp_name_f)
vus_name(const struct pfd *pfd, char *addr, unsigned alen, char *pbuf,
    unsigned plen)
{
	(void) pfd;
	assert(alen > strlen("0.0.0.0"));
	assert(plen > 1);
	strcpy(addr, "0.0.0.0");
	strcpy(pbuf, "0");
}

static const struct cp_methods vus_methods = {
	.open = vus_open,
	.close = vtp_close,
	.local_name = vus_name,
	.remote_name = vus_name,
};

/*--------------------------------------------------------------------
 * Reference a TCP pool given by {ip4, ip6} pair or a UDS.  Create if
 * it doesn't exist already.
 */

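/*
 * Pools are deduplicated on a SHA256 digest over the caller's ident
 * string plus the endpoint (UDS path or IPv4/IPv6 addresses) and any
 * preamble, so identical endpoints share one pool and one set of idle
 * connections.
 */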
struct conn_pool *
VCP_Ref(const struct vrt_endpoint *vep, const char *ident)
{
	struct conn_pool *cp, *cp2;
	struct VSHA256Context cx[1];
	unsigned char digest[VSHA256_DIGEST_LENGTH];

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	AN(ident);
	VSHA256_Init(cx);
	VSHA256_Update(cx, ident, strlen(ident) + 1); // include \0
	if (vep->uds_path != NULL) {
		AZ(vep->ipv4);
		AZ(vep->ipv6);
		VSHA256_Update(cx, "UDS", 4); // include \0
		VSHA256_Update(cx, vep->uds_path, strlen(vep->uds_path));
	} else {
		assert(vep->ipv4 != NULL || vep->ipv6 != NULL);
		if (vep->ipv4 != NULL) {
			assert(VSA_Sane(vep->ipv4));
			VSHA256_Update(cx, "IP4", 4); // include \0
			VSHA256_Update(cx, vep->ipv4, vsa_suckaddr_len);
		}
		if (vep->ipv6 != NULL) {
			assert(VSA_Sane(vep->ipv6));
			VSHA256_Update(cx, "IP6", 4); // include \0
			VSHA256_Update(cx, vep->ipv6, vsa_suckaddr_len);
		}
	}
	if (vep->preamble != NULL && vep->preamble->len > 0) {
		VSHA256_Update(cx, "PRE", 4); // include \0
		VSHA256_Update(cx, vep->preamble->blob, vep->preamble->len);
	}
	VSHA256_Final(digest, cx);

	ALLOC_OBJ(cp, CONN_POOL_MAGIC);
	AN(cp);
	cp->refcnt = 1;
	cp->holddown = 0;
	cp->endpoint = VRT_Endpoint_Clone(vep);
	CHECK_OBJ_NOTNULL(cp->endpoint, VRT_ENDPOINT_MAGIC);
	memcpy(cp->ident, digest, sizeof cp->ident);
	if (vep->uds_path != NULL)
		cp->methods = &vus_methods;
	else
		cp->methods = &vtp_methods;
	Lck_New(&cp->mtx, lck_conn_pool);
	VTAILQ_INIT(&cp->connlist);

	CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
	Lck_Lock(&conn_pools_mtx);
	cp2 = VRBT_FIND(vrb, &conn_pools, cp);
	if (cp2 == NULL)
		AZ(VRBT_INSERT(vrb, &conn_pools, cp));
	else {
		CHECK_OBJ(cp2, CONN_POOL_MAGIC);
		assert(cp2->refcnt > 0);
		cp2->refcnt++;
	}
	Lck_Unlock(&conn_pools_mtx);

	if (cp2 == NULL) {
		CHECK_OBJ_NOTNULL(cp, CONN_POOL_MAGIC);
		return (cp);
	}

	Lck_Delete(&cp->mtx);
	AZ(cp->n_conn);
	AZ(cp->n_kill);
	FREE_OBJ(cp->endpoint);
	FREE_OBJ(cp);
	CHECK_OBJ_NOTNULL(cp2, CONN_POOL_MAGIC);
	return (cp2);
}