varnish-cache/vmod/vmod_directors_shard_dir.c
/*-
 * Copyright 2009-2016 UPLEX - Nils Goroll Systemoptimierung
 * All rights reserved.
 *
 * Authors: Nils Goroll <nils.goroll@uplex.de>
 *          Geoffrey Simmons <geoff.simmons@uplex.de>
 *          Julian Wiesener <jw@uplex.de>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*lint -e801 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>

#include "cache/cache.h"

#include "vbm.h"
#include "vrnd.h"

#include "vcc_directors_if.h"
#include "vmod_directors_shard_dir.h"

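/*
 * Health snapshot of a single host, filled in by shard_next() while
 * walking the hash circle.
 */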
struct shard_be_info {
        unsigned        hostid;
        unsigned        healthy;
        double          changed;        // when
};

/*
 * circle walk state for shard_next
 *
 * pick* cut off the search after having seen all possible backends
 */
struct shard_state {
        const struct vrt_ctx    *ctx;
        struct sharddir *shardd;
        uint32_t                idx;

        struct vbitmap          *picklist;
        unsigned                pickcount;

        struct shard_be_info    previous;
        struct shard_be_info    last;
};

void
sharddir_debug(struct sharddir *shardd, const uint32_t flags)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        shardd->debug_flags = flags;
}

void
sharddir_log(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        if (vsl != NULL)
                VSLbv(vsl, tag, fmt, ap);
        else
                VSLv(tag, NO_VXID, fmt, ap);
        va_end(ap);
}

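/*
 * Binary search on the sorted hash circle: return the index of the first
 * point >= key; keys beyond the highest point map to the last index.
 */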
static int
shard_lookup(const struct sharddir *shardd, const uint32_t key)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

        const uint32_t n = shardd->n_points;
        uint32_t i, idx = UINT32_MAX, high = n, low = 0;

        assert (n < idx);

        do {
            i = (high + low) / 2;
            if (shardd->hashcircle[i].point == key)
                idx = i;
            else if (i == n - 1)
                idx = n - 1;
            else if (shardd->hashcircle[i].point < key &&
                     shardd->hashcircle[i+1].point >= key)
                idx = i + 1;
            else if (shardd->hashcircle[i].point > key)
                if (i == 0)
                    idx = 0;
                else
                    high = i;
            else
                low = i;
        } while (idx == UINT32_MAX);

        return (idx);
}

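/*
 * Walk the hash circle clockwise from state->idx, visiting each host at
 * most once (tracked in the picklist bitmap). The first 'skip' eligible
 * hosts are skipped; with healthy set, only healthy hosts are eligible.
 * The pick is recorded in state->last, the previous candidate is kept in
 * state->previous. Returns the chosen host id, or -1 once all backends
 * have been seen.
 */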
static int
shard_next(struct shard_state *state, VCL_INT skip, VCL_BOOL healthy)
{
        int c, chosen = -1;
        VCL_BACKEND be;
        vtim_real changed;
        struct shard_be_info *sbe;

        AN(state);
        CHECK_OBJ_NOTNULL(state->shardd, SHARDDIR_MAGIC);

        if (state->pickcount >= state->shardd->n_backend)
                return (-1);

        while (state->pickcount < state->shardd->n_backend && skip >= 0) {

                c = state->shardd->hashcircle[state->idx].host;

                if (!vbit_test(state->picklist, c)) {

                        vbit_set(state->picklist, c);
                        state->pickcount++;

                        sbe = NULL;
                        be = state->shardd->backend[c].backend;
                        AN(be);
                        if (VRT_Healthy(state->ctx, be, &changed)) {
                                if (skip-- == 0) {
                                        chosen = c;
                                        sbe = &state->last;
                                } else {
                                        sbe = &state->previous;
                                }

                        } else if (!healthy && skip-- == 0) {
                                chosen = c;
                                sbe = &state->last;
                        }
                        if (sbe == &state->last &&
                            state->last.hostid != UINT_MAX)
                                memcpy(&state->previous, &state->last,
                                    sizeof(state->previous));

                        if (sbe) {
                                sbe->hostid = c;
                                sbe->healthy = 1;
                                sbe->changed = changed;
                        }
                        if (chosen != -1)
                                break;
                }

                if (++(state->idx) == state->shardd->n_points)
                        state->idx = 0;
        }
        return (chosen);
}

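/* constructor: allocate the sharddir and set up its rwlock */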
void
sharddir_new(struct sharddir **sharddp, const char *vcl_name,
    const struct vmod_directors_shard_param *param)
{
        struct sharddir *shardd;

        AN(vcl_name);
        AN(sharddp);
        AZ(*sharddp);
        ALLOC_OBJ(shardd, SHARDDIR_MAGIC);
        AN(shardd);
        *sharddp = shardd;
        shardd->name = vcl_name;
        shardd->param = param;
        PTOK(pthread_rwlock_init(&shardd->mtx, NULL));
}

void
sharddir_set_param(struct sharddir *shardd,
    const struct vmod_directors_shard_param *param)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        shardd->param = param;
}

void
sharddir_release(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        shardcfg_backend_clear(shardd);
}

void
sharddir_delete(struct sharddir **sharddp)
{
        struct sharddir *shardd;

        TAKE_OBJ_NOTNULL(shardd, sharddp, SHARDDIR_MAGIC);
        shardcfg_delete(shardd);
        PTOK(pthread_rwlock_destroy(&shardd->mtx));
        FREE_OBJ(shardd);
}

void
sharddir_rdlock(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        PTOK(pthread_rwlock_rdlock(&shardd->mtx));
}

void
sharddir_wrlock(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        PTOK(pthread_rwlock_wrlock(&shardd->mtx));
}

void
sharddir_unlock(struct sharddir *shardd)
{
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        PTOK(pthread_rwlock_unlock(&shardd->mtx));
}

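/* clamp alt to [0, n_backend - 1], logging any adjustment */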
static inline void
validate_alt(VRT_CTX, const struct sharddir *shardd, VCL_INT *alt)
{
        const VCL_INT alt_max = shardd->n_backend - 1;

        if (*alt < 0) {
                shard_err(ctx->vsl, shardd->name,
                    "invalid negative parameter alt=%ld, set to 0", *alt);
                *alt = 0;
        } else if (*alt > alt_max) {
                shard_err(ctx->vsl, shardd->name,
                    "parameter alt=%ld limited to %ld", *alt, alt_max);
                *alt = alt_max;
        }
}

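/* prepare the circle-walk state for one resolve call */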
static inline void
init_state(struct shard_state *state,
    VRT_CTX, struct sharddir *shardd, struct vbitmap *picklist)
{
        AN(picklist);

        state->ctx = ctx;
        state->shardd = shardd;
        state->idx = UINT32_MAX;
        state->picklist = picklist;

        /* healthy and changed only defined for valid hostids */
        state->previous.hostid = UINT_MAX;
        state->last.hostid = UINT_MAX;
}

/* basically same as vdir_any_healthy
 * - XXX we should embed a vdir
 * - XXX should we return the health state of the actual backend
 *   for healthy=IGNORE ?
 */
VCL_BOOL
sharddir_any_healthy(VRT_CTX, struct sharddir *shardd, VCL_TIME *changed)
{
        unsigned i, retval = 0;
        VCL_BACKEND be;
        vtim_real c;

        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        sharddir_rdlock(shardd);
        if (changed != NULL)
                *changed = 0;
        for (i = 0; i < shardd->n_backend; i++) {
                be = shardd->backend[i].backend;
                CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);
                retval = VRT_Healthy(ctx, be, &c);
                if (changed != NULL && c > *changed)
                        *changed = c;
                if (retval)
                        break;
        }
        sharddir_unlock(shardd);
        return (retval);
}

/*
 * core function for the director backend/resolve method
 */

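/*
 * Parameter notes (summarizing the code below):
 * - alt > 0 skips that many backends on the circle before picking.
 * - healthy: ALL requires the skipped alternatives to be healthy as well,
 *   IGNORE picks regardless of health, otherwise only the final pick needs
 *   to be healthy.
 * - rampup/warmup: if the chosen host is still within its rampup period,
 *   or otherwise with probability warmup, the next host on the circle may
 *   be returned instead (warmup == -1 means use the director default).
 */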
static VCL_BACKEND
sharddir_pick_be_locked(VRT_CTX, const struct sharddir *shardd, uint32_t key,
    VCL_INT alt, VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy,
    struct shard_state *state)
{
        VCL_BACKEND be;
        VCL_DURATION chosen_r, alt_r;

        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(ctx->vsl);
        assert(shardd->n_backend > 0);

        assert(shardd->hashcircle);

        validate_alt(ctx, shardd, &alt);

        state->idx = shard_lookup(shardd, key);
        assert(state->idx < UINT32_MAX);

        SHDBG(SHDBG_LOOKUP, shardd, "lookup key %x idx %u host %u",
            key, state->idx, shardd->hashcircle[state->idx].host);

        if (alt > 0) {
                if (shard_next(state, alt - 1,
                    healthy == VENUM(ALL) ? 1 : 0) == -1) {
                        if (state->previous.hostid != UINT_MAX) {
                                be = sharddir_backend(shardd,
                                    state->previous.hostid);
                                AN(be);
                                return (be);
                        }
                        return (NULL);
                }
        }

        if (shard_next(state, 0, healthy == VENUM(IGNORE) ? 0 : 1) == -1) {
                if (state->previous.hostid != UINT_MAX) {
                        be = sharddir_backend(shardd, state->previous.hostid);
                        AN(be);
                        return (be);
                }
                return (NULL);
        }

        be = sharddir_backend(shardd, state->last.hostid);
        AN(be);

        if (warmup == -1)
                warmup = shardd->warmup;

        /* short path for cases where we don't want rampup/warmup or can't */
        if (alt > 0 || healthy == VENUM(IGNORE) || (!rampup && warmup == 0) ||
            shard_next(state, 0, 1) == -1)
                return (be);

        assert(alt == 0);
        assert(state->previous.hostid != UINT_MAX);
        assert(state->last.hostid != UINT_MAX);
        assert(state->previous.hostid != state->last.hostid);
        assert(be == sharddir_backend(shardd, state->previous.hostid));

        chosen_r = shardcfg_get_rampup(shardd, state->previous.hostid);
        alt_r = shardcfg_get_rampup(shardd, state->last.hostid);

        SHDBG(SHDBG_RAMPWARM, shardd, "chosen host %u rampup %f changed %f",
            state->previous.hostid, chosen_r,
            ctx->now - state->previous.changed);
        SHDBG(SHDBG_RAMPWARM, shardd, "alt host %u rampup %f changed %f",
            state->last.hostid, alt_r,
            ctx->now - state->last.changed);

        if (ctx->now - state->previous.changed < chosen_r) {
                /*
                 * chosen host is in rampup
                 * - no change if alternative host is also in rampup or the dice
                 *   has rolled in favour of the chosen host
                 */
                if (!rampup ||
                    ctx->now - state->last.changed < alt_r ||
                    VRND_RandomTestableDouble() * chosen_r <
                    (ctx->now - state->previous.changed))
                        return (be);
        } else {
                /* chosen host not in rampup - warmup ? */
                if (warmup == 0 || VRND_RandomTestableDouble() > warmup)
                        return (be);
        }

        be = sharddir_backend(shardd, state->last.hostid);
        return (be);
}

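/*
 * Public entry point for resolving a backend: takes the read lock, sets up
 * the walk state with a picklist bitmap allocated on the stack (sized via
 * VBITMAP_SZ), and delegates to sharddir_pick_be_locked().
 */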
VCL_BACKEND
sharddir_pick_be(VRT_CTX, struct sharddir *shardd, uint32_t key, VCL_INT alt,
    VCL_REAL warmup, VCL_BOOL rampup, VCL_ENUM healthy)
{
        VCL_BACKEND be;
        struct shard_state state[1];
        unsigned picklist_sz;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);

        sharddir_rdlock(shardd);

        if (shardd->n_backend == 0) {
                shard_err0(ctx->vsl, shardd->name, "no backends");
                sharddir_unlock(shardd);
                return (NULL);
        }

        picklist_sz = VBITMAP_SZ(shardd->n_backend);
        char picklist_spc[picklist_sz];

        memset(state, 0, sizeof(state));
        init_state(state, ctx, shardd, vbit_init(picklist_spc, picklist_sz));

        be = sharddir_pick_be_locked(ctx, shardd, key, alt, warmup, rampup,
            healthy, state);
        sharddir_unlock(shardd);

        vbit_destroy(state->picklist);
        return (be);
}