/* varnish-cache/vmod/vmod_directors_fall_back.c */

/*-
 * Copyright (c) 2013-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@FreeBSD.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include <stdlib.h>
#include <string.h>

#include "cache/cache.h"

#include "vcc_directors_if.h"

#include "vmod_directors.h"
#include "vsb.h"
#include "vbm.h"

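/*
 * Per-instance state of a fallback director: the shared backend list and
 * lock (vdir), the sticky flag, and the index of the currently selected
 * backend.
 */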
struct vmod_directors_fallback {
        unsigned        magic;
#define VMOD_DIRECTORS_FALLBACK_MAGIC   0xad4e26ba
        struct vdir     *vd;
        VCL_BOOL        st;
        unsigned        cur;
};

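/*
 * vdi_healthy callback: the director counts as healthy as long as at least
 * one of its backends is healthy.
 */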
static VCL_BOOL v_matchproto_(vdi_healthy)
vmod_fallback_healthy(VRT_CTX, VCL_BACKEND dir, VCL_TIME *changed)
{
        struct vmod_directors_fallback *fb;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(fb, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC);
        return (vdir_any_healthy(ctx, fb->vd, changed));
}

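/*
 * vdi_list_f callback (used by the backend.list CLI command): with pflag
 * set, emit one entry per member backend and mark the currently selected
 * one (JSON when jflag is set, a text table otherwise); without pflag,
 * emit only the compact healthy/total summary.
 */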
static void v_matchproto_(vdi_list_f)
vmod_fallback_list(VRT_CTX, VCL_BACKEND dir, struct vsb *vsb, int pflag,
    int jflag)
{
        struct vmod_directors_fallback *fb;
        struct vdir *vd;
        VCL_BACKEND be;
        VCL_BOOL h;
        unsigned u, nh;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(fb, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC);
        CAST_OBJ_NOTNULL(vd, fb->vd, VDIR_MAGIC);

        if (pflag) {
                if (jflag) {
                        VSB_cat(vsb, "{\n");
                        VSB_indent(vsb, 2);
                        VSB_printf(vsb, "\"sticky\": %s,\n",
                            fb->st ? "true" : "false");
                        VSB_cat(vsb, "\"backends\": {\n");
                        VSB_indent(vsb, 2);
                } else {
                        VSB_cat(vsb, "\n\n\tBackend\tCurrent\tHealth\n");
                }
        }

        vdir_rdlock(vd);
        vdir_update_health(ctx, vd);
        for (u = 0; pflag && u < vd->n_backend; u++) {
                be = vd->backend[u];
                CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);

                h = vbit_test(vd->healthy, u);

                if (jflag) {
                        if (u)
                                VSB_cat(vsb, ",\n");
                        VSB_printf(vsb, "\"%s\": {\n", be->vcl_name);
                        VSB_indent(vsb, 2);

                        if (fb->cur == u)
                                VSB_cat(vsb, "\"current\": true,\n");
                        else
                                VSB_cat(vsb, "\"current\": false,\n");

                        if (h)
                                VSB_cat(vsb, "\"health\": \"healthy\"\n");
                        else
                                VSB_cat(vsb, "\"health\": \"sick\"\n");

                        VSB_indent(vsb, -2);
                        VSB_cat(vsb, "}");
                } else {
                        VSB_cat(vsb, "\t");
                        VSB_cat(vsb, be->vcl_name);
                        if (fb->cur == u)
                                VSB_cat(vsb, "\t*\t");
                        else
                                VSB_cat(vsb, "\t\t");
                        VSB_cat(vsb, h ? "healthy" : "sick");
                        VSB_cat(vsb, "\n");
                }
        }
        nh = vd->n_healthy;
        u = vd->n_backend;
        vdir_unlock(vd);

        if (jflag && (pflag)) {
                VSB_cat(vsb, "\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "}\n");
                VSB_indent(vsb, -2);
                VSB_cat(vsb, "},\n");
        }

        if (pflag)
                return;

        if (jflag)
                VSB_printf(vsb, "[%u, %u, \"%s\"]", nh, u,
                    nh ? "healthy" : "sick");
        else
                VSB_printf(vsb, "%u/%u\t%s", nh, u, nh ? "healthy" : "sick");
}

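/*
 * vdi_resolve_f callback: pick the backend for this request.  Without
 * sticky, the search restarts at the first backend every time, so the
 * highest-priority healthy backend wins; with sticky, it resumes at the
 * previously chosen backend and only moves on when that one goes sick.
 * The scan wraps around the list and returns NULL if no backend is healthy.
 */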
static VCL_BACKEND v_matchproto_(vdi_resolve_f)
vmod_fallback_resolve(VRT_CTX, VCL_BACKEND dir)
{
        struct vmod_directors_fallback *fb;
        unsigned u;
        VCL_BACKEND be = NULL;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(fb, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC);

        vdir_wrlock(fb->vd);
        if (!fb->st)
                fb->cur = 0;
        for (u = 0; u < fb->vd->n_backend; u++) {
                be = fb->vd->backend[fb->cur];
                CHECK_OBJ_NOTNULL(be, DIRECTOR_MAGIC);
                if (VRT_Healthy(ctx, be, NULL))
                        break;
                if (++fb->cur == fb->vd->n_backend)
                        fb->cur = 0;
        }
        if (u == fb->vd->n_backend)
                be = NULL;
        vdir_unlock(fb->vd);
        return (be);
}

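/* vdi_release_f callback: let go of the references held on the member backends. */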
static void v_matchproto_(vdi_release_f)
vmod_fallback_release(VCL_BACKEND dir)
{
        struct vmod_directors_fallback *fallback;

        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(fallback, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC);
        vdir_release(fallback->vd);
}

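/* vdi_destroy_f callback: free the underlying vdir and the fallback object itself. */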
static void v_matchproto_(vdi_destroy_f)
vmod_fallback_destroy(VCL_BACKEND dir)
{
        struct vmod_directors_fallback *fallback;

        CHECK_OBJ_NOTNULL(dir, DIRECTOR_MAGIC);
        CAST_OBJ_NOTNULL(fallback, dir->priv, VMOD_DIRECTORS_FALLBACK_MAGIC);
        vdir_delete(&fallback->vd);
        FREE_OBJ(fallback);
}

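/* Director callbacks registered (via vdir_new) for the "fallback" director type. */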
static const struct vdi_methods vmod_fallback_methods[1] = {{
        .magic =        VDI_METHODS_MAGIC,
        .type =         "fallback",
        .healthy =      vmod_fallback_healthy,
        .resolve =      vmod_fallback_resolve,
        .release =      vmod_fallback_release,
        .destroy =      vmod_fallback_destroy,
        .list =         vmod_fallback_list
}};

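/*
 * Object constructor for directors.fallback(): allocate the instance,
 * create the underlying director and remember the sticky setting.
 */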
VCL_VOID v_matchproto_()
vmod_fallback__init(VRT_CTX,
    struct vmod_directors_fallback **fbp, const char *vcl_name, VCL_BOOL sticky)
{
        struct vmod_directors_fallback *fb;

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        AN(fbp);
        AZ(*fbp);
        ALLOC_OBJ(fb, VMOD_DIRECTORS_FALLBACK_MAGIC);
        AN(fb);
        *fbp = fb;
        vdir_new(ctx, &fb->vd, vcl_name, vmod_fallback_methods, fb);
        fb->st = sticky;
}

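/*
 * Object destructor: retire the director; the release/destroy callbacks
 * above perform the actual cleanup.
 */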
VCL_VOID v_matchproto_()
vmod_fallback__fini(struct vmod_directors_fallback **fbp)
{
        struct vmod_directors_fallback *fb;

        TAKE_OBJ_NOTNULL(fb, fbp, VMOD_DIRECTORS_FALLBACK_MAGIC);
        VRT_DelDirector(&fb->vd->dir);
}

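/* .add_backend(): append a backend at the end of the fallback order (the vdir weight is unused here, hence 0.0). */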
VCL_VOID v_matchproto_()
vmod_fallback_add_backend(VRT_CTX,
    struct vmod_directors_fallback *fb, VCL_BACKEND be)
{

        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(fb, VMOD_DIRECTORS_FALLBACK_MAGIC);
        vdir_add_backend(ctx, fb->vd, be, 0.0);
}

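/* .remove_backend(): drop a backend, passing &fb->cur so the current-backend index can be adjusted. */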
VCL_VOID v_matchproto_()
vmod_fallback_remove_backend(VRT_CTX,
    struct vmod_directors_fallback *fb, VCL_BACKEND be)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(fb, VMOD_DIRECTORS_FALLBACK_MAGIC);
        vdir_remove_backend(ctx, fb->vd, be, &fb->cur);
}

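/* .backend(): return the director itself, for use as a backend in VCL. */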
VCL_BACKEND v_matchproto_()
vmod_fallback_backend(VRT_CTX,
    struct vmod_directors_fallback *fb)
{
        CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
        CHECK_OBJ_NOTNULL(fb, VMOD_DIRECTORS_FALLBACK_MAGIC);
        return (fb->vd->dir);
}