/*-
 * Copyright (c) 2007-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "config.h"

#include <stdlib.h>

#include "cache/cache_varnishd.h"

#include "cache/cache_obj.h"
#include "cache/cache_objhead.h"

#include "storage/storage.h"
#include "storage/storage_simple.h"

#include "vtim.h"

/* Flags for allocating memory in sml_stv_alloc */
#define LESS_MEM_ALLOCED_IS_OK	1

// marker pointer for sml_trimstore
static void *trim_once = &trim_once;
// for delayed return of hdl->last resume pointer
static void *null_iov = &null_iov;

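/*
 * Note: trim_once and null_iov are self-pointing sentinels: each points
 * at its own storage cell, yielding a unique address that cannot collide
 * with any real struct storage * or iovec base. A minimal sketch of the
 * pattern (illustration only, standard C):
 *
 *	static void *sentinel = &sentinel;
 *	...
 *	if (p == sentinel)
 *		;	// marker seen, not a real object
 */
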
/*-------------------------------------------------------------------*/

static struct storage *
objallocwithnuke(struct worker *, const struct stevedore *, ssize_t size,
    int flags);

static struct storage *
sml_stv_alloc(const struct stevedore *stv, ssize_t size, int flags)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(stv->sml_alloc);

	if (!(flags & LESS_MEM_ALLOCED_IS_OK)) {
		if (size > cache_param->fetch_maxchunksize)
			return (NULL);
		else
			return (stv->sml_alloc(stv, size));
	}

	if (size > cache_param->fetch_maxchunksize)
		size = cache_param->fetch_maxchunksize;

	assert(size <= UINT_MAX);	/* field limit in struct storage */

	for (;;) {
		/* try to allocate from it */
		assert(size > 0);
		st = stv->sml_alloc(stv, size);
		if (st != NULL)
			break;

		if (size <= cache_param->fetch_chunksize)
			break;

		size /= 2;
	}
	CHECK_OBJ_ORNULL(st, STORAGE_MAGIC);
	return (st);
}

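/*
 * Note: with LESS_MEM_ALLOCED_IS_OK the request degrades geometrically on
 * failure; e.g. a 1 MB request that cannot be satisfied is retried as
 * 512 kB, 256 kB, ... and the loop gives up once a request no larger than
 * fetch_chunksize has failed, too.
 */
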
static void
sml_stv_free(const struct stevedore *stv, struct storage *st)
{

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	if (stv->sml_free != NULL)
		stv->sml_free(st);
}

/*--------------------------------------------------------------------
 * This function is called by a stevedore's ->allocobj() method, which
 * very often will be SML_allocobj() below, to convert a slab of storage
 * into an object which the stevedore can then register in its internal
 * state, before returning it to STV_NewObject().
 * As you probably guessed: All this for persistence.
 */

struct object *
SML_MkObject(const struct stevedore *stv, struct objcore *oc, void *ptr)
{
	struct object *o;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(stv->methods);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	assert(PAOK(ptr));

	o = ptr;
	INIT_OBJ(o, OBJECT_MAGIC);

	VTAILQ_INIT(&o->list);

	oc->stobj->stevedore = stv;
	oc->stobj->priv = o;
	oc->stobj->priv2 = 0;
	return (o);
}

/*--------------------------------------------------------------------
 * This is the default ->allocobj() which all stevedores who do not
 * implement persistent storage can rely on.
 */

int v_matchproto_(storage_allocobj_f)
SML_allocobj(struct worker *wrk, const struct stevedore *stv,
    struct objcore *oc, unsigned wsl)
{
	struct object *o;
	struct storage *st = NULL;
	unsigned ltot;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	AN(stv->sml_alloc);

	ltot = sizeof(*o) + PRNDUP(wsl);

	do {
		st = stv->sml_alloc(stv, ltot);
		if (st != NULL && st->space < ltot) {
			stv->sml_free(st);
			st = NULL;
		}
	} while (st == NULL && LRU_NukeOne(wrk, stv->lru));
	if (st == NULL)
		return (0);

	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	o = SML_MkObject(stv, oc, st->ptr);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st->len = sizeof(*o);
	o->objstore = st;
	return (1);
}

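/*
 * Note: the slab allocated above is laid out as
 *
 *	st->ptr -> +---------------+
 *	           | struct object |  st->len starts at sizeof(struct object)
 *	           +---------------+
 *	           | wsl workspace |  (PRNDUP()-rounded)
 *	           +---------------+
 *
 * which is why sml_setattr() below can carve variable-size attributes
 * out of the remaining space in o->objstore.
 */
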
void * v_matchproto_(storage_allocbuf_t)
SML_AllocBuf(struct worker *wrk, const struct stevedore *stv, size_t size,
    uintptr_t *ppriv)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	AN(ppriv);

	if (size > UINT_MAX)
		return (NULL);
	st = objallocwithnuke(wrk, stv, size, 0);
	if (st == NULL)
		return (NULL);
	assert(st->space >= size);
	st->flags = STORAGE_F_BUFFER;
	st->len = size;
	*ppriv = (uintptr_t)st;
	return (st->ptr);
}

void v_matchproto_(storage_freebuf_t)
SML_FreeBuf(struct worker *wrk, const struct stevedore *stv, uintptr_t priv)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	CAST_OBJ_NOTNULL(st, (void *)priv, STORAGE_MAGIC);
	assert(st->flags == STORAGE_F_BUFFER);
	sml_stv_free(stv, st);
}

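/*
 * Note: SML_AllocBuf() and SML_FreeBuf() travel in pairs; the opaque
 * *ppriv cookie is just the struct storage pointer cast to uintptr_t,
 * which SML_FreeBuf() casts back (and magic-checks) to release the
 * buffer.
 */
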
/*---------------------------------------------------------------------
 */

static struct object *
sml_getobj(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	if (stv->sml_getobj != NULL)
		return (stv->sml_getobj(wrk, oc));
	if (oc->stobj->priv == NULL)
		return (NULL);
	CAST_OBJ_NOTNULL(o, oc->stobj->priv, OBJECT_MAGIC);
	return (o);
}

static void v_matchproto_(objslim_f)
sml_slim(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct object *o;
	struct storage *st, *stn;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

#define OBJ_AUXATTR(U, l)					\
	do {							\
		if (o->aa_##l != NULL) {			\
			sml_stv_free(stv, o->aa_##l);		\
			o->aa_##l = NULL;			\
		}						\
	} while (0);
#include "tbl/obj_attr.h"

	VTAILQ_FOREACH_SAFE(st, &o->list, list, stn) {
		CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
		VTAILQ_REMOVE(&o->list, st, list);
		sml_stv_free(stv, st);
	}
}

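/*
 * Note: slimming frees the body segments and the auxiliary attributes,
 * but leaves o->objstore alone, so the object's fixed and variable
 * attributes (the metadata) remain valid; the slab itself is only freed
 * in sml_objfree() below.
 */
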
static void
sml_bocfini(const struct stevedore *stv, struct boc *boc)
{
	struct storage *st;

	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);

	if (boc->stevedore_priv == NULL ||
	    boc->stevedore_priv == trim_once)
		return;

	/* Free any leftovers from Trim */
	TAKE_OBJ_NOTNULL(st, &boc->stevedore_priv, STORAGE_MAGIC);
	sml_stv_free(stv, st);
}

/*
 * called in two cases:
 * - oc->boc == NULL: cache object on LRU freed
 * - oc->boc != NULL: cache object replaced for backend error
 */
static void v_matchproto_(objfree_f)
sml_objfree(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct storage *st;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
	CAST_OBJ_NOTNULL(o, oc->stobj->priv, OBJECT_MAGIC);

	sml_slim(wrk, oc);
	st = o->objstore;
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	FINI_OBJ(o);

	if (oc->boc != NULL)
		sml_bocfini(stv, oc->boc);
	else if (stv->lru != NULL)
		LRU_Remove(oc);

	sml_stv_free(stv, st);

	memset(oc->stobj, 0, sizeof oc->stobj);

	wrk->stats->n_object--;
}

// kept for reviewers - XXX remove later
#undef VAI_DBG

struct sml_hdl {
	struct vai_hdl_preamble preamble;
#define SML_HDL_MAGIC 0x37dfd996
	struct vai_qe qe;
	struct pool_task task;	// unfortunate
	struct ws *ws;	// NULL is malloc()
	struct objcore *oc;
	struct object *obj;
	const struct stevedore *stv;
	struct boc *boc;

	struct storage *st;	// updated by _lease()

	// only for _lease_boc()
	uint64_t st_off;	// already returned fragment of current st
	uint64_t avail, returned;
	struct storage *last;	// to resume, held back by _return()
};

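/*
 * Note: for the _lease_boc() path, avail/returned form a byte ledger:
 * avail is how much the writer has published, returned how much has been
 * handed out in leases. st/st_off locate the next unreturned byte in the
 * current segment, and last parks the final segment so its lease can be
 * handed back once the writer side is known to be done with it.
 */
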
static inline void
sml_ai_viov_fill(struct viov *viov, struct storage *st)
{
	viov->iov.iov_base = TRUST_ME(st->ptr);
	viov->iov.iov_len = st->len;
	viov->lease = ptr2lease(st);
	VAI_ASSERT_LEASE(viov->lease);
}

// sml has no mechanism to notify "I got free space again now"
// (we could add that, but because storage.h is used in mgt, a first
// attempt looks like it would cause some include spill for vai_q_head or
// something similar)
//
// So anyway, to get ahead we just implement a pretty stupid "call the
// notify some time later" on a thread
static void
sml_ai_later_task(struct worker *wrk, void *priv)
{
	struct sml_hdl *hdl;
	const vtim_dur dur = 0.0042;

	(void)wrk;
	VTIM_sleep(dur);
	CAST_VAI_HDL_NOTNULL(hdl, priv, SML_HDL_MAGIC);
	memset(&hdl->task, 0, sizeof hdl->task);
	hdl->qe.cb(hdl, hdl->qe.priv);
}

static void
sml_ai_later(struct worker *wrk, struct sml_hdl *hdl)
{
	AZ(hdl->task.func);
	AZ(hdl->task.priv);
	hdl->task.func = sml_ai_later_task;
	hdl->task.priv = hdl;
	AZ(Pool_Task(wrk->pool, &hdl->task, TASK_QUEUE_BG));
}

static int
sml_ai_buffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	const struct stevedore *stv;
	struct sml_hdl *hdl;
	struct storage *st;
	struct viov *vio;
	int r = 0;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	stv = hdl->stv;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	VSCARAB_FOREACH(vio, scarab)
		if (vio->iov.iov_len > UINT_MAX)
			return (-EINVAL);

	VSCARAB_FOREACH(vio, scarab) {
		st = objallocwithnuke(wrk, stv, vio->iov.iov_len, 0);
		if (st == NULL)
			break;
		assert(st->space >= vio->iov.iov_len);
		st->flags = STORAGE_F_BUFFER;
		st->len = st->space;

		sml_ai_viov_fill(vio, st);
		r++;
	}
	if (r == 0) {
		sml_ai_later(wrk, hdl);
		r = -EAGAIN;
	}
	return (r);
}

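/*
 * Note: sml_ai_buffer() either fills at least one viov and returns the
 * count, or returns -EAGAIN after scheduling sml_ai_later(), so callers
 * always get a notification they can wait on; -EINVAL only flags
 * requests exceeding the struct storage size limit.
 */
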
static int
sml_ai_lease_simple(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	struct storage *st;
	struct sml_hdl *hdl;
	struct viov *viov;
	int r = 0;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARAB_CHECK_NOTNULL(scarab);

	AZ(hdl->st_off);
	st = hdl->st;
	while (st != NULL && (viov = VSCARAB_GET(scarab)) != NULL) {
		CHECK_OBJ(st, STORAGE_MAGIC);
		sml_ai_viov_fill(viov, st);
		r++;
		st = VTAILQ_PREV(st, storagehead, list);
	}
	hdl->st = st;
	if (st == NULL)
		scarab->flags |= VSCARAB_F_END;
	return (r);
}

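/*
 * Note: body segments are inserted at the head of o->list (see
 * sml_getspace()), so the list runs newest-first; the lease functions
 * start from VTAILQ_LAST() and walk VTAILQ_PREV() to hand out the body
 * in write order.
 */
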
/*
 * on leases while streaming (with a boc):
 *
 * SML uses the lease return facility to implement the "free behind" for
 * OC_F_TRANSIENT objects. When streaming, we also return leases on
 * fragments of sts, but we must only "free behind" when we are done with
 * the last fragment.
 *
 * So we use a magic lease to signal "this is only a fragment", which we
 * ignore on returns.
 */

static int
sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab)
{
	enum boc_state_e state = BOS_INVALID;
	struct storage *next;
	struct sml_hdl *hdl;
	struct viov *viov;
	int r = 0;

	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARAB_CHECK_NOTNULL(scarab);

	if (hdl->avail == hdl->returned) {
		hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned,
		    &state, &hdl->qe);
		if (state == BOS_FAILED) {
			hdl->last = NULL;
			return (-EPIPE);
		}
		else if (state == BOS_FINISHED)
			(void)0;
		else if (hdl->avail == hdl->returned) {
			// ObjVAIGetExtend() has scheduled a notification
			if (hdl->boc->transit_buffer > 0)
				return (-ENOBUFS);
			else
				return (-EAGAIN);
		}
		else
			assert(state < BOS_FINISHED);
	}
	Lck_Lock(&hdl->boc->mtx);
	if (hdl->st == NULL && hdl->last != NULL)
		hdl->st = VTAILQ_PREV(hdl->last, storagehead, list);
	if (hdl->last != NULL && state < BOS_FINISHED) {
		viov = VSCARAB_GET(scarab);
		AN(viov);
		viov->iov.iov_base = null_iov;
		viov->iov.iov_len = 0;
		viov->lease = ptr2lease(hdl->last);
	}
	if (hdl->last != NULL)
		hdl->last = NULL;
	if (hdl->st == NULL) {
		assert(hdl->returned == 0 || hdl->avail == hdl->returned);
		hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead);
	}
	if (hdl->st == NULL)
		assert(hdl->avail == hdl->returned);

	while (hdl->avail > hdl->returned &&
	    (viov = VSCARAB_GET(scarab)) != NULL) {
		CHECK_OBJ_NOTNULL(hdl->st, STORAGE_MAGIC); // ObjVAIGetExtend ensures
		assert(hdl->st_off <= hdl->st->space);
		size_t av = hdl->avail - hdl->returned;
		size_t l = hdl->st->space - hdl->st_off;
		AN(l);
		if (l > av)
			l = av;
		viov->iov.iov_base = TRUST_ME(hdl->st->ptr + hdl->st_off);
		viov->iov.iov_len = l;
		if (hdl->st_off + l == hdl->st->space) {
			next = VTAILQ_PREV(hdl->st, storagehead, list);
			AZ(hdl->last);
			if (next == NULL) {
				hdl->last = hdl->st;
				viov->lease = VAI_LEASE_NORET;
			}
			else {
				CHECK_OBJ(next, STORAGE_MAGIC);
				viov->lease = ptr2lease(hdl->st);
			}
#ifdef VAI_DBG
			VSLb(wrk->vsl, SLT_Debug,
			    "off %zu + l %zu == space st %p next st %p stvprv %p",
			    hdl->st_off, l, hdl->st, next,
			    hdl->boc->stevedore_priv);
#endif
			hdl->st_off = 0;
			hdl->st = next;
		}
		else {
			viov->lease = VAI_LEASE_NORET;
			hdl->st_off += l;
		}
		hdl->returned += l;
		VAI_ASSERT_LEASE(viov->lease);
		r++;
	}

	Lck_Unlock(&hdl->boc->mtx);
	if (state != BOS_FINISHED && hdl->avail == hdl->returned) {
		hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned,
		    &state, &hdl->qe);
	}
	if (state == BOS_FINISHED && hdl->avail == hdl->returned)
		scarab->flags |= VSCARAB_F_END;
	return (r);
}

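/*
 * Note: the null_iov entry emitted above is a zero-length viov whose only
 * purpose is to carry the deferred lease of the parked last segment;
 * sml_iterator() special-cases iov_base == null_iov because VDP_bytes()
 * asserts len > 0.
 */
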
// return only buffers, used if object is not streaming
static void v_matchproto_(vai_return_f)
sml_ai_return_buffers(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret)
{
	struct storage *st;
	struct sml_hdl *hdl;
	uint64_t *p;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);

	VSCARET_FOREACH(p, scaret) {
		if (*p == VAI_LEASE_NORET)
			continue;
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
		if ((st->flags & STORAGE_F_BUFFER) == 0)
			continue;
		sml_stv_free(hdl->stv, st);
	}
	VSCARET_INIT(scaret, scaret->capacity);
}

// generic return for buffers and object leases, used when streaming
static void v_matchproto_(vai_return_f)
sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret)
{
	struct storage *st;
	struct sml_hdl *hdl;
	uint64_t *p;

	(void) wrk;
	CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC);
	VSCARET_CHECK_NOTNULL(scaret);
	if (scaret->used == 0)
		return;

	// callback is only registered if needed
	assert(hdl->boc != NULL && (hdl->oc->flags & OC_F_TRANSIENT) != 0);

	// filter noret and last
	VSCARET_LOCAL(todo, scaret->used);
	VSCARET_FOREACH(p, scaret) {
		if (*p == VAI_LEASE_NORET)
			continue;
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
		VSCARET_ADD(todo, *p);
	}
	VSCARET_INIT(scaret, scaret->capacity);

	Lck_Lock(&hdl->boc->mtx);
	VSCARET_FOREACH(p, todo) {
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
		if ((st->flags & STORAGE_F_BUFFER) != 0)
			continue;
		VTAILQ_REMOVE(&hdl->obj->list, st, list);
		if (st == hdl->boc->stevedore_priv)
			hdl->boc->stevedore_priv = trim_once;
	}
	Lck_Unlock(&hdl->boc->mtx);

	VSCARET_FOREACH(p, todo) {
		CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC);
#ifdef VAI_DBG
		VSLb(wrk->vsl, SLT_Debug, "ret %p", st);
#endif
		sml_stv_free(hdl->stv, st);
	}
}

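/*
 * Note: the return is two-phase on purpose: segments are unlinked from
 * the object (and a parked trim leftover is re-marked with trim_once so
 * sml_bocfini() will not double-free it) under boc->mtx, but the actual
 * sml_stv_free() calls happen after the lock is dropped.
 */
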
static void v_matchproto_(vai_fini_f)
sml_ai_fini(struct worker *wrk, vai_hdl *vai_hdlp)
{
	struct sml_hdl *hdl;

	AN(vai_hdlp);
	CAST_VAI_HDL_NOTNULL(hdl, *vai_hdlp, SML_HDL_MAGIC);
	*vai_hdlp = NULL;

	if (hdl->boc != NULL) {
		ObjVAICancel(wrk, hdl->boc, &hdl->qe);
		HSH_DerefBoc(wrk, hdl->oc);
		hdl->boc = NULL;
	}

	if (hdl->ws != NULL)
		WS_Release(hdl->ws, 0);
	else
		free(hdl);
}

static vai_hdl v_matchproto_(vai_init_f)
sml_ai_init(struct worker *wrk, struct objcore *oc, struct ws *ws,
    vai_notify_cb *notify, void *notify_priv)
{
	struct sml_hdl *hdl;
	const size_t sz = sizeof *hdl;

	if (ws != NULL && WS_ReserveSize(ws, (unsigned)sz))
		hdl = WS_Reservation(ws);
	else {
		hdl = malloc(sz);
		ws = NULL;
	}

	AN(hdl);
	INIT_VAI_HDL(hdl, SML_HDL_MAGIC);
	hdl->preamble.vai_lease = sml_ai_lease_simple;
	hdl->preamble.vai_buffer = sml_ai_buffer;
	hdl->preamble.vai_return = sml_ai_return_buffers;
	hdl->preamble.vai_fini = sml_ai_fini;
	hdl->ws = ws;

	hdl->oc = oc;
	hdl->obj = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(hdl->obj, OBJECT_MAGIC);
	hdl->stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(hdl->stv, STEVEDORE_MAGIC);

	hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead);
	CHECK_OBJ_ORNULL(hdl->st, STORAGE_MAGIC);

	hdl->qe.magic = VAI_Q_MAGIC;
	hdl->qe.cb = notify;
	hdl->qe.hdl = hdl;
	hdl->qe.priv = notify_priv;

	hdl->boc = HSH_RefBoc(oc);
	if (hdl->boc == NULL)
		return (hdl);
	/* we only initialize notifications if we have a boc, so
	 * any wrong attempt triggers magic checks.
	 */
	hdl->preamble.vai_lease = sml_ai_lease_boc;
	if ((hdl->oc->flags & OC_F_TRANSIENT) != 0)
		hdl->preamble.vai_return = sml_ai_return;
	return (hdl);
}

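/*
 * Note: the preamble starts out with the "settled object" vtable
 * (sml_ai_lease_simple / sml_ai_return_buffers); if a boc is present the
 * lease op is swapped for sml_ai_lease_boc, and transient objects
 * additionally get sml_ai_return so leases can "free behind".
 */
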
/*
 * trivial notification to allow the iterator to simply block
 */
struct sml_notify {
	unsigned magic;
#define SML_NOTIFY_MAGIC 0x4589af31
	unsigned hasmore;
	pthread_mutex_t mtx;
	pthread_cond_t cond;
};

static void
sml_notify_init(struct sml_notify *sn)
{

	INIT_OBJ(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_init(&sn->mtx, NULL));
	AZ(pthread_cond_init(&sn->cond, NULL));
}

static void
sml_notify_fini(struct sml_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_destroy(&sn->mtx));
	AZ(pthread_cond_destroy(&sn->cond));
}

static void v_matchproto_(vai_notify_cb)
sml_notify(vai_hdl hdl, void *priv)
{
	struct sml_notify *sn;

	(void) hdl;
	CAST_OBJ_NOTNULL(sn, priv, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	sn->hasmore = 1;
	AZ(pthread_cond_signal(&sn->cond));
	AZ(pthread_mutex_unlock(&sn->mtx));
}

static void
sml_notify_wait(struct sml_notify *sn)
{

	CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC);
	AZ(pthread_mutex_lock(&sn->mtx));
	while (sn->hasmore == 0)
		AZ(pthread_cond_wait(&sn->cond, &sn->mtx));
	AN(sn->hasmore);
	sn->hasmore = 0;
	AZ(pthread_mutex_unlock(&sn->mtx));
}

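/*
 * Note: hasmore is a binary latch, not a counter, so any number of
 * sml_notify() calls between two sml_notify_wait() calls coalesce into a
 * single wakeup; the iterator then re-polls ObjVAIlease() for whatever
 * has accumulated.
 */
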
static int v_matchproto_(objiterator_f)
sml_iterator(struct worker *wrk, struct objcore *oc,
    void *priv, objiterate_f *func, int final)
{
	struct sml_notify sn;
	struct viov *vio, *last;
	unsigned u, uu;
	vai_hdl hdl;
	int nn, r, r2, islast;

	VSCARAB_LOCAL(scarab, 16);
	VSCARET_LOCAL(scaret, 16);

	(void) final; // phase out?
	sml_notify_init(&sn);
	hdl = ObjVAIinit(wrk, oc, NULL, sml_notify, &sn);
	AN(hdl);

	r = u = 0;

	do {
		do {
			nn = ObjVAIlease(wrk, hdl, scarab);
			if (nn <= 0 || scarab->flags & VSCARAB_F_END)
				break;
		} while (scarab->used < scarab->capacity);

		/*
		 * nn is the wait/return action or 0
		 * nn tells us whether to flush
		 */
		uu = u;
		last = VSCARAB_LAST(scarab);
		VSCARAB_FOREACH(vio, scarab) {
			islast = vio == last;
			AZ(u & OBJ_ITER_END);
			if (islast && scarab->flags & VSCARAB_F_END)
				u |= OBJ_ITER_END;

			// flush if it is the scarab's last IOV and we will
			// block next, or if we need space in the return
			// leases array
			uu = u;
			if ((islast && nn < 0) ||
			    scaret->used == scaret->capacity - 1)
				uu |= OBJ_ITER_FLUSH;

			// null iov with the only purpose to return the
			// resume ptr lease; exception needed because
			// assert(len > 0) in VDP_bytes()
			if (vio->iov.iov_base == null_iov)
				r = 0;
			else
				r = func(priv, uu, vio->iov.iov_base,
				    vio->iov.iov_len);
			if (r != 0)
				break;

			// sufficient space ensured by capacity check above
			VSCARET_ADD(scaret, vio->lease);

#ifdef VAI_DBG
			VSLb(wrk->vsl, SLT_Debug, "len %zu scaret %u uu %u",
			    vio->iov.iov_len, scaret->used, uu);
#endif

			// whenever we have flushed, return leases
			if ((uu & OBJ_ITER_FLUSH) && scaret->used > 0)
				ObjVAIreturn(wrk, hdl, scaret);
		}

		// return leases which we did not use if error (break)
		VSCARAB_FOREACH_RESUME(vio, scarab) {
			if (scaret->used == scaret->capacity)
				ObjVAIreturn(wrk, hdl, scaret);
			VSCARET_ADD(scaret, vio->lease);
		}

		// we have now completed the scarab
		VSCARAB_INIT(scarab, scarab->capacity);

#ifdef VAI_DBG
		VSLb(wrk->vsl, SLT_Debug, "r %d nn %d uu %u", r, nn, uu);
#endif

		// flush before blocking if we did not already
		if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN) &&
		    (uu & OBJ_ITER_FLUSH) == 0) {
			r = func(priv, OBJ_ITER_FLUSH, NULL, 0);
			if (scaret->used > 0)
				ObjVAIreturn(wrk, hdl, scaret);
		}

		if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN)) {
			assert(scaret->used <= 1);
			sml_notify_wait(&sn);
		}
		else if (r == 0 && nn < 0)
			r = -1;
	} while (nn != 0 && r == 0);

	if ((u & OBJ_ITER_END) == 0) {
		r2 = func(priv, OBJ_ITER_END, NULL, 0);
		if (r == 0)
			r = r2;
	}

	if (scaret->used > 0)
		ObjVAIreturn(wrk, hdl, scaret);

	ObjVAIfini(wrk, &hdl);
	sml_notify_fini(&sn);

	return (r);
}

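/*
 * Note: the iterator's protocol in brief: batch leases into the scarab
 * until it is full, the object ends, or ObjVAIlease() signals a wait
 * (-ENOBUFS/-EAGAIN) or an error; push each viov through func(),
 * collecting leases into the scaret and returning them on flush; block
 * on sml_notify_wait() when told to; and guarantee that func() sees
 * OBJ_ITER_END exactly once.
 */
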
/*--------------------------------------------------------------------
 */

static struct storage *
objallocwithnuke(struct worker *wrk, const struct stevedore *stv, ssize_t size,
    int flags)
{
	struct storage *st = NULL;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	if (size > cache_param->fetch_maxchunksize) {
		if (!(flags & LESS_MEM_ALLOCED_IS_OK))
			return (NULL);
		size = cache_param->fetch_maxchunksize;
	}

	assert(size <= UINT_MAX);	/* field limit in struct storage */

	do {
		/* try to allocate from it */
		st = sml_stv_alloc(stv, size, flags);
		if (st != NULL)
			break;

		/* no luck; try to free some space and keep trying */
		if (stv->lru == NULL)
			break;
	} while (LRU_NukeOne(wrk, stv->lru));

	CHECK_OBJ_ORNULL(st, STORAGE_MAGIC);
	return (st);
}

static int v_matchproto_(objgetspace_f)
sml_getspace(struct worker *wrk, struct objcore *oc, ssize_t *sz,
    uint8_t **ptr)
{
	struct object *o;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	AN(sz);
	AN(ptr);
	if (*sz == 0)
		*sz = cache_param->fetch_chunksize;
	assert(*sz > 0);
	if (oc->boc->transit_buffer > 0)
		*sz = vmin_t(ssize_t, *sz, oc->boc->transit_buffer);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

	st = VTAILQ_FIRST(&o->list);
	if (st != NULL && st->len < st->space) {
		*sz = st->space - st->len;
		*ptr = st->ptr + st->len;
		assert(*sz > 0);
		return (1);
	}

	st = objallocwithnuke(wrk, oc->stobj->stevedore, *sz,
	    LESS_MEM_ALLOCED_IS_OK);
	if (st == NULL)
		return (0);

	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	VTAILQ_INSERT_HEAD(&o->list, st, list);
	Lck_Unlock(&oc->boc->mtx);

	*sz = st->space - st->len;
	assert(*sz > 0);
	*ptr = st->ptr + st->len;
	return (1);
}

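/*
 * Note: when a transit buffer is configured, *sz is capped to it, which
 * appears intended to keep an unbacked (transient) fetch from allocating
 * far ahead of what delivery has freed behind.
 */
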
static void v_matchproto_(objextend_f)
sml_extend(struct worker *wrk, struct objcore *oc, ssize_t l)
{
	struct object *o;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	assert(l > 0);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = VTAILQ_FIRST(&o->list);
	CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC);
	assert(st->len + l <= st->space);
	st->len += l;
}

static void v_matchproto_(objtrimstore_f)
sml_trimstore(struct worker *wrk, struct objcore *oc)
{
	const struct stevedore *stv;
	struct storage *st, *st1;
	struct object *o;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	if (oc->boc->stevedore_priv != NULL)
		WRONG("sml_trimstore already called");
	oc->boc->stevedore_priv = trim_once;

	if (stv->sml_free == NULL)
		return;

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = VTAILQ_FIRST(&o->list);

	if (st == NULL)
		return;

	if (st->len == 0) {
		Lck_Lock(&oc->boc->mtx);
		VTAILQ_REMOVE(&o->list, st, list);
		Lck_Unlock(&oc->boc->mtx);
		/* sml_bocdone frees this */
		oc->boc->stevedore_priv = st;
		return;
	}

	if (st->space - st->len < 512)
		return;

	st1 = sml_stv_alloc(stv, st->len, 0);
	if (st1 == NULL)
		return;
	assert(st1->space >= st->len);

	memcpy(st1->ptr, st->ptr, st->len);
	st1->len = st->len;
	Lck_Lock(&oc->boc->mtx);
	VTAILQ_REMOVE(&o->list, st, list);
	VTAILQ_INSERT_HEAD(&o->list, st1, list);
	Lck_Unlock(&oc->boc->mtx);
	/* sml_bocdone frees this */
	oc->boc->stevedore_priv = st;
}

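/*
 * Note: the trim only bothers when at least 512 bytes would be saved, and
 * the replaced segment is not freed here but parked in
 * boc->stevedore_priv for sml_bocdone(), presumably because concurrent
 * readers may still hold leases on it while the fetch completes.
 */
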
static void v_matchproto_(objbocdone_f)
sml_bocdone(struct worker *wrk, struct objcore *oc, struct boc *boc)
{
	const struct stevedore *stv;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
	stv = oc->stobj->stevedore;
	CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);

	sml_bocfini(stv, boc);

	if (stv->lru != NULL) {
		if (isnan(wrk->lastused))
			wrk->lastused = VTIM_real();
		LRU_Add(oc, wrk->lastused);	// approx timestamp is OK
	}
}

static const void * v_matchproto_(objgetattr_f)
sml_getattr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
    ssize_t *len)
{
	struct object *o;
	ssize_t dummy;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	if (len == NULL)
		len = &dummy;
	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

	switch (attr) {
		/* Fixed size attributes */
#define OBJ_FIXATTR(U, l, s)						\
	case OA_##U:							\
		*len = sizeof o->fa_##l;				\
		return (o->fa_##l);
#include "tbl/obj_attr.h"

		/* Variable size attributes */
#define OBJ_VARATTR(U, l)						\
	case OA_##U:							\
		if (o->va_##l == NULL)					\
			return (NULL);					\
		*len = o->va_##l##_len;					\
		return (o->va_##l);
#include "tbl/obj_attr.h"

		/* Auxiliary attributes */
#define OBJ_AUXATTR(U, l)						\
	case OA_##U:							\
		if (o->aa_##l == NULL)					\
			return (NULL);					\
		CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);		\
		*len = o->aa_##l->len;					\
		return (o->aa_##l->ptr);
#include "tbl/obj_attr.h"

	default:
		break;
	}
	WRONG("Unsupported OBJ_ATTR");
}

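/*
 * Note: the three #include "tbl/obj_attr.h" blocks above are X-macro
 * expansions: the table header emits one OBJ_FIXATTR()/OBJ_VARATTR()/
 * OBJ_AUXATTR() invocation per attribute and undefines the macros again,
 * so each include generates the switch cases for one attribute class.
 * A minimal sketch of the idiom (hypothetical names, not the real table):
 *
 *	// tbl.h contains:  ITEM(FOO) ITEM(BAR)  and then #undef ITEM
 *	#define ITEM(U)	case ATTR_##U: return (#U);
 *	#include "tbl.h"
 */
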
static void * v_matchproto_(objsetattr_f)
sml_setattr(struct worker *wrk, struct objcore *oc, enum obj_attr attr,
    ssize_t len, const void *ptr)
{
	struct object *o;
	void *retval = NULL;
	struct storage *st;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

	o = sml_getobj(wrk, oc);
	CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);
	st = o->objstore;

	switch (attr) {
		/* Fixed size attributes */
#define OBJ_FIXATTR(U, l, s)						\
	case OA_##U:							\
		assert(len == sizeof o->fa_##l);			\
		retval = o->fa_##l;					\
		break;
#include "tbl/obj_attr.h"

		/* Variable size attributes */
#define OBJ_VARATTR(U, l)						\
	case OA_##U:							\
		if (o->va_##l##_len > 0) {				\
			AN(o->va_##l);					\
			assert(len == o->va_##l##_len);			\
			retval = o->va_##l;				\
		} else if (len > 0) {					\
			assert(len <= UINT_MAX);			\
			assert(st->len + len <= st->space);		\
			o->va_##l = st->ptr + st->len;			\
			st->len += len;					\
			o->va_##l##_len = len;				\
			retval = o->va_##l;				\
		}							\
		break;
#include "tbl/obj_attr.h"

		/* Auxiliary attributes */
#define OBJ_AUXATTR(U, l)						\
	case OA_##U:							\
		if (o->aa_##l != NULL) {				\
			CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);	\
			assert(len == o->aa_##l->len);			\
			retval = o->aa_##l->ptr;			\
			break;						\
		}							\
		if (len == 0)						\
			break;						\
		o->aa_##l = objallocwithnuke(wrk, oc->stobj->stevedore,	\
		    len, 0);						\
		if (o->aa_##l == NULL)					\
			break;						\
		CHECK_OBJ_NOTNULL(o->aa_##l, STORAGE_MAGIC);		\
		assert(len <= o->aa_##l->space);			\
		o->aa_##l->len = len;					\
		retval = o->aa_##l->ptr;				\
		break;
#include "tbl/obj_attr.h"

	default:
		WRONG("Unsupported OBJ_ATTR");
		break;
	}

	if (retval != NULL && ptr != NULL)
		memcpy(retval, ptr, len);
	return (retval);
}

const struct obj_methods SML_methods = {
	.objfree	= sml_objfree,
	.objiterator	= sml_iterator,
	.objgetspace	= sml_getspace,
	.objextend	= sml_extend,
	.objtrimstore	= sml_trimstore,
	.objbocdone	= sml_bocdone,
	.objslim	= sml_slim,
	.objgetattr	= sml_getattr,
	.objsetattr	= sml_setattr,
	.objtouch	= LRU_Touch,
	.vai_init	= sml_ai_init
};

static void
sml_panic_st(struct vsb *vsb, const char *hd, const struct storage *st)
{
	VSB_printf(vsb, "%s = %p {priv=%p, ptr=%p, len=%u, space=%u},\n",
	    hd, st, st->priv, st->ptr, st->len, st->space);
}

void
SML_panic(struct vsb *vsb, const struct objcore *oc)
{
	struct object *o;
	struct storage *st;

	VSB_printf(vsb, "Simple = %p,\n", oc->stobj->priv);
	if (oc->stobj->priv == NULL)
		return;
	o = oc->stobj->priv;
	PAN_CheckMagic(vsb, o, OBJECT_MAGIC);
	sml_panic_st(vsb, "Obj", o->objstore);

#define OBJ_FIXATTR(U, l, sz)						\
	VSB_printf(vsb, "%s = ", #U);					\
	VSB_quote(vsb, (const void*)o->fa_##l, sz, VSB_QUOTE_HEX);	\
	VSB_printf(vsb, ",\n");

#define OBJ_VARATTR(U, l)						\
	VSB_printf(vsb, "%s = {len=%u, ptr=%p},\n",			\
	    #U, o->va_##l##_len, o->va_##l);

#define OBJ_AUXATTR(U, l)						\
	do {								\
		if (o->aa_##l != NULL)					\
			sml_panic_st(vsb, #U, o->aa_##l);		\
	} while (0);

#include "tbl/obj_attr.h"

	VTAILQ_FOREACH(st, &o->list, list) {
		sml_panic_st(vsb, "Body", st);
	}
}