| | varnish-cache/bin/varnishd/cache/cache_obj.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2013-2016 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* Lifetime of an objcore: |
30 |
|
* phase 0 - nonexistent |
31 |
|
* phase 1 - created, but no stevedore associated |
32 |
|
* phase 2 - stevedore associated, being filled out |
33 |
|
* phase 3 - stable, no changes happening |
34 |
|
* phase 4 - unavailable, being dismantled |
35 |
|
* phase 5 - stevedore disassociated |
36 |
|
* phase 6 - nonexistent |
37 |
|
* |
38 |
|
* 0->1 ObjNew() creates objcore |
39 |
|
* |
40 |
|
* 1->2 STV_NewObject() associates a stevedore |
41 |
|
* |
42 |
|
* 2 ObjSetState() sets state |
43 |
|
* 2 ObjWaitState() waits for particular state |
44 |
|
* INVALID->REQ_DONE->STREAM->FINISHED->FAILED |
45 |
|
* |
46 |
|
* 2 ObjGetSpace() allocates space |
47 |
|
* 2 ObjExtend() commits content |
48 |
|
* 2 ObjWaitExtend() waits for content - used to implement ObjIterate()) |
49 |
|
* |
50 |
|
* 2 ObjSetAttr() |
51 |
|
* 2 ObjCopyAttr() |
52 |
|
* 2 ObjSetFlag() |
53 |
|
* 2 ObjSetDouble() |
54 |
|
* 2 ObjSetU32() |
55 |
|
* 2 ObjSetU64() |
56 |
|
* |
57 |
|
* 2->3 ObjBocDone() Boc removed from OC, clean it up |
58 |
|
* |
59 |
|
* 23 ObjHasAttr() |
60 |
|
* 23 ObjGetAttr() |
61 |
|
* 23 ObjCheckFlag() |
62 |
|
* 23 ObjGetDouble() |
63 |
|
* 23 ObjGetU32() |
64 |
|
* 23 ObjGetU64() |
65 |
|
* 23 ObjGetLen() |
66 |
|
* 23 ObjGetXID() |
67 |
|
* |
68 |
|
* 23 ObjIterate() ... over body |
69 |
|
* |
70 |
|
* 23 ObjTouch() Signal to LRU(-like) facilities |
71 |
|
* |
72 |
|
* 3->4 HSH_Snipe() kill if not in use |
73 |
|
* 3->4 HSH_Kill() make unavailable |
74 |
|
* |
75 |
|
* 234 ObjSlim() Release body storage (but retain attribute storage) |
76 |
|
* |
77 |
|
* 4->5 ObjFreeObj() disassociates stevedore |
78 |
|
* |
79 |
|
* 5->6 FREE_OBJ() ...in HSH_DerefObjCore() |
80 |
|
*/ |
81 |
|
|
82 |
|
#include "config.h" |
83 |
|
|
84 |
|
#include <stdlib.h> |
85 |
|
|
86 |
|
#include "cache_varnishd.h" |
87 |
|
#include "cache_obj.h" |
88 |
|
#include "vend.h" |
89 |
|
#include "storage/storage.h" |
90 |
|
|
91 |
|
static const struct obj_methods * |
92 |
162497 |
obj_getmethods(const struct objcore *oc) |
93 |
|
{ |
94 |
|
|
95 |
162497 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
96 |
162497 |
CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC); |
97 |
162497 |
AN(oc->stobj->stevedore->methods); |
98 |
162497 |
return (oc->stobj->stevedore->methods); |
99 |
|
} |
100 |
|
|
101 |
|
static struct boc * |
102 |
3199 |
obj_newboc(void) |
103 |
|
{ |
104 |
|
struct boc *boc; |
105 |
|
|
106 |
3199 |
ALLOC_OBJ(boc, BOC_MAGIC); |
107 |
3199 |
AN(boc); |
108 |
3199 |
Lck_New(&boc->mtx, lck_busyobj); |
109 |
3199 |
PTOK(pthread_cond_init(&boc->cond, NULL)); |
110 |
3199 |
boc->refcount = 1; |
111 |
3199 |
boc->transit_buffer = cache_param->transit_buffer; |
112 |
3199 |
return (boc); |
113 |
|
} |
114 |
|
|
115 |
|
static void |
116 |
2921 |
obj_deleteboc(struct boc **p) |
117 |
|
{ |
118 |
|
struct boc *boc; |
119 |
|
|
120 |
2921 |
TAKE_OBJ_NOTNULL(boc, p, BOC_MAGIC); |
121 |
2921 |
Lck_Delete(&boc->mtx); |
122 |
2921 |
PTOK(pthread_cond_destroy(&boc->cond)); |
123 |
2921 |
free(boc->vary); |
124 |
2921 |
FREE_OBJ(boc); |
125 |
2921 |
} |
126 |
|
|
127 |
|
/*==================================================================== |
128 |
|
* ObjNew() |
129 |
|
* |
130 |
|
*/ |
131 |
|
|
132 |
|
struct objcore * |
133 |
3199 |
ObjNew(const struct worker *wrk) |
134 |
|
{ |
135 |
|
struct objcore *oc; |
136 |
|
|
137 |
3199 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
138 |
|
|
139 |
3199 |
ALLOC_OBJ(oc, OBJCORE_MAGIC); |
140 |
3199 |
AN(oc); |
141 |
3199 |
wrk->stats->n_objectcore++; |
142 |
3199 |
oc->last_lru = NAN; |
143 |
3199 |
oc->flags = OC_F_BUSY; |
144 |
|
|
145 |
3199 |
oc->boc = obj_newboc(); |
146 |
|
|
147 |
3199 |
return (oc); |
148 |
|
} |
149 |
|
|
150 |
|
/*==================================================================== |
151 |
|
* ObjDestroy() |
152 |
|
* |
153 |
|
*/ |
154 |
|
|
155 |
|
void |
156 |
1857 |
ObjDestroy(const struct worker *wrk, struct objcore **p) |
157 |
|
{ |
158 |
|
struct objcore *oc; |
159 |
|
|
160 |
1857 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
161 |
1857 |
TAKE_OBJ_NOTNULL(oc, p, OBJCORE_MAGIC); |
162 |
1857 |
if (oc->boc != NULL) |
163 |
33 |
obj_deleteboc(&oc->boc); |
164 |
1857 |
FREE_OBJ(oc); |
165 |
1857 |
wrk->stats->n_objectcore--; |
166 |
1857 |
} |
167 |
|
|
168 |
|
/*==================================================================== |
169 |
|
* ObjIterate() |
170 |
|
* |
171 |
|
*/ |
172 |
|
|
173 |
|
int |
174 |
2403 |
ObjIterate(struct worker *wrk, struct objcore *oc, |
175 |
|
void *priv, objiterate_f *func, int final) |
176 |
|
{ |
177 |
2403 |
const struct obj_methods *om = obj_getmethods(oc); |
178 |
|
|
179 |
2403 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
180 |
2403 |
AN(func); |
181 |
2403 |
AN(om->objiterator); |
182 |
2403 |
return (om->objiterator(wrk, oc, priv, func, final)); |
183 |
|
} |
184 |
|
|
185 |
|
/*==================================================================== |
186 |
|
* ObjVAI...(): Asynchronous Iteration |
187 |
|
* |
188 |
|
* |
189 |
|
* ObjVAIinit() returns an opaque handle, or NULL if not supported |
190 |
|
* |
191 |
|
* A VAI handle must not be used concurrently |
192 |
|
* |
193 |
|
* the vai_notify_cb(priv) will be called asynchronously by the storage |
194 |
|
* engine when a -EAGAIN / -ENOBUFS condition is over and ObjVAIlease() |
195 |
|
* can be called again. |
196 |
|
* |
197 |
|
* Note: |
198 |
|
* - the callback gets executed by an arbitrary thread |
199 |
|
* - WITH the boc mtx held |
200 |
|
* so it should never block and only do minimal work |
201 |
|
* |
202 |
|
* ObjVAIlease() fills the vscarab with leases. returns: |
203 |
|
* |
204 |
|
* -EAGAIN: nothing available at the moment, storage will notify, no use to |
205 |
|
* call again until notification |
206 |
|
* -ENOBUFS: caller needs to return leases, storage will notify |
207 |
|
* -EPIPE: BOS_FAILED for busy object |
208 |
|
* -(errno): other problem, fatal |
209 |
|
* |
210 |
|
* >= 0: number of viovs added (== scarab->capacity - scarab->used) |
211 |
|
* |
212 |
|
* struct vscarab: |
213 |
|
* |
214 |
|
* the leases can be used by the caller until returned with |
215 |
|
* ObjVAIreturn(). The storage guarantees that the lease member is a |
216 |
|
* multiple of 8 (that is, the lower three bits are zero). These can be |
217 |
|
* used by the caller between lease and return, but must be cleared to |
218 |
|
* zero before returning. |
219 |
|
* |
220 |
|
* ObjVAIbuffer() allocates temporary buffers, returns: |
221 |
|
* |
222 |
|
* -EAGAIN: allocation can not be fulfilled immediately, storage will notify, |
223 |
|
* no use to call again until notification |
224 |
|
* -EINVAL: size larger than UINT_MAX requested |
225 |
|
* -(errno): other problem, fatal |
226 |
|
* n: n > 0, number of viovs filled |
227 |
|
* |
228 |
|
* The struct vscarab is used on the way in and out: On the way in, the |
229 |
|
* iov.iov_len members contain the sizes the caller requests, all other |
230 |
|
* members of the struct viovs are expected to be zero initialized. |
231 |
|
* |
232 |
|
* The maximum size to be requested is UINT_MAX. |
233 |
|
* |
234 |
|
* ObjVAIbuffer() may return sizes larger than requested. The returned n |
235 |
|
* might be smaller than requested. |
236 |
|
* |
237 |
|
* ObjVAIreturn() returns leases collected in a struct vscaret |
238 |
|
* |
239 |
|
* it must be called with a vscaret, which holds an array of lease values |
240 |
|
* received via ObjVAIlease() or ObjVAIbuffer() when the caller can |
241 |
|
* guarantee that they are no longer accessed. |
242 |
|
* |
243 |
|
* ObjVAIreturn() may retain leases in the vscaret if the implementation |
244 |
|
* still requires them, iow, the vscaret might not be empty upon return. |
245 |
|
* |
246 |
|
* ObjVAIfini() finalized iteration |
247 |
|
* |
248 |
|
* it must be called when iteration is done, irrespective of error status |
249 |
|
*/ |
250 |
|
|
251 |
|
vai_hdl |
252 |
2452 |
ObjVAIinit(struct worker *wrk, struct objcore *oc, struct ws *ws, |
253 |
|
vai_notify_cb *cb, void *cb_priv) |
254 |
|
{ |
255 |
2452 |
const struct obj_methods *om = obj_getmethods(oc); |
256 |
|
|
257 |
2452 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
258 |
|
|
259 |
2452 |
if (om->vai_init == NULL) |
260 |
0 |
return (NULL); |
261 |
2452 |
return (om->vai_init(wrk, oc, ws, cb, cb_priv)); |
262 |
2452 |
} |
263 |
|
|
264 |
|
int |
265 |
6733 |
ObjVAIlease(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) |
266 |
|
{ |
267 |
6733 |
struct vai_hdl_preamble *vaip = vhdl; |
268 |
|
|
269 |
6733 |
AN(vaip); |
270 |
6733 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
271 |
6733 |
AN(vaip->vai_lease); |
272 |
6733 |
return (vaip->vai_lease(wrk, vhdl, scarab)); |
273 |
|
} |
274 |
|
|
275 |
|
int |
276 |
16 |
ObjVAIbuffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) |
277 |
|
{ |
278 |
16 |
struct vai_hdl_preamble *vaip = vhdl; |
279 |
|
|
280 |
16 |
AN(vaip); |
281 |
16 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
282 |
16 |
AN(vaip->vai_buffer); |
283 |
16 |
return (vaip->vai_buffer(wrk, vhdl, scarab)); |
284 |
|
} |
285 |
|
|
286 |
|
void |
287 |
3027 |
ObjVAIreturn(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) |
288 |
|
{ |
289 |
3027 |
struct vai_hdl_preamble *vaip = vhdl; |
290 |
|
|
291 |
3027 |
AN(vaip); |
292 |
3027 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
293 |
3027 |
AN(vaip->vai_return); |
294 |
3027 |
vaip->vai_return(wrk, vhdl, scaret); |
295 |
3027 |
} |
296 |
|
|
297 |
|
void |
298 |
2452 |
ObjVAIfini(struct worker *wrk, vai_hdl *vhdlp) |
299 |
|
{ |
300 |
2452 |
AN(vhdlp); |
301 |
2452 |
struct vai_hdl_preamble *vaip = *vhdlp; |
302 |
|
|
303 |
2452 |
AN(vaip); |
304 |
2452 |
assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); |
305 |
2452 |
AN(vaip->vai_lease); |
306 |
2452 |
vaip->vai_fini(wrk, vhdlp); |
307 |
2452 |
} |
308 |
|
|
309 |
|
/*==================================================================== |
310 |
|
* ObjGetSpace() |
311 |
|
* |
312 |
|
* This function returns a pointer and length of free space. If there |
313 |
|
* is no free space, some will be added first. |
314 |
|
* |
315 |
|
* The "sz" argument is an input hint of how much space is desired. |
316 |
|
* 0 means "unknown", return some default size (maybe fetch_chunksize) |
317 |
|
*/ |
318 |
|
|
319 |
|
int |
320 |
58096 |
ObjGetSpace(struct worker *wrk, struct objcore *oc, ssize_t *sz, uint8_t **ptr) |
321 |
|
{ |
322 |
58096 |
const struct obj_methods *om = obj_getmethods(oc); |
323 |
|
|
324 |
58096 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
325 |
58096 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
326 |
58096 |
AN(sz); |
327 |
58096 |
AN(ptr); |
328 |
58096 |
assert(*sz >= 0); |
329 |
|
|
330 |
58096 |
AN(om->objgetspace); |
331 |
58096 |
return (om->objgetspace(wrk, oc, sz, ptr)); |
332 |
|
} |
333 |
|
|
334 |
|
/*==================================================================== |
335 |
|
* ObjExtend() |
336 |
|
* |
337 |
|
* This function extends the used part of the object a number of bytes |
338 |
|
* into the last space returned by ObjGetSpace() |
339 |
|
* |
340 |
|
* The final flag must be set on the last call, and it will release any |
341 |
|
* surplus space allocated. |
342 |
|
*/ |
343 |
|
|
344 |
|
/*
 * Throttle the fetch when a transit buffer is configured: block the
 * fetching thread until the client has consumed enough of the body.
 * Must be called with oc->boc->mtx held (Lck_CondWait releases and
 * re-acquires it).  A cancelled fetch (OC_F_CANCEL) stops waiting so
 * it can wind down.
 */
static void
obj_extend_condwait(const struct objcore *oc)
{

	/* No transit buffer configured: never throttle. */
	if (oc->boc->transit_buffer == 0)
		return;

	/* Transit buffering is only used for transient objects. */
	assert(oc->flags & OC_F_TRANSIENT);
	while (!(oc->flags & OC_F_CANCEL) && oc->boc->fetched_so_far >
	    oc->boc->delivered_so_far + oc->boc->transit_buffer)
		(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx);
}
356 |
|
|
357 |
|
// notify of an extension of the boc or state change |
358 |
|
//lint -sem(obj_boc_notify_Unlock, thread_unlock) |
359 |
|
|
360 |
|
/*
 * Notify everybody waiting on the boc (synchronous waiters via the
 * condvar, asynchronous VAI waiters via their queued callbacks) and
 * release the boc mutex.  Called with boc->mtx held; returns with it
 * unlocked.
 *
 * The VAI queue is detached in one step, then drained: each entry is
 * unlinked and its INQUEUE flag cleared before the callback runs.
 * NB: callbacks execute while the mutex is still held, so they must
 * not block (see the VAI block comment above).
 */
static void
obj_boc_notify_Unlock(struct boc *boc)
{
	struct vai_qe *qe, *next;

	PTOK(pthread_cond_broadcast(&boc->cond));
	/* Atomically take the whole queue; new registrations go to a
	 * fresh (empty) list. */
	qe = VSLIST_FIRST(&boc->vai_q_head);
	VSLIST_FIRST(&boc->vai_q_head) = NULL;
	while (qe != NULL) {
		CHECK_OBJ(qe, VAI_Q_MAGIC);
		AN(qe->flags & VAI_QF_INQUEUE);
		qe->flags &= ~VAI_QF_INQUEUE;
		/* Save the successor before severing the link - the
		 * callback may reuse or re-queue the entry. */
		next = VSLIST_NEXT(qe, list);
		VSLIST_NEXT(qe, list) = NULL;
		qe->cb(qe->hdl, qe->priv);
		qe = next;
	}
	Lck_Unlock(&boc->mtx);
}
379 |
|
|
380 |
|
/*
 * Commit `l` bytes into the last space returned by ObjGetSpace() and
 * notify waiters.  With `final` set (the last call), any surplus
 * space is trimmed by the storage engine.
 */
void
ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l, int final)
{
	const struct obj_methods *om = obj_getmethods(oc);

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	AN(om->objextend);
	assert(l >= 0);

	if (l > 0) {
		Lck_Lock(&oc->boc->mtx);
		/* May block on the transit buffer until the client
		 * catches up (or the fetch is cancelled). */
		obj_extend_condwait(oc);
		om->objextend(wrk, oc, l);
		oc->boc->fetched_so_far += l;
		/* Wakes waiters and releases the mutex. */
		obj_boc_notify_Unlock(oc->boc);
	}

	assert(oc->boc->state < BOS_FINISHED);
	if (final && om->objtrimstore != NULL)
		om->objtrimstore(wrk, oc);
}
402 |
|
|
403 |
|
/*==================================================================== |
404 |
|
*/ |
405 |
|
|
406 |
|
/*
 * Record the client's delivery position `l` and wake the fetch thread
 * possibly blocked in obj_extend_condwait().  Only relevant when a
 * transit buffer is in use.  Must be called with oc->boc->mtx held.
 */
static inline void
objSignalFetchLocked(const struct objcore *oc, uint64_t l)
{
	if (oc->boc->transit_buffer > 0) {
		assert(oc->flags & OC_F_TRANSIENT);
		/* Signal the new client position */
		oc->boc->delivered_so_far = l;
		PTOK(pthread_cond_signal(&oc->boc->cond));
	}
}
416 |
|
|
417 |
|
/*
 * Block until the object body has grown beyond `l` bytes or the fetch
 * has reached a final state (>= BOS_FINISHED).  Returns the bytes
 * fetched so far; the boc state at that moment is reported through
 * *statep when non-NULL.  Used to implement ObjIterate() streaming.
 */
uint64_t
ObjWaitExtend(const struct worker *wrk, const struct objcore *oc, uint64_t l,
    enum boc_state_e *statep)
{
	enum boc_state_e state;
	uint64_t rv;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	while (1) {
		rv = oc->boc->fetched_so_far;
		/* The caller can only be ahead of the fetch if it failed. */
		assert(l <= rv || oc->boc->state == BOS_FAILED);
		state = oc->boc->state;
		/* Publish our position, may unblock a throttled fetch. */
		objSignalFetchLocked(oc, l);
		if (rv > l || state >= BOS_FINISHED)
			break;
		(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx);
	}
	Lck_Unlock(&oc->boc->mtx);
	if (statep != NULL)
		*statep = state;
	return (rv);
}
442 |
|
|
443 |
|
// get a new extension _or_ register a notification |
444 |
|
// get a new extension _or_ register a notification
/*
 * Non-blocking counterpart of ObjWaitExtend(): returns the current
 * fetch progress, and if there is no new data and the fetch is not
 * finished, registers `qe` for a one-shot callback from
 * obj_boc_notify_Unlock().  *statep receives the boc state when
 * non-NULL.
 */
uint64_t
ObjVAIGetExtend(struct worker *wrk, const struct objcore *oc, uint64_t l,
    enum boc_state_e *statep, struct vai_qe *qe)
{
	enum boc_state_e state;
	uint64_t rv;

	(void) wrk;
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC);
	Lck_Lock(&oc->boc->mtx);
	rv = oc->boc->fetched_so_far;
	assert(l <= rv || oc->boc->state == BOS_FAILED);
	state = oc->boc->state;
	/* Publish our position, may unblock a throttled fetch. */
	objSignalFetchLocked(oc, l);
	/* Nothing new and still fetching: queue for notification,
	 * unless this entry is already queued. */
	if (l == rv && state < BOS_FINISHED &&
	    (qe->flags & VAI_QF_INQUEUE) == 0) {
		qe->flags |= VAI_QF_INQUEUE;
		VSLIST_INSERT_HEAD(&oc->boc->vai_q_head, qe, list);
	}
	Lck_Unlock(&oc->boc->mtx);
	if (statep != NULL)
		*statep = state;
	return (rv);
}
470 |
|
|
471 |
|
/*
 * Withdraw a pending VAI notification entry: unlink `qe` from the
 * boc's queue if it is queued, and clear its flags so it can be
 * reused or freed safely.
 */
void
ObjVAICancel(struct worker *wrk, struct boc *boc, struct vai_qe *qe)
{

	(void) wrk;
	CHECK_OBJ_NOTNULL(boc, BOC_MAGIC);
	CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC);

	Lck_Lock(&boc->mtx);
	// inefficient, but should be rare
	if ((qe->flags & VAI_QF_INQUEUE) != 0)
		VSLIST_REMOVE(&boc->vai_q_head, qe, vai_qe, list);
	qe->flags = 0;
	Lck_Unlock(&boc->mtx);
}
486 |
|
|
487 |
|
/*==================================================================== |
488 |
|
*/ |
489 |
|
|
490 |
|
/*
 * Advance the boc state machine (states only move forward:
 * INVALID->REQ_DONE->STREAM->FINISHED->FAILED) and wake all waiters.
 * The stevedore, if one is associated, gets a chance to observe the
 * transition via its optional objsetstate method.
 */
void
ObjSetState(struct worker *wrk, const struct objcore *oc,
    enum boc_state_e next)
{
	const struct obj_methods *om;

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);
	/* States are strictly monotonic. */
	assert(next > oc->boc->state);

	CHECK_OBJ_ORNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
	/* STREAM may only follow PREP_STREAM. */
	assert(next != BOS_STREAM || oc->boc->state == BOS_PREP_STREAM);
	/* An object cannot finish without a length attribute. */
	assert(next != BOS_FINISHED || (oc->oa_present & (1 << OA_LEN)));

	if (oc->stobj->stevedore != NULL) {
		om = oc->stobj->stevedore->methods;
		if (om->objsetstate != NULL)
			om->objsetstate(wrk, oc, next);
	}

	Lck_Lock(&oc->boc->mtx);
	oc->boc->state = next;
	/* Wakes waiters and releases the mutex. */
	obj_boc_notify_Unlock(oc->boc);
}
514 |
|
|
515 |
|
/*==================================================================== |
516 |
|
*/ |
517 |
|
|
518 |
|
/*
 * Block until the boc state reaches at least `want`.  If the fetch
 * was cancelled, first nudge the condvar so a fetch thread throttled
 * in obj_extend_condwait() can observe OC_F_CANCEL and proceed.
 */
void
ObjWaitState(const struct objcore *oc, enum boc_state_e want)
{

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC);

	Lck_Lock(&oc->boc->mtx);
	/* wake up obj_extend_condwait() */
	if (oc->flags & OC_F_CANCEL)
		PTOK(pthread_cond_signal(&oc->boc->cond));
	while (1) {
		if (oc->boc->state >= want)
			break;
		(void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx);
	}
	Lck_Unlock(&oc->boc->mtx);
}
536 |
|
|
537 |
|
/*==================================================================== |
538 |
|
* ObjGetlen() |
539 |
|
* |
540 |
|
* This is a separate function because it may need locking |
541 |
|
*/ |
542 |
|
|
543 |
|
uint64_t |
544 |
5377 |
ObjGetLen(struct worker *wrk, struct objcore *oc) |
545 |
|
{ |
546 |
|
uint64_t len; |
547 |
|
|
548 |
5377 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
549 |
|
|
550 |
5377 |
AZ(ObjGetU64(wrk, oc, OA_LEN, &len)); |
551 |
5377 |
return (len); |
552 |
|
} |
553 |
|
|
554 |
|
/*==================================================================== |
555 |
|
* ObjSlim() |
556 |
|
* |
557 |
|
* Free the whatever storage can be freed, without freeing the actual |
558 |
|
* object yet. |
559 |
|
*/ |
560 |
|
|
561 |
|
void |
562 |
1405 |
ObjSlim(struct worker *wrk, struct objcore *oc) |
563 |
|
{ |
564 |
1405 |
const struct obj_methods *om = obj_getmethods(oc); |
565 |
|
|
566 |
1405 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
567 |
|
|
568 |
1405 |
if (om->objslim != NULL) |
569 |
1405 |
om->objslim(wrk, oc); |
570 |
1405 |
} |
571 |
|
|
572 |
|
/*==================================================================== |
573 |
|
* Called when the boc used to populate the objcore is going away. |
574 |
|
* Useful for releasing any leftovers from Trim. |
575 |
|
*/ |
576 |
|
|
577 |
|
/*
 * Phase 2 -> 3: the boc used to populate the objcore is going away.
 * Gives the stevedore (if still associated) a chance to release any
 * leftovers from Trim, then deletes the boc.
 */
void
ObjBocDone(struct worker *wrk, struct objcore *oc, struct boc **boc)
{
	const struct obj_methods *m;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	AN(boc);
	CHECK_OBJ_NOTNULL(*boc, BOC_MAGIC);
	CHECK_OBJ_ORNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
	/* A failed fetch may never have associated a stevedore. */
	if (oc->stobj->stevedore != NULL) {
		m = obj_getmethods(oc);
		if (m->objbocdone != NULL)
			m->objbocdone(wrk, oc, *boc);
	}
	obj_deleteboc(boc);
}
594 |
|
|
595 |
|
/*==================================================================== |
596 |
|
*/ |
597 |
|
void |
598 |
1792 |
ObjFreeObj(struct worker *wrk, struct objcore *oc) |
599 |
|
{ |
600 |
1792 |
const struct obj_methods *m = obj_getmethods(oc); |
601 |
|
|
602 |
1792 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
603 |
|
|
604 |
1792 |
AN(m->objfree); |
605 |
1792 |
m->objfree(wrk, oc); |
606 |
1792 |
AZ(oc->stobj->stevedore); |
607 |
1792 |
} |
608 |
|
|
609 |
|
/*==================================================================== |
610 |
|
* ObjHasAttr() |
611 |
|
* |
612 |
|
* Check if object has this attribute |
613 |
|
*/ |
614 |
|
|
615 |
|
int |
616 |
7822 |
ObjHasAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr) |
617 |
|
{ |
618 |
|
|
619 |
7822 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
620 |
7822 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
621 |
|
|
622 |
7822 |
if (oc->oa_present) |
623 |
7804 |
return (oc->oa_present & (1 << attr)); |
624 |
|
|
625 |
|
/* resurrected persistent objects don't have oa_present set */ |
626 |
18 |
return (ObjGetAttr(wrk, oc, attr, NULL) != NULL ? 1 : 0); |
627 |
7822 |
} |
628 |
|
|
629 |
|
/*==================================================================== |
630 |
|
* ObjGetAttr() |
631 |
|
* |
632 |
|
* Get an attribute of the object. |
633 |
|
* |
634 |
|
* Returns NULL on unset or zero length attributes and len set to |
635 |
|
* zero. Returns Non-NULL otherwise and len is updated with the attributes |
636 |
|
* length. |
637 |
|
*/ |
638 |
|
|
639 |
|
const void * |
640 |
22378 |
ObjGetAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr, |
641 |
|
ssize_t *len) |
642 |
|
{ |
643 |
22378 |
const struct obj_methods *om = obj_getmethods(oc); |
644 |
|
|
645 |
22378 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
646 |
|
|
647 |
22378 |
AN(om->objgetattr); |
648 |
22378 |
return (om->objgetattr(wrk, oc, attr, len)); |
649 |
|
} |
650 |
|
|
651 |
|
/*==================================================================== |
652 |
|
* ObjSetAttr() |
653 |
|
* |
654 |
|
* Setting fixed size attributes always succeeds. |
655 |
|
* |
656 |
|
* Setting a variable size attribute asserts if the combined size of the |
657 |
|
* variable attributes exceeds the total variable attribute space set at |
658 |
|
* object creation. If there is space it always succeeds. |
659 |
|
* |
660 |
|
* Setting an auxiliary attribute can fail. |
661 |
|
* |
662 |
|
* Resetting any variable asserts if the new length does not match the |
663 |
|
* previous length exactly. |
664 |
|
* |
665 |
|
* If ptr is Non-NULL, it points to the new content which is copied into |
666 |
|
* the attribute. Otherwise the caller will have to do the copying. |
667 |
|
* |
668 |
|
* Return value is non-NULL on success and NULL on failure. If ptr was |
669 |
|
* non-NULL, it is an error to use the returned pointer to set the |
670 |
|
* attribute data, it is only a success indicator in that case. |
671 |
|
*/ |
672 |
|
|
673 |
|
void * |
674 |
10566 |
ObjSetAttr(struct worker *wrk, struct objcore *oc, enum obj_attr attr, |
675 |
|
ssize_t len, const void *ptr) |
676 |
|
{ |
677 |
10566 |
const struct obj_methods *om = obj_getmethods(oc); |
678 |
|
void *r; |
679 |
|
|
680 |
10566 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
681 |
10566 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
682 |
|
|
683 |
10566 |
AN(om->objsetattr); |
684 |
10566 |
assert((int)attr < 16); |
685 |
10566 |
r = om->objsetattr(wrk, oc, attr, len, ptr); |
686 |
10566 |
if (r) |
687 |
10567 |
oc->oa_present |= (1 << attr); |
688 |
10568 |
return (r); |
689 |
|
} |
690 |
|
|
691 |
|
/*==================================================================== |
692 |
|
* ObjTouch() |
693 |
|
*/ |
694 |
|
|
695 |
|
void |
696 |
3267 |
ObjTouch(struct worker *wrk, struct objcore *oc, vtim_real now) |
697 |
|
{ |
698 |
3267 |
const struct obj_methods *om = obj_getmethods(oc); |
699 |
|
|
700 |
3267 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
701 |
3267 |
if (om->objtouch != NULL) |
702 |
3267 |
om->objtouch(wrk, oc, now); |
703 |
3267 |
} |
704 |
|
|
705 |
|
/*==================================================================== |
706 |
|
* Utility functions which work on top of the previous ones |
707 |
|
*/ |
708 |
|
|
709 |
|
int |
710 |
49 |
ObjCopyAttr(struct worker *wrk, struct objcore *oc, struct objcore *ocs, |
711 |
|
enum obj_attr attr) |
712 |
|
{ |
713 |
|
const void *vps; |
714 |
|
void *vpd; |
715 |
|
ssize_t l; |
716 |
|
|
717 |
49 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
718 |
49 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
719 |
49 |
CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); |
720 |
49 |
CHECK_OBJ_NOTNULL(ocs, OBJCORE_MAGIC); |
721 |
|
|
722 |
49 |
vps = ObjGetAttr(wrk, ocs, attr, &l); |
723 |
|
// XXX: later we want to have zero-length OA's too |
724 |
49 |
if (vps == NULL || l <= 0) |
725 |
0 |
return (-1); |
726 |
49 |
vpd = ObjSetAttr(wrk, oc, attr, l, vps); |
727 |
49 |
if (vpd == NULL) |
728 |
0 |
return (-1); |
729 |
49 |
return (0); |
730 |
49 |
} |
731 |
|
|
732 |
|
int |
733 |
2216 |
ObjSetXID(struct worker *wrk, struct objcore *oc, vxid_t xid) |
734 |
|
{ |
735 |
|
uint64_t u; |
736 |
|
|
737 |
2216 |
u = VXID(xid); |
738 |
2216 |
AZ(ObjSetU64(wrk, oc, OA_VXID, u)); |
739 |
2216 |
return (0); |
740 |
|
} |
741 |
|
|
742 |
|
|
743 |
|
vxid_t |
744 |
2916 |
ObjGetXID(struct worker *wrk, struct objcore *oc) |
745 |
|
{ |
746 |
|
vxid_t u; |
747 |
|
|
748 |
2916 |
AZ(ObjGetU64(wrk, oc, OA_VXID, &u.vxid)); |
749 |
2916 |
return (u); |
750 |
|
} |
751 |
|
|
752 |
|
/*-------------------------------------------------------------------- |
753 |
|
* There is no well-defined byteorder for IEEE-754 double and the |
754 |
|
* correct solution (frexp(3) and manual encoding) is more work |
755 |
|
* than our (weak) goal of being endian-agnostic requires at this point. |
756 |
|
* We give it a shot by memcpy'ing doubles over a uint64_t and then |
757 |
|
* BE encode that. |
758 |
|
*/ |
759 |
|
|
760 |
|
int |
761 |
2216 |
ObjSetDouble(struct worker *wrk, struct objcore *oc, enum obj_attr a, double t) |
762 |
|
{ |
763 |
|
void *vp; |
764 |
|
uint64_t u; |
765 |
|
|
766 |
2216 |
assert(sizeof t == sizeof u); |
767 |
2216 |
memcpy(&u, &t, sizeof u); |
768 |
2216 |
vp = ObjSetAttr(wrk, oc, a, sizeof u, NULL); |
769 |
2216 |
if (vp == NULL) |
770 |
0 |
return (-1); |
771 |
2216 |
vbe64enc(vp, u); |
772 |
2216 |
return (0); |
773 |
2216 |
} |
774 |
|
|
775 |
|
int |
776 |
2 |
ObjGetDouble(struct worker *wrk, struct objcore *oc, enum obj_attr a, double *d) |
777 |
|
{ |
778 |
|
const void *vp; |
779 |
|
uint64_t u; |
780 |
|
ssize_t l; |
781 |
|
|
782 |
2 |
assert(sizeof *d == sizeof u); |
783 |
2 |
vp = ObjGetAttr(wrk, oc, a, &l); |
784 |
2 |
if (vp == NULL) |
785 |
0 |
return (-1); |
786 |
2 |
if (d != NULL) { |
787 |
2 |
assert(l == sizeof u); |
788 |
2 |
u = vbe64dec(vp); |
789 |
2 |
memcpy(d, &u, sizeof *d); |
790 |
2 |
} |
791 |
2 |
return (0); |
792 |
2 |
} |
793 |
|
|
794 |
|
/*-------------------------------------------------------------------- |
795 |
|
*/ |
796 |
|
|
797 |
|
int |
798 |
4934 |
ObjSetU64(struct worker *wrk, struct objcore *oc, enum obj_attr a, uint64_t t) |
799 |
|
{ |
800 |
|
void *vp; |
801 |
|
|
802 |
4934 |
vp = ObjSetAttr(wrk, oc, a, sizeof t, NULL); |
803 |
4934 |
if (vp == NULL) |
804 |
0 |
return (-1); |
805 |
4934 |
vbe64enc(vp, t); |
806 |
4934 |
return (0); |
807 |
4934 |
} |
808 |
|
|
809 |
|
int |
810 |
8296 |
ObjGetU64(struct worker *wrk, struct objcore *oc, enum obj_attr a, uint64_t *d) |
811 |
|
{ |
812 |
|
const void *vp; |
813 |
|
ssize_t l; |
814 |
|
|
815 |
8296 |
vp = ObjGetAttr(wrk, oc, a, &l); |
816 |
8296 |
if (vp == NULL || l != sizeof *d) |
817 |
0 |
return (-1); |
818 |
8296 |
if (d != NULL) |
819 |
8296 |
*d = vbe64dec(vp); |
820 |
8296 |
return (0); |
821 |
8296 |
} |
822 |
|
|
823 |
|
/*-------------------------------------------------------------------- |
824 |
|
*/ |
825 |
|
|
826 |
|
int |
827 |
7321 |
ObjCheckFlag(struct worker *wrk, struct objcore *oc, enum obj_flags of) |
828 |
|
{ |
829 |
|
const uint8_t *fp; |
830 |
|
|
831 |
7321 |
fp = ObjGetAttr(wrk, oc, OA_FLAGS, NULL); |
832 |
7321 |
AN(fp); |
833 |
7321 |
return ((*fp) & of); |
834 |
|
} |
835 |
|
|
836 |
|
void |
837 |
491 |
ObjSetFlag(struct worker *wrk, struct objcore *oc, enum obj_flags of, int val) |
838 |
|
{ |
839 |
|
uint8_t *fp; |
840 |
|
|
841 |
491 |
fp = ObjSetAttr(wrk, oc, OA_FLAGS, 1, NULL); |
842 |
491 |
AN(fp); |
843 |
491 |
if (val) |
844 |
489 |
(*fp) |= of; |
845 |
|
else |
846 |
2 |
(*fp) &= ~of; |
847 |
491 |
} |
848 |
|
|
849 |
|
/*==================================================================== |
850 |
|
* Object event subscription mechanism. |
851 |
|
* |
852 |
|
* XXX: it is extremely unclear what the locking circumstances are here. |
853 |
|
*/ |
854 |
|
|
855 |
|
/* One event subscription: callback, its private data and event mask. */
struct oev_entry {
	unsigned			magic;
#define OEV_MAGIC			0xb0b7c5a1
	unsigned			mask;
	obj_event_f			*func;
	void				*priv;
	VTAILQ_ENTRY(oev_entry)		list;
};

/* All current subscriptions, protected by oev_rwl. */
static VTAILQ_HEAD(,oev_entry)	oev_list;
static pthread_rwlock_t		oev_rwl;
/* Union of all subscribed masks; read optimistically (unlocked) in
 * ObjSendEvent() as a fast-path filter. */
static unsigned			oev_mask;
867 |
|
|
868 |
|
/* |
869 |
|
* NB: ObjSubscribeEvents() is not atomic: |
870 |
|
* oev_mask is checked optimistically in ObjSendEvent() |
871 |
|
*/ |
872 |
|
uintptr_t |
873 |
39 |
ObjSubscribeEvents(obj_event_f *func, void *priv, unsigned mask) |
874 |
|
{ |
875 |
|
struct oev_entry *oev; |
876 |
|
|
877 |
39 |
AN(func); |
878 |
39 |
AZ(mask & ~OEV_MASK); |
879 |
|
|
880 |
39 |
ALLOC_OBJ(oev, OEV_MAGIC); |
881 |
39 |
AN(oev); |
882 |
39 |
oev->func = func; |
883 |
39 |
oev->priv = priv; |
884 |
39 |
oev->mask = mask; |
885 |
39 |
PTOK(pthread_rwlock_wrlock(&oev_rwl)); |
886 |
39 |
VTAILQ_INSERT_TAIL(&oev_list, oev, list); |
887 |
39 |
oev_mask |= mask; |
888 |
39 |
PTOK(pthread_rwlock_unlock(&oev_rwl)); |
889 |
39 |
return ((uintptr_t)oev); |
890 |
|
} |
891 |
|
|
892 |
|
/*
 * Remove the subscription identified by *handle (as returned by
 * ObjSubscribeEvents()), recompute the union mask from the remaining
 * subscribers, free the entry, and zero *handle.  Asserts if the
 * handle does not match a live subscription.
 */
void
ObjUnsubscribeEvents(uintptr_t *handle)
{
	struct oev_entry *oev, *oev2 = NULL;
	unsigned newmask = 0;

	AN(handle);
	AN(*handle);
	PTOK(pthread_rwlock_wrlock(&oev_rwl));
	/* One pass: find the victim and rebuild the mask of survivors. */
	VTAILQ_FOREACH(oev, &oev_list, list) {
		CHECK_OBJ_NOTNULL(oev, OEV_MAGIC);
		if ((uintptr_t)oev == *handle)
			oev2 = oev;
		else
			newmask |= oev->mask;
	}
	AN(oev2);
	VTAILQ_REMOVE(&oev_list, oev2, list);
	oev_mask = newmask;
	AZ(newmask & ~OEV_MASK);
	PTOK(pthread_rwlock_unlock(&oev_rwl));
	FREE_OBJ(oev2);
	*handle = 0;
}
916 |
|
|
917 |
|
void |
918 |
3273 |
ObjSendEvent(struct worker *wrk, struct objcore *oc, unsigned event) |
919 |
|
{ |
920 |
|
struct oev_entry *oev; |
921 |
|
|
922 |
3273 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
923 |
3273 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
924 |
3273 |
AN(event & OEV_MASK); |
925 |
3273 |
AZ(event & ~OEV_MASK); |
926 |
3273 |
if (!(event & oev_mask)) |
927 |
3184 |
return; |
928 |
|
|
929 |
89 |
PTOK(pthread_rwlock_rdlock(&oev_rwl)); |
930 |
194 |
VTAILQ_FOREACH(oev, &oev_list, list) { |
931 |
105 |
CHECK_OBJ_NOTNULL(oev, OEV_MAGIC); |
932 |
105 |
if (event & oev->mask) |
933 |
105 |
oev->func(wrk, oev->priv, oc, event); |
934 |
105 |
} |
935 |
89 |
PTOK(pthread_rwlock_unlock(&oev_rwl)); |
936 |
|
|
937 |
3273 |
} |
938 |
|
|
939 |
|
void |
940 |
936 |
ObjInit(void) |
941 |
|
{ |
942 |
936 |
VTAILQ_INIT(&oev_list); |
943 |
936 |
PTOK(pthread_rwlock_init(&oev_rwl, NULL)); |
944 |
936 |
} |