varnish-cache/bin/varnishd/storage/storage_persistent_silo.c
/*-
 * Copyright (c) 2008-2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Persistent storage method
 *
 * XXX: Before we start the client or maybe after it stops, we should give the
 * XXX: stevedores a chance to examine their storage for consistency.
 *
 */

#include "config.h"

#include <stdio.h>
#include <stdlib.h>

#include "cache/cache_varnishd.h"

#include "vsha256.h"
#include "vend.h"
#include "vtim.h"

#include "cache/cache_objhead.h"

#include "storage/storage.h"
#include "storage/storage_simple.h"
#include "storage/storage_persistent.h"

/*
 * We use the top bit to mark objects still needing fixup
 * In theory this may need to be platform dependent
 */

#define NEED_FIXUP      (1U << 31)
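
/*
 * NEED_FIXUP is OR'ed into oc->stobj->priv2 when smp_load_seg() resurrects
 * an object, and cleared by smp_sml_getobj() once the object body has been
 * located and validated.
 */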

/*--------------------------------------------------------------------
 * Write the segment list back to the silo.
 *
 * We write the first copy, sync it synchronously, then write the
 * second copy and sync it synchronously.
 *
 * Provided the kernel doesn't lie, that means we will always have
 * at least one valid copy in the silo.
 */

static void
smp_save_seg(const struct smp_sc *sc, struct smp_signspace *spc)
{
        struct smp_segptr *ss;
        struct smp_seg *sg;
        uint64_t length;

        Lck_AssertHeld(&sc->mtx);
        smp_reset_signspace(spc);
        ss = SIGNSPACE_DATA(spc);
        length = 0;
        VTAILQ_FOREACH(sg, &sc->segments, list) {
                assert(sg->p.offset < sc->mediasize);
                assert(sg->p.offset + sg->p.length <= sc->mediasize);
                *ss = sg->p;
                ss++;
                length += sizeof *ss;
        }
        smp_append_signspace(spc, length);
        smp_sync_sign(&spc->ctx);
}

void
smp_save_segs(struct smp_sc *sc)
{
        struct smp_seg *sg, *sg2;

        CHECK_OBJ_NOTNULL(sc, SMP_SC_MAGIC);
        Lck_AssertHeld(&sc->mtx);

        /*
         * Remove empty segments from the front of the list
         * before we write the segments to disk.
         */
        VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
                CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);

                if (sg->nobj > 0)
                        break;
                if (sg == sc->cur_seg)
                        continue;
                VTAILQ_REMOVE(&sc->segments, sg, list);
                AN(VTAILQ_EMPTY(&sg->objcores));
                FREE_OBJ(sg);
        }
        smp_save_seg(sc, &sc->seg1);
        smp_save_seg(sc, &sc->seg2);
}

/*--------------------------------------------------------------------
 * Load segments
 *
 * The overall objective is to register the existence of an object, based
 * only on the minimally sized struct smp_object, without causing the
 * main object to be faulted in.
 *
 * XXX: We can test this by mprotecting the main body of the segment
 * XXX: until the first fixup happens, or even just over this loop.
 * XXX: However, that requires that the smp_objects start further
 * XXX: into the segment than a page, so that they do not get hit
 * XXX: by the protection.
 */

void
smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
    struct smp_seg *sg)
{
        struct smp_object *so;
        struct objcore *oc;
        struct ban *ban;
        uint32_t no;
        double t_now = VTIM_real();
        struct smp_signctx ctx[1];

        ASSERT_SILO_THREAD(sc);
        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
        assert(sg->flags & SMP_SEG_MUSTLOAD);
        sg->flags &= ~SMP_SEG_MUSTLOAD;
        AN(sg->p.offset);
        if (sg->p.objlist == 0)
                return;
        smp_def_sign(sc, ctx, sg->p.offset, "SEGHEAD");
        if (smp_chk_sign(ctx))
                return;

        /* test SEGTAIL */
        /* test OBJIDX */
        so = (void*)(sc->base + sg->p.objlist);
        sg->objs = so;
        no = sg->p.lobjlist;
        /* Clear the bogus "hold" count */
        sg->nobj = 0;
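        /*
         * Each smp_object that has not expired gets a new objcore, marked
         * NEED_FIXUP and counted as n_vampireobject, until smp_sml_getobj()
         * validates the object body on first access.
         */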
        for (;no > 0; so++,no--) {
                if (EXP_WHEN(so) < t_now)
                        continue;
                ban = BAN_FindBan(so->ban);
                AN(ban);
                oc = ObjNew(wrk);
                oc->stobj->stevedore = sc->parent;
                smp_init_oc(oc, sg, no);
                VTAILQ_INSERT_TAIL(&sg->objcores, oc, lru_list);
                oc->stobj->priv2 |= NEED_FIXUP;
                EXP_COPY(oc, so);
                sg->nobj++;
                oc->refcnt++;
                HSH_Insert(wrk, so->hash, oc, ban);
                AN(oc->ban);
                HSH_DerefBoc(wrk, oc);  // XXX Keep it and stream resurrection?
                (void)HSH_DerefObjCore(wrk, &oc);
                wrk->stats->n_vampireobject++;
        }
        Pool_Sumstat(wrk);
        sg->flags |= SMP_SEG_LOADED;
}

/*--------------------------------------------------------------------
 * Create a new segment
 */

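/*
 * The new segment is carved out at sc->free_offset and aligned to the
 * silo's granularity.  Object bodies are allocated upwards from
 * sc->next_bot, while the smp_object index lives at the top of the
 * segment (sg->objs points at sc->next_top); smp_close_seg() later moves
 * the index down next to the bodies when space permits.
 */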
void
smp_new_seg(struct smp_sc *sc)
{
        struct smp_seg tmpsg;
        struct smp_seg *sg;

        AZ(sc->cur_seg);
        Lck_AssertHeld(&sc->mtx);

        /* XXX: find where it goes in silo */

        INIT_OBJ(&tmpsg, SMP_SEG_MAGIC);
        tmpsg.sc = sc;
        tmpsg.p.offset = sc->free_offset;
        /* XXX: align */
        assert(tmpsg.p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
        assert(tmpsg.p.offset < sc->mediasize);

        tmpsg.p.length = sc->aim_segl;
        tmpsg.p.length = RDN2(tmpsg.p.length, 8);

        if (smp_segend(&tmpsg) > sc->mediasize)
                /* XXX: Consider truncation in this case */
                tmpsg.p.offset = sc->ident->stuff[SMP_SPC_STUFF];

        assert(smp_segend(&tmpsg) <= sc->mediasize);

        sg = VTAILQ_FIRST(&sc->segments);
        if (sg != NULL && tmpsg.p.offset <= sg->p.offset) {
                if (smp_segend(&tmpsg) > sg->p.offset)
                        /* No more space, return (cur_seg will be NULL) */
                        /* XXX: Consider truncation instead of failing */
                        return;
                assert(smp_segend(&tmpsg) <= sg->p.offset);
        }

        if (tmpsg.p.offset == sc->ident->stuff[SMP_SPC_STUFF])
                printf("Wrapped silo\n");

        ALLOC_OBJ(sg, SMP_SEG_MAGIC);
        if (sg == NULL)
                return;
        *sg = tmpsg;
        VTAILQ_INIT(&sg->objcores);

        sg->p.offset = IRNUP(sc, sg->p.offset);
        sg->p.length -= sg->p.offset - tmpsg.p.offset;
        sg->p.length = IRNDN(sc, sg->p.length);
        assert(sg->p.offset + sg->p.length <= tmpsg.p.offset + tmpsg.p.length);
        sc->free_offset = sg->p.offset + sg->p.length;

        VTAILQ_INSERT_TAIL(&sc->segments, sg, list);

        /* Neuter the new segment in case there is an old one there */
        AN(sg->p.offset);
        smp_def_sign(sc, sg->ctx, sg->p.offset, "SEGHEAD");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Set up our allocation points */
        sc->cur_seg = sg;
        sc->next_bot = sg->p.offset + IRNUP(sc, SMP_SIGN_SPACE);
        sc->next_top = smp_segend(sg);
        sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
        IASSERTALIGN(sc, sc->next_bot);
        IASSERTALIGN(sc, sc->next_top);
        sg->objs = (void*)(sc->base + sc->next_top);
}

/*--------------------------------------------------------------------
 * Close a segment
 */

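/*
 * The code below lays the closed segment out, from low to high offset, as:
 * SEGHEAD signature, object bodies, OBJIDX signature, the smp_object
 * index, and finally the SEGTAIL signature.
 */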
void
smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
{
        uint64_t left, dst, len;
        void *dp;

        CHECK_OBJ_NOTNULL(sc, SMP_SC_MAGIC);
        Lck_AssertHeld(&sc->mtx);

        CHECK_OBJ_NOTNULL(sg, SMP_SEG_MAGIC);
        assert(sg == sc->cur_seg);
        AN(sg->p.offset);
        sc->cur_seg = NULL;

        if (sg->nalloc == 0) {
                /* If the segment is empty, delete it instead */
                VTAILQ_REMOVE(&sc->segments, sg, list);
                assert(sg->p.offset >= sc->ident->stuff[SMP_SPC_STUFF]);
                assert(sg->p.offset < sc->mediasize);
                sc->free_offset = sg->p.offset;
                AN(VTAILQ_EMPTY(&sg->objcores));
                FREE_OBJ(sg);
                return;
        }

        /*
         * If there is enough space left that we can move the smp_objects
         * down without overwriting the present copy, we do so to
         * compact the segment.
         */
        left = smp_spaceleft(sc, sg);
        len = sizeof(struct smp_object) * sg->p.lobjlist;
        if (len < left) {
                dst = sc->next_bot + IRNUP(sc, SMP_SIGN_SPACE);
                dp = sc->base + dst;
                assert((uintptr_t)dp + len < (uintptr_t)sg->objs);
                memcpy(dp, sg->objs, len);
                sc->next_top = dst;
                sg->objs = dp;
                sg->p.length = (sc->next_top - sg->p.offset)
                     + len + IRNUP(sc, SMP_SIGN_SPACE);
                (void)smp_spaceleft(sc, sg);    /* for the asserts */

        }

        /* Update the segment header */
        sg->p.objlist = sc->next_top;

        /* Write the (empty) OBJIDX signature */
        sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
        assert(sc->next_top >= sc->next_bot);
        smp_def_sign(sc, sg->ctx, sc->next_top, "OBJIDX");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Write the (empty) SEGTAIL signature */
        smp_def_sign(sc, sg->ctx,
            sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE), "SEGTAIL");
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);

        /* Save segment list */
        smp_save_segs(sc);
        sc->free_offset = smp_segend(sg);
}


/*---------------------------------------------------------------------
 */

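/*
 * oc->stobj->priv2 carries the object's index within the segment (the
 * top bit may carry NEED_FIXUP): index sg->p.lobjlist maps to the first
 * slot of sg->objs, index 1 to the last.
 */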
static struct smp_object *
smp_find_so(const struct smp_seg *sg, unsigned priv2)
{
        struct smp_object *so;

        priv2 &= ~NEED_FIXUP;
        assert(priv2 > 0);
        assert(priv2 <= sg->p.lobjlist);
        so = &sg->objs[sg->p.lobjlist - priv2];
        return (so);
}

/*---------------------------------------------------------------------
 * Check if a given storage structure is valid to use
 */

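/*
 * Returns zero if the storage structure is usable, otherwise a nonzero
 * code identifying the first check that failed.
 */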
static int
smp_loaded_st(const struct smp_sc *sc, const struct smp_seg *sg,
    const struct storage *st)
{
        struct smp_seg *sg2;
        const uint8_t *pst;
        uint64_t o;

        (void)sg;               /* XXX: faster: Start search from here */
        pst = (const void *)st;

        if (pst < (sc->base + sc->ident->stuff[SMP_SPC_STUFF]))
                return (0x01);          /* Before silo payload start */
        if (pst > (sc->base + sc->ident->stuff[SMP_END_STUFF]))
                return (0x02);          /* After silo end */

        o = pst - sc->base;

        /* Find which segment contains the storage structure */
        VTAILQ_FOREACH(sg2, &sc->segments, list)
                if (o > sg2->p.offset && (o + sizeof(*st)) < sg2->p.objlist)
                        break;
        if (sg2 == NULL)
                return (0x04);          /* No claiming segment */
        if (!(sg2->flags & SMP_SEG_LOADED))
                return (0x08);          /* Claiming segment not loaded */

        /* It is now safe to access the storage structure */
        if (st->magic != STORAGE_MAGIC)
                return (0x10);          /* Not enough magic */

        if (o + st->space >= sg2->p.objlist)
                return (0x20);          /* Allocation not inside segment */

        if (st->len > st->space)
                return (0x40);          /* Plain bad... */

        /*
         * XXX: We could patch up st->stevedore and st->priv here
         * XXX: but if things go right, we will never need them.
         */
        return (0);
}

/*---------------------------------------------------------------------
 * objcore methods for persistent objects
 */

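/*
 * Rebase a pointer stored in the silo: subtract the mapping base the silo
 * had when the pointer was written (recorded in st->priv) and add the
 * current mapping base (sg->sc->base).
 */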
static void
fix_ptr(const struct smp_seg *sg, const struct storage *st, void **ptr)
{
        // See comment where used below
        uintptr_t u;

        u = (uintptr_t)(*ptr);
        if (u != 0) {
                u -= (uintptr_t)st->priv;
                u += (uintptr_t)sg->sc->base;
        }
        *ptr = (void *)u;
}

struct object * v_matchproto_(sml_getobj_f)
smp_sml_getobj(struct worker *wrk, struct objcore *oc)
{
        struct object *o;
        struct smp_seg *sg;
        struct smp_object *so;
        struct storage *st, *st2;
        uint64_t l;
        int bad;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        AN(oc->stobj->stevedore);

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);

        /**************************************************************
         * The silo may have been remapped at a different address,
         * because the people who came up with ASLR were unable to
         * imagine that there might be beneficial use-cases for
         * always mapping a file at the same specific address.
         *
         * We store the silo's base address in struct storage->priv
         * and manually fix all the pointers in struct object and
         * the list of struct storage objects which hold the body.
         * When done, we update the storage->priv, so we can do the
         * same trick next time.
         *
         * This is a prohibitively expensive workaround, but we can
         * live with it, because the role of this stevedore is only
         * to keep the internal stevedore API honest.
         */

        st = (void*)(sg->sc->base + so->ptr);
        fix_ptr(sg, st, (void**)&st->ptr);

        o = (void*)st->ptr;
        fix_ptr(sg, st, (void**)&o->objstore);
        fix_ptr(sg, st, (void**)&o->va_vary);
        fix_ptr(sg, st, (void**)&o->va_headers);
        fix_ptr(sg, st, (void**)&o->list.vtqh_first);
        fix_ptr(sg, st, (void**)&o->list.vtqh_last);
        st->priv = (void*)(sg->sc->base);

        st2 = o->list.vtqh_first;
        while (st2 != NULL) {
                fix_ptr(sg, st2, (void**)&st2->list.vtqe_next);
                fix_ptr(sg, st2, (void**)&st2->list.vtqe_prev);
                fix_ptr(sg, st2, (void**)&st2->ptr);
                st2->priv = (void*)(sg->sc->base);
                st2 = st2->list.vtqe_next;
        }

        /*
         * The object may not be in this segment, since we allocate it
         * in a separate operation from the smp_object.  We could check
         * that it is in a later segment, but that would be complicated.
         * XXX: For now, be happy if it is inside the silo
         */
        ASSERT_PTR_IN_SILO(sg->sc, o);
        CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC);

        /*
         * If this flag is not set, it never will be, so the lock is not
         * needed to test it.
         */
        if (!(oc->stobj->priv2 & NEED_FIXUP))
                return (o);

        Lck_Lock(&sg->sc->mtx);
        /* Check again, we might have raced. */
        if (oc->stobj->priv2 & NEED_FIXUP) {
                /* We trust the caller to hold a refcnt for us */

                bad = 0;
                l = 0;
                VTAILQ_FOREACH(st, &o->list, list) {
                        bad |= smp_loaded_st(sg->sc, sg, st);
                        if (bad)
                                break;
                        l += st->len;
                }
                if (l != vbe64dec(o->fa_len))
                        bad |= 0x100;

                if (bad) {
                        EXP_ZERO(oc);
                        EXP_ZERO(so);
                }

                sg->nfixed++;
                wrk->stats->n_object++;
                wrk->stats->n_vampireobject--;
                oc->stobj->priv2 &= ~NEED_FIXUP;
        }
        Lck_Unlock(&sg->sc->mtx);
        return (o);
}

void v_matchproto_(objfree_f)
smp_oc_objfree(struct worker *wrk, struct objcore *oc)
{
        struct smp_seg *sg;
        struct smp_object *so;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);

        Lck_Lock(&sg->sc->mtx);
        EXP_ZERO(so);
        so->ptr = 0;

        assert(sg->nobj > 0);
        sg->nobj--;
        if (oc->stobj->priv2 & NEED_FIXUP) {
                wrk->stats->n_vampireobject--;
        } else {
                assert(sg->nfixed > 0);
                sg->nfixed--;
                wrk->stats->n_object--;
        }
        VTAILQ_REMOVE(&sg->objcores, oc, lru_list);

        Lck_Unlock(&sg->sc->mtx);
        memset(oc->stobj, 0, sizeof oc->stobj);
}

/*--------------------------------------------------------------------*/

void
smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx)
{

        AZ(objidx & NEED_FIXUP);
        oc->stobj->priv = sg;
        oc->stobj->priv2 = objidx;
}

/*--------------------------------------------------------------------*/

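/*
 * Mirror TTL and ban changes on the objcore into the on-disk smp_object,
 * so that they survive a restart of the cache process.
 */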
void v_matchproto_(obj_event_f)
smp_oc_event(struct worker *wrk, void *priv, struct objcore *oc, unsigned ev)
{
        struct stevedore *st;
        struct smp_seg *sg;
        struct smp_object *so;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CAST_OBJ_NOTNULL(st, priv, STEVEDORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->stobj->stevedore != st)
                return;

        CAST_OBJ_NOTNULL(sg, oc->stobj->priv, SMP_SEG_MAGIC);
        CHECK_OBJ_NOTNULL(sg->sc, SMP_SC_MAGIC);
        so = smp_find_so(sg, oc->stobj->priv2);

        if (sg == sg->sc->cur_seg) {
                /* Lock necessary, we might race close_seg */
                Lck_Lock(&sg->sc->mtx);
                if (ev & (OEV_BANCHG|OEV_INSERT))
                        so->ban = BAN_Time(oc->ban);
                if (ev & (OEV_TTLCHG|OEV_INSERT))
                        EXP_COPY(so, oc);
                Lck_Unlock(&sg->sc->mtx);
        } else {
                if (ev & (OEV_BANCHG|OEV_INSERT))
                        so->ban = BAN_Time(oc->ban);
                if (ev & (OEV_TTLCHG|OEV_INSERT))
                        EXP_COPY(so, oc);
        }
}