varnish-cache/bin/varnishd/cache/cache_shmlog.c
/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "config.h"

#include "cache_varnishd.h"

#include <stdio.h>
#include <stdlib.h>

#include "vgz.h"
#include "vsl_priv.h"
#include "vmb.h"

#include "common/heritage.h"
#include "common/vsmw.h"

/* ------------------------------------------------------------
 * strands helpers - move elsewhere?
 */

static unsigned
strands_len(const struct strands *s)
{
        unsigned r = 0;
        int i;

        CHECK_OBJ_NOTNULL(s, STRANDS_MAGIC);
        for (i = 0; i < s->n; i++) {
                if (s->p[i] == NULL || *s->p[i] == '\0')
                        continue;
                r += strlen(s->p[i]);
        }

        return (r);
}

/*
 * like VRT_Strands(), but truncating instead of failing for end of buffer
 *
 * returns number of bytes including NUL
 */
static unsigned
strands_cat(char *buf, unsigned bufl, const struct strands *s)
{
        unsigned l = 0, ll;
        int i;

        /* NUL-terminated */
        assert(bufl > 0);
        bufl--;
        CHECK_OBJ_NOTNULL(s, STRANDS_MAGIC);

        for (i = 0; i < s->n && bufl > 0; i++) {
                if (s->p[i] == NULL || *s->p[i] == '\0')
                        continue;
                ll = vmin_t(unsigned, strlen(s->p[i]), bufl);
                memcpy(buf, s->p[i], ll);
                l += ll;
                buf += ll;
                bufl -= ll;
        }
        *buf = '\0';    /* NUL-terminated */
        return (l + 1);
}
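
/*
 * For example, given strands {"foo", NULL, "barbaz"} and an 8 byte
 * buffer, strands_cat() copies "foobarb", NUL-terminates it and
 * returns 8, while strands_len() on the same strands reports the
 * untruncated 9 bytes (excluding the NUL).
 */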

/* These cannot be struct lock, which depends on vsm/vsl working */
static pthread_mutex_t vsl_mtx;
static pthread_mutex_t vsc_mtx;
static pthread_mutex_t vsm_mtx;

static struct VSL_head          *vsl_head;
static const uint32_t           *vsl_end;
static uint32_t                 *vsl_ptr;
static unsigned                 vsl_segment_n;
static ssize_t                  vsl_segsize;

struct VSC_main *VSC_C_main;

static void
vsl_sanity(const struct vsl_log *vsl)
{
        AN(vsl);
        AN(vsl->wlp);
        AN(vsl->wlb);
        AN(vsl->wle);
        assert(vsl->wlb <= vsl->wlp);
        assert(vsl->wlp <= vsl->wle);
}

/*--------------------------------------------------------------------
 * Check if the VSL_tag is masked by parameter bitmap
 */
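
/*
 * The mask is a bitmap with one bit per tag, most significant bit
 * first within each byte: tag >> 3 selects the byte of vsl_mask[] and
 * 0x80 >> (tag & 7) selects the bit.  A tag with numeric value 10,
 * for instance, is tested against bit 0x20 of vsl_mask[1].
 */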

static inline int
vsl_tag_is_masked(enum VSL_tag_e tag)
{
        volatile uint8_t *bm = &cache_param->vsl_mask[0];
        uint8_t b;

        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        bm += ((unsigned)tag >> 3);
        b = (0x80 >> ((unsigned)tag & 7));
        return (*bm & b);
}

int
VSL_tag_is_masked(enum VSL_tag_e tag)
{
        return (vsl_tag_is_masked(tag));
}

/*--------------------------------------------------------------------
 * Lay down the header fields, and return a pointer to the next record
 */
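
/*
 * A record is VSL_OVERHEAD 32-bit words of header followed by the
 * payload: p[0] packs the tag, version and payload length, p[1] and
 * p[2] hold the low and high halves of the 64-bit vxid, and the
 * payload starts at VSL_DATA(p).  p[0] is written last, which is what
 * makes the record visible to readers (see vslr() below).
 */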

static inline uint32_t *
vsl_hdr(enum VSL_tag_e tag, uint32_t *p, unsigned len, vxid_t vxid)
{

        AZ((uintptr_t)p & 0x3);
        assert(tag > SLT__Bogus);
        assert(tag < SLT__Reserved);
        AZ(len & ~VSL_LENMASK);

        p[2] = vxid.vxid >> 32;
        p[1] = vxid.vxid;
        p[0] = (((unsigned)tag & VSL_IDMASK) << VSL_IDSHIFT) |
             (VSL_VERSION_3 << VSL_VERSHIFT) |
             len;
        return (VSL_END(p, len));
}

/*--------------------------------------------------------------------
 * Space available in a VSL buffer when accounting for overhead
 */

static unsigned
vsl_space(const struct vsl_log *vsl)
{
        ptrdiff_t mlen;

        mlen = vsl->wle - vsl->wlp;
        assert(mlen >= 0);
        if (mlen < VSL_OVERHEAD + 1)
                return (0);
        mlen -= VSL_OVERHEAD;
        mlen *= sizeof *vsl->wlp;
        if (mlen > cache_param->vsl_reclen)
                mlen = cache_param->vsl_reclen;
        return (mlen);
}

/*--------------------------------------------------------------------
 * Wrap the VSL buffer
 */

static void
vsl_wrap(void)
{

        assert(vsl_ptr >= vsl_head->log);
        assert(vsl_ptr < vsl_end);
        vsl_segment_n += VSL_SEGMENTS - (vsl_segment_n % VSL_SEGMENTS);
        assert(vsl_segment_n % VSL_SEGMENTS == 0);
        vsl_head->offset[0] = 0;
        vsl_head->log[0] = VSL_ENDMARKER;
        VWMB();
        if (vsl_ptr != vsl_head->log) {
                *vsl_ptr = VSL_WRAPMARKER;
                vsl_ptr = vsl_head->log;
        }
        vsl_head->segment_n = vsl_segment_n;
        VSC_C_main->shm_cycles++;
}

/*--------------------------------------------------------------------
 * Reserve bytes for a record, wrap if necessary
 */
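
/*
 * vsl_get() serializes writers on vsl_mtx while reserving space: it
 * advances vsl_ptr past the new record, plants a fresh VSL_ENDMARKER
 * behind it, and steps the monotonically increasing vsl_segment_n
 * (whose value mod VSL_SEGMENTS indexes vsl_head->offset[]) whenever
 * the reservation crosses into a new segment, so readers can detect
 * being overtaken by the writer.
 */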

static uint32_t *
vsl_get(unsigned len, unsigned records, unsigned flushes)
{
        uint32_t *p;
        int err;

        err = pthread_mutex_trylock(&vsl_mtx);
        if (err == EBUSY) {
                PTOK(pthread_mutex_lock(&vsl_mtx));
                VSC_C_main->shm_cont++;
        } else {
                AZ(err);
        }
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        VSC_C_main->shm_writes++;
        VSC_C_main->shm_flushes += flushes;
        VSC_C_main->shm_records += records;
        VSC_C_main->shm_bytes +=
            VSL_BYTES(VSL_OVERHEAD + VSL_WORDS((uint64_t)len));

        /* Wrap if necessary */
        if (VSL_END(vsl_ptr, len) >= vsl_end)
                vsl_wrap();

        p = vsl_ptr;
        vsl_ptr = VSL_END(vsl_ptr, len);
        assert(vsl_ptr < vsl_end);
        AZ((uintptr_t)vsl_ptr & 0x3);

        *vsl_ptr = VSL_ENDMARKER;

        while ((vsl_ptr - vsl_head->log) / vsl_segsize >
            vsl_segment_n % VSL_SEGMENTS) {
                vsl_segment_n++;
                vsl_head->offset[vsl_segment_n % VSL_SEGMENTS] =
                    vsl_ptr - vsl_head->log;
        }

        PTOK(pthread_mutex_unlock(&vsl_mtx));
        /* Implicit VWMB() in mutex op ensures ENDMARKER and new table
           values are seen before new segment number */
        vsl_head->segment_n = vsl_segment_n;

        return (p);
}

/*--------------------------------------------------------------------
 * Stick a finished record into VSL.
 */

static void
vslr(enum VSL_tag_e tag, vxid_t vxid, const char *b, unsigned len)
{
        uint32_t *p;
        unsigned mlen;

        mlen = cache_param->vsl_reclen;

        /* Truncate */
        if (len > mlen)
                len = mlen;

        p = vsl_get(len, 1, 0);

        memcpy(p + VSL_OVERHEAD, b, len);

        /*
         * the vxid needs to be written before the barrier to
         * ensure it is valid when vsl_hdr() marks the record
         * ready by writing p[0]
         */
        p[2] = vxid.vxid >> 32;
        p[1] = vxid.vxid;
        VWMB();
        (void)vsl_hdr(tag, p, len, vxid);
}

/*--------------------------------------------------------------------
 * Add an unbuffered record to VSL
 *
 * NB: This variant should be used sparingly and only for low volume
 * NB: since it significantly adds to the mutex load on the VSL.
 */
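
/*
 * Illustrative (hypothetical) use: a one-shot record outside any
 * transaction, e.g.
 *
 *      VSL(SLT_Debug, NO_VXID, "example value %d", 42);
 *
 * Per-task logging should use the buffered VSLb() family below, which
 * avoids taking the global vsl_mtx for every record.
 */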

void
VSLv(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, va_list ap)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        if (strchr(fmt, '%') == NULL) {
                vslr(tag, vxid, fmt, strlen(fmt) + 1);
        } else {
                n = vsnprintf(buf, mlen, fmt, ap);
                n = vmin(n, mlen - 1);
                buf[n++] = '\0'; /* NUL-terminated */
                vslr(tag, vxid, buf, n);
        }
}

void
VSLs(enum VSL_tag_e tag, vxid_t vxid, const struct strands *s)
{
        unsigned n, mlen = cache_param->vsl_reclen;
        char buf[mlen];

        if (vsl_tag_is_masked(tag))
                return;

        n = strands_cat(buf, mlen, s);

        vslr(tag, vxid, buf, n);
}

void
VSL(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        VSLv(tag, vxid, fmt, ap);
        va_end(ap);
}

/*--------------------------------------------------------------------*/
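
/*
 * Copy the task's buffered records into the shared memory log in one
 * reservation, wrapped in an SLT__Batch pseudo-record; p[1] carries
 * the batch length in bytes and the write barrier before p[0]
 * publishes the batch to readers.
 */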

void
VSL_Flush(struct vsl_log *vsl, int overflow)
{
        uint32_t *p;
        unsigned l;

        vsl_sanity(vsl);
        l = pdiff(vsl->wlb, vsl->wlp);
        if (l == 0)
                return;

        assert(l >= 8);

        p = vsl_get(l, vsl->wlr, overflow);

        memcpy(p + VSL_OVERHEAD, vsl->wlb, l);
        p[1] = l;
        VWMB();
        p[0] = ((((unsigned)SLT__Batch & 0xff) << VSL_IDSHIFT));
        vsl->wlp = vsl->wlb;
        vsl->wlr = 0;
}

/*--------------------------------------------------------------------
 * Buffered VSLs
 */
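
/*
 * The vslb_* helpers write records into the task's private buffer
 * (vsl->wlb .. vsl->wle) using the task's vxid (vsl->wid); nothing
 * reaches the shared memory log until VSL_Flush() runs, either
 * explicitly, when the buffer fills up, or after every record when
 * the DBG_SYNCVSL debug flag is set.
 */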

static char *
vslb_get(struct vsl_log *vsl, enum VSL_tag_e tag, unsigned *length)
{
        unsigned mlen = cache_param->vsl_reclen;
        char *retval;

        vsl_sanity(vsl);
        if (*length < mlen)
                mlen = *length;

        if (VSL_END(vsl->wlp, mlen) > vsl->wle)
                VSL_Flush(vsl, 1);

        retval = VSL_DATA(vsl->wlp);

        /* If it still doesn't fit, truncate */
        if (VSL_END(vsl->wlp, mlen) > vsl->wle)
                mlen = vsl_space(vsl);

        vsl->wlp = vsl_hdr(tag, vsl->wlp, mlen, vsl->wid);
        vsl->wlr++;
        *length = mlen;
        return (retval);
}

static void
vslb_simple(struct vsl_log *vsl, enum VSL_tag_e tag,
    unsigned length, const char *str)
{
        char *p;

        if (length == 0)
                length = strlen(str);
        length += 1; // NUL
        p = vslb_get(vsl, tag, &length);
        memcpy(p, str, length - 1);
        p[length - 1] = '\0';

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered-txt
 */

void
VSLbt(struct vsl_log *vsl, enum VSL_tag_e tag, txt t)
{

        Tcheck(t);
        if (vsl_tag_is_masked(tag))
                return;

        vslb_simple(vsl, tag, Tlen(t), t.b);
}

/*--------------------------------------------------------------------
 * VSL-buffered-strands
 */
void
VSLbs(struct vsl_log *vsl, enum VSL_tag_e tag, const struct strands *s)
{
        unsigned l;
        char *p;

        if (vsl_tag_is_masked(tag))
                return;

        l = strands_len(s) + 1;
        p = vslb_get(vsl, tag, &l);

        (void)strands_cat(p, l, s);

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * VSL-buffered
 */

void
VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
{
        char *p, *p1;
        unsigned n = 0, mlen;
        va_list ap2;

        AN(fmt);
        if (vsl_tag_is_masked(tag))
                return;

        /*
         * If there are no printf-expansions, don't waste time expanding them
         */
        if (strchr(fmt, '%') == NULL) {
                vslb_simple(vsl, tag, 0, fmt);
                return;
        }

        /*
         * If the format is trivial, deal with it directly
         */
        if (!strcmp(fmt, "%s")) {
                p1 = va_arg(ap, char *);
                vslb_simple(vsl, tag, 0, p1);
                return;
        }

        vsl_sanity(vsl);

        mlen = vsl_space(vsl);

        // First attempt, only if any space at all
        if (mlen > 0) {
                p = VSL_DATA(vsl->wlp);
                va_copy(ap2, ap);
                n = vsnprintf(p, mlen, fmt, ap2);
                va_end(ap2);
        }

        // Second attempt, if a flush might help
        if (mlen == 0 || (n + 1 > mlen && n + 1 <= cache_param->vsl_reclen)) {
                VSL_Flush(vsl, 1);
                mlen = vsl_space(vsl);
                p = VSL_DATA(vsl->wlp);
                n = vsnprintf(p, mlen, fmt, ap);
        }
        if (n + 1 < mlen)
                mlen = n + 1;
        (void)vslb_get(vsl, tag, &mlen);

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

void
VSLb(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
        va_list ap;

        vsl_sanity(vsl);
        va_start(ap, fmt);
        VSLbv(vsl, tag, fmt, ap);
        va_end(ap);
}

#define Tf6 "%ju.%06ju"
#define Ta6(t) (uintmax_t)floor((t)), (uintmax_t)floor((t) * 1e6) % 1000000U
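
/*
 * Tf6/Ta6 format a vtim_real with microsecond precision, e.g.
 * Ta6(3.25) expands to 3 and 250000, which Tf6 prints as "3.250000".
 */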

void
VSLb_ts(struct vsl_log *vsl, const char *event, vtim_real first,
    vtim_real *pprev, vtim_real now)
{

        /*
         * XXX: Make an option to turn off some unnecessary timestamp
         * logging. This must be done carefully because some functions
         * (e.g. V1L_Open) take the last timestamp as their initial
         * value for timeout calculation.
         */
        vsl_sanity(vsl);
        AN(event);
        AN(pprev);
        assert(!isnan(now) && now != 0.);
        VSLb(vsl, SLT_Timestamp, "%s: " Tf6 " " Tf6 " " Tf6,
            event, Ta6(now), Ta6(now - first), Ta6(now - *pprev));
        *pprev = now;
}

void
VSLb_bin(struct vsl_log *vsl, enum VSL_tag_e tag, ssize_t len, const void *ptr)
{
        unsigned mlen;
        char *p;

        vsl_sanity(vsl);
        AN(ptr);
        if (vsl_tag_is_masked(tag))
                return;
        mlen = cache_param->vsl_reclen;

        /* Truncate */
        len = vmin_t(ssize_t, len, mlen);

        assert(vsl->wlp <= vsl->wle);

        /* Flush if necessary */
        if (VSL_END(vsl->wlp, len) > vsl->wle)
                VSL_Flush(vsl, 1);
        assert(VSL_END(vsl->wlp, len) <= vsl->wle);
        p = VSL_DATA(vsl->wlp);
        memcpy(p, ptr, len);
        vsl->wlp = vsl_hdr(tag, vsl->wlp, len, vsl->wid);
        assert(vsl->wlp <= vsl->wle);
        vsl->wlr++;

        if (DO_DEBUG(DBG_SYNCVSL))
                VSL_Flush(vsl, 0);
}

/*--------------------------------------------------------------------
 * Setup a VSL buffer, allocate space if none provided.
 */

void
VSL_Setup(struct vsl_log *vsl, void *ptr, size_t len)
{

        if (ptr == NULL) {
                len = cache_param->vsl_buffer;
                ptr = malloc(len);
                AN(ptr);
        }
        vsl->wlp = ptr;
        vsl->wlb = ptr;
        vsl->wle = ptr;
        vsl->wle += len / sizeof(*vsl->wle);
        vsl->wlr = 0;
        vsl->wid = NO_VXID;
        vsl_sanity(vsl);
}

/*--------------------------------------------------------------------*/

void
VSL_ChgId(struct vsl_log *vsl, const char *typ, const char *why, vxid_t vxid)
{
        vxid_t ovxid;

        vsl_sanity(vsl);
        ovxid = vsl->wid;
        VSLb(vsl, SLT_Link, "%s %ju %s", typ, VXID(vxid), why);
        VSL_End(vsl);
        vsl->wid = vxid;
        VSLb(vsl, SLT_Begin, "%s %ju %s", typ, VXID(ovxid), why);
}

/*--------------------------------------------------------------------*/

void
VSL_End(struct vsl_log *vsl)
{
        txt t;
        char p[] = "";

        vsl_sanity(vsl);
        assert(!IS_NO_VXID(vsl->wid));
        t.b = p;
        t.e = p;
        VSLbt(vsl, SLT_End, t);
        VSL_Flush(vsl, 0);
        vsl->wid = NO_VXID;
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_lock(void)
{
        PTOK(pthread_mutex_lock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsc_unlock(void)
{
        PTOK(pthread_mutex_unlock(&vsc_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_lock(void)
{
        PTOK(pthread_mutex_lock(&vsm_mtx));
}

static void v_matchproto_(vsm_lock_f)
vsm_vsmw_unlock(void)
{
        PTOK(pthread_mutex_unlock(&vsm_mtx));
}

/*--------------------------------------------------------------------*/

void
VSM_Init(void)
{
        unsigned u;

        assert(UINT_MAX % VSL_SEGMENTS == VSL_SEGMENTS - 1);

        PTOK(pthread_mutex_init(&vsl_mtx, &mtxattr_errorcheck));
        PTOK(pthread_mutex_init(&vsc_mtx, &mtxattr_errorcheck));
        PTOK(pthread_mutex_init(&vsm_mtx, &mtxattr_errorcheck));

        vsc_lock = vsm_vsc_lock;
        vsc_unlock = vsm_vsc_unlock;
        vsmw_lock = vsm_vsmw_lock;
        vsmw_unlock = vsm_vsmw_unlock;

        heritage.proc_vsmw = VSMW_New(heritage.vsm_fd, 0640, "_.index");
        AN(heritage.proc_vsmw);

        VSC_C_main = VSC_main_New(NULL, NULL, "");
        AN(VSC_C_main);

        AN(heritage.proc_vsmw);
        vsl_head = VSMW_Allocf(heritage.proc_vsmw, NULL, VSL_CLASS,
            cache_param->vsl_space, VSL_CLASS);
        AN(vsl_head);
        vsl_segsize = ((cache_param->vsl_space - sizeof *vsl_head) /
            sizeof *vsl_end) / VSL_SEGMENTS;
        vsl_end = vsl_head->log + vsl_segsize * VSL_SEGMENTS;
        /* Make segment_n always overflow on first log wrap to make any
           problems with regard to readers on that event visible */
        vsl_segment_n = UINT_MAX - (VSL_SEGMENTS - 1);
        AZ(vsl_segment_n % VSL_SEGMENTS);
        vsl_ptr = vsl_head->log;
        *vsl_ptr = VSL_ENDMARKER;

        memset(vsl_head, 0, sizeof *vsl_head);
        vsl_head->segsize = vsl_segsize;
        vsl_head->offset[0] = 0;
        vsl_head->segment_n = vsl_segment_n;
        for (u = 1; u < VSL_SEGMENTS; u++)
                vsl_head->offset[u] = -1;
        VWMB();
        memcpy(vsl_head->marker, VSL_HEAD_MARKER, sizeof vsl_head->marker);
}