varnish-cache/bin/varnishd/common/common_vsmw.c
0
/*-
1
 * Copyright (c) 2010-2011 Varnish Software AS
2
 * All rights reserved.
3
 *
4
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
5
 *
6
 * SPDX-License-Identifier: BSD-2-Clause
7
 *
8
 * Redistribution and use in source and binary forms, with or without
9
 * modification, are permitted provided that the following conditions
10
 * are met:
11
 * 1. Redistributions of source code must retain the above copyright
12
 *    notice, this list of conditions and the following disclaimer.
13
 * 2. Redistributions in binary form must reproduce the above copyright
14
 *    notice, this list of conditions and the following disclaimer in the
15
 *    documentation and/or other materials provided with the distribution.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 *
29
 * VSM stuff common to manager and child.
30
 *
31
 */
32
33
#include "config.h"
34
35
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
46
47
#include "vdef.h"
48
#include "vas.h"
49
#include "vsb.h"
50
#include "miniobj.h"
51
#include "vqueue.h"
52
53
#include "vfil.h"
54
#include "vrnd.h"
55
56
#include "heritage.h"
57
#include "vsmw.h"
58
59
/* BSD-specific mmap(2) flags; define them away on platforms (Linux)
 * that do not have them. */
#ifndef MAP_HASSEMAPHORE
#  define MAP_HASSEMAPHORE 0 /* XXX Linux */
#endif

#ifndef MAP_NOSYNC
#  define MAP_NOSYNC 0 /* XXX Linux */
#endif
67
/* Default no-op lock callback, in effect until the process installs
 * real locking via the vsmw_lock/vsmw_unlock pointers below. */
static void v_matchproto_(vsm_lock_f)
vsmw_dummy_lock(void)
{
}
71
72
static int vsmw_haslock;
73
vsm_lock_f *vsmw_lock = vsmw_dummy_lock;
74
vsm_lock_f *vsmw_unlock = vsmw_dummy_lock;
75
76
#define vsmw_assert_lock()      AN(vsmw_haslock)
77
78
#define vsmw_do_lock() vsmw_do_lock_(__func__, __LINE__)
79
80
#define vsmw_do_lock_(f, l)                             \
81
        do {                                    \
82
                vsmw_lock();                    \
83
                AZ(vsmw_haslock);               \
84
                vsmw_haslock = 1;               \
85
        } while(0)
86
87
#define vsmw_do_unlock() vsmw_do_unlock_(__func__, __LINE__)
88
#define vsmw_do_unlock_(f, l)                           \
89
        do {                                    \
90
                AN(vsmw_haslock);               \
91
                vsmw_haslock = 0;               \
92
                vsmw_unlock();                  \
93
        } while(0)
94
95
/*--------------------------------------------------------------------*/
96
97
/* One mmap'ed backing file holding one or more VSM segments. */
struct vsmw_cluster {
	unsigned			magic;
#define VSMW_CLUSTER_MAGIC		0x28b74c00

	VTAILQ_ENTRY(vsmw_cluster)	list;
	struct vsmwseg			*cseg;	/* cover segment, if named */
	char				*fn;	/* file name under vdirfd */
	size_t				len;	/* mapped length (page-rounded) */
	void				*ptr;	/* mmap'ed base address */
	size_t				next;	/* next free offset for allocation */
	int				refs;	/* segments + external references */
	int				named;	/* has a cover segment in the index */
};
110
111
/* One allocation carved out of a cluster, published in the index. */
struct vsmwseg {
	unsigned			magic;
#define VSMWSEG_MAGIC			0x7e4ccaea
	VTAILQ_ENTRY(vsmwseg)		list;
	struct vsmw_cluster		*cluster;	/* owning cluster */

	char				*category;	/* VSM class string */
	size_t				off;	/* offset inside cluster */
	size_t				len;	/* rounded payload length */
	char				*id;	/* instance identifier */
	void				*ptr;	/* caller-visible address */
};
123
124
/* Writer state for one VSM directory (one per manager/child). */
struct vsmw {
	unsigned			magic;
#define VSMW_MAGIC			0xc2ca2cd9
	int				vdirfd;	/* directory fd for *at() calls */
	int				mode;	/* file creation mode */
	char				*idx;	/* index file name */
	VTAILQ_HEAD(, vsmw_cluster)	clusters;
	VTAILQ_HEAD(, vsmwseg)		segs;
	struct vsb			*vsb;	/* scratch buffer, lock-protected */
	pid_t				pid;	/* recorded in index header */
	time_t				birth;	/* recorded in index header */
	uint64_t			nsegs;	/* live segments */
	uint64_t			nsubs;	/* '-' records since last rewrite */
};
138
139
/* Allocations in clusters never start at offset zero */
140
#define VSM_CLUSTER_OFFSET 16
141
142
/*--------------------------------------------------------------------*/
143
144
static void
145 9393
vsmw_idx_head(const struct vsmw *vsmw, int fd)
146
{
147
        char buf[64];
148
149 9393
        bprintf(buf, "# %jd %jd\n", (intmax_t)vsmw->pid, (intmax_t)vsmw->birth);
150
        // XXX handle ENOSPC? #2764
151 9393
        assert(write(fd, buf, strlen(buf)) == strlen(buf));
152 9393
}
153
154
/* Assert that a segment string is present and single-line (the index
 * file format is newline-delimited).  NB: no trailing semicolon after
 * while (0) — the call site supplies it, keeping the macro a single
 * statement that is safe in unbraced if/else bodies. */
#define ASSERT_SEG_STR(x) do {			\
		AN(x);				\
		AZ(strchr(x, '\n'));		\
	} while (0)
158
159
/*
 * Append one index record for <seg> to vsmw->vsb:
 *	"<act> <cluster-file> <offset> <length> <category> <id>\n"
 * where act is '+' (segment added) or '-' (segment deleted).
 */
static void
vsmw_fmt_index(const struct vsmw *vsmw, const struct vsmwseg *seg, char act)
{

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	AN(seg->cluster);
	/* category/id must not contain '\n', which would corrupt the
	 * line-oriented index format. */
	ASSERT_SEG_STR(seg->category);
	ASSERT_SEG_STR(seg->id);

	VSB_printf(vsmw->vsb, "%c %s %zu %zu %s %s\n",
	    act,
	    seg->cluster->fn,
	    seg->off,
	    seg->len,
	    seg->category,
	    seg->id);
}
178
179
/*--------------------------------------------------------------------*/
180
181
/*
 * Generate a fresh, unused filename "_.<pfx>.<16 random hex digits>"
 * into vsmw->vsb.  Loops until openat(2) fails with ENOENT, proving
 * the name is not already taken in the VSM directory.
 */
static void
vsmw_mkent(const struct vsmw *vsmw, const char *pfx)
{
	int fd;
	uint64_t rn;

	AN(pfx);
	vsmw_assert_lock();
	while (1) {
		VSB_clear(vsmw->vsb);
		VSB_printf(vsmw->vsb, "_.%s", pfx);
		AZ(VRND_RandomCrypto(&rn, sizeof rn));
		VSB_printf(vsmw->vsb, ".%016jx", (uintmax_t)rn);
		AZ(VSB_finish(vsmw->vsb));
		fd = openat(vsmw->vdirfd, VSB_data(vsmw->vsb), O_RDONLY);
		if (fd < 0 && errno == ENOENT)
			return;		/* name is free; result left in vsb */
		if (fd >= 0)
			closefd(&fd);	/* collision: close and retry */
	}
}
202
203
/*--------------------------------------------------------------------*/
204
205
/*
 * Append a single '+'/'-' record for <seg> to the live index file.
 * Clobbers vsmw->vsb.
 */
static void
vsmw_append_record(struct vsmw *vsmw, struct vsmwseg *seg, char act)
{
	int fd;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	fd = openat(vsmw->vdirfd, vsmw->idx, O_APPEND | O_WRONLY);
	assert(fd >= 0);
	VSB_clear(vsmw->vsb);
	vsmw_fmt_index(vsmw, seg, act);
	AZ(VSB_finish(vsmw->vsb));
	XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
	closefd(&fd);
}
221
222
/*--------------------------------------------------------------------*/
223
224
/*
 * Register a new segment: link it onto the segment list, publish a
 * '+' record in the index, and bump the live-segment count.
 */
static void
vsmw_addseg(struct vsmw *vsmw, struct vsmwseg *seg)
{

	vsmw_assert_lock();
	VTAILQ_INSERT_TAIL(&vsmw->segs, seg, list);
	vsmw_append_record(vsmw, seg, '+');
	vsmw->nsegs++;
}
233
234
/*--------------------------------------------------------------------*/
235
236
/*
 * Retire a segment.  Normally just appends a '-' record to the index,
 * but once deletions dominate (nsubs >= 10 and nsubs * 2 >= nsegs)
 * the whole index is compacted: rewritten into a fresh temp file and
 * atomically renamed over the live index.  Frees <seg>.
 */
static void
vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg)
{
	char *t = NULL;
	int fd;
	struct vsmwseg *s2;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);

	VTAILQ_REMOVE(&vsmw->segs, seg, list);

	vsmw->nsegs--;
	if (vsmw->nsubs < 10 || vsmw->nsubs * 2 < vsmw->nsegs) {
		/* Cheap path: log a deletion record, count it. */
		vsmw_append_record(vsmw, seg, '-');
		vsmw->nsubs++;
	} else {
		/* Compaction: pick a fresh temp name (vsmw_mkent leaves
		 * it in vsmw->vsb, so copy it out before reusing vsb). */
		vsmw_mkent(vsmw, vsmw->idx);
		REPLACE(t, VSB_data(vsmw->vsb));
		fd = openat(vsmw->vdirfd,
		    t, O_WRONLY|O_CREAT|O_EXCL, vsmw->mode);
		assert(fd >= 0);
		vsmw_idx_head(vsmw, fd);
		VSB_clear(vsmw->vsb);
		/* Re-emit '+' records for every surviving segment. */
		VTAILQ_FOREACH(s2, &vsmw->segs, list)
			vsmw_fmt_index(vsmw, s2, '+');
		AZ(VSB_finish(vsmw->vsb));
		XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
		closefd(&fd);
		/* Atomically replace the live index with the compacted one. */
		AZ(renameat(vsmw->vdirfd, t, vsmw->vdirfd, vsmw->idx));
		REPLACE(t, NULL);
		vsmw->nsubs = 0;
	}
	REPLACE(seg->category, NULL);
	REPLACE(seg->id, NULL);
	FREE_OBJ(seg);
}
274
275
/*--------------------------------------------------------------------*/
276
277
/* Diagnostics for mlock(2) failure: report RLIMIT_MEMLOCK limits on
 * platforms that have them; no-op elsewhere. */
#ifdef RLIMIT_MEMLOCK
/* Print one rlimit value to stderr, spelling out RLIM_INFINITY. */
static void
printlim(const char *name, rlim_t lim)
{

	fprintf(stderr, "Info: %s: ", name);
	if (lim == RLIM_INFINITY)
		fprintf(stderr, "unlimited\n");
	else
		fprintf(stderr, "%ju bytes\n", (uintmax_t)lim);
}

/* Print both soft and hard RLIMIT_MEMLOCK limits. */
static void
printmemlock(void) {
	struct rlimit rlim;

	AZ(getrlimit(RLIMIT_MEMLOCK, &rlim));
	printlim("max locked memory (soft)", rlim.rlim_cur);
	printlim("max locked memory (hard)", rlim.rlim_max);
}
#else
static void printmemlock(void) {}
#endif
300
301
/*
 * Create a new cluster: a freshly named backing file of (page-rounded)
 * <len> bytes under vsmw->vdirfd, mmap'ed shared.  The mapping is
 * mlock'ed on a best-effort basis; failure is only warned about once.
 */
static struct vsmw_cluster *
vsmw_newcluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	static int warn = 0;	/* warn about mlock failure only once */
	int fd;
	size_t ps;

	vsmw_assert_lock();
	ALLOC_OBJ(vc, VSMW_CLUSTER_MAGIC);
	AN(vc);

	/* vsmw_mkent leaves the new unique filename in vsmw->vsb. */
	vsmw_mkent(vsmw, pfx);
	REPLACE(vc->fn, VSB_data(vsmw->vsb));

	VTAILQ_INSERT_TAIL(&vsmw->clusters, vc, list);

	ps = getpagesize();
	len = RUP2(len, ps);
	vc->len = len;

	fd = openat(vsmw->vdirfd, vc->fn,
	    O_RDWR | O_CREAT | O_EXCL, vsmw->mode);
	assert(fd >= 0);

	/* Reserve the space up front so later stores cannot SIGBUS. */
	AZ(VFIL_allocate(fd, (off_t)len, 1));

	vc->ptr = (void *)mmap(NULL, len,
	    PROT_READ|PROT_WRITE,
	    MAP_HASSEMAPHORE | MAP_NOSYNC | MAP_SHARED,
	    fd, 0);

	closefd(&fd);
	assert(vc->ptr != MAP_FAILED);
	if (mlock(vc->ptr, len) && warn++ == 0)  {
		fprintf(stderr, "Warning: mlock() of VSM failed: %s (%d)\n",
		    VAS_errtxt(errno), errno);
		printmemlock();
	}

	return (vc);
}
343
344
/*
 * Public: create a named cluster for external use.  A "cover" segment
 * spanning the whole cluster (empty category/id) is published in the
 * index, and the first VSM_CLUSTER_OFFSET bytes are kept unused so no
 * allocation ever starts at offset zero.
 */
struct vsmw_cluster *
VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	struct vsmwseg *seg;

	vsmw_do_lock();
	vc = vsmw_newcluster(vsmw, len + VSM_CLUSTER_OFFSET, pfx);
	AN(vc);
	vc->next += VSM_CLUSTER_OFFSET;

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	vc->cseg = seg;
	seg->len = vc->len;
	seg->cluster = vc;
	REPLACE(seg->category, "");
	REPLACE(seg->id, "");
	/* The cover segment holds one reference for the caller. */
	vc->refs++;
	vc->named = 1;
	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (vc);
}
369
370
static void
371 9959
vsmw_DestroyCluster_locked(struct vsmw *vsmw, struct vsmw_cluster *vc)
372
{
373
374 9959
        vsmw_assert_lock();
375 9959
        CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
376 9959
        CHECK_OBJ_NOTNULL(vc, VSMW_CLUSTER_MAGIC);
377
378 9959
        AZ(vc->refs);
379
380 9959
        AZ(munmap(vc->ptr, vc->len));
381 9959
        if (vc->named)
382 384
                vsmw_delseg(vsmw, vc->cseg);
383 9959
        vc->cseg = 0;
384
385 9959
        VTAILQ_REMOVE(&vsmw->clusters, vc, list);
386 9959
        if (unlinkat(vsmw->vdirfd, vc->fn, 0))
387 0
                assert (errno == ENOENT);
388 9959
        REPLACE(vc->fn, NULL);
389 9959
        FREE_OBJ(vc);
390 9959
}
391
392
void
393 384
VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vsmcp)
394
{
395
        struct vsmw_cluster *vc;
396
397 384
        TAKE_OBJ_NOTNULL(vc, vsmcp, VSMW_CLUSTER_MAGIC);
398
399 384
        vsmw_do_lock();
400 384
        if (--vc->refs == 0)
401 215
                vsmw_DestroyCluster_locked(vsmw, vc);
402 384
        vsmw_do_unlock();
403 384
}
404
405
/*--------------------------------------------------------------------*/
406
407
/*
 * Public: allocate a VSM segment of <payload> bytes in cluster <vc>
 * (or a private single-segment cluster if vc is NULL), identified by
 * <category> and an id built as "<prefix>.<fmt-expansion>" (prefix
 * part omitted when prefix is NULL).  Returns the segment's address.
 */
void *
VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t payload, const char *prefix,
    const char *fmt, va_list va)
{
	struct vsmwseg *seg;
	ssize_t l;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	REPLACE(seg->category, category);
	seg->len = PRNDUP(payload);

	/* Build the id string in vsmw->vsb. */
	VSB_clear(vsmw->vsb);
	if (prefix != NULL) {
		assert(prefix[0] != '\0');
		VSB_cat(vsmw->vsb, prefix);
		if (fmt[0] != '\0')
			VSB_cat(vsmw->vsb, ".");
	}
	l = VSB_len(vsmw->vsb);
	assert(l >= 0);
	VSB_vprintf(vsmw->vsb, fmt, va);
	AZ(VSB_finish(vsmw->vsb));
	/* A non-empty fmt must have contributed to the id. */
	assert(fmt[0] == '\0' || l < VSB_len(vsmw->vsb));

	REPLACE(seg->id, VSB_data(vsmw->vsb));

	/* NB: vsmw_newcluster reuses vsmw->vsb, so the id was copied
	 * out above first. */
	if (vc == NULL)
		vc = vsmw_newcluster(vsmw, seg->len, category);
	AN(vc);
	vc->refs++;

	/* Carve the segment out of the cluster's bump allocator. */
	seg->cluster = vc;
	seg->off = vc->next;
	vc->next += seg->len;
	assert(vc->next <= vc->len);
	seg->ptr = seg->off + (char*)vc->ptr;

	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (seg->ptr);
}
454
455
/*
 * Public: varargs convenience wrapper around VSMW_Allocv() with no
 * id prefix.
 */
void *
VSMW_Allocf(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t len, const char *fmt, ...)
{
	void *retval;
	va_list args;

	va_start(args, fmt);
	retval = VSMW_Allocv(vsmw, vc, category, len, NULL, fmt, args);
	va_end(args);
	return (retval);
}
467
468
/*--------------------------------------------------------------------*/
469
470
/*
 * Public: free a segment previously returned by VSMW_Alloc*().  The
 * segment is located by linear search on its address; *pp is NULLed.
 * Destroys the owning cluster when this was its last reference.
 */
void
VSMW_Free(struct vsmw *vsmw, void **pp)
{
	struct vsmwseg *seg;
	struct vsmw_cluster *cp;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	AN(pp);
	VTAILQ_FOREACH(seg, &vsmw->segs, list)
		if (seg->ptr == *pp)
			break;
	AN(seg);	/* freeing an unknown pointer is a fatal bug */
	*pp = NULL;

	cp = seg->cluster;
	CHECK_OBJ_NOTNULL(cp, VSMW_CLUSTER_MAGIC);
	assert(cp->refs > 0);

	vsmw_delseg(vsmw, seg);

	if (!--cp->refs)
		vsmw_DestroyCluster_locked(vsmw, cp);
	vsmw_do_unlock();
}
495
496
/*--------------------------------------------------------------------*/
497
498
/*
 * Public: create a VSM writer for the directory open at <vdirfd>.
 * Any stale index file is unlinked and a fresh one is created with
 * a "# pid birth" header line.  Takes ownership of vdirfd.
 */
struct vsmw *
VSMW_New(int vdirfd, int mode, const char *idxname)
{
	struct vsmw *vsmw;
	int fd;

	assert(vdirfd > 0);
	assert(mode > 0);
	AN(idxname);

	vsmw_do_lock();
	ALLOC_OBJ(vsmw, VSMW_MAGIC);
	AN(vsmw);

	VTAILQ_INIT(&vsmw->segs);
	VTAILQ_INIT(&vsmw->clusters);
	vsmw->vsb = VSB_new_auto();
	AN(vsmw->vsb);
	REPLACE(vsmw->idx, idxname);
	vsmw->mode = mode;
	vsmw->vdirfd = vdirfd;
	vsmw->pid = getpid();
	vsmw->birth = time(NULL);

	/* Remove any leftover index; only ENOENT is acceptable. */
	if (unlinkat(vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	fd = openat(vdirfd,
	    vsmw->idx, O_APPEND | O_WRONLY | O_CREAT, vsmw->mode);
	assert(fd >= 0);
	vsmw_idx_head(vsmw, fd);
	closefd(&fd);

	vsmw_do_unlock();
	return (vsmw);
}
533
534
/*
 * Public: tear down a VSM writer: retire all remaining segments,
 * remove the index file, and release all resources including the
 * directory fd.  *pp is NULLed.
 */
void
VSMW_Destroy(struct vsmw **pp)
{
	struct vsmw *vsmw;
	struct vsmwseg *seg, *s2;

	vsmw_do_lock();
	TAKE_OBJ_NOTNULL(vsmw, pp, VSMW_MAGIC);
	/* _SAFE variant: vsmw_delseg unlinks and frees each segment. */
	VTAILQ_FOREACH_SAFE(seg, &vsmw->segs, list, s2)
		vsmw_delseg(vsmw, seg);
	if (unlinkat(vsmw->vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	REPLACE(vsmw->idx, NULL);
	VSB_destroy(&vsmw->vsb);
	closefd(&vsmw->vdirfd);
	FREE_OBJ(vsmw);
	vsmw_do_unlock();
}
}