| | varnish-cache/bin/varnishd/common/common_vsmw.c |
| 0 |
|
/*- |
| 1 |
|
* Copyright (c) 2010-2011 Varnish Software AS |
| 2 |
|
* All rights reserved. |
| 3 |
|
* |
| 4 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
| 5 |
|
* |
| 6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
| 7 |
|
* |
| 8 |
|
* Redistribution and use in source and binary forms, with or without |
| 9 |
|
* modification, are permitted provided that the following conditions |
| 10 |
|
* are met: |
| 11 |
|
* 1. Redistributions of source code must retain the above copyright |
| 12 |
|
* notice, this list of conditions and the following disclaimer. |
| 13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
| 14 |
|
* notice, this list of conditions and the following disclaimer in the |
| 15 |
|
* documentation and/or other materials provided with the distribution. |
| 16 |
|
* |
| 17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
| 21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 27 |
|
* SUCH DAMAGE. |
| 28 |
|
* |
| 29 |
|
* VSM stuff common to manager and child. |
| 30 |
|
* |
| 31 |
|
*/ |
| 32 |
|
|
| 33 |
|
#include "config.h" |
| 34 |
|
|
| 35 |
|
#include <fcntl.h> |
| 36 |
|
#include <stdarg.h> |
| 37 |
|
#include <stdio.h> |
| 38 |
|
#include <stdint.h> |
| 39 |
|
#include <stdlib.h> |
| 40 |
|
#include <string.h> |
| 41 |
|
#include <time.h> |
| 42 |
|
#include <unistd.h> |
| 43 |
|
#include <sys/mman.h> |
| 44 |
|
#include <sys/resource.h> |
| 45 |
|
#include <sys/stat.h> |
| 46 |
|
|
| 47 |
|
#include "vdef.h" |
| 48 |
|
#include "vas.h" |
| 49 |
|
#include "vsb.h" |
| 50 |
|
#include "miniobj.h" |
| 51 |
|
#include "vqueue.h" |
| 52 |
|
|
| 53 |
|
#include "vfil.h" |
| 54 |
|
#include "vrnd.h" |
| 55 |
|
|
| 56 |
|
#include "heritage.h" |
| 57 |
|
#include "vsmw.h" |
| 58 |
|
|
| 59 |
|
/* Platforms lacking these mmap(2) flags (notably Linux) get no-op values. */
#ifndef MAP_HASSEMAPHORE
#  define MAP_HASSEMAPHORE 0 /* XXX Linux */
#endif

#ifndef MAP_NOSYNC
#  define MAP_NOSYNC 0 /* XXX Linux */
#endif
| 66 |
|
|
| 67 |
|
/*
 * Locking hooks.  The owner of the VSMW can install real lock/unlock
 * functions via the vsmw_lock/vsmw_unlock pointers; until then the
 * dummy no-op implementation is used.  vsmw_haslock tracks the lock
 * state so internal functions can assert the lock is held.
 */

static void v_matchproto_(vsm_lock_f)
vsmw_dummy_lock(void)
{
}

/* Nonzero while the VSMW lock is held (maintained by the macros below). */
static int vsmw_haslock;
vsm_lock_f *vsmw_lock = vsmw_dummy_lock;
vsm_lock_f *vsmw_unlock = vsmw_dummy_lock;

/* Assert that the caller holds the VSMW lock. */
#define vsmw_assert_lock()	AN(vsmw_haslock)

/* Acquire the lock, then record that we hold it. */
#define vsmw_do_lock() vsmw_do_lock_(__func__, __LINE__)

#define vsmw_do_lock_(f, l)			\
	do {					\
		vsmw_lock();			\
		AZ(vsmw_haslock);		\
		vsmw_haslock = 1;		\
	} while(0)

/* Record that we are about to release the lock, then release it. */
#define vsmw_do_unlock() vsmw_do_unlock_(__func__, __LINE__)
#define vsmw_do_unlock_(f, l)			\
	do {					\
		AN(vsmw_haslock);		\
		vsmw_haslock = 0;		\
		vsmw_unlock();			\
	} while(0)
| 94 |
|
|
| 95 |
|
/*--------------------------------------------------------------------*/ |
| 96 |
|
|
| 97 |
|
/*
 * A cluster: one mmap'ed, file-backed chunk of shared memory from
 * which one or more segments are carved.
 */
struct vsmw_cluster {
	unsigned			magic;
#define VSMW_CLUSTER_MAGIC		0x28b74c00

	VTAILQ_ENTRY(vsmw_cluster)	list;
	struct vsmwseg			*cseg;	/* whole-cluster segment, when named */
	char				*fn;	/* backing file name */
	size_t				len;	/* mapping length (page-rounded) */
	void				*ptr;	/* start of the mapping */
	size_t				next;	/* offset of the next allocation */
	int				refs;	/* segments + caller handles */
	int				named;	/* created by VSMW_NewCluster() */
};
| 110 |
|
|
| 111 |
|
/* One named allocation inside a cluster, as published in the index file. */
struct vsmwseg {
	unsigned			magic;
#define VSMWSEG_MAGIC			0x7e4ccaea
	VTAILQ_ENTRY(vsmwseg)		list;
	struct vsmw_cluster		*cluster;	/* owning cluster */

	char				*category;	/* index field, no '\n' */
	size_t				off;		/* offset inside cluster */
	size_t				len;		/* rounded payload length */
	char				*id;		/* index field, no '\n' */
	void				*ptr;		/* payload address */
};
| 123 |
|
|
| 124 |
|
/* A VSM writer: one shared-memory directory plus its index file. */
struct vsmw {
	unsigned			magic;
#define VSMW_MAGIC			0xc2ca2cd9
	int				vdirfd;	/* VSM directory fd (owned) */
	int				mode;	/* mode for files we create */
	char				*idx;	/* index file name */
	VTAILQ_HEAD(, vsmw_cluster)	clusters;
	VTAILQ_HEAD(, vsmwseg)		segs;
	struct vsb			*vsb;	/* scratch buffer */
	pid_t				pid;	/* recorded in index header */
	time_t				birth;	/* recorded in index header */
	uint64_t			nsegs;	/* live segments */
	uint64_t			nsubs;	/* '-' records since compaction */
};

/* Allocations in clusters never start at offset zero */
#define VSM_CLUSTER_OFFSET 16
| 141 |
|
|
| 142 |
|
/*--------------------------------------------------------------------*/ |
| 143 |
|
|
| 144 |
|
/*
 * Write the index file header line: "# <pid> <birth-time>\n".
 * NOTE(review): presumably readers use this to identify the writing
 * manager instance -- the reader side is not in this file, confirm.
 */
static void
vsmw_idx_head(const struct vsmw *vsmw, int fd)
{
	char buf[64];

	bprintf(buf, "# %jd %jd\n", (intmax_t)vsmw->pid, (intmax_t)vsmw->birth);
	// XXX handle ENOSPC? #2764
	/* NB: the write(2) happens inside assert(); this relies on vas.h
	 * asserts being always enabled -- verify if NDEBUG builds exist. */
	assert(write(fd, buf, strlen(buf)) == strlen(buf));
}
| 153 |
|
|
| 154 |
|
/*
 * Sanity-check a segment string field: it must be non-NULL and must
 * not contain a newline, since the index file is newline-delimited.
 *
 * NB: no semicolon after "while (0)" -- the previous trailing
 * semicolon would break use of this macro in if/else statements.
 */
#define ASSERT_SEG_STR(x) do {			\
		AN(x);				\
		AZ(strchr(x, '\n'));		\
	} while (0)
| 158 |
|
|
| 159 |
|
/*
 * Append one index line for a segment to vsmw->vsb:
 *	"<act> <cluster-file> <offset> <length> <category> <id>\n"
 * where act is '+' (segment added) or '-' (segment deleted).
 */
static void
vsmw_fmt_index(const struct vsmw *vsmw, const struct vsmwseg *seg, char act)
{

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	AN(seg->cluster);
	/* category/id become whitespace-separated index fields */
	ASSERT_SEG_STR(seg->category);
	ASSERT_SEG_STR(seg->id);

	VSB_printf(vsmw->vsb, "%c %s %zu %zu %s %s\n",
	    act,
	    seg->cluster->fn,
	    seg->off,
	    seg->len,
	    seg->category,
	    seg->id);
}
| 178 |
|
|
| 179 |
|
/*--------------------------------------------------------------------*/ |
| 180 |
|
|
| 181 |
|
/*
 * Generate a fresh, unused filename "_.<pfx>.<64-bit random hex>" in
 * vsmw->vsb.  Loops until openat(2) fails with ENOENT, which proves
 * no file of that name exists in the VSM directory.  The resulting
 * name is left in vsmw->vsb for the caller to pick up.
 */
static void
vsmw_mkent(const struct vsmw *vsmw, const char *pfx)
{
	int fd;
	uint64_t rn;

	AN(pfx);
	vsmw_assert_lock();
	while (1) {
		VSB_clear(vsmw->vsb);
		VSB_printf(vsmw->vsb, "_.%s", pfx);
		AZ(VRND_RandomCrypto(&rn, sizeof rn));
		VSB_printf(vsmw->vsb, ".%016jx", (uintmax_t)rn);
		AZ(VSB_finish(vsmw->vsb));
		fd = openat(vsmw->vdirfd, VSB_data(vsmw->vsb), O_RDONLY);
		if (fd < 0 && errno == ENOENT)
			return;
		/* name collision (or transient error): close and retry */
		if (fd >= 0)
			closefd(&fd);
	}
}
| 202 |
|
|
| 203 |
|
/*--------------------------------------------------------------------*/ |
| 204 |
|
|
| 205 |
|
/*
 * Append a single index record for seg (act is '+' or '-') to the
 * index file.  The index is opened, appended to and closed on every
 * call, so there is no long-lived index filedescriptor to leak.
 */
static void
vsmw_append_record(struct vsmw *vsmw, struct vsmwseg *seg, char act)
{
	int fd;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
	fd = openat(vsmw->vdirfd, vsmw->idx, O_APPEND | O_WRONLY);
	assert(fd >= 0);
	VSB_clear(vsmw->vsb);
	vsmw_fmt_index(vsmw, seg, act);
	AZ(VSB_finish(vsmw->vsb));
	XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
	closefd(&fd);
}
| 221 |
|
|
| 222 |
|
/*--------------------------------------------------------------------*/ |
| 223 |
|
|
| 224 |
|
static void |
| 225 |
2006297 |
vsmw_addseg(struct vsmw *vsmw, struct vsmwseg *seg) |
| 226 |
|
{ |
| 227 |
|
|
| 228 |
2006297 |
vsmw_assert_lock(); |
| 229 |
2006297 |
VTAILQ_INSERT_TAIL(&vsmw->segs, seg, list); |
| 230 |
2006297 |
vsmw_append_record(vsmw, seg, '+'); |
| 231 |
2006297 |
vsmw->nsegs++; |
| 232 |
2006297 |
} |
| 233 |
|
|
| 234 |
|
/*--------------------------------------------------------------------*/ |
| 235 |
|
|
| 236 |
|
/*
 * Unregister a segment and free it.
 *
 * Normally this just appends a '-' record to the index file.  But
 * once at least 10 '-' records have accumulated AND they amount to
 * at least half the number of live segments, the index is compacted
 * instead: a fresh index holding only '+' records for the surviving
 * segments is written to a temporary file and atomically
 * renameat(2)'ed over the old index.
 */
static void
vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg)
{
	char *t = NULL;
	int fd;
	struct vsmwseg *s2;

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);

	VTAILQ_REMOVE(&vsmw->segs, seg, list);

	vsmw->nsegs--;
	if (vsmw->nsubs < 10 || vsmw->nsubs * 2 < vsmw->nsegs) {
		/* cheap path: just log the deletion */
		vsmw_append_record(vsmw, seg, '-');
		vsmw->nsubs++;
	} else {
		/* compaction: rewrite the whole index */
		vsmw_mkent(vsmw, vsmw->idx);	/* temp name into vsb */
		REPLACE(t, VSB_data(vsmw->vsb));
		fd = openat(vsmw->vdirfd,
		    t, O_WRONLY|O_CREAT|O_EXCL, vsmw->mode);
		assert(fd >= 0);
		vsmw_idx_head(vsmw, fd);
		VSB_clear(vsmw->vsb);
		VTAILQ_FOREACH(s2, &vsmw->segs, list)
			vsmw_fmt_index(vsmw, s2, '+');
		AZ(VSB_finish(vsmw->vsb));
		XXXAZ(VSB_tofile(vsmw->vsb, fd)); // XXX handle ENOSPC? #2764
		closefd(&fd);
		/* atomically replace the old index */
		AZ(renameat(vsmw->vdirfd, t, vsmw->vdirfd, vsmw->idx));
		REPLACE(t, NULL);
		vsmw->nsubs = 0;
	}
	REPLACE(seg->category, NULL);
	REPLACE(seg->id, NULL);
	FREE_OBJ(seg);
}
| 274 |
|
|
| 275 |
|
/*--------------------------------------------------------------------*/ |
| 276 |
|
|
| 277 |
|
#ifdef RLIMIT_MEMLOCK
/* Report one RLIMIT_MEMLOCK limit value on stderr. */
static void
printlim(const char *name, rlim_t lim)
{

	if (lim == RLIM_INFINITY)
		fprintf(stderr, "Info: %s: unlimited\n", name);
	else
		fprintf(stderr, "Info: %s: %ju bytes\n",
		    name, (uintmax_t)lim);
}

/* Print the mlock(2) limits, as a diagnostic when mlock() fails. */
static void
printmemlock(void)
{
	struct rlimit rlim;

	AZ(getrlimit(RLIMIT_MEMLOCK, &rlim));
	printlim("max locked memory (soft)", rlim.rlim_cur);
	printlim("max locked memory (hard)", rlim.rlim_max);
}
#else
static void printmemlock(void) {}
#endif
| 300 |
|
|
| 301 |
|
static struct vsmw_cluster * |
| 302 |
1953937 |
vsmw_newcluster(struct vsmw *vsmw, size_t len, const char *pfx) |
| 303 |
|
{ |
| 304 |
|
struct vsmw_cluster *vc; |
| 305 |
|
static int warn = 0; |
| 306 |
|
int fd; |
| 307 |
|
size_t ps; |
| 308 |
|
|
| 309 |
1953937 |
vsmw_assert_lock(); |
| 310 |
1953937 |
ALLOC_OBJ(vc, VSMW_CLUSTER_MAGIC); |
| 311 |
1953937 |
AN(vc); |
| 312 |
|
|
| 313 |
1953937 |
vsmw_mkent(vsmw, pfx); |
| 314 |
1953937 |
REPLACE(vc->fn, VSB_data(vsmw->vsb)); |
| 315 |
|
|
| 316 |
1953937 |
VTAILQ_INSERT_TAIL(&vsmw->clusters, vc, list); |
| 317 |
|
|
| 318 |
1953937 |
ps = getpagesize(); |
| 319 |
1953937 |
len = RUP2(len, ps); |
| 320 |
1953937 |
vc->len = len; |
| 321 |
|
|
| 322 |
3907874 |
fd = openat(vsmw->vdirfd, vc->fn, |
| 323 |
1953937 |
O_RDWR | O_CREAT | O_EXCL, vsmw->mode); |
| 324 |
1953937 |
assert(fd >= 0); |
| 325 |
|
|
| 326 |
1953937 |
AZ(VFIL_allocate(fd, (off_t)len, 1)); |
| 327 |
|
|
| 328 |
3907874 |
vc->ptr = (void *)mmap(NULL, len, |
| 329 |
|
PROT_READ|PROT_WRITE, |
| 330 |
|
MAP_HASSEMAPHORE | MAP_NOSYNC | MAP_SHARED, |
| 331 |
1953937 |
fd, 0); |
| 332 |
|
|
| 333 |
1953937 |
closefd(&fd); |
| 334 |
1953937 |
assert(vc->ptr != MAP_FAILED); |
| 335 |
1953937 |
if (mlock(vc->ptr, len) && warn++ == 0) { |
| 336 |
78154 |
fprintf(stderr, "Warning: mlock() of VSM failed: %s (%d)\n", |
| 337 |
39077 |
VAS_errtxt(errno), errno); |
| 338 |
39077 |
printmemlock(); |
| 339 |
39077 |
} |
| 340 |
|
|
| 341 |
1953937 |
return (vc); |
| 342 |
|
} |
| 343 |
|
|
| 344 |
|
/*
 * Public: create a named cluster which callers can later allocate
 * segments from (by passing it to VSMW_Allocv()).
 *
 * The whole cluster is registered in the index as a segment with
 * empty category/id covering the full file length.  The returned
 * handle holds one reference, released by VSMW_DestroyCluster().
 */
struct vsmw_cluster *
VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
{
	struct vsmw_cluster *vc;
	struct vsmwseg *seg;

	vsmw_do_lock();
	/* allocations never start at offset 0, reserve the gap */
	vc = vsmw_newcluster(vsmw, len + VSM_CLUSTER_OFFSET, pfx);
	AN(vc);
	vc->next += VSM_CLUSTER_OFFSET;

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	vc->cseg = seg;
	seg->len = vc->len;
	seg->cluster = vc;
	REPLACE(seg->category, "");
	REPLACE(seg->id, "");
	vc->refs++;
	vc->named = 1;
	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (vc);
}
| 369 |
|
|
| 370 |
|
/*
 * Tear down a cluster whose reference count has reached zero: unmap
 * the memory, drop the whole-cluster segment (for named clusters),
 * unlink the backing file and free the structure.
 */
static void
vsmw_DestroyCluster_locked(struct vsmw *vsmw, struct vsmw_cluster *vc)
{

	vsmw_assert_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	CHECK_OBJ_NOTNULL(vc, VSMW_CLUSTER_MAGIC);

	AZ(vc->refs);

	AZ(munmap(vc->ptr, vc->len));
	if (vc->named)
		vsmw_delseg(vsmw, vc->cseg);
	vc->cseg = 0;

	VTAILQ_REMOVE(&vsmw->clusters, vc, list);
	/* the backing file may already be gone; only ENOENT is tolerated */
	if (unlinkat(vsmw->vdirfd, vc->fn, 0))
		assert (errno == ENOENT);
	REPLACE(vc->fn, NULL);
	FREE_OBJ(vc);
}
| 391 |
|
|
| 392 |
|
/*
 * Public: drop the caller's handle reference on a cluster and clear
 * *vsmcp.  The cluster is destroyed once the last reference
 * (including any remaining segments) is gone.
 */
void
VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vsmcp)
{
	struct vsmw_cluster *vc;

	TAKE_OBJ_NOTNULL(vc, vsmcp, VSMW_CLUSTER_MAGIC);

	vsmw_do_lock();
	if (--vc->refs == 0)
		vsmw_DestroyCluster_locked(vsmw, vc);
	vsmw_do_unlock();
}
| 404 |
|
|
| 405 |
|
/*--------------------------------------------------------------------*/ |
| 406 |
|
|
| 407 |
|
/*
 * Public: allocate a named piece of shared memory.
 *
 * The segment id is built as "<prefix>.<fmt expansion>" (the dot is
 * omitted when fmt is empty; prefix may be NULL).  When vc is NULL a
 * private cluster sized for just this segment is created; otherwise
 * the allocation is carved out of the caller's cluster, which must
 * have room left.
 *
 * Returns a pointer to the file-backed payload memory.
 */
void *
VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t payload, const char *prefix,
    const char *fmt, va_list va)
{
	struct vsmwseg *seg;
	ssize_t l;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);

	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
	AN(seg);
	REPLACE(seg->category, category);
	seg->len = PRNDUP(payload);

	/* build the segment id in the scratch buffer */
	VSB_clear(vsmw->vsb);
	if (prefix != NULL) {
		assert(prefix[0] != '\0');
		VSB_cat(vsmw->vsb, prefix);
		if (fmt[0] != '\0')
			VSB_cat(vsmw->vsb, ".");
	}
	l = VSB_len(vsmw->vsb);
	assert(l >= 0);
	VSB_vprintf(vsmw->vsb, fmt, va);
	AZ(VSB_finish(vsmw->vsb));
	/* a non-empty fmt must have produced some output */
	assert(fmt[0] == '\0' || l < VSB_len(vsmw->vsb));

	REPLACE(seg->id, VSB_data(vsmw->vsb));

	if (vc == NULL)
		vc = vsmw_newcluster(vsmw, seg->len, category);
	AN(vc);
	vc->refs++;

	/* carve the segment out of the cluster */
	seg->cluster = vc;
	seg->off = vc->next;
	vc->next += seg->len;
	assert(vc->next <= vc->len);
	seg->ptr = seg->off + (char*)vc->ptr;

	vsmw_addseg(vsmw, seg);

	vsmw_do_unlock();
	return (seg->ptr);
}
| 454 |
|
|
| 455 |
|
/*
 * Public: varargs convenience wrapper around VSMW_Allocv() with no
 * id prefix.
 */
void *
VSMW_Allocf(struct vsmw *vsmw, struct vsmw_cluster *vc,
    const char *category, size_t len, const char *fmt, ...)
{
	va_list ap;
	void *p;

	va_start(ap, fmt);
	p = VSMW_Allocv(vsmw, vc, category, len, NULL, fmt, ap);
	va_end(ap);
	return (p);
}
| 467 |
|
|
| 468 |
|
/*--------------------------------------------------------------------*/ |
| 469 |
|
|
| 470 |
|
/*
 * Public: free a piece of shared memory previously returned by
 * VSMW_Allocv()/VSMW_Allocf() and clear *pp.  The segment is found
 * by linear search of the segment list; passing an unknown pointer
 * is a fatal error.  Dropping the segment's cluster reference may
 * destroy the cluster as well.
 */
void
VSMW_Free(struct vsmw *vsmw, void **pp)
{
	struct vsmwseg *seg;
	struct vsmw_cluster *cp;

	vsmw_do_lock();
	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
	AN(pp);
	VTAILQ_FOREACH(seg, &vsmw->segs, list)
		if (seg->ptr == *pp)
			break;
	AN(seg);	/* unknown pointer: die */
	*pp = NULL;

	cp = seg->cluster;
	CHECK_OBJ_NOTNULL(cp, VSMW_CLUSTER_MAGIC);
	assert(cp->refs > 0);

	vsmw_delseg(vsmw, seg);

	if (!--cp->refs)
		vsmw_DestroyCluster_locked(vsmw, cp);
	vsmw_do_unlock();
}
| 495 |
|
|
| 496 |
|
/*--------------------------------------------------------------------*/ |
| 497 |
|
|
| 498 |
|
/*
 * Public: create a VSMW instance writing shared-memory files into
 * the directory open at vdirfd, creating files with the given mode.
 * Any stale index file (idxname) is removed and a fresh one starting
 * with the pid/birth-time header is created.
 *
 * Takes ownership of vdirfd; it is closed by VSMW_Destroy().
 */
struct vsmw *
VSMW_New(int vdirfd, int mode, const char *idxname)
{
	struct vsmw *vsmw;
	int fd;

	assert(vdirfd > 0);
	assert(mode > 0);
	AN(idxname);

	vsmw_do_lock();
	ALLOC_OBJ(vsmw, VSMW_MAGIC);
	AN(vsmw);

	VTAILQ_INIT(&vsmw->segs);
	VTAILQ_INIT(&vsmw->clusters);
	vsmw->vsb = VSB_new_auto();
	AN(vsmw->vsb);
	REPLACE(vsmw->idx, idxname);
	vsmw->mode = mode;
	vsmw->vdirfd = vdirfd;
	vsmw->pid = getpid();
	vsmw->birth = time(NULL);

	/* remove any stale index; only ENOENT is tolerated */
	if (unlinkat(vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	fd = openat(vdirfd,
	    vsmw->idx, O_APPEND | O_WRONLY | O_CREAT, vsmw->mode);
	assert(fd >= 0);
	vsmw_idx_head(vsmw, fd);
	closefd(&fd);

	vsmw_do_unlock();
	return (vsmw);
}
| 533 |
|
|
| 534 |
|
/*
 * Public: tear down a VSMW instance and clear *pp: delete all
 * remaining segments, remove the index file and release every
 * resource including the directory filedescriptor.
 */
void
VSMW_Destroy(struct vsmw **pp)
{
	struct vsmw *vsmw;
	struct vsmwseg *seg, *s2;

	vsmw_do_lock();
	TAKE_OBJ_NOTNULL(vsmw, pp, VSMW_MAGIC);
	VTAILQ_FOREACH_SAFE(seg, &vsmw->segs, list, s2)
		vsmw_delseg(vsmw, seg);
	if (unlinkat(vsmw->vdirfd, vsmw->idx, 0))
		assert (errno == ENOENT);
	REPLACE(vsmw->idx, NULL);
	VSB_destroy(&vsmw->vsb);
	closefd(&vsmw->vdirfd);
	FREE_OBJ(vsmw);
	vsmw_do_unlock();
}