varnish-cache/lib/libvarnishapi/vsl_cursor.c
0
/*-
1
 * Copyright (c) 2006 Verdens Gang AS
2
 * Copyright (c) 2006-2015 Varnish Software AS
3
 * All rights reserved.
4
 *
5
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
6
 * Author: Martin Blix Grydeland <martin@varnish-software.com>
7
 *
8
 * SPDX-License-Identifier: BSD-2-Clause
9
 *
10
 * Redistribution and use in source and binary forms, with or without
11
 * modification, are permitted provided that the following conditions
12
 * are met:
13
 * 1. Redistributions of source code must retain the above copyright
14
 *    notice, this list of conditions and the following disclaimer.
15
 * 2. Redistributions in binary form must reproduce the above copyright
16
 *    notice, this list of conditions and the following disclaimer in the
17
 *    documentation and/or other materials provided with the distribution.
18
 *
19
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
 * SUCH DAMAGE.
30
 *
31
 */
32
33
#include "config.h"

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "vdef.h"
#include "vas.h"
#include "miniobj.h"
#include "vmb.h"

#include "vqueue.h"
#include "vre.h"
#include "vsl_priv.h"

#include "vapi/vsl.h"
#include "vapi/vsm.h"

#include "vsl_api.h"
59
60
/*
 * Cursor over the live shared-memory (VSM) log chunk.
 */
struct vslc_vsm {
	unsigned			magic;
#define VSLC_VSM_MAGIC			0x4D3903A6

	struct VSL_cursor		cursor;		/* Public cursor part */

	unsigned			options;	/* VSL_COPT_* flags */

	struct vsm			*vsm;		/* VSM handle the chunk came from */
	struct vsm_fantom		vf;		/* Mapping of the VSL chunk */

	const struct VSL_head		*head;		/* Shared log header */
	const uint32_t			*end;		/* One past the last log word */
	struct VSLC_ptr			next;		/* Position of the next record */
};
75
76
static void
77 2750
vslc_vsm_delete(const struct VSL_cursor *cursor)
78
{
79
        struct vslc_vsm *c;
80
81 2750
        AN(cursor);
82 2750
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
83 2750
        AZ(VSM_Unmap(c->vsm, &c->vf));
84 2750
        assert(&c->cursor == cursor);
85 2750
        FREE_OBJ(c);
86 2750
}
87
88
/*
89
 * We tolerate the fact that segment_n wraps around eventually: for the default
90
 * vsl_space of 80MB and 8 segments, each segment is 10MB long, so we wrap
91
 * roughly after 40 pebibytes (32bit) or 160 yobibytes (64bit) worth of vsl
92
 * written.
93
 *
94
 * The vsm_check would fail if a vslc paused while this amount of data was
95
 * written
96
 */
97
98
/*
 * Classify how safe it is to keep using `ptr`: the log writer reuses
 * segments in a ring, so measure how many segments the writer has
 * advanced past the one `ptr` was taken from.
 */
static enum vsl_check v_matchproto_(vslc_check_f)
vslc_vsm_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
{
	const struct vslc_vsm *c;
	unsigned dist;

	CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
	assert(&c->cursor == cursor);

	if (ptr->ptr == NULL)
		return (vsl_check_e_inval);

	/* Unsigned subtraction stays correct across segment_n wrap-around
	 * (see the comment block above this function). */
	dist = c->head->segment_n - ptr->priv;

	if (dist >= VSL_SEGMENTS - 2)
		/* Too close to continue */
		return (vsl_check_e_inval);
	if (dist >= VSL_SEGMENTS - 4)
		/* Warning level */
		return (vsl_check_warn);
	/* Safe */
	return (vsl_check_valid);
}
121
122
/*
 * Advance the cursor to the next log record, looping past wrap markers
 * and (unless VSL_COPT_BATCH) batch records.  Returns vsl_more when
 * cursor.rec points at a new record, vsl_end when the writer has not
 * produced more data yet, vsl_e_eof at the tail with VSL_COPT_TAILSTOP,
 * and vsl_e_abandon/vsl_e_overrun when the VSM chunk is gone or the
 * writer got too close to our position.
 */
static enum vsl_status v_matchproto_(vslc_next_f)
vslc_vsm_next(const struct VSL_cursor *cursor)
{
	struct vslc_vsm *c;
	enum vsl_check i;
	uint32_t t;

	CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
	assert(&c->cursor == cursor);

	while (1) {
		i = vslc_vsm_check(&c->cursor, &c->next);
		if (i < vsl_check_warn) {
			/* Distinguish an abandoned VSM from being lapped
			 * by the writer */
			if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
				return (vsl_e_abandon);
			else
				return (vsl_e_overrun);
		}

		/* Volatile read: the log writer updates this word
		 * concurrently with us */
		t = *(volatile const uint32_t *)c->next.ptr;
		AN(t);

		if (t == VSL_ENDMARKER) {
			if (VSM_StillValid(c->vsm, &c->vf) != VSM_valid)
				return (vsl_e_abandon);
			if (c->options & VSL_COPT_TAILSTOP)
				return (vsl_e_eof);
			/* No new records available */
			return (vsl_end);
		}

		/* New data observed. Ensure load ordering with the log
		 * writer. */
		VRMB();

		if (t == VSL_WRAPMARKER) {
			/* Wrap around not possible at front */
			assert(c->next.ptr != c->head->log);
			c->next.ptr = c->head->log;
			/* Round priv up to the next multiple of
			 * VSL_SEGMENTS, i.e. segment 0 of the next lap */
			while (c->next.priv % VSL_SEGMENTS)
				c->next.priv++;
			continue;
		}

		c->cursor.rec = c->next;
		c->next.ptr = VSL_NEXT(c->next.ptr);

		if (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch) {
			if (!(c->options & VSL_COPT_BATCH))
				/* Skip the batch record */
				continue;
			/* Next call will point to the first record past
			   the batch */
			c->next.ptr +=
			    VSL_WORDS(VSL_BATCHLEN(c->cursor.rec.ptr));
		}

		/* Keep priv in step with the segment next.ptr now lies in */
		while ((c->next.ptr - c->head->log) / c->head->segsize >
		    c->next.priv % VSL_SEGMENTS)
			c->next.priv++;

		assert(c->next.ptr >= c->head->log);
		assert(c->next.ptr < c->end);

		return (vsl_more);
	}
}
189
190
/*
 * (Re)position the cursor: with VSL_COPT_TAIL, start in the writer's
 * current segment and fast-forward to the live end of the log;
 * otherwise start a few segments behind the writer.  Returns vsl_end on
 * success (cursor.rec is cleared; the next vslc_vsm_next() call yields
 * the first record).
 */
static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_vsm_reset(const struct VSL_cursor *cursor)
{
	struct vslc_vsm *c;
	unsigned u, segment_n;
	enum vsl_status r;

	CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_VSM_MAGIC);
	assert(&c->cursor == cursor);
	c->cursor.rec.ptr = NULL;

	segment_n = c->head->segment_n;
	/* Make sure offset table is not stale compared to segment_n */
	VRMB();

	if (c->options & VSL_COPT_TAIL) {
		/* Start in the same segment varnishd currently is in and
		   run forward until we see the end */
		u = c->next.priv = segment_n;
		assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
		c->next.ptr = c->head->log +
		    c->head->offset[c->next.priv % VSL_SEGMENTS];
		do {
			if (c->head->segment_n - u > 1) {
				/* Give up if varnishd is moving faster
				   than us */
				return (vsl_e_overrun);
			}
			r = vslc_vsm_next(&c->cursor);
		} while (r == vsl_more);
		if (r != vsl_end)
			return (r);
	} else {
		/* Starting (VSL_SEGMENTS - 3) behind varnishd. This way
		 * even if varnishd advances segment_n immediately, we'll
		 * still have a full segment worth of log before the
		 * general constraint of at least 2 segments apart will be
		 * broken.
		 */
		c->next.priv = segment_n - (VSL_SEGMENTS - 3);
		while (c->head->offset[c->next.priv % VSL_SEGMENTS] < 0) {
			/* seg 0 must be initialized */
			assert(c->next.priv % VSL_SEGMENTS != 0);
			c->next.priv++;
		}
		assert(c->head->offset[c->next.priv % VSL_SEGMENTS] >= 0);
		c->next.ptr = c->head->log +
		    c->head->offset[c->next.priv % VSL_SEGMENTS];
	}
	assert(c->next.ptr >= c->head->log);
	assert(c->next.ptr < c->end);
	return (vsl_end);
}
243
244
/* Method table for live shared-memory (VSM) cursors */
static const struct vslc_tbl vslc_vsm_tbl = {
	.magic		= VSLC_TBL_MAGIC,
	.delete		= vslc_vsm_delete,
	.next		= vslc_vsm_next,
	.reset		= vslc_vsm_reset,
	.check		= vslc_vsm_check,
};
251
252
struct VSL_cursor *
253 6997
VSL_CursorVSM(struct VSL_data *vsl, struct vsm *vsm, unsigned options)
254
{
255
        struct vslc_vsm *c;
256
        struct vsm_fantom vf;
257
        struct VSL_head *head;
258
        enum vsl_status r;
259
260 6997
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
261
262 6997
        if (!VSM_Get(vsm, &vf, VSL_CLASS, NULL)) {
263 4246
                (void)vsl_diag(vsl,
264
                    "No VSL chunk found (child not started ?)");
265 4246
                return (NULL);
266
        }
267 2751
        if (VSM_Map(vsm, &vf)) {
268 2
                (void)vsl_diag(vsl,
269 1
                    "VSM_Map(): %s", VSM_Error(vsm));
270 1
                return (NULL);
271
        }
272 2750
        AN(vf.b);
273
274 2750
        head = vf.b;
275 2750
        if (memcmp(head->marker, VSL_HEAD_MARKER, sizeof head->marker)) {
276 0
                AZ(VSM_Unmap(vsm, &vf));
277 0
                (void)vsl_diag(vsl, "Not a VSL chunk");
278 0
                return (NULL);
279
        }
280 2750
        ALLOC_OBJ(c, VSLC_VSM_MAGIC);
281 2750
        if (c == NULL) {
282 0
                AZ(VSM_Unmap(vsm, &vf));
283 0
                (void)vsl_diag(vsl, "Out of memory");
284 0
                return (NULL);
285
        }
286 2750
        c->cursor.priv_tbl = &vslc_vsm_tbl;
287 2750
        c->cursor.priv_data = c;
288
289 2750
        c->options = options;
290 2750
        c->vsm = vsm;
291 2750
        c->vf = vf;
292 2750
        c->head = head;
293 2750
        c->end = c->head->log + c->head->segsize * VSL_SEGMENTS;
294 2750
        assert(c->end <= (const uint32_t *)vf.e);
295
296 2750
        r = vslc_vsm_reset(&c->cursor);
297 2750
        if (r != vsl_end) {
298 0
                AZ(VSM_Unmap(vsm, &vf));
299 0
                (void)vsl_diag(vsl, "Cursor initialization failure (%d)", r);
300 0
                FREE_OBJ(c);
301 0
                return (NULL);
302
        }
303
304 2750
        return (&c->cursor);
305 6997
}
306
307
/*
 * Cursor reading VSL records sequentially from a file descriptor
 * (used when the input cannot be mmap'ed, e.g. stdin or a pipe).
 */
struct vslc_file {
	unsigned			magic;
#define VSLC_FILE_MAGIC			0x1D65FFEF

	int				fd;		/* Source descriptor */
	int				close_fd;	/* Close fd on delete? */
	ssize_t				buflen;		/* Buffer size in 32-bit words */
	uint32_t			*buf;		/* Record read buffer */

	struct VSL_cursor		cursor;		/* Public cursor part */

};
319
320
static void
321 2
vslc_file_delete(const struct VSL_cursor *cursor)
322
{
323
        struct vslc_file *c;
324
325 2
        AN(cursor);
326 2
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
327 2
        assert(&c->cursor == cursor);
328 2
        if (c->close_fd)
329 0
                (void)close(c->fd);
330 2
        if (c->buf != NULL)
331 2
                free(c->buf);
332 2
        FREE_OBJ(c);
333 2
}
334
335
/*
 * Read exactly n bytes from fd into buf, looping over short reads.
 * Returns n on success, 0 on EOF and a negative value on read error
 * (any partial data already read is discarded in the latter cases).
 */
static ssize_t
vslc_file_readn(int fd, void *buf, ssize_t n)
{
	ssize_t nread = 0;
	ssize_t got;

	assert(n > 0);

	while (nread < n) {
		got = read(fd, (char *)buf + nread, n - nread);
		if (got <= 0)
			return (got);
		nread += got;
	}
	return (nread);
}
352
353
static enum vsl_status v_matchproto_(vslc_next_f)
354 98
vslc_file_next(const struct VSL_cursor *cursor)
355
{
356
        struct vslc_file *c;
357
        ssize_t i;
358
        ssize_t l;
359
360 98
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_FILE_MAGIC);
361 98
        assert(&c->cursor == cursor);
362
363 98
        do {
364 98
                c->cursor.rec.ptr = NULL;
365 98
                assert(c->buflen >= 2);
366 98
                i = vslc_file_readn(c->fd, c->buf, VSL_BYTES(VSL_OVERHEAD));
367 98
                if (i < 0)
368 0
                        return (vsl_e_io);
369 98
                if (i == 0)
370 2
                        return (vsl_e_eof);
371 96
                assert(i == VSL_BYTES(VSL_OVERHEAD));
372 96
                l = VSL_OVERHEAD + VSL_WORDS(VSL_LEN(c->buf));
373 96
                if (c->buflen < l) {
374 0
                        while (c->buflen < l)
375 0
                                c->buflen = 2 * l;
376 0
                        c->buf = realloc(c->buf, VSL_BYTES(c->buflen));
377 0
                        AN(c->buf);
378 0
                }
379 96
                if (l > 2) {
380 192
                        i = vslc_file_readn(c->fd, c->buf + VSL_OVERHEAD,
381 96
                            VSL_BYTES(l - VSL_OVERHEAD));
382 96
                        if (i < 0)
383 0
                                return (vsl_e_io);
384 96
                        if (i == 0)
385 0
                                return (vsl_e_eof);
386 96
                        assert(i == VSL_BYTES(l - VSL_OVERHEAD));
387 96
                }
388 96
                c->cursor.rec.ptr = c->buf;
389 96
        } while (VSL_TAG(c->cursor.rec.ptr) == SLT__Batch);
390 96
        return (vsl_more);
391 98
}
392
393
/*
 * Resetting a streaming file cursor is not implemented; report EOF so
 * callers stop iterating.
 */
static enum vsl_status v_matchproto_(vslc_reset_f)
vslc_file_reset(const struct VSL_cursor *cursor)
{
	(void)cursor;
	/* XXX: Implement me */
	return (vsl_e_eof);
}
400
401
/* Method table for streaming file cursors.  No .check: VSL_Check()
 * reports vsl_check_e_notsupp for these cursors. */
static const struct vslc_tbl vslc_file_tbl = {
	.magic		= VSLC_TBL_MAGIC,
	.delete		= vslc_file_delete,
	.next		= vslc_file_next,
	.reset		= vslc_file_reset,
	.check		= NULL,
};
408
409
/*
 * Cursor over a memory-mapped VSL file (regular files only).
 */
struct vslc_mmap {
	unsigned			magic;
#define VSLC_MMAP_MAGIC			0x7de15f61
	int				fd;		/* Mapped descriptor */
	int				close_fd;	/* Close fd on delete? */
	char				*b;		/* Start of the mapping */
	char				*e;		/* One past the end of the mapping */
	struct VSL_cursor		cursor;		/* Public cursor part */
	struct VSLC_ptr			next;		/* Position of the next record */
};
419
420
static void
421 3
vslc_mmap_delete(const struct VSL_cursor *cursor)
422
{
423
        struct vslc_mmap *c;
424
425 3
        AN(cursor);
426 3
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
427 3
        assert(&c->cursor == cursor);
428 3
        AZ(munmap(c->b, c->e - c->b));
429 3
        if (c->close_fd)
430 3
                (void)close(c->fd);
431 3
        FREE_OBJ(c);
432 3
}
433
434
static enum vsl_status v_matchproto_(vslc_next_f)
435 36
vslc_mmap_next(const struct VSL_cursor *cursor)
436
{
437
        struct vslc_mmap *c;
438
        const char *t;
439
440 36
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
441 36
        assert(&c->cursor == cursor);
442 36
        c->cursor.rec = c->next;
443 36
        t = TRUST_ME(c->cursor.rec.ptr);
444 36
        if (t == c->e)
445 3
                return (vsl_e_eof);
446 33
        c->next.ptr = VSL_NEXT(c->next.ptr);
447 33
        t = TRUST_ME(c->next.ptr);
448 33
        if (t > c->e)
449 0
                return (vsl_e_io);
450 33
        return (vsl_more);
451 36
}
452
453
static enum vsl_status v_matchproto_(vslc_reset_f)
454 0
vslc_mmap_reset(const struct VSL_cursor *cursor)
455
{
456
        struct vslc_mmap *c;
457
458 0
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
459 0
        assert(&c->cursor == cursor);
460 0
        return (vsl_e_eof);
461
}
462
463
static enum vsl_check v_matchproto_(vslc_check_f)
464 50
vslc_mmap_check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
465
{
466
        struct vslc_mmap *c;
467
        const char *t;
468
469 50
        CAST_OBJ_NOTNULL(c, cursor->priv_data, VSLC_MMAP_MAGIC);
470 50
        assert(&c->cursor == cursor);
471 50
        AN(ptr->ptr);
472 50
        t = TRUST_ME(ptr->ptr);
473 50
        assert(t > c->b);
474 50
        assert(t <= c->e);
475 50
        return (vsl_check_valid);
476
}
477
478
/* Method table for memory-mapped file cursors */
static const struct vslc_tbl vslc_mmap_tbl = {
	.magic		= VSLC_TBL_MAGIC,
	.delete		= vslc_mmap_delete,
	.next		= vslc_mmap_next,
	.reset		= vslc_mmap_reset,
	.check		= vslc_mmap_check,
};
485
486
/*
 * Try to create an mmap-backed cursor for fd.  Three distinct results:
 * a cursor on success; MAP_FAILED when fd is not a regular file or
 * mmap(2) fails, signalling the caller to fall back to the streaming
 * file cursor (fd is left open for that purpose); NULL on hard failure,
 * in which case fd has been closed here when close_fd was set.
 */
static struct VSL_cursor *
vsl_cursor_mmap(struct VSL_data *vsl, int fd, int close_fd)
{
	struct vslc_mmap *c;
	struct stat st[1];
	void *p;

	AZ(fstat(fd, st));
	if ((st->st_mode & S_IFMT) != S_IFREG)
		/* Not mmap:able, caller falls back to reads */
		return (MAP_FAILED);

	/* The caller has already read and verified the file signature */
	assert(st->st_size >= (off_t)(sizeof VSL_FILE_ID));
	p = mmap(NULL, st->st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		vsl_diag(vsl, "Cannot mmap: %s", strerror(errno));
		return (MAP_FAILED);
	}

	ALLOC_OBJ(c, VSLC_MMAP_MAGIC);
	if (c == NULL) {
		(void)munmap(p, st->st_size);
		if (close_fd)
			(void)close(fd);
		vsl_diag(vsl, "Out of memory");
		return (NULL);
	}
	c->cursor.priv_tbl = &vslc_mmap_tbl;
	c->cursor.priv_data = c;

	c->fd = fd;
	c->close_fd = close_fd;
	c->b = p;
	c->e = c->b + st->st_size;
	/* Position past the file signature at the start of the map */
	c->next.ptr = TRUST_ME(c->b + sizeof VSL_FILE_ID);

	return (&c->cursor);
}
523
524
struct VSL_cursor *
525 7
VSL_CursorFile(struct VSL_data *vsl, const char *name, unsigned options)
526
{
527
        struct VSL_cursor *mc;
528
        struct vslc_file *c;
529
        int fd;
530 7
        int close_fd = 0;
531
        char buf[sizeof VSL_FILE_ID];
532
        ssize_t i;
533
534 7
        CHECK_OBJ_NOTNULL(vsl, VSL_MAGIC);
535 7
        AN(name);
536 7
        (void)options;
537
538 7
        if (!strcmp(name, "-"))
539 2
                fd = STDIN_FILENO;
540
        else {
541 5
                fd = open(name, O_RDONLY);
542 5
                if (fd < 0) {
543 2
                        vsl_diag(vsl, "Cannot open %s: %s", name,
544 1
                            strerror(errno));
545 1
                        return (NULL);
546
                }
547 4
                close_fd = 1;
548
        }
549
550 6
        i = vslc_file_readn(fd, buf, sizeof buf);
551 6
        if (i <= 0) {
552 0
                if (close_fd)
553 0
                        (void)close(fd);
554 0
                vsl_diag(vsl, "VSL file read error: %s",
555 0
                    i < 0 ? strerror(errno) : "EOF");
556 0
                return (NULL);
557
        }
558 6
        assert(i == sizeof buf);
559 6
        if (memcmp(buf, VSL_FILE_ID, sizeof buf)) {
560 1
                if (close_fd)
561 1
                        (void)close(fd);
562 1
                vsl_diag(vsl, "Not a VSL file: %s", name);
563 1
                return (NULL);
564
        }
565
566 5
        mc = vsl_cursor_mmap(vsl, fd, close_fd);
567 5
        if (mc == NULL)
568 0
                return (NULL);
569 5
        if (mc != MAP_FAILED)
570 3
                return (mc);
571
572 2
        ALLOC_OBJ(c, VSLC_FILE_MAGIC);
573 2
        if (c == NULL) {
574 0
                if (close_fd)
575 0
                        (void)close(fd);
576 0
                vsl_diag(vsl, "Out of memory");
577 0
                return (NULL);
578
        }
579 2
        c->cursor.priv_tbl = &vslc_file_tbl;
580 2
        c->cursor.priv_data = c;
581
582 2
        c->fd = fd;
583 2
        c->close_fd = close_fd;
584 2
        c->buflen = VSL_WORDS(BUFSIZ);
585 2
        c->buf = malloc(VSL_BYTES(c->buflen));
586 2
        AN(c->buf);
587
588 2
        return (&c->cursor);
589 7
}
590
591
void
592 2755
VSL_DeleteCursor(const struct VSL_cursor *cursor)
593
{
594
        const struct vslc_tbl *tbl;
595
596 2755
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
597 2755
        if (tbl->delete == NULL)
598 0
                return;
599 2755
        (tbl->delete)(cursor);
600 2755
}
601
602
enum vsl_status
603 4458
VSL_ResetCursor(const struct VSL_cursor *cursor)
604
{
605
        const struct vslc_tbl *tbl;
606
607 4458
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
608 4458
        if (tbl->reset == NULL)
609 0
                return (vsl_e_eof);
610 4458
        return ((tbl->reset)(cursor));
611 4458
}
612
613
enum vsl_status
614 407652
VSL_Next(const struct VSL_cursor *cursor)
615
{
616
        const struct vslc_tbl *tbl;
617
618 407652
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
619 407652
        AN(tbl->next);
620 407652
        return ((tbl->next)(cursor));
621
}
622
623
enum vsl_check
624 17516
VSL_Check(const struct VSL_cursor *cursor, const struct VSLC_ptr *ptr)
625
{
626
        const struct vslc_tbl *tbl;
627
628 17516
        CAST_OBJ_NOTNULL(tbl, cursor->priv_tbl, VSLC_TBL_MAGIC);
629 17516
        if (tbl->check == NULL)
630 88
                return (vsl_check_e_notsupp);
631 17428
        return ((tbl->check)(cursor, ptr));
632 17516
}