| | varnish-cache/bin/varnishd/storage/storage_lru.c |
| 0 |
|
/*- |
| 1 |
|
* Copyright (c) 2007-2015 Varnish Software AS |
| 2 |
|
* All rights reserved. |
| 3 |
|
* |
| 4 |
|
 * Author: Dag-Erling Smørgrav <des@des.no> |
| 5 |
|
* |
| 6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
| 7 |
|
* |
| 8 |
|
* Redistribution and use in source and binary forms, with or without |
| 9 |
|
* modification, are permitted provided that the following conditions |
| 10 |
|
* are met: |
| 11 |
|
* 1. Redistributions of source code must retain the above copyright |
| 12 |
|
* notice, this list of conditions and the following disclaimer. |
| 13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
| 14 |
|
* notice, this list of conditions and the following disclaimer in the |
| 15 |
|
* documentation and/or other materials provided with the distribution. |
| 16 |
|
* |
| 17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
| 21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 27 |
|
* SUCH DAMAGE. |
| 28 |
|
* |
| 29 |
|
* Least-Recently-Used logic for freeing space in stevedores. |
| 30 |
|
*/ |
| 31 |
|
|
| 32 |
|
#include "config.h" |
| 33 |
|
|
| 34 |
|
#include <stdlib.h> |
| 35 |
|
|
| 36 |
|
#include "cache/cache_varnishd.h" |
| 37 |
|
#include "cache/cache_objhead.h" |
| 38 |
|
|
| 39 |
|
#include "storage/storage.h" |
| 40 |
|
|
| 41 |
|
/*
 * One LRU list per stevedore: objcores ordered least-recently-used
 * first (head) to most-recently-used last (tail), protected by mtx.
 */
struct lru {
	unsigned		magic;		/* must be LRU_MAGIC */
#define LRU_MAGIC		0x3fec7bb0
	VTAILQ_HEAD(,objcore)	lru_head;	/* LRU order: oldest at head */
	struct lock		mtx;		/* guards lru_head and oc->last_lru */
};
| 47 |
|
|
| 48 |
|
static struct lru * |
| 49 |
73720 |
lru_get(const struct objcore *oc) |
| 50 |
|
{ |
| 51 |
73720 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
| 52 |
73720 |
CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC); |
| 53 |
73720 |
CHECK_OBJ_NOTNULL(oc->stobj->stevedore->lru, LRU_MAGIC); |
| 54 |
73720 |
return (oc->stobj->stevedore->lru); |
| 55 |
|
} |
| 56 |
|
|
| 57 |
|
struct lru * |
| 58 |
74818 |
LRU_Alloc(void) |
| 59 |
|
{ |
| 60 |
|
struct lru *lru; |
| 61 |
|
|
| 62 |
74818 |
ALLOC_OBJ(lru, LRU_MAGIC); |
| 63 |
74818 |
AN(lru); |
| 64 |
74818 |
VTAILQ_INIT(&lru->lru_head); |
| 65 |
74818 |
Lck_New(&lru->mtx, lck_lru); |
| 66 |
74818 |
return (lru); |
| 67 |
|
} |
| 68 |
|
|
| 69 |
|
void |
| 70 |
0 |
LRU_Free(struct lru **pp) |
| 71 |
|
{ |
| 72 |
|
struct lru *lru; |
| 73 |
|
|
| 74 |
0 |
TAKE_OBJ_NOTNULL(lru, pp, LRU_MAGIC); |
| 75 |
0 |
Lck_Lock(&lru->mtx); |
| 76 |
0 |
AN(VTAILQ_EMPTY(&lru->lru_head)); |
| 77 |
0 |
Lck_Unlock(&lru->mtx); |
| 78 |
0 |
Lck_Delete(&lru->mtx); |
| 79 |
0 |
FREE_OBJ(lru); |
| 80 |
0 |
} |
| 81 |
|
|
| 82 |
|
void |
| 83 |
113557 |
LRU_Add(struct objcore *oc, vtim_real now) |
| 84 |
|
{ |
| 85 |
|
struct lru *lru; |
| 86 |
|
|
| 87 |
113557 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
| 88 |
|
|
| 89 |
113557 |
if (oc->flags & OC_F_PRIVATE) |
| 90 |
56477 |
return; |
| 91 |
|
|
| 92 |
57080 |
AZ(oc->boc); |
| 93 |
57080 |
AN(isnan(oc->last_lru)); |
| 94 |
57080 |
AZ(isnan(now)); |
| 95 |
57080 |
lru = lru_get(oc); |
| 96 |
57080 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
| 97 |
57080 |
Lck_Lock(&lru->mtx); |
| 98 |
57080 |
VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list); |
| 99 |
57080 |
oc->last_lru = now; |
| 100 |
57080 |
AZ(isnan(oc->last_lru)); |
| 101 |
57080 |
Lck_Unlock(&lru->mtx); |
| 102 |
113557 |
} |
| 103 |
|
|
| 104 |
|
void |
| 105 |
71704 |
LRU_Remove(struct objcore *oc) |
| 106 |
|
{ |
| 107 |
|
struct lru *lru; |
| 108 |
|
|
| 109 |
71704 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
| 110 |
|
|
| 111 |
71704 |
if (oc->flags & OC_F_PRIVATE) |
| 112 |
56479 |
return; |
| 113 |
|
|
| 114 |
15225 |
AZ(oc->boc); |
| 115 |
15225 |
lru = lru_get(oc); |
| 116 |
15225 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
| 117 |
15225 |
Lck_Lock(&lru->mtx); |
| 118 |
15225 |
AZ(isnan(oc->last_lru)); |
| 119 |
15225 |
VTAILQ_REMOVE(&lru->lru_head, oc, lru_list); |
| 120 |
15225 |
oc->last_lru = NAN; |
| 121 |
15225 |
Lck_Unlock(&lru->mtx); |
| 122 |
71704 |
} |
| 123 |
|
|
| 124 |
|
void v_matchproto_(objtouch_f) |
| 125 |
132131 |
LRU_Touch(struct worker *wrk, struct objcore *oc, vtim_real now) |
| 126 |
|
{ |
| 127 |
|
struct lru *lru; |
| 128 |
|
|
| 129 |
132131 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
| 130 |
132131 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
| 131 |
|
|
| 132 |
132131 |
if (oc->flags & OC_F_PRIVATE || isnan(oc->last_lru)) |
| 133 |
56673 |
return; |
| 134 |
|
|
| 135 |
|
/* |
| 136 |
|
* To avoid the exphdl->mtx becoming a hotspot, we only |
| 137 |
|
* attempt to move objects if they have not been moved |
| 138 |
|
* recently and if the lock is available. This optimization |
| 139 |
|
* obviously leaves the LRU list imperfectly sorted. |
| 140 |
|
*/ |
| 141 |
|
|
| 142 |
75458 |
if (now - oc->last_lru < cache_param->lru_interval) |
| 143 |
74036 |
return; |
| 144 |
|
|
| 145 |
1422 |
lru = lru_get(oc); |
| 146 |
1422 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
| 147 |
|
|
| 148 |
1422 |
if (Lck_Trylock(&lru->mtx)) |
| 149 |
2 |
return; |
| 150 |
|
|
| 151 |
1420 |
if (!isnan(oc->last_lru)) { |
| 152 |
1420 |
VTAILQ_REMOVE(&lru->lru_head, oc, lru_list); |
| 153 |
1420 |
VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list); |
| 154 |
1420 |
VSC_C_main->n_lru_moved++; |
| 155 |
1420 |
oc->last_lru = now; |
| 156 |
1420 |
} |
| 157 |
1420 |
Lck_Unlock(&lru->mtx); |
| 158 |
132131 |
} |
| 159 |
|
|
| 160 |
|
/*-------------------------------------------------------------------- |
| 161 |
|
* Attempt to make space by nuking the oldest object on the LRU list |
| 162 |
|
* which isn't in use. |
| 163 |
|
* Returns: 1: did, 0: didn't; |
| 164 |
|
*/ |
| 165 |
|
|
| 166 |
|
int |
| 167 |
1077 |
LRU_NukeOne(struct worker *wrk, struct lru *lru) |
| 168 |
|
{ |
| 169 |
|
struct objcore *oc, *oc2; |
| 170 |
|
|
| 171 |
1077 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
| 172 |
1077 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
| 173 |
|
|
| 174 |
1077 |
if (wrk->strangelove-- <= 0) { |
| 175 |
520 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU reached nuke_limit"); |
| 176 |
520 |
VSC_C_main->n_lru_limited++; |
| 177 |
520 |
return (0); |
| 178 |
|
} |
| 179 |
|
|
| 180 |
|
/* Find the first currently unused object on the LRU. */ |
| 181 |
557 |
Lck_Lock(&lru->mtx); |
| 182 |
557 |
VTAILQ_FOREACH_SAFE(oc, &lru->lru_head, lru_list, oc2) { |
| 183 |
440 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
| 184 |
440 |
AZ(isnan(oc->last_lru)); |
| 185 |
|
|
| 186 |
880 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d", |
| 187 |
440 |
oc, oc->flags, oc->refcnt); |
| 188 |
|
|
| 189 |
440 |
if (HSH_Snipe(wrk, oc)) { |
| 190 |
440 |
VSC_C_main->n_lru_nuked++; // XXX per lru ? |
| 191 |
440 |
VTAILQ_REMOVE(&lru->lru_head, oc, lru_list); |
| 192 |
440 |
VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list); |
| 193 |
440 |
break; |
| 194 |
|
} |
| 195 |
0 |
} |
| 196 |
557 |
Lck_Unlock(&lru->mtx); |
| 197 |
|
|
| 198 |
557 |
if (oc == NULL) { |
| 199 |
117 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU_Fail"); |
| 200 |
117 |
return (0); |
| 201 |
|
} |
| 202 |
|
|
| 203 |
|
/* XXX: We could grab and return one storage segment to our caller */ |
| 204 |
440 |
ObjSlim(wrk, oc); |
| 205 |
|
|
| 206 |
440 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU xid=%ju", VXID(ObjGetXID(wrk, oc))); |
| 207 |
440 |
(void)HSH_DerefObjCore(wrk, &oc); // Ref from HSH_Snipe |
| 208 |
440 |
return (1); |
| 209 |
1077 |
} |