Update Files

This commit is contained in:
2025-01-22 17:22:38 +01:00
parent 89b9349629
commit 4c5e729485
5132 changed files with 1195369 additions and 0 deletions

View File

@ -0,0 +1,960 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Implements a cache backing store compatible with netscape cookies.txt format
* There is one entry per "line", and fields are tab-delimited
*
* We need to know the format here, because while the unique cookie tag consists
* of "hostname|urlpath|cookiename", that does not appear like that in the file;
* we have to go parse the fields and synthesize the corresponding tag.
*
* We rely on all the fields except the cookie value fitting in a 256 byte
 * buffer, and allow eating multiple buffers to get huge cookie values.
*
* Because the cookie file is a device-wide asset, although lws will change it
* from the lws thread without conflict, there may be other processes that will
* change it by removal and regenerating the file asynchronously. For that
* reason, file handles are opened fresh each time we want to use the file, so
* we always get the latest version.
*
* When updating the file ourselves, we use a lockfile to ensure our process
* has exclusive access.
*
*
* Tag Matching rules
*
* There are three kinds of tag matching rules
*
 * 1) specific - tag strings must be the same
 * 2) wildcard - tags matched using optional wildcards
* 3) wildcard + lookup - wildcard, but path part matches using cookie scope rules
*
*/
#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"
/* disposition returned by per-line iterator callbacks */
typedef enum nsc_iterator_ret {
	NIR_CONTINUE		= 0,	/* keep walking the file */
	NIR_FINISH_OK		= 1,	/* stop iterating: success */
	NIR_FINISH_ERROR	= -1	/* stop iterating: whole operation failed */
} nsc_iterator_ret_t;

/* flags delivered with each callback chunk */
typedef enum cbreason {
	LCN_SOL = (1 << 0),	/* chunk begins a logical line */
	LCN_EOL = (1 << 1)	/* chunk ends a logical line */
} cbreason_t;

/*
 * Chunk callback used by nscookiejar_iterate(): buf/size is a piece of one
 * file line, flags carries LCN_SOL / LCN_EOL.  Return an nsc_iterator_ret_t.
 */
typedef int (*nsc_cb_t)(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
			const char *buf, size_t size);

static void
expiry_cb(lws_sorted_usec_list_t *sul);
/*
 * Take the device-wide lockfile ("<filepath>.LCK") then open the backing
 * cookie file, creating it if needed.
 *
 * Retries the lockfile up to 50 times at 100ms intervals (~5s) before
 * giving up.  On open failure the lockfile is removed again.
 *
 * Returns the open fd, or -1 on failure.  Caller must release with
 * nsc_backing_close_unlock().
 */
static int
nsc_backing_open_lock(lws_cache_nscookiejar_t *cache, int mode, const char *par)
{
	int sanity = 50;	/* max lock acquisition attempts */
	char lock[128];
	int fd_lock, fd;

	lwsl_debug("%s: %s\n", __func__, par);

	lws_snprintf(lock, sizeof(lock), "%s.LCK",
		     cache->cache.info.u.nscookiejar.filepath);

	do {
		/* O_EXCL makes creation atomic: only one process can win */
		fd_lock = open(lock, LWS_O_CREAT | O_EXCL, 0600);
		if (fd_lock >= 0) {
			/* the lockfile's existence is the lock; fd not needed */
			close(fd_lock);
			break;
		}

		if (!sanity--) {
			lwsl_warn("%s: unable to lock %s: errno %d\n", __func__,
				  lock, errno);

			return -1;
		}

#if defined(WIN32)
		Sleep(100);
#else
		usleep(100000);
#endif
	} while (1);

	fd = open(cache->cache.info.u.nscookiejar.filepath,
		  LWS_O_CREAT | mode, 0600);
	if (fd == -1) {
		lwsl_warn("%s: unable to open or create %s\n", __func__,
			  cache->cache.info.u.nscookiejar.filepath);
		/* don't hold the lock if we have nothing to work on */
		unlink(lock);
	}

	return fd;
}
/*
 * Release what nsc_backing_open_lock() took: close the backing file fd
 * (if one was opened) and delete the lockfile so other processes can
 * proceed.
 */
static void
nsc_backing_close_unlock(lws_cache_nscookiejar_t *cache, int fd)
{
	char lockpath[128];

	lwsl_debug("%s\n", __func__);

	/* drop the backing file handle first, if we have one */
	if (fd >= 0)
		close(fd);

	/* removing the lockfile is what actually releases the lock */
	lws_snprintf(lockpath, sizeof(lockpath), "%s.LCK",
		     cache->cache.info.u.nscookiejar.filepath);
	unlink(lockpath);
}
/*
* We're going to call the callback with chunks of the file with flags
* indicating we're giving it the start of a line and / or giving it the end
* of a line.
*
* It's like this because the cookie value may be huge (and to a lesser extent
* the path may also be big).
*
* If it's the start of a line (flags on the cb has LCN_SOL), then the buffer
* contains up to the first 256 chars of the line, it's enough to match with.
*
 * We cannot hold the file open in between times, since other processes may
* regenerate it, so we need to bind to a new inode. We open it with an
* exclusive flock() so other processes can't replace conflicting changes
* while we also write changes, without having to wait and see our changes.
*/
/*
 * Walk the backing file, calling cb with chunks of each line.
 *
 * temp[] is a sliding window: n is the count of valid bytes in temp, m is
 * the scan position within those bytes.  r carries LCN_SOL for the first
 * chunk of each line; ignore suppresses callbacks for the remainder of an
 * over-long comment line.
 *
 * Returns 0 on normal completion, -1 on seek failure, or the first nonzero
 * callback return (NIR_FINISH_OK / NIR_FINISH_ERROR).
 */
static int
nscookiejar_iterate(lws_cache_nscookiejar_t *cache, int fd,
		    nsc_cb_t cb, void *opaque)
{
	int m = 0, n = 0, e, r = LCN_SOL, ignore = 0, ret = 0;
	char temp[256], eof = 0;

	if (lseek(fd, 0, SEEK_SET) == (off_t)-1)
		return -1;

	do { /* for as many buffers in the file */
		int n1;

		lwsl_debug("%s: n %d, m %d\n", __func__, n, m);

read:
		/* top up temp with whatever fits after the n valid bytes */
		n1 = (int)read(fd, temp + n, sizeof(temp) - (size_t)n);

		lwsl_debug("%s: n1 %d\n", __func__, n1);

		if (n1 <= 0) {
			eof = 1;
			if (m == n)
				/* nothing unscanned left: loop will terminate */
				continue;
		} else
			n += n1;

		/* scan forward looking for end-of-line */
		while (m < n) {
			m++;
			if (temp[m - 1] != '\n')
				continue;

			/* ie, we hit EOL */

			if (temp[0] == '#')
				/* lines starting with # are comments */
				e = 0;
			else
				e = cb(cache, opaque, r | LCN_EOL, temp,
				       (size_t)m - 1);
			r = LCN_SOL;
			ignore = 0;

			/*
			 * Move back remainder and prefill the gap that opened
			 * up: we want to pass enough in the start chunk so the
			 * cb can classify it even if it can't get all the
			 * value part in one go
			 */
			memmove(temp, temp + m, (size_t)(n - m));
			n -= m;
			m = 0;

			if (e) {
				ret = e;
				goto bail;
			}

			goto read;
		}

		if (m) {
			/* we ran out of buffer */
			if (ignore || (r == LCN_SOL && n && temp[0] == '#')) {
				/* discard continuation of a comment line */
				e = 0;
				ignore = 1;
			} else {
				/* emit a partial (non-EOL) chunk to the cb */
				e = cb(cache, opaque,
				       r | (n == m && eof ? LCN_EOL : 0),
				       temp, (size_t)m);
				m = 0;
				n = 0;
			}

			if (e) {
				/*
				 * We have to call off the whole thing if any
				 * step, eg, OOMs
				 */
				ret = e;
				goto bail;
			}
			/* subsequent chunks of this line are not SOL */
			r = 0;
		}

	} while (!eof || n != m);

	ret = 0;

bail:

	return ret;
}
/*
* lookup() just handles wildcard resolution, it doesn't deal with moving the
* hits to L1. That has to be done individually by non-wildcard names.
*/
/*
 * Column indices of the tab-delimited netscape cookie line.  Columns 1
 * (include-subdomains) and 3 (secure) are skipped when synthesizing tags;
 * "wc idx" notes which wildcard piece each tag column corresponds to.
 */
enum {
	NSC_COL_HOST	= 0, /* wc idx 0 */
	NSC_COL_PATH	= 2, /* wc idx 1 */
	NSC_COL_EXPIRY	= 4,
	NSC_COL_NAME	= 5, /* wc idx 2 */

	NSC_COL_COUNT	= 6
};
/*
* This performs the specialized wildcard that knows about cookie path match
* rules.
*
* To defeat the lookup path matching, lie to it about idx being NSC_COL_PATH
*/
/*
 * Compare one wildcard segment (wc, wc_len) against one tag segment
 * (col, col_len).  For every column except the path, this is a plain
 * wildcard comparison; for the path column, cookie path-scoping rules
 * apply (see below).
 *
 * Returns 0 on match, nonzero on no-match.
 *
 * NOTE(review): the strcmp/strncmp calls below read past wc_len/col_len up
 * to the NUL of the full containing strings, since segments are slices of
 * larger NUL-terminated tags -- the "exact hit" shortcut therefore compares
 * the whole remainders, not just these segments.  Confirm this is intended.
 */
static int
nsc_match(const char *wc, size_t wc_len, const char *col, size_t col_len,
	  int idx)
{
	size_t n = 0;

	if (idx != NSC_COL_PATH)
		return lws_strcmp_wildcard(wc, wc_len, col, col_len);

	/*
	 * Cookie path match is special, if we lookup on a path like /my/path,
	 * we must match on cookie paths for every dir level including /, so
	 * match on /, /my, and /my/path.  But we must not match on /m or
	 * /my/pa etc.  If we lookup on /, we must not match /my/path
	 *
	 * Let's go through wc checking at / and for every complete subpath if
	 * it is an explicit match
	 */

	if (!strcmp(col, wc))
		return 0; /* exact hit */

	while (n <= wc_len) {
		if (n == wc_len || wc[n] == '/') {
			/* complete subpath without trailing / */
			if (n && col_len <= n && !strncmp(wc, col, n))
				return 0; /* hit */

			if (n != wc_len && col_len <= n + 1 &&
			    !strncmp(wc, col, n + 1)) /* check for trailing / */
				return 0; /* hit */
		}
		n++;
	}

	return 1; /* fail */
}
/* the three tag pieces, in order, mapped to their cookie file columns */
static const uint8_t nsc_cols[] = { NSC_COL_HOST, NSC_COL_PATH, NSC_COL_NAME };

/*
 * Decide whether tag ("host|path|name") matches the wildcard term wc,
 * piece by piece.  When lookup is nonzero, the path piece uses cookie
 * path-scoping rules via nsc_match(); otherwise all pieces use plain
 * wildcard comparison.
 *
 * Returns 0 on match, 1 on no-match.
 */
static int
lws_cache_nscookiejar_tag_match(struct lws_cache_ttl_lru *cache,
				const char *wc, const char *tag, char lookup)
{
	const char *wc_end = wc + strlen(wc), *tag_end = tag + strlen(tag),
		   *start_wc, *start_tag;
	int n = 0;

	lwsl_cache("%s: '%s' vs '%s'\n", __func__, wc, tag);

	/*
	 * Given a well-formed host|path|name tag and a wildcard term,
	 * make the determination if the tag matches the wildcard or not,
	 * using lookup rules that apply at this cache level.
	 */

	while (n < 3) {
		/* isolate the next |-delimited piece of each side */
		start_wc = wc;
		while (wc < wc_end && *wc != LWSCTAG_SEP)
			wc++;

		start_tag = tag;
		while (tag < tag_end && *tag != LWSCTAG_SEP)
			tag++;

		lwsl_cache("%s: '%.*s' vs '%.*s'\n", __func__,
			   lws_ptr_diff(wc, start_wc), start_wc,
			   lws_ptr_diff(tag, start_tag), start_tag);

		/* pass NSC_COL_HOST when !lookup to defeat path scoping */
		if (nsc_match(start_wc, lws_ptr_diff_size_t(wc, start_wc),
			      start_tag, lws_ptr_diff_size_t(tag, start_tag),
			      lookup ? nsc_cols[n] : NSC_COL_HOST)) {
			lwsl_cache("%s: fail\n", __func__);
			return 1;
		}

		/* step over the separators */
		if (wc < wc_end)
			wc++;
		if (tag < tag_end)
			tag++;

		n++;
	}

	lwsl_cache("%s: hit\n", __func__);

	return 0; /* match */
}
/*
* Converts the start of a cookie file line into a tag
*/
/*
 * Parse the leading fields of a cookie file line (buf, size -- NOT
 * NUL-terminated) and synthesize the "host|path|name" tag into tag/max_tag,
 * optionally reporting the expiry in *pexpiry (usec).
 *
 * Returns 0 on success, 1 if the line is too short or the tag would not fit.
 *
 * Fix: the delimiter check previously read buf[bn] without verifying
 * bn < size, an out-of-bounds read when a field runs to the end of the
 * chunk with no trailing tab.
 */
static int
nsc_line_to_tag(const char *buf, size_t size, char *tag, size_t max_tag,
		lws_usec_t *pexpiry)
{
	int n, idx = 0, tl = 0;
	lws_usec_t expiry = 0;
	size_t bn = 0;
	char col[64];

	/* too short to be a valid cookie line */
	if (size < 3)
		return 1;

	while (bn < size && idx <= NSC_COL_NAME) {

		/* copy the next tab-delimited field into col */
		n = 0;
		while (bn < size && n < (int)sizeof(col) - 1 &&
		       buf[bn] != '\t')
			col[n++] = buf[bn++];
		col[n] = '\0';
		/* bound-check before consuming the delimiter: buf is an
		 * unterminated slice, buf[size] is out of bounds */
		if (bn < size && buf[bn] == '\t')
			bn++;

		switch (idx) {
		case NSC_COL_EXPIRY:
			expiry = (lws_usec_t)((unsigned long long)atoll(col) *
						(lws_usec_t)LWS_US_PER_SEC);
			break;

		case NSC_COL_HOST:
		case NSC_COL_PATH:
		case NSC_COL_NAME:
			/*
			 * As we match the pieces of the wildcard,
			 * compose the matches into a specific tag
			 */
			if (tl + n + 2 > (int)max_tag)
				return 1;
			if (tl)
				tag[tl++] = LWSCTAG_SEP;
			memcpy(tag + tl, col, (size_t)n);
			tl += n;
			tag[tl] = '\0';
			break;

		default:
			break;
		}

		idx++;
	}

	if (pexpiry)
		*pexpiry = expiry;

	lwsl_info("%s: %.*s: tag '%s'\n", __func__, (int)size, buf, tag);

	return 0;
}
/* context threaded through the wildcard lookup iteration */
struct nsc_lookup_ctx {
	const char		*wildcard_key;	/* term we are matching against */
	lws_dll2_owner_t	*results_owner;	/* list collecting match objects */
	lws_cache_match_t	*match; /* current match if any */
	size_t			wklen;	/* NOTE(review): set but unused here */
};

/*
 * Iterator cb for lookup(): for each line that matches the wildcard key,
 * append an lws_cache_match_t (with the specific synthesized tag) to the
 * results list.  Non-SOL chunks just extend the current match's
 * payload_size accounting.
 */
static int
nsc_lookup_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	      const char *buf, size_t size)
{
	struct nsc_lookup_ctx *ctx = (struct nsc_lookup_ctx *)opaque;
	lws_usec_t expiry;
	char tag[200];
	int tl;

	if (!(flags & LCN_SOL)) {
		/* continuation chunk of the line we last matched, if any */
		if (ctx->match)
			ctx->match->payload_size += size;

		return NIR_CONTINUE;
	}

	/*
	 * There should be enough in buf to match or reject it... let's
	 * synthesize a tag from the text "line" and then check the tags for
	 * a match
	 */

	ctx->match = NULL; /* new SOL means stop tracking payload len */

	if (nsc_line_to_tag(buf, size, tag, sizeof(tag), &expiry))
		return NIR_CONTINUE;

	if (lws_cache_nscookiejar_tag_match(&cache->cache,
					    ctx->wildcard_key, tag, 1))
		return NIR_CONTINUE;

	tl = (int)strlen(tag);

	/*
	 * ... it looks like a match then... create new match
	 * object with the specific tag, and add it to the owner list
	 */

	ctx->match = lws_fi(&cache->cache.info.cx->fic, "cache_lookup_oom") ? NULL :
			lws_malloc(sizeof(*ctx->match) + (unsigned int)tl + 1u,
				   __func__);
	if (!ctx->match)
		/* caller of lookup will clean results list on fail */
		return NIR_FINISH_ERROR;

	/* NOTE(review): payload_size starts as this whole chunk's size,
	 * including the non-value fields -- confirm consumers expect that */
	ctx->match->payload_size = size;
	ctx->match->tag_size = (size_t)tl;
	ctx->match->expiry = expiry;

	memset(&ctx->match->list, 0, sizeof(ctx->match->list));
	/* the tag string is stored inline, after the match struct */
	memcpy(&ctx->match[1], tag, (size_t)tl + 1u);
	lws_dll2_add_tail(&ctx->match->list, ctx->results_owner);

	return NIR_CONTINUE;
}
/*
 * Wildcard lookup over the backing cookie file: fills results_owner with
 * lws_cache_match_t entries for every line whose tag matches wildcard_key.
 *
 * Returns nonzero on failure; caller cleans results_owner in that case.
 */
static int
lws_cache_nscookiejar_lookup(struct lws_cache_ttl_lru *_c,
			     const char *wildcard_key,
			     lws_dll2_owner_t *results_owner)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	struct nsc_lookup_ctx ctx;
	int fd, r;

	fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);
	if (fd < 0)
		return 1;

	ctx.wildcard_key	= wildcard_key;
	ctx.results_owner	= results_owner;
	ctx.wklen		= strlen(wildcard_key);
	ctx.match		= NULL;

	/*
	 * The cb can fail, eg, with OOM, making the whole lookup
	 * invalid and returning fail.  Caller will clean
	 * results_owner on fail.
	 */
	r = nscookiejar_iterate(cache, fd, nsc_lookup_cb, &ctx);

	nsc_backing_close_unlock(cache, fd);

	return r == NIR_FINISH_ERROR;
}
/*
* It's pretty horrible having to implement add or remove individual items by
* file regeneration, but if we don't want to keep it all in heap, and we want
* this cookie jar format, that is what we are into.
*
* Allow to optionally add a "line", optionally wildcard delete tags, and always
* delete expired entries.
*
* Although we can rely on the lws thread to be doing this, multiple processes
* may be using the cookie jar and can tread on each other. So we use flock()
* (linux only) to get exclusive access while we are processing this.
*
* We leave the existing file alone and generate a new one alongside it, with a
* fixed name.tmp format so it can't leak, if that went OK then we unlink the
* old and rename the new.
*/
/* context used by nsc_regen() while rewriting the backing file */
struct nsc_regen_ctx {
	const char	*wildcard_key_delete;	/* optional: tags to filter out */
	const void	*add_data;	/* NOTE(review): stored but unused by the cb */
	lws_usec_t	curr;		/* "now", for expiry filtering */
	size_t		add_size;	/* NOTE(review): stored but unused by the cb */
	int		fdt;		/* fd of the temp replacement file */
	char		drop;		/* nonzero: currently skipping this line */
};
/* only used by nsc_regen() */

/*
 * Iterator cb that copies lines from the old file to the temp file,
 * dropping unparseable lines, expired lines, and lines matching the
 * optional wildcard delete key.  Tracks the earliest surviving expiry
 * and the new file footprint on the cache object as it goes.
 */
static int
nsc_regen_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	     const char *buf, size_t size)
{
	struct nsc_regen_ctx *ctx = (struct nsc_regen_ctx *)opaque;
	char tag[256];
	lws_usec_t expiry;

	if (flags & LCN_SOL) {
		/* classify the new line: keep or drop */
		ctx->drop = 0;

		if (nsc_line_to_tag(buf, size, tag, sizeof(tag), &expiry))
			/* filter it out if it is unparseable */
			goto drop;

		/* routinely track the earliest expiry */

		if (!cache->earliest_expiry ||
		    (expiry && cache->earliest_expiry > expiry))
			cache->earliest_expiry = expiry;

		if (expiry && expiry < ctx->curr)
			/* routinely strip anything beyond its expiry */
			goto drop;

		if (ctx->wildcard_key_delete)
			lwsl_cache("%s: %s vs %s\n", __func__,
				   tag, ctx->wildcard_key_delete);
		if (ctx->wildcard_key_delete &&
		    !lws_cache_nscookiejar_tag_match(&cache->cache,
						     ctx->wildcard_key_delete,
						     tag, 0)) {
			lwsl_cache("%s: %s matches wc delete %s\n", __func__,
				   tag, ctx->wildcard_key_delete);
			goto drop;
		}
	}

	/* continuation chunks of a dropped line are dropped too */
	if (ctx->drop)
		return 0;

	cache->cache.current_footprint += (uint64_t)size;

	if (write(ctx->fdt, buf, /*msvc*/(unsigned int)size) != (ssize_t)size)
		return NIR_FINISH_ERROR;

	if (flags & LCN_EOL)
		/* the iterator strips the \n, so restore it on output */
		if ((size_t)write(ctx->fdt, "\n", 1) != 1)
			return NIR_FINISH_ERROR;

	return 0;

drop:
	ctx->drop = 1;

	return NIR_CONTINUE;
}
/*
 * Regenerate the backing cookie file under the lockfile: optionally prepend
 * a new line (pay/pay_size), optionally wildcard-delete tags (wc_delete),
 * and always strip expired / unparseable lines.  The new content is written
 * to "<filepath>.tmp" and then renamed over the original.
 *
 * As a side effect, recomputes cache->earliest_expiry and reschedules the
 * expiry callback.  Returns 0 on success, 1 on failure.
 */
static int
nsc_regen(lws_cache_nscookiejar_t *cache, const char *wc_delete,
	  const void *pay, size_t pay_size)
{
	struct nsc_regen_ctx ctx;
	char filepath[128];
	int fd, ret = 1;

	fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);
	if (fd < 0)
		return 1;

	lws_snprintf(filepath, sizeof(filepath), "%s.tmp",
		     cache->cache.info.u.nscookiejar.filepath);
	/* clear out any leftover temp file from an interrupted run */
	unlink(filepath);

	if (lws_fi(&cache->cache.info.cx->fic, "cache_regen_temp_open"))
		goto bail;

	ctx.fdt = open(filepath, LWS_O_CREAT | LWS_O_WRONLY, 0600);
	if (ctx.fdt < 0)
		goto bail;

	/* magic header */

	if (lws_fi(&cache->cache.info.cx->fic, "cache_regen_temp_write") ||
	    /* other consumers insist to see this at start of cookie jar */
	    write(ctx.fdt, "# Netscape HTTP Cookie File\n", 28) != 28)
		goto bail1;

	/* if we are adding something, put it first */

	if (pay &&
	    write(ctx.fdt, pay, /*msvc*/(unsigned int)pay_size) !=
							  (ssize_t)pay_size)
		goto bail1;
	if (pay && write(ctx.fdt, "\n", 1u) != (ssize_t)1)
		goto bail1;

	/* footprint is recomputed from scratch by the copy cb */
	cache->cache.current_footprint = 0;

	ctx.wildcard_key_delete = wc_delete;
	ctx.add_data = pay;
	ctx.add_size = pay_size;
	ctx.curr = lws_now_usecs();
	ctx.drop = 0;

	cache->earliest_expiry = 0;

	if (lws_fi(&cache->cache.info.cx->fic, "cache_regen_iter_fail") ||
	    nscookiejar_iterate(cache, fd, nsc_regen_cb, &ctx))
		goto bail1;

	/* close before the rename so all data is flushed into place */
	close(ctx.fdt);
	ctx.fdt = -1;

	if (unlink(cache->cache.info.u.nscookiejar.filepath) == -1)
		lwsl_info("%s: unlink %s failed\n", __func__,
			  cache->cache.info.u.nscookiejar.filepath);

	if (rename(filepath, cache->cache.info.u.nscookiejar.filepath) == -1)
		lwsl_info("%s: rename %s failed\n", __func__,
			  cache->cache.info.u.nscookiejar.filepath);

	if (cache->earliest_expiry)
		lws_cache_schedule(&cache->cache, expiry_cb,
				   cache->earliest_expiry);

	ret = 0;
	goto bail;

bail1:
	if (ctx.fdt >= 0)
		close(ctx.fdt);

bail:
	/* harmless if the rename already consumed the temp file */
	unlink(filepath);

	nsc_backing_close_unlock(cache, fd);

	return ret;
}
/*
 * Scheduled when the earliest cookie expiry falls due: rewrite the jar
 * with no changes, which drops expired lines and recomputes the next
 * earliest expiry, then reschedule ourselves for that.
 */
static void
expiry_cb(lws_sorted_usec_list_t *sul)
{
	lws_cache_nscookiejar_t *cache =
		lws_container_of(sul, lws_cache_nscookiejar_t, cache.sul);

	/* regen with nothing to add or delete: pure expiry sweep */
	if (nsc_regen(cache, NULL, NULL, 0))
		return;

	if (!cache->earliest_expiry)
		return;

	lws_cache_schedule(&cache->cache, expiry_cb, cache->earliest_expiry);
}
/* specific_key and expiry are ignored, since it must be encoded in payload */

/*
 * Write one cookie line into the jar: synthesize its tag from the payload,
 * then regenerate the file with any same-tag line replaced by this one.
 * Returns 0 on success, 1 on failure.
 */
static int
lws_cache_nscookiejar_write(struct lws_cache_ttl_lru *_c,
			    const char *specific_key, const uint8_t *source,
			    size_t size, lws_usec_t expiry, void **ppvoid)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	char tag[128];

	lwsl_cache("%s: %s: len %d\n", __func__, _c->info.name, (int)size);

	assert(source);

	/* the payload must itself parse into a usable tag */
	if (nsc_line_to_tag((const char *)source, size, tag, sizeof(tag), NULL))
		return 1;

	if (ppvoid)
		*ppvoid = NULL;

	/* delete any existing entry with this exact tag, add the new line */
	if (!nsc_regen(cache, tag, source, size))
		return 0;

	lwsl_err("%s: regen failed\n", __func__);

	return 1;
}
/* context for the specific-key get, which promotes the hit into L1 */
struct nsc_get_ctx {
	struct lws_buflist	*buflist;	/* collects multi-chunk payloads */
	const char		*specific_key;	/* the exact tag wanted */
	const void		**pdata;	/* out: pointer to L1 copy */
	size_t			*psize;		/* out: payload size */
	lws_cache_ttl_lru_t	*l1;		/* deepest (L1) cache level */
	lws_usec_t		expiry;		/* expiry parsed from the line */
};
/*
 * We're looking for a specific key, if found, we want to make an entry for it
 * in L1 and return information about that
 */

static int
nsc_get_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	   const char *buf, size_t size)
{
	struct nsc_get_ctx *ctx = (struct nsc_get_ctx *)opaque;
	char tag[200];
	uint8_t *q;

	/* once collecting, all further chunks belong to our line */
	if (ctx->buflist)
		goto collect;

	if (!(flags & LCN_SOL))
		return NIR_CONTINUE;

	if (nsc_line_to_tag(buf, size, tag, sizeof(tag), &ctx->expiry)) {
		lwsl_err("%s: can't get tag\n", __func__);
		return NIR_CONTINUE;
	}

	lwsl_cache("%s: %s %s\n", __func__, ctx->specific_key, tag);

	if (strcmp(ctx->specific_key, tag)) {
		lwsl_cache("%s: no match\n", __func__);
		return NIR_CONTINUE;
	}

	/* it's a match */

	lwsl_cache("%s: IS match\n", __func__);

	if (!(flags & LCN_EOL))
		goto collect;

	/* it all fit in the buffer, let's create it in L1 now */

	*ctx->psize = size;
	if (ctx->l1->info.ops->write(ctx->l1,
				     ctx->specific_key, (const uint8_t *)buf,
				     size, ctx->expiry, (void **)ctx->pdata))
		return NIR_FINISH_ERROR;

	return NIR_FINISH_OK;

collect:
	/*
	 * it's bigger than one buffer-load, we have to stash what we're getting
	 * on a buflist and create it when we have it all
	 */

	if (lws_buflist_append_segment(&ctx->buflist, (const uint8_t *)buf,
				       size))
		goto cleanup;

	if (!(flags & LCN_EOL))
		return NIR_CONTINUE;

	/* we have all the payload, create the L1 entry without payload yet */

	/*
	 * NOTE(review): *psize is set to the final chunk's size here while
	 * the L1 entry is sized from the buflist total -- confirm callers
	 * take the size from L1 rather than *psize in the multi-chunk case
	 */
	*ctx->psize = size;
	if (ctx->l1->info.ops->write(ctx->l1, ctx->specific_key, NULL,
				     lws_buflist_total_len(&ctx->buflist),
				     ctx->expiry, (void **)&q))
		goto cleanup;

	*ctx->pdata = q;

	/* dump the buflist into the L1 cache entry */

	do {
		uint8_t *p;
		size_t len = lws_buflist_next_segment_len(&ctx->buflist, &p);

		memcpy(q, p, len);
		q += len;

		lws_buflist_use_segment(&ctx->buflist, len);
	} while (ctx->buflist);

	return NIR_FINISH_OK;

cleanup:
	lws_buflist_destroy_all_segments(&ctx->buflist);

	return NIR_FINISH_ERROR;
}
/*
 * Fetch a specific tag from the backing file; on a hit the payload is
 * instantiated in the L1 cache and pdata/psize describe that copy.
 * Returns 0 on hit, nonzero otherwise.
 */
static int
lws_cache_nscookiejar_get(struct lws_cache_ttl_lru *_c,
			  const char *specific_key, const void **pdata,
			  size_t *psize)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	struct nsc_get_ctx ctx;
	lws_cache_ttl_lru_t *l1 = &cache->cache;
	int fd, r;

	fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);
	if (fd < 0)
		return 1;

	/* descend to the deepest cache level: that's L1 */
	while (l1->child)
		l1 = l1->child;

	ctx.l1			= l1;
	ctx.pdata		= pdata;
	ctx.psize		= psize;
	ctx.specific_key	= specific_key;
	ctx.buflist		= NULL;
	ctx.expiry		= 0;

	r = nscookiejar_iterate(cache, fd, nsc_get_cb, &ctx);

	nsc_backing_close_unlock(cache, fd);

	return r != NIR_FINISH_OK;
}
/*
 * Remove all entries whose tag matches wc_key; implemented as a file
 * regeneration with nothing to add.  Returns 0 on success.
 */
static int
lws_cache_nscookiejar_invalidate(struct lws_cache_ttl_lru *_c,
				 const char *wc_key)
{
	return nsc_regen((lws_cache_nscookiejar_t *)_c, wc_key, NULL, 0);
}
/*
 * Instantiate a nscookiejar cache level bound to the file named in
 * info->u.nscookiejar.filepath.  Returns NULL on OOM (or injected fault).
 */
static struct lws_cache_ttl_lru *
lws_cache_nscookiejar_create(const struct lws_cache_creation_info *info)
{
	lws_cache_nscookiejar_t *cache;

	cache = lws_fi(&info->cx->fic, "cache_createfail") ? NULL :
			lws_zalloc(sizeof(*cache), __func__);
	if (!cache)
		return NULL;

	cache->cache.info = *info;

	/*
	 * We need to scan the file, if it exists, and find the earliest
	 * expiry while cleaning out any expired entries
	 */
	expiry_cb(&cache->cache.sul);

	lwsl_notice("%s: create %s\n", __func__, info->name ? info->name : "?");

	return (struct lws_cache_ttl_lru *)cache;
}
/*
 * Discard the entire backing store by deleting the cookie file.
 * Returns the unlink() result (0 on success).
 */
static int
lws_cache_nscookiejar_expunge(struct lws_cache_ttl_lru *_c)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	const char *path;
	int r;

	if (!cache)
		return 0;

	path = cache->cache.info.u.nscookiejar.filepath;

	r = unlink(path);
	if (r)
		lwsl_warn("%s: failed to unlink %s\n", __func__, path);

	return r;
}
/*
 * Free the cache level object, first cancelling any pending expiry
 * callback.  Tolerates *_pc already being NULL; nulls it on return.
 */
static void
lws_cache_nscookiejar_destroy(struct lws_cache_ttl_lru **_pc)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)*_pc;

	if (!cache)
		return;

	lws_sul_cancel(&cache->cache.sul);

	lws_free_set_NULL(*_pc);
}
#if defined(_DEBUG)
/* iterator cb: hexdump each chunk of the backing file to the cache log */
static int
nsc_dump_cb(lws_cache_nscookiejar_t *cache, void *opaque, int flags,
	    const char *buf, size_t size)
{
	lwsl_hexdump_cache(buf, size);

	return 0;
}

/* debug aid: dump the whole backing cookie file, under the lockfile */
static void
lws_cache_nscookiejar_debug_dump(struct lws_cache_ttl_lru *_c)
{
	lws_cache_nscookiejar_t *cache = (lws_cache_nscookiejar_t *)_c;
	int fd = nsc_backing_open_lock(cache, LWS_O_RDONLY, __func__);

	if (fd < 0)
		return;

	lwsl_cache("%s: %s\n", __func__, _c->info.name);

	nscookiejar_iterate(cache, fd, nsc_dump_cb, NULL);
	nsc_backing_close_unlock(cache, fd);
}
#endif
/* ops vtable exposing the netscape cookie jar as an lws cache level */
const struct lws_cache_ops lws_cache_ops_nscookiejar = {
	.create			= lws_cache_nscookiejar_create,
	.destroy		= lws_cache_nscookiejar_destroy,
	.expunge		= lws_cache_nscookiejar_expunge,

	.write			= lws_cache_nscookiejar_write,
	.tag_match		= lws_cache_nscookiejar_tag_match,
	.lookup			= lws_cache_nscookiejar_lookup,
	.invalidate		= lws_cache_nscookiejar_invalidate,
	.get			= lws_cache_nscookiejar_get,
#if defined(_DEBUG)
	.debug_dump		= lws_cache_nscookiejar_debug_dump,
#endif
};

View File

@ -0,0 +1,608 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"
#if defined(write)
#undef write
#endif
static void
update_sul(lws_cache_ttl_lru_t_heap_t *cache);
static int
lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *key);
/*
 * lws_dll2 comparator: orders expiry list entries by ascending expiry time.
 */
static int
sort_expiry(const lws_dll2_t *a, const lws_dll2_t *b)
{
	const lws_cache_ttl_item_heap_t
		*p = lws_container_of(a, lws_cache_ttl_item_heap_t, list_expiry),
		*q = lws_container_of(b, lws_cache_ttl_item_heap_t, list_expiry);

	if (p->expiry == q->expiry)
		return 0;

	return p->expiry > q->expiry ? 1 : -1;
}
/*
 * Unlink and free one heap item: removes it from the expiry and lru lists,
 * adjusts footprint accounting, recomputes the expiry timer, and fires the
 * optional user destroy callback.  The item's tag string is stored inline,
 * immediately after its payload (&item[1] + item->size).
 */
static void
_lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache,
			     lws_cache_ttl_item_heap_t *item)
{
	lwsl_cache("%s: %s (%s)\n", __func__, cache->cache.info.name,
		   (const char *)&item[1] + item->size);

	lws_dll2_remove(&item->list_expiry);
	lws_dll2_remove(&item->list_lru);

	cache->cache.current_footprint -= item->size;

	/* the expiry head may have changed: reschedule / cancel the sul */
	update_sul(cache);

	if (cache->cache.info.cb)
		cache->cache.info.cb((void *)((uint8_t *)&item[1]), item->size);

	lws_free(item);
}
/*
 * Destroy a heap item; when a normal (non-meta) item is destroyed, also
 * invalidate any cached meta-result items whose result payload lists this
 * item's tag, since those results must be recalculated next time.
 *
 * Fix: the scan previously derived iname/pay from "item" (the item being
 * destroyed) instead of each candidate "i" on the lru list, so the
 * META_ITEM_LEADING test could never be true here (that case already went
 * to post above) and meta invalidation was a no-op; cf the correct use of
 * "i" in lws_cache_heap_invalidate().  Additionally the no-match sanity
 * assert is now skipped after a hit, since both asserts cannot hold at once.
 */
static void
lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache,
			    lws_cache_ttl_item_heap_t *item, int parent_too)
{
	struct lws_cache_ttl_lru *backing = &cache->cache;
	const char *tag = ((const char *)&item[1]) + item->size;
	size_t taglen = strlen(tag);

	/*
	 * We're destroying a normal item?
	 */

	if (*tag == META_ITEM_LEADING)
		/* no, nothing to check here then */
		goto post;

	if (backing->info.parent)
		backing = backing->info.parent;

	/*
	 * We need to check any cached meta-results from lookups that
	 * include this normal item, and if any, invalidate the meta-results
	 * since they have to be recalculated before being used again.
	 */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;
		uint8_t *pay = (uint8_t *)&i[1], *end = pay + i->size;

		if (*iname == META_ITEM_LEADING) {
			char hit = 0;

			/*
			 * If the item about to be destroyed makes an
			 * appearance on the meta results list, we must kill
			 * the meta result item to force recalc next time
			 */

			while (pay < end) {
				uint32_t tlen = lws_ser_ru32be(pay + 4);

				if (tlen == taglen &&
				    !strcmp((const char *)pay + 8, tag)) {
#if defined(_DEBUG)
					/*
					 * Sanity check that the item tag is
					 * really a match for that meta results
					 * item
					 */
					assert (!backing->info.ops->tag_match(
						backing, iname + 1, tag, 1));
#endif
					hit = 1;
					_lws_cache_heap_item_destroy(cache, i);
					break;
				}
				pay += 8 + tlen + 1;
			}

#if defined(_DEBUG)
			if (!hit)
				/*
				 * Sanity check that the item tag really isn't
				 * a match for that meta results item
				 */
				assert (backing->info.ops->tag_match(backing,
							iname + 1, tag, 1));
#endif
			(void)hit;
		}

	} lws_end_foreach_dll_safe(d, d1);

post:
	_lws_cache_heap_item_destroy(cache, item);
}
/*
 * Drop the least-recently-used item (the lru list head) to make room.
 * No-op when the cache is empty.
 */
static void
lws_cache_item_evict_lru(lws_cache_ttl_lru_t_heap_t *cache)
{
	struct lws_dll2 *oldest = cache->items_lru.head;

	if (!oldest)
		return;

	lws_cache_heap_item_destroy(cache,
				    lws_container_of(oldest,
						     lws_cache_ttl_item_heap_t,
						     list_lru), 0);
}
/*
 * We need to weed out expired entries in the backing file
 */

static void
expiry_cb(lws_sorted_usec_list_t *sul)
{
	lws_cache_ttl_lru_t_heap_t *cache = lws_container_of(sul,
					lws_cache_ttl_lru_t_heap_t, cache.sul);
	lws_usec_t now = lws_now_usecs();
	struct lws_dll2 *d;

	lwsl_cache("%s: %s\n", __func__, cache->cache.info.name);

	/*
	 * The expiry list is sorted ascending, so keep destroying from the
	 * head until we find an item that has not expired yet
	 */
	while ((d = cache->items_expiry.head) != NULL) {
		lws_cache_ttl_item_heap_t *item = lws_container_of(d,
					lws_cache_ttl_item_heap_t, list_expiry);

		if (item->expiry > now)
			break;

		lws_cache_heap_item_destroy(cache, item, 1);
	}
}
/*
 * Let's figure out what the earliest next expiry is
 */

static int
earliest_expiry(lws_cache_ttl_lru_t_heap_t *cache, lws_usec_t *pearliest)
{
	struct lws_dll2 *head = cache->items_expiry.head;

	if (!head)
		/* nothing is waiting to expire */
		return 1;

	/* sorted ascending: the head holds the soonest expiry */
	*pearliest = lws_container_of(head, lws_cache_ttl_item_heap_t,
				      list_expiry)->expiry;

	return 0;
}
/*
 * Bring the expiry timer in line with the current expiry list: first weed
 * out anything newly expired, then schedule (or cancel) the sul for the
 * soonest remaining expiry.
 */
static void
update_sul(lws_cache_ttl_lru_t_heap_t *cache)
{
	lws_usec_t next;

	/* weed out any newly-expired */
	expiry_cb(&cache->cache.sul);

	if (earliest_expiry(cache, &next)) {
		/* nothing left carrying an expiry: no timer needed */
		lws_sul_cancel(&cache->cache.sul);
		return;
	}

	lwsl_debug("%s: setting exp %llu\n", __func__,
		   (unsigned long long)next);

	if (next)
		lws_cache_schedule(&cache->cache, expiry_cb, next);
}
/*
 * Linear scan of the lru list for an item whose tag exactly equals
 * specific_key; returns it, or NULL if absent.
 */
static lws_cache_ttl_item_heap_t *
lws_cache_heap_specific(lws_cache_ttl_lru_t_heap_t *cache,
			const char *specific_key)
{
	lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *it = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		/* the tag string lives directly after the payload */
		const char *tag = ((const char *)&it[1]) + it->size;

		if (!strcmp(tag, specific_key))
			return it;

	} lws_end_foreach_dll(d);

	return NULL;
}
/*
 * Heap-level tag matching is plain wildcard comparison; cache and
 * lookup_rules are part of the ops signature but unused at this level
 * (path-scoping rules only apply at the backing store).
 * Returns 0 on match, nonzero otherwise.
 */
static int
lws_cache_heap_tag_match(struct lws_cache_ttl_lru *cache, const char *wc,
			 const char *tag, char lookup_rules)
{
	return lws_strcmp_wildcard(wc, strlen(wc), tag, strlen(tag));
}
/*
 * Wildcard lookup over the heap level: append an lws_cache_match_t for
 * every item whose tag matches wildcard_key, skipping tags already found
 * at an earlier cache level.  Returns 0 on success, 1 on OOM (results
 * list is cleared in that case).
 */
static int
lws_cache_heap_lookup(struct lws_cache_ttl_lru *_c, const char *wildcard_key,
		      lws_dll2_owner_t *results_owner)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	size_t sklen = strlen(wildcard_key);

	lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *item = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		/* item tag is stored inline after the payload */
		const char *iname = ((const char *)&item[1]) + item->size;

		if (!lws_strcmp_wildcard(wildcard_key, sklen, iname,
					 strlen(iname))) {
			size_t ilen = strlen(iname);
			lws_cache_match_t *m;
			char hit = 0;

			/*
			 * It mustn't already be on the list from an earlier
			 * cache level
			 */

			lws_start_foreach_dll(struct lws_dll2 *, e,
					      results_owner->head) {
				lws_cache_match_t *i = lws_container_of(e,
							lws_cache_match_t, list);

				if (i->tag_size == ilen &&
				    !strcmp(iname, ((const char *)&i[1]))) {
					hit = 1;
					break;
				}
			} lws_end_foreach_dll(e);

			if (!hit) {
				/*
				 * it's unique, instantiate a record for it
				 */

				m = lws_fi(&_c->info.cx->fic,
					   "cache_lookup_oom") ? NULL :
					lws_malloc(sizeof(*m) + ilen + 1,
						   __func__);
				if (!m) {
					lws_cache_clear_matches(results_owner);
					return 1;
				}

				memset(&m->list, 0, sizeof(m->list));
				m->tag_size = ilen;
				/* tag string stored inline after the match */
				memcpy(&m[1], iname, ilen + 1);

				lws_dll2_add_tail(&m->list, results_owner);
			}
		}

	} lws_end_foreach_dll(d);

	return 0;
}
/*
 * Insert (or replace) an item in the heap level: invalidates any cached
 * meta-results the new tag would affect, evicts lru items to stay within
 * the footprint/count limits, then stores payload + tag in one allocation.
 * Returns 0 on success, 1 on OOM.
 *
 * Fixes: (1) the footprint was previously incremented by item->size while
 * the freshly-malloc'd struct was still uninitialized (and about to be
 * memset to 0), corrupting the accounting; it is now added after
 * item->size is set.  (2) meta-result invalidation used a plain strcmp,
 * contradicting the comment and the parallel code in
 * lws_cache_heap_invalidate(); it now uses the backing store's tag_match
 * with lookup rules, so wildcard meta keys are honoured.
 */
static int
lws_cache_heap_write(struct lws_cache_ttl_lru *_c, const char *specific_key,
		     const uint8_t *source, size_t size, lws_usec_t expiry,
		     void **ppvoid)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	struct lws_cache_ttl_lru *backing = _c;
	lws_cache_ttl_item_heap_t *item, *ei;
	size_t kl = strlen(specific_key);
	char *p;

	lwsl_cache("%s: %s: len %d\n", __func__, _c->info.name, (int)size);

	/*
	 * Is this new tag going to invalidate any existing cached meta-results?
	 *
	 * If so, let's destroy any of those first to recover the heap
	 */

	if (backing->info.parent)
		backing = backing->info.parent;

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;

		if (*iname == META_ITEM_LEADING) {
			/*
			 * If the item about to be added would match any cached
			 * results from before it was added, we have to
			 * invalidate them.  To check this, we have to use the
			 * matching rules at the backing store level
			 */
			if (!backing->info.ops->tag_match(backing, iname + 1,
							  specific_key, 1))
				_lws_cache_heap_item_destroy(cache, i);
		}

	} lws_end_foreach_dll_safe(d, d1);

	/*
	 * Keep us under the limit if possible... note this will always allow
	 * caching a single large item even if it is above the limits
	 */

	while ((cache->cache.info.max_footprint &&
		cache->cache.current_footprint + size >
					cache->cache.info.max_footprint) ||
	       (cache->cache.info.max_items &&
		cache->items_lru.count + 1 > cache->cache.info.max_items))
		lws_cache_item_evict_lru(cache);

	/* remove any existing entry of the same key */

	lws_cache_heap_invalidate(&cache->cache, specific_key);

	item = lws_fi(&_c->info.cx->fic, "cache_write_oom") ? NULL :
			lws_malloc(sizeof(*item) + kl + 1u + size, __func__);
	if (!item)
		return 1;

	/* only need to zero down our item object */
	memset(item, 0, sizeof(*item));

	p = (char *)&item[1];
	if (ppvoid)
		*ppvoid = p;

	/* copy the payload into place */
	if (source)
		memcpy(p, source, size);

	/* copy the key string into place, with terminating NUL */
	memcpy(p + size, specific_key, kl + 1);

	item->expiry = expiry;
	item->key_len = kl;
	item->size = size;

	/* account for the payload only now that item->size is valid */
	cache->cache.current_footprint += item->size;

	if (expiry) {
		/* adding to expiry is optional, on nonzero expiry */
		lws_dll2_add_sorted(&item->list_expiry, &cache->items_expiry,
				    sort_expiry);

		ei = lws_container_of(cache->items_expiry.head,
				      lws_cache_ttl_item_heap_t, list_expiry);
		lwsl_debug("%s: setting exp %llu\n", __func__,
			   (unsigned long long)ei->expiry);
		lws_cache_schedule(&cache->cache, expiry_cb, ei->expiry);
	}

	/* always add ourselves to head of lru list */
	lws_dll2_add_head(&item->list_lru, &cache->items_lru);

	return 0;
}
/*
 * Fetch the item stored under the exact (non-wildcard) key from this heap
 * cache level.
 *
 * Returns 0 on hit (optionally filling *pdata / *psize with a pointer into
 * the cache-owned payload and its length), nonzero on miss.  A hit promotes
 * the item to most-recently-used.
 */
static int
lws_cache_heap_get(struct lws_cache_ttl_lru *_c, const char *specific_key,
		   const void **pdata, size_t *psize)
{
	lws_cache_ttl_lru_t_heap_t *hc = (lws_cache_ttl_lru_t_heap_t *)_c;
	lws_cache_ttl_item_heap_t *hit =
			lws_cache_heap_specific(hc, specific_key);

	if (!hit)
		/* no such key at this level */
		return 1;

	/* we are using it: move it to the head of the lru list */
	lws_dll2_remove(&hit->list_lru);
	lws_dll2_add_head(&hit->list_lru, &hc->items_lru);

	if (pdata) {
		/* the payload lives directly after the tracking struct */
		*pdata = (const void *)&hit[1];
		*psize = hit->size;
	}

	return 0;
}
/*
 * Remove the item stored under specific_key from this heap cache level, and
 * destroy any cached meta (result-set) items whose wildcard query would have
 * matched that key, since they are now stale.
 *
 * Returns 0 whether or not the key existed.
 */
static int
lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *specific_key)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	struct lws_cache_ttl_lru *backing = _c;
	lws_cache_ttl_item_heap_t *item;
	const void *user;
	size_t size;

	/* not present at this level?  Then nothing to do */
	if (lws_cache_heap_get(_c, specific_key, &user, &size))
		return 0;

	/* wildcard matching rules belong to the backing store's ops */
	if (backing->info.parent)
		backing = backing->info.parent;

	/*
	 * get() gave us a pointer to the payload, which sits directly after
	 * the tracking struct: step back to recover the item header
	 */
	item = (lws_cache_ttl_item_heap_t *)(((uint8_t *)user) - sizeof(*item));

	/*
	 * We must invalidate any cached results that would have included this
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		/* the NUL-terminated tag name follows the payload */
		const char *iname = ((const char *)&i[1]) + i->size;

		if (*iname == META_ITEM_LEADING) {
			/*
			 * This is a cached result set: if its wildcard key
			 * (after the leading meta char) matches the key being
			 * invalidated, the cached results are stale and must
			 * be destroyed.  tag_match() returns 0 on match.
			 */
			if (!backing->info.ops->tag_match(backing, iname + 1,
							  specific_key, 1))
				_lws_cache_heap_item_destroy(cache, i);
		}

	} lws_end_foreach_dll_safe(d, d1);

	/* finally, destroy the item itself (0: do not cascade to parent) */
	lws_cache_heap_item_destroy(cache, item, 0);

	return 0;
}
/*
 * Instantiate a heap-backed cache level from the given creation info.
 *
 * Returns the new cache object, or NULL on OOM / fault injection.  If a
 * parent (backing) level was given, it is told about us as its child.
 */
static struct lws_cache_ttl_lru *
lws_cache_heap_create(const struct lws_cache_creation_info *info)
{
	lws_cache_ttl_lru_t_heap_t *hc;

	assert(info->cx);
	assert(info->name);

	/* fault-injection point so the OOM path is testable */
	if (lws_fi(&info->cx->fic, "cache_createfail"))
		return NULL;

	hc = lws_zalloc(sizeof(*hc), __func__);
	if (!hc)
		return NULL;

	/* take a private copy of the creation config */
	hc->cache.info = *info;

	if (info->parent)
		/* link us in as the parent's child level */
		info->parent->child = &hc->cache;

	return (struct lws_cache_ttl_lru *)hc;
}
static int
destroy_dll(struct lws_dll2 *d, void *user)
{
lws_cache_ttl_lru_t *_c = (struct lws_cache_ttl_lru *)user;
lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
lws_cache_ttl_item_heap_t *item =
lws_container_of(d, lws_cache_ttl_item_heap_t, list_lru);
lws_cache_heap_item_destroy(cache, item, 0);
return 0;
}
/*
 * Destroy every item held at this heap cache level.  Always succeeds.
 */
static int
lws_cache_heap_expunge(struct lws_cache_ttl_lru *_c)
{
	lws_cache_ttl_lru_t_heap_t *hc = (lws_cache_ttl_lru_t_heap_t *)_c;

	/* every live item appears on the lru list, so walking it is enough */
	lws_dll2_foreach_safe(&hc->items_lru, hc, destroy_dll);

	return 0;
}
static void
lws_cache_heap_destroy(struct lws_cache_ttl_lru **_cache)
{
lws_cache_ttl_lru_t *c = *_cache;
lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)c;
if (!cache)
return;
lws_sul_cancel(&c->sul);
lws_dll2_foreach_safe(&cache->items_lru, cache, destroy_dll);
lws_free_set_NULL(*_cache);
}
#if defined(_DEBUG)
/*
 * lws_dll2_foreach_safe() callback: log one cache item's tag, size, expiry
 * and a hexdump of its payload.
 */
static int
dump_dll(struct lws_dll2 *d, void *user)
{
	lws_cache_ttl_item_heap_t *item =
		lws_container_of(d, lws_cache_ttl_item_heap_t, list_lru);
	/* the tag name is stored after the payload */
	const char *tag = (const char *)&item[1] + item->size;

	lwsl_cache(" %s: size %d, exp %llu\n", tag,
		   (int)item->size, (unsigned long long)item->expiry);
	lwsl_hexdump_cache((const char *)&item[1], item->size);

	return 0;
}
/*
 * Log a summary of this heap cache level (item count, earliest expiry) and
 * then dump every item.  _DEBUG builds only.
 */
static void
lws_cache_heap_debug_dump(struct lws_cache_ttl_lru *_c)
{
	lws_cache_ttl_lru_t_heap_t *hc = (lws_cache_ttl_lru_t_heap_t *)_c;
#if !defined(LWS_WITH_NO_LOGS)
	lws_cache_ttl_item_heap_t *earliest = NULL;

	/* the expiry list is sorted, so its head is the soonest to expire */
	if (hc->items_expiry.head)
		earliest = lws_container_of(hc->items_expiry.head,
					    lws_cache_ttl_item_heap_t,
					    list_expiry);

	lwsl_cache("%s: %s: items %d, earliest %llu\n", __func__,
		   hc->cache.info.name, (int)hc->items_lru.count,
		   earliest ? (unsigned long long)earliest->expiry : 0ull);
#endif

	lws_dll2_foreach_safe(&hc->items_lru, hc, dump_dll);
}
#endif
/*
 * Ops vtable for the heap-backed cache implementation, suitable for use as
 * an L1 level (or standalone).  Generic lws_cache_* entry points dispatch
 * through these.
 */
const struct lws_cache_ops lws_cache_ops_heap = {
	.create			= lws_cache_heap_create,
	.destroy		= lws_cache_heap_destroy,
	.expunge		= lws_cache_heap_expunge,

	.write			= lws_cache_heap_write,
	.tag_match		= lws_cache_heap_tag_match,
	.lookup			= lws_cache_heap_lookup,
	.invalidate		= lws_cache_heap_invalidate,
	.get			= lws_cache_heap_get,
#if defined(_DEBUG)
	.debug_dump		= lws_cache_heap_debug_dump,
#endif
};

View File

@ -0,0 +1,300 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
#include "private-lib-misc-cache-ttl.h"
#include <assert.h>
#if defined(write)
#undef write
#endif
/*
 * Detach and free every lws_cache_match_t on the given results list,
 * leaving the owner empty.
 */
void
lws_cache_clear_matches(lws_dll2_owner_t *results_owner)
{
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   results_owner->head) {
		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
							list);

		lws_dll2_remove(&m->list);
		lws_free(m);

	} lws_end_foreach_dll_safe(d, d1);
}
/*
 * (Re)arm the cache's scheduled callback to fire cb at absolute time e
 * (usecs, same epoch as lws_now_usecs()).
 */
void
lws_cache_schedule(struct lws_cache_ttl_lru *cache, sul_cb_t cb, lws_usec_t e)
{
	lws_usec_t relative;

	lwsl_cache("%s: %s schedule %llu\n", __func__, cache->info.name,
		   (unsigned long long)e);

	/* lws_sul_schedule() wants a delta from now, not an absolute time */
	relative = e - lws_now_usecs();
	lws_sul_schedule(cache->info.cx, cache->info.tsi, &cache->sul, cb,
			 relative);
}
/*
 * Write an item into every level of the cache chain, outermost (backing
 * store) first and L1 last, after first invalidating any existing copies of
 * the key throughout the chain.
 *
 * Returns the result of the innermost (L1) write; ppay, if non-NULL, ends up
 * pointing into the L1 copy of the payload.
 */
int
lws_cache_write_through(struct lws_cache_ttl_lru *cache,
			const char *specific_key, const uint8_t *source,
			size_t size, lws_usec_t expiry, void **ppay)
{
	struct lws_cache_ttl_lru *levels[LWS_CACHE_MAX_LEVELS];
	struct lws_cache_ttl_lru *c = cache;
	int depth = 0, r = 0;

	/* drop any stale copies of this key at every level first */
	lws_cache_item_remove(cache, specific_key);

	/* collect the chain, L1 first */
	while (c && depth < (int)LWS_ARRAY_SIZE(levels)) {
		levels[depth++] = c;
		c = c->info.parent;
	}

	/* pop: write outermost level first, finishing with L1 */
	while (depth--)
		r = levels[depth]->info.ops->write(levels[depth], specific_key,
						   source, size, expiry, ppay);

	return r;
}
/*
 * We want to make a list of unique keys that exist at any cache level
 * matching a wildcard search key.
 *
 * If L1 has a cached version though, we will just use that.
 *
 * On success, *pdata / *psize describe a serialized result list owned by L1:
 * per result, a be32 payload length, a be32 tag length, then the
 * NUL-terminated tag (the payloads themselves are not copied in).  Use
 * lws_cache_results_walk() to iterate it.
 */
int
lws_cache_lookup(struct lws_cache_ttl_lru *cache, const char *wildcard_key,
		 const void **pdata, size_t *psize)
{
	struct lws_cache_ttl_lru *l1 = cache;
	lws_dll2_owner_t results_owner;
	lws_usec_t expiry = 0;
	char meta_key[128];
	uint8_t *p, *temp;
	size_t sum = 0;
	int n;

	memset(&results_owner, 0, sizeof(results_owner));

	/*
	 * Cached result sets live under a synthetic key: the wildcard key
	 * prefixed by META_ITEM_LEADING.  NOTE(review): wildcard keys longer
	 * than sizeof(meta_key) - 2 are silently truncated here — confirm
	 * callers cannot produce colliding truncated keys.
	 */
	meta_key[0] = META_ITEM_LEADING;
	lws_strncpy(&meta_key[1], wildcard_key, sizeof(meta_key) - 2);

	/*
	 * If we have a cached result set in L1 already, return that
	 */
	if (!l1->info.ops->get(l1, meta_key, pdata, psize))
		return 0;

	/*
	 * No, we have to do the actual lookup work in the backing store layer
	 * to get results for this... walk to the outermost level first
	 */
	while (cache->info.parent)
		cache = cache->info.parent;

	if (cache->info.ops->lookup(cache, wildcard_key, &results_owner)) {
		/* eg, OOM */

		lwsl_cache("%s: bs lookup fail\n", __func__);

		lws_cache_clear_matches(&results_owner);
		return 1;
	}

	/*
	 * Scan the results, we want to know how big a payload it needs in
	 * the cache, and we want to know the earliest expiry of any of the
	 * component parts, so the meta cache entry for these results can be
	 * expired when any of the results would expire.
	 */
	lws_start_foreach_dll(struct lws_dll2 *, d, results_owner.head) {
		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
							list);

		sum += 8; /* payload size, name length */
		sum += m->tag_size + 1;

		/*
		 * NOTE(review): this keeps the LATEST expiry of the matches,
		 * but the comment above (and the stated intent) is the
		 * EARLIEST — confirm whether the comparison should be
		 * m->expiry < expiry.
		 */
		if (m->expiry && (!expiry || expiry < m->expiry))
			expiry = m->expiry;

	} lws_end_foreach_dll(d);

	lwsl_cache("%s: results %d, size %d\n", __func__,
		   (int)results_owner.count, (int)sum);

	/*
	 * NOTE(review): with zero matches sum is 0; if lws_malloc(0) returns
	 * NULL this is treated as OOM, so null result sets would not get
	 * cached — confirm against the "including null result list" intent.
	 */
	temp = lws_malloc(sum, __func__);
	if (!temp) {
		lws_cache_clear_matches(&results_owner);
		return 1;
	}

	/*
	 * Fill temp with the serialized results
	 */

	p = temp;
	lws_start_foreach_dll(struct lws_dll2 *, d, results_owner.head) {
		lws_cache_match_t *m = lws_container_of(d, lws_cache_match_t,
							list);

		/* we don't copy the payload in, but take note of its size */
		lws_ser_wu32be(p, (uint32_t)m->payload_size);
		p += 4;
		/* length of the tag name (there is an uncounted NUL after) */
		lws_ser_wu32be(p, (uint32_t)m->tag_size);
		p += 4;
		/* then the tag name, plus the extra NUL */
		memcpy(p, &m[1], m->tag_size + 1);
		p += m->tag_size + 1;

	} lws_end_foreach_dll(d);

	/* the match list is no longer needed */
	lws_cache_clear_matches(&results_owner);

	/*
	 * Create the right amount of space for an L1 record of these results,
	 * with its expiry set to the earliest of the results, and copy it in
	 * from temp
	 */

	n = l1->info.ops->write(l1, meta_key, temp, sum, expiry, (void **)&p);

	/* done with temp */
	lws_free(temp);

	if (n)
		return 1;

	/* point to the results in L1 */

	*pdata = p;
	*psize = sum;

	return 0;
}
/*
 * Look up an exact key, probing each cache level in turn starting from L1
 * and moving toward the backing store.
 *
 * Returns 0 on hit (filling *pdata / *psize from the level that had it),
 * nonzero if no level has the key.
 */
int
lws_cache_item_get(struct lws_cache_ttl_lru *cache, const char *specific_key,
		   const void **pdata, size_t *psize)
{
	struct lws_cache_ttl_lru *level;

	for (level = cache; level; level = level->info.parent)
		if (!level->info.ops->get(level, specific_key, pdata, psize)) {
			lwsl_cache("%s: hit\n", __func__);

			return 0;
		}

	return 1;
}
/*
 * Empty every level of the cache chain.  Returns the OR of each level's
 * expunge result (0 if all succeeded).
 */
int
lws_cache_expunge(struct lws_cache_ttl_lru *cache)
{
	struct lws_cache_ttl_lru *level;
	int ret = 0;

	for (level = cache; level; level = level->info.parent)
		ret |= level->info.ops->expunge(level);

	return ret;
}
/*
 * Invalidate a (possibly wildcard) key at every cache level, starting from
 * L1.  Returns nonzero on the first level that fails, else 0.
 */
int
lws_cache_item_remove(struct lws_cache_ttl_lru *cache, const char *wildcard_key)
{
	struct lws_cache_ttl_lru *level;

	for (level = cache; level; level = level->info.parent)
		if (level->info.ops->invalidate(level, wildcard_key))
			return 1;

	return 0;
}
/*
 * Report this cache level's current footprint in bytes, as tracked by the
 * implementation (the heap backend counts payload bytes only, not its
 * per-item tracking overhead).
 */
uint64_t
lws_cache_footprint(struct lws_cache_ttl_lru *cache)
{
	return cache->current_footprint;
}
/*
 * Dump the cache contents to the log.  Only does anything in _DEBUG builds,
 * and only if this cache implementation provides a debug_dump op.
 */
void
lws_cache_debug_dump(struct lws_cache_ttl_lru *cache)
{
#if defined(_DEBUG)
	if (cache->info.ops->debug_dump)
		cache->info.ops->debug_dump(cache);
#endif
}
/*
 * Step the walk context to the next serialized lookup result (as produced by
 * lws_cache_lookup()): fills payload_len, tag_len and tag for the current
 * record and advances ptr / size past it.
 *
 * Returns 0 if a record was produced, nonzero when the results are exhausted
 * or the remaining bytes cannot hold a whole record (truncated / corrupt
 * data).  The bounds checks prevent the unsigned size from underflowing and
 * ptr from running past the end of the buffer on malformed input; on
 * well-formed serializations the behavior is unchanged.
 */
int
lws_cache_results_walk(lws_cache_results_t *walk_ctx)
{
	size_t rec;

	/* each record starts with two be32 length fields */
	if (walk_ctx->size < 8)
		return 1;

	walk_ctx->payload_len = lws_ser_ru32be(walk_ctx->ptr);
	walk_ctx->tag_len = lws_ser_ru32be(walk_ctx->ptr + 4);
	walk_ctx->tag = walk_ctx->ptr + 8;

	/* whole record: lengths + tag + uncounted NUL */
	rec = walk_ctx->tag_len + 1 + 8;
	if (rec > walk_ctx->size)
		/* claimed tag length overruns the buffer: corrupt */
		return 1;

	walk_ctx->ptr += rec;
	walk_ctx->size -= rec;

	return 0;
}
struct lws_cache_ttl_lru *
lws_cache_create(const struct lws_cache_creation_info *info)
{
assert(info);
assert(info->ops);
assert(info->name);
assert(info->ops->create);
return info->ops->create(info);
}
/*
 * Destroy a cache level created by lws_cache_create(), cancelling any
 * pending scheduled callback first and NULLing the caller's pointer.
 * Safe to call with *_cache == NULL.
 */
void
lws_cache_destroy(struct lws_cache_ttl_lru **_cache)
{
	lws_cache_ttl_lru_t *c = *_cache;

	if (!c)
		return;

	assert(c->info.ops->destroy);

	/* make sure the expiry callback cannot fire during teardown */
	lws_sul_cancel(&c->sul);

	c->info.ops->destroy(_cache);
}

View File

@ -0,0 +1,98 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* cache tracing only appears at debug log verbosity */
#define lwsl_cache lwsl_debug
#define lwsl_hexdump_cache lwsl_hexdump_debug

/* maximum depth of a chained cache (eg, L1 heap + backing store) */
#define LWS_CACHE_MAX_LEVELS 3

/*
 * If we need structure inside the cache tag names, use this character as a
 * separator
 */
#define LWSCTAG_SEP '|'

/*
 * Our synthetic cache result items all have tags starting with this char
 */
#define META_ITEM_LEADING '!'

/*
 * Tracking header for one item held in the heap-backed cache level.
 */
typedef struct lws_cache_ttl_item_heap {
	lws_dll2_t		list_expiry;	/* on items_expiry, kept sorted by expiry */
	lws_dll2_t		list_lru;	/* on items_lru, head = most recently used */

	lws_usec_t		expiry;		/* absolute usec expiry, 0 = never */
	size_t			key_len;	/* tag name length, excluding NUL */
	size_t			size;		/* payload size in bytes */

	/*
	 * len + key_len + 1 bytes of data overcommitted, user object first
	 * so it is well-aligned, then the NUL-terminated key name
	 */
} lws_cache_ttl_item_heap_t;

/* this is a "base class", all cache implementations have one at the start */
typedef struct lws_cache_ttl_lru {
	struct lws_cache_creation_info	info;	/* copy of creation-time config */
	lws_sorted_usec_list_t		sul;	/* drives the earliest-expiry callback */
	struct lws_cache_ttl_lru	*child;	/* level closer to L1, if any */
	uint64_t			current_footprint; /* bytes currently held */
} lws_cache_ttl_lru_t;

/*
 * The heap-backed cache uses lws_dll2 linked-lists to track items that are
 * in it.
 */
typedef struct lws_cache_ttl_lru_heap {
	lws_cache_ttl_lru_t		cache;		/* base class, must be first */

	lws_dll2_owner_t		items_expiry;	/* items sorted by expiry time */
	lws_dll2_owner_t		items_lru;	/* items in recency-of-use order */
} lws_cache_ttl_lru_t_heap_t;

/*
 * We want to be able to work with a large file-backed implementation even on
 * devices that don't have heap to track what is in it.  It means if lookups
 * reach this cache layer, we will be scanning a potentially large file.
 *
 * L1 caching of lookups (including null result list) reduces the expense of
 * this on average.  We keep a copy of the last computed earliest expiry.
 *
 * We can't keep an open file handle here.  Because other processes may change
 * the cookie file by deleting and replacing it, we have to open it fresh each
 * time.
 */
typedef struct lws_cache_nscookiejar {
	lws_cache_ttl_lru_t		cache;		/* base class, must be first */

	lws_usec_t			earliest_expiry; /* cached earliest expiry */
} lws_cache_nscookiejar_t;

/* detach and free every match result on the owner list */
void
lws_cache_clear_matches(lws_dll2_owner_t *results_owner);

/* (re)arm cache->sul so cb fires at absolute usec time e */
void
lws_cache_schedule(struct lws_cache_ttl_lru *cache, sul_cb_t cb, lws_usec_t e);