Update Files

This commit is contained in:
2025-01-22 17:22:38 +01:00
parent 89b9349629
commit 4c5e729485
5132 changed files with 1195369 additions and 0 deletions

View File

@ -0,0 +1,82 @@
#
# libwebsockets - small server side websockets and web server implementation
#
# Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# The strategy is to only export to PARENT_SCOPE
#
# - changes to LIB_LIST
# - changes to SOURCES
# - includes via include_directories
#
# and keep everything else private
# async-dns private header is needed by core even when the feature sources
# are conditional below
include_directories(./async-dns)

# system/system.c carries the core lws_system support; it is omitted only
# for the minimal SSPC-only proxy-client build
if (NOT LWS_ONLY_SSPC)
	list(APPEND SOURCES
		system/system.c)
endif()

# everything below is network-facing system helper plumbing
if (LWS_WITH_NETWORK)

	if (LWS_WITH_SYS_ASYNC_DNS)
		list(APPEND SOURCES
			system/async-dns/async-dns.c
			system/async-dns/async-dns-parse.c)
	endif()

	if (LWS_WITH_SYS_NTPCLIENT)
		list(APPEND SOURCES
			system/ntpclient/ntpclient.c)
	endif()

	if (LWS_WITH_SYS_DHCP_CLIENT)
		list(APPEND SOURCES
			system/dhcpclient/dhcpclient.c
			system/dhcpclient/dhcpc4.c)
	endif()

	if (LWS_WITH_OTA)
		list(APPEND SOURCES
			system/ota/ota.c)
	endif()

	# smd adds its own sources and include dirs via its subdir helper
	if (LWS_WITH_SYS_SMD)
		add_subdir_include_dirs(smd)
	endif()

	if (LWS_WITH_SYS_FAULT_INJECTION)
		include_directories(./fault-injection)
		list(APPEND SOURCES
			system/fault-injection/fault-injection.c)
	endif()

	# metrics is unconditional when networking is on
	add_subdir_include_dirs(metrics)

endif()

#
# Keep explicit parent scope exports at end
#

exports_to_parent_scope()

View File

@ -0,0 +1,68 @@
# LWS System Helpers
Lws now has a little collection of helper utilities for common network-based
functions necessary for normal device operation, eg, async DNS, ntpclient
(necessary for tls validation), and DHCP client.
## Conventions
If any system helper is enabled for build, lws creates an additional vhost
"system" at Context Creation time. Wsi that are created for the system
features are bound to this. In the context object, this is available as
`.vhost_system`.
## Attaching to an existing context from other threads
To simplify the case different pieces of code want to attach to a single
lws_context at runtime, from different thread contexts, lws_system has an api
via an lws_system operation function pointer where the other threads can use
platform-specific locking to request callbacks to their own code from the
lws event loop thread context safely.
For convenience, the callback can be delayed until the system has entered or
passed a specified system state, eg, LWS_SYSTATE_OPERATIONAL so the code will
only get called back after the network, ntpclient and auth have been done.
Additionally an opaque pointer can be passed to the callback when it is called
from the lws event loop context.
## Implementing the system-specific locking
`lws_system_ops_t` struct has a member `.attach`
```
int (*attach)(struct lws_context *context, int tsi, lws_attach_cb_t *cb,
lws_system_states_t state, void *opaque,
struct lws_attach_item **get);
```
This should be implemented in user code: take a platform-specific lock, then
pass the arguments through to a non-threadsafe helper
```
int
__lws_system_attach(struct lws_context *context, int tsi, lws_attach_cb_t *cb,
lws_system_states_t state, void *opaque,
struct lws_attach_item **get);
```
that does the actual attach work. When it returns, the locking should be
unlocked and the return passed back.
## Attaching the callback request
User code should call the lws_system_ops_t `.attach` function like
```
lws_system_get_ops(context)->attach(...);
```
The callback function which will be called from the lws event loop context
should look like this
```
void my_callback(struct lws_context *context, int tsi, void *opaque);
```
with the callback function name passed into the (*attach)() call above. When
the callback happens, the opaque user pointer set at the (*attach)() call is
passed back to it as an argument.

View File

@ -0,0 +1,710 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#include "private-lib-async-dns.h"
/*
 * Parse one DNS wire-format name at ls, which may be a run of
 * length-prefixed labels, or a 14-bit compression pointer into pkt, or a mix.
 * Each label is appended to *dest as "label." with a NUL after it, and *dest
 * is advanced; dl is the space remaining at *dest.
 *
 * updates *dest, returns chars used from ls directly, else -1 for fail
 */
static int
lws_adns_parse_label(const uint8_t *pkt, int len, const uint8_t *ls, int budget,
		     char **dest, size_t dl)
{
	const uint8_t *e = pkt + len, *ols = ls;
	char pointer = 0, first = 1;
	uint8_t ll;
	int n;

	/* packet must hold at least a DNS header and fit a normal MTU */
	if (len < DHO_SIZEOF || len > 1500)
		return -1;

	if (budget < 1)
		return 0;

	/* caller must catch end of labels */
	assert(*ls);

again1:
	if (ls >= e)
		return -1;

	/* top two bits set means 14-bit compression pointer follows */
	if (((*ls) & 0xc0) == 0xc0) {
		if (budget < 2)
			return -1;
		/* pointer into message pkt to name to actually use */
		n = lws_ser_ru16be(ls) & 0x3fff;
		if (n >= len) {
			lwsl_notice("%s: illegal name pointer\n", __func__);

			return -1;
		}

		/* dereference the label pointer */
		ls = pkt + n;

		/* are we being fuzzed or messed with? */
		if (((*ls) & 0xc0) == 0xc0) {
			/* ... pointer to pointer is unreasonable */
			lwsl_notice("%s: label ptr to ptr invalid\n", __func__);

			return -1;
		}

		pointer = 1;
	}

	if (ls >= e)
		return -1;

	/* ll = this label's length; label bytes plus terminator must fit */
	ll = *ls++;
	if (ls + ll + 1 > e) {
		lwsl_notice("%s: label len invalid, %d vs %d\n", __func__,
			    lws_ptr_diff((ls + ll + 1), pkt), lws_ptr_diff(e, pkt));

		return -1;
	}

	if (ll > lws_ptr_diff_size_t(ls, ols) + (size_t)budget) {
		lwsl_notice("%s: label too long %d vs %d\n", __func__, ll, budget);

		return -1;
	}

	/* need room for label + '.' + NUL in the output buffer */
	if ((unsigned int)ll + 2 > dl) {
		lwsl_notice("%s: qname too large\n", __func__);

		return -1;
	}

	/* copy the label content into place */

	memcpy(*dest, ls, ll);
	(*dest)[ll] = '.';
	(*dest)[ll + 1] = '\0';
	*dest += ll + 1;
	ls += ll;

	if (pointer) {
		if (*ls)
			goto again1;

		/*
		 * special fun rule... if whole qname was a pointer label,
		 * it has no 00 terminator afterwards
		 */
		if (first)
			return 2; /* we just took the 16-bit pointer */

		return 3;
	}

	first = 0;

	if (*ls)
		goto again1;

	/* consume the 00 terminator too */
	ls++;

	return lws_ptr_diff(ls, ols);
}
/*
 * Per-matching-record callback for lws_adns_iterate(); payload points at the
 * raw RR payload (4 bytes for A, 16 for AAAA).
 */
typedef int (*lws_async_dns_find_t)(const char *name, void *opaque,
				    uint32_t ttl, adns_query_type_t type,
				    const uint8_t *payload);

/* locally query the response packet */

/*
 * One level of the explicit CNAME-recursion stack used by
 * lws_adns_iterate(): the decoded name, its length, and where to resume
 * in the packet after unstacking.
 */
struct label_stack {
	char name[DNS_MAX];
	int enl;
	const uint8_t *p;
};
/*
 * Walk the response packet, calling back to the user-provided callback for each
 * A (and AAAA if LWS_IPV6=1) record with a matching name found in there.
 *
 * Able to recurse using an explicit non-CPU stack to resolve CNAME usages
 *
 * Return -1: unexpectedly failed
 *	   0: found
 *	   1: didn't find anything matching
 *	   2: CNAME needs out-of-packet resolution, q was reset for it and
 *	      the request restarted with a new tid
 */
static int
lws_adns_iterate(lws_adns_q_t *q, const uint8_t *pkt, int len,
		 const char *expname, lws_async_dns_find_t cb, void *opaque)
{
	const uint8_t *e = pkt + len, *p, *pay;
	struct label_stack stack[4];
	int n = 0, stp = 0, ansc, m;
	uint16_t rrtype, rrpaylen;
	char *sp, inq;
	uint32_t ttl;

	/* must at least contain the DNS header, and fit a normal MTU */
	if (len < DHO_SIZEOF || len > 1500)
		return -1;

	/* stack[0] holds the name we are currently looking for */
	lws_strncpy(stack[0].name, expname, sizeof(stack[0].name));
	stack[0].enl = (int)strlen(expname);

start:
	ansc = lws_ser_ru16be(pkt + DHO_NANSWERS);
	p = pkt + DHO_SIZEOF;
	inq = 1;

	/*
	 * The response also includes the query... and we have to parse it
	 * so we can understand we reached the response... there's a QNAME
	 * made up of labels and then 2 x 16-bit fields, for query type and
	 * query class
	 */

	while (p + 14 < e && (inq || ansc)) {

		if (!inq && !stp)
			ansc--;

		/*
		 * First is the name the query applies to... two main
		 * formats can appear here, one is a pointer to
		 * elsewhere in the message, the other separately
		 * provides len / data for each dotted "label", so for
		 * "warmcat.com" warmcat and com are given each with a
		 * prepended length byte.  Any of those may be a pointer
		 * to somewhere else in the packet :-/
		 *
		 * Paranoia is appropriate since the name length must be
		 * parsed out before the rest of the RR can be used and
		 * we can be attacked with absolutely any crafted
		 * content easily via UDP.
		 *
		 * So parse the name and additionally confirm it matches
		 * what the query the TID belongs to actually asked for.
		 */

		sp = stack[0].name;

		/* while we have more labels */
		n = lws_adns_parse_label(pkt, len, p, len, &sp,
					 sizeof(stack[0].name) -
					 lws_ptr_diff_size_t(sp, stack[0].name));
		/* includes case name won't fit */
		if (n < 0)
			return -1;

		p += n;

		if (p + (inq ? 5 : 14) > e)
			return -1;

		/*
		 * p is now just after the decoded RR name, pointing at: type
		 *
		 * We sent class = 1 = IN query... response must match
		 */

		if (lws_ser_ru16be(&p[2]) != 1) {
			lwsl_err("%s: non-IN response 0x%x\n", __func__,
				 lws_ser_ru16be(&p[2]));

			return -1;
		}

		if (inq) {
			/* skip qtype + qclass, then on to the answer RRs */
			lwsl_debug("%s: reached end of inq\n", __func__);
			inq = 0;
			p += 4;
			continue;
		}

		/* carefully validate the claimed RR payload length */

		rrpaylen = lws_ser_ru16be(&p[8]);
		if (p + 10 + rrpaylen > e) { /* it may be == e */
			lwsl_notice("%s: invalid RR data length\n", __func__);

			return -1;
		}

		ttl = lws_ser_ru32be(&p[4]);
		rrtype = lws_ser_ru16be(&p[0]);
		p += 10; /* point to the payload */
		pay = p;

		/*
		 * Compare the RR names, allowing for the decoded labelname
		 * to have an extra '.' at the end.
		 */

		n = lws_ptr_diff(sp, stack[0].name);

		if (stack[0].name[n - 1] == '.')
			n--;

		m = stack[stp].enl;
		if (stack[stp].name[m - 1] == '.')
			m--;

		if (n < 1 || n != m ||
		    strncmp(stack[0].name, stack[stp].name, (unsigned int)n)) {
			//lwsl_notice("%s: skipping %s vs %s\n", __func__,
			//		stack[0].name, stack[stp].name);
			goto skip;
		}

		/*
		 * It's something we could be interested in...
		 *
		 * We can skip RRs we don't understand.  But we need to deal
		 * with at least these and their payloads:
		 *
		 *    A:      4: ipv4 address
		 *    AAAA:  16: ipv6 address (if asked for AAAA)
		 *    CNAME:  ?: labelized name
		 *
		 * If we hit a CNAME we need to try to dereference it with
		 * stuff that is in the same response packet and judge it
		 * from that, without losing our place here.  CNAMEs may
		 * point to CNAMEs to whatever depth we're willing to handle.
		 */

		switch (rrtype) {

		case LWS_ADNS_RECORD_AAAA:
			if (rrpaylen != 16) {
				lwsl_err("%s: unexpected rrpaylen\n", __func__);
				return -1;
			}
#if defined(LWS_WITH_IPV6)
			goto do_cb;
#else
			break;
#endif

		case LWS_ADNS_RECORD_A:
			if (rrpaylen != 4) {
				lwsl_err("%s: unexpected rrpaylen4\n", __func__);
				return -1;
			}
#if defined(LWS_WITH_IPV6)
do_cb:
#endif
			cb(stack[0].name, opaque, ttl, rrtype, p);
			break;

		case LWS_ADNS_RECORD_CNAME:
			/*
			 * The name the CNAME refers to MAY itself be
			 * included elsewhere in the response packet.
			 *
			 * So switch tack, stack where to resume from and
			 * search for the decoded CNAME label name definition
			 * instead.
			 *
			 * First decode the CNAME label payload into the next
			 * stack level buffer for it.
			 */
			if (++stp == (int)LWS_ARRAY_SIZE(stack)) {
				lwsl_notice("%s: CNAMEs too deep\n", __func__);

				return -1;
			}
			sp = stack[stp].name;
			/* get the cname alias */
			n = lws_adns_parse_label(pkt, len, p, rrpaylen, &sp,
						 sizeof(stack[stp].name) -
						 lws_ptr_diff_size_t(sp,
							    stack[stp].name));
			/* includes case name won't fit */
			if (n < 0)
				return -1;

			p += n;

			if (p + 14 > e)
				return -1;
#if 0
			/* it should have exactly reached rrpaylen if only one
			 * CNAME, else somewhere in the middle */
			if (p != pay + rrpaylen) {
				lwsl_err("%s: cname name bad len %d\n", __func__, rrpaylen);

				return -1;
			}
#endif
			/*
			 * (removed a leftover duplicate lwsl_notice() that
			 * logged the identical message as the line below)
			 */
			lwsl_info("%s: recursing looking for %s\n", __func__,
				  stack[stp].name);
			stack[stp].enl = lws_ptr_diff(sp, stack[stp].name);
			/* when we unstack, resume from here */
			stack[stp].p = pay + rrpaylen;
			goto start;

		default:
			break;
		}

skip:
		p += rrpaylen;
	}

	if (!stp)
		return 1; /* we didn't find anything, but we didn't error */

	lwsl_info("%s: '%s' -> CNAME '%s' resolution not provided, recursing\n",
		  __func__, ((const char *)&q[1]) + DNS_MAX,
		  stack[stp].name);

	/*
	 * This implies there wasn't any usable definition for the
	 * CNAME in the end, eg, only AAAA when we needed an A.
	 *
	 * It's also legit if the DNS just returns the CNAME, and that server
	 * did not directly know the next step in resolution of the CNAME, so
	 * instead of putting the resolution elsewhere in the response, has
	 * told us just the CNAME and left it to us to find out its resolution
	 * separately.
	 *
	 * Reset this request to be for the CNAME, and restart the request
	 * action with a new tid.
	 */

	if (lws_async_dns_get_new_tid(q->context, q))
		return -1;

	LADNS_MOST_RECENT_TID(q) &= 0xfffe;
	q->asked = q->responded = 0;
#if defined(LWS_WITH_IPV6)
	q->sent[1] = 0;
#endif
	q->sent[0] = 0;
	q->is_synthetic = 0;

	q->recursion++;
	if (q->recursion == DNS_RECURSION_LIMIT) {
		lwsl_err("%s: recursion overflow\n", __func__);

		return -1;
	}

	/* partial cache from the earlier name is no longer relevant */
	if (q->firstcache)
		lws_adns_cache_destroy(q->firstcache);
	q->firstcache = NULL;

	/* overwrite the query name with the CNAME */

	n = 0;
	{
		char *cp = (char *)&q[1];

		while (stack[stp].name[n])
			*cp++ = (char)tolower(stack[stp].name[n++]);
		/* trim the following . if any */
		if (n && cp[-1] == '.')
			cp--;
		*cp = '\0';
	}

	if (q->dsrv && q->dsrv->wsi)
		lws_callback_on_writable(q->dsrv->wsi);

	return 2;
}
/*
 * lws_async_dns_find_t callback used on the first packet walk: accumulate
 * into *opaque (a size_t) the bytes needed to store this one result as a
 * struct addrinfo plus its sockaddr (in6 for AAAA, in for A).
 */
int
lws_async_dns_estimate(const char *name, void *opaque, uint32_t ttl,
		       adns_query_type_t type, const uint8_t *payload)
{
	size_t *acc = (size_t *)opaque;
	size_t need = sizeof(struct addrinfo) +
		      (type == LWS_ADNS_RECORD_AAAA ?
				sizeof(struct sockaddr_in6) :
				sizeof(struct sockaddr_in));

	*acc = *acc + need;

	return 0;
}
/* state carried across lws_async_dns_store() callbacks for one packet walk */
struct adstore {
	const char *name;		/* canonical name results are for */
	struct addrinfo *pos;		/* where the next addrinfo is written */
	struct addrinfo *prev;		/* last written, to chain ai_next */
	int ctr;			/* how many records stored so far */
	uint32_t smallest_ttl;		/* min ttl seen across stored records */
	uint8_t flags;			/* b0 = stored ipv4, b1 = stored ipv6 */
};
/*
 * Callback for each A or AAAA record, creating getaddrinfo-compatible results
 * into the preallocated exact-sized storage.
 *
 * Each result is laid out as a struct addrinfo immediately followed by its
 * sockaddr (&pos[1]); opaque is the struct adstore tracking the walk.
 */
int
lws_async_dns_store(const char *name, void *opaque, uint32_t ttl,
		    adns_query_type_t type, const uint8_t *payload)
{
	struct adstore *adst = (struct adstore *)opaque;
#if defined(_DEBUG)
	char buf[48];
#endif
	size_t i;

	/* track the smallest ttl of any record, for cache expiry */
	if (ttl < adst->smallest_ttl || !adst->ctr)
		adst->smallest_ttl = ttl;

	/* chain onto the previous result, if any */
	if (adst->prev)
		adst->prev->ai_next = adst->pos;
	adst->prev = adst->pos;

	adst->pos->ai_flags = 0;
	adst->pos->ai_family = type == LWS_ADNS_RECORD_AAAA ?
					AF_INET6 : AF_INET;
	adst->pos->ai_socktype = SOCK_STREAM;
	adst->pos->ai_protocol = IPPROTO_UDP; /* no meaning */
	adst->pos->ai_addrlen = type == LWS_ADNS_RECORD_AAAA ?
					sizeof(struct sockaddr_in6) :
					sizeof(struct sockaddr_in);
	adst->pos->ai_canonname = (char *)adst->name;
	/* the sockaddr lives directly after the addrinfo */
	adst->pos->ai_addr = (struct sockaddr *)&adst->pos[1];
	adst->pos->ai_next = NULL;

#if defined(LWS_WITH_IPV6)
	if (type == LWS_ADNS_RECORD_AAAA) {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&adst->pos[1];

		i = sizeof(*in6);
		memset(in6, 0, i);
		in6->sin6_family = (sa_family_t)adst->pos->ai_family;
		/* AAAA payload is the 16-byte ipv6 address */
		memcpy(in6->sin6_addr.s6_addr, payload, 16);
		adst->flags |= 2;
	} else
#endif
	{
		struct sockaddr_in *in = (struct sockaddr_in *)&adst->pos[1];

		i = sizeof(*in);
		memset(in, 0, i);
		in->sin_family = (sa_family_t)adst->pos->ai_family;
		/* A payload is the 4-byte ipv4 address */
		memcpy(&in->sin_addr.s_addr, payload, 4);
		adst->flags |= 1;
	}

	/* advance the write position past addrinfo + sockaddr just written */
	adst->pos = (struct addrinfo *)((uint8_t *)adst->pos +
					sizeof(struct addrinfo) + i);

#if defined(_DEBUG)
	if (lws_write_numeric_address(payload,
				      type == LWS_ADNS_RECORD_AAAA ? 16 : 4,
				      buf, sizeof(buf)) > 0)
		lwsl_info("%s: %d: %s: %s\n", __func__, adst->ctr,
			  adst->name, buf);
#endif

	adst->ctr++;

	return 0;
}
/*
 * We want to parse out all A or AAAA records
 *
 * Entry point for a received DNS UDP response: match it to a pending query
 * by TID, size and create the cache object, fill it with results, and when
 * both A and AAAA responses are in, complete the query and destroy it.
 */
void
lws_adns_parse_udp(lws_async_dns_t *dns, const uint8_t *pkt, size_t len)
{
	const char *nm, *nmcname;
	lws_adns_cache_t *c;
	struct adstore adst;
	lws_adns_q_t *q;
	int n, ncname;
	size_t est;

	// lwsl_hexdump_notice(pkt, len);

	/* we have to at least have the header */
	if (len < DHO_SIZEOF || len > 1500)
		return;

	/* we asked with one query, so anything else is bogus */
	if (lws_ser_ru16be(pkt + DHO_NQUERIES) != 1)
		return;

	/* match both A and AAAA queries if any */
	q = lws_adns_get_query(dns, 0, lws_ser_ru16be(pkt + DHO_TID), NULL);
	if (!q) {
		lwsl_info("%s: dropping unknown query tid 0x%x\n",
			  __func__, lws_ser_ru16be(pkt + DHO_TID));

		return;
	}

	/*
	 * we can get dups... drop any that have already happened
	 * (the TID's low bit distinguishes the A / AAAA sides)
	 */
	n = 1 << (lws_ser_ru16be(pkt + DHO_TID) & 1);
	if (q->responded & n) {
		lwsl_notice("%s: dup\n", __func__);
		goto fail_out;
	}

	q->responded = (uint8_t)(q->responded | n);

	/* we want to confirm the results against what we last requested... */
	nmcname = ((const char *)&q[1]);

	/*
	 * First walk the packet figuring out the allocation needed for all
	 * the results.  Produce the following layout at c
	 *
	 * lws_adns_cache_t: new cache object
	 * [struct addrinfo + struct sockaddr_in or _in6]: for each A or AAAA
	 * char []: copy of resolved name
	 */

	ncname = (int)strlen(nmcname) + 1;

	est = sizeof(lws_adns_cache_t) + (unsigned int)ncname;
	if (lws_ser_ru16be(pkt + DHO_NANSWERS)) {
		int ir = lws_adns_iterate(q, pkt, (int)len, nmcname,
					  lws_async_dns_estimate, &est);
		if (ir < 0)
			goto fail_out;

		/* CNAME needs out-of-packet resolution: q was restarted */
		if (ir == 2) /* CNAME recursive resolution */
			return;
	}

	/* but we want to create the cache entry against the original request */
	nm = ((const char *)&q[1]) + DNS_MAX;
	n = (int)strlen(nm) + 1;

	lwsl_info("%s: create cache entry for %s, %zu\n", __func__, nm,
		  est - sizeof(lws_adns_cache_t));
	c = lws_malloc(est + 1, "async-dns-entry");
	if (!c) {
		lwsl_err("%s: OOM %zu\n", __func__, est);
		goto fail_out;
	}
	memset(c, 0, sizeof(*c));

	/* place it at end, no need to care about alignment padding */
	c->name = adst.name = ((const char *)c) + est - n;
	memcpy((char *)c->name, nm, (unsigned int)n);

	/*
	 * Then walk the packet again, placing the objects we accounted for
	 * the first time into the result allocation after the cache object
	 * and copy of the name
	 */

	adst.pos = (struct addrinfo *)&c[1];
	adst.prev = NULL;
	adst.ctr = 0;
	adst.smallest_ttl = 3600;
	adst.flags = 0;

	/*
	 * smallest_ttl applies as it is to empty results (NXDOMAIN), or is
	 * set to the minimum ttl seen in all the results.
	 */

	if (lws_ser_ru16be(pkt + DHO_NANSWERS) &&
	    lws_adns_iterate(q, pkt, (int)len, nmcname, lws_async_dns_store, &adst) < 0) {
		lws_free(c);
		goto fail_out;
	}

	if (lws_ser_ru16be(pkt + DHO_NANSWERS)) {
		c->results = (struct addrinfo *)&c[1];
		if (q->last) /* chain the second one on */
			*q->last = c->results;
		else /* first one had no results, set first guy's c->results */
			if (q->firstcache)
				q->firstcache->results = c->results;
	}

	if (adst.prev) /* so we know where to continue the addrinfo list */
		/* can be NULL if first resp empty */
		q->last = &adst.prev->ai_next;

	if (q->firstcache) { /* also need to free chain when we free this guy */
		q->firstcache->chain = c;
		c->firstcache = q->firstcache;
	} else {

		q->firstcache = c;
		c->incomplete = !q->responded;// != q->asked;

		/*
		 * Only register the first one into the cache...
		 * Trim the oldest cache entry if necessary
		 */

		lws_async_dns_trim_cache(dns);

		/*
		 * cache the first results object... if a second one comes,
		 * we won't directly register it but will chain it on to this
		 * first one and continue to addinfo ai_next linked list from
		 * the first into the second
		 */

		c->flags = adst.flags;
		lws_dll2_add_head(&c->list, &dns->cached);
		/* expire the cache entry when the smallest ttl is reached */
		lws_sul_schedule(q->context, 0, &c->sul, sul_cb_expire,
				 lws_now_usecs() +
				 (adst.smallest_ttl * LWS_US_PER_SEC));
	}

	/* wait for the other of the A / AAAA pair before completing */
	if (q->responded != q->asked)
		return;

	/*
	 * Now we captured everything into the new object, return the
	 * addrinfo results, if any, to all interested wsi, if any...
	 */

	c->incomplete = 0;
	lws_async_dns_complete(q, q->firstcache);

	q->go_nogo = METRES_GO;

	/*
	 * the query is completely finished with
	 * (deliberate fallthrough: destroy q on both success and failure)
	 */

fail_out:
	lws_adns_q_destroy(q);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,164 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* compile-time tunables for the async DNS implementation */
#define DNS_MAX			128	/* Maximum host name */
#define DNS_RECURSION_LIMIT	4	/* Max CNAME re-resolution depth */
#define DNS_PACKET_LEN		1400	/* Buffer size for DNS packet */
#define MAX_CACHE_ENTRIES	10	/* Don't cache more than this many */
#define DNS_QUERY_TIMEOUT	30	/* Query timeout, seconds */
#if defined(LWS_WITH_SYS_ASYNC_DNS)
/*
 * ... when we completed a query then the query object is destroyed and a
 * cache object below is created with the results in getaddrinfo format
 * appended to the allocation
 */

typedef struct lws_adns_cache {
	lws_sorted_usec_list_t	sul;	/* for cache TTL management */
	lws_dll2_t		list;	/* membership of dns->cached */
	struct lws_adns_cache	*firstcache;	/* first entry of a chained pair */
	struct lws_adns_cache	*chain;	/* second (eg, AAAA) entry chained on */
	struct addrinfo		*results;	/* getaddrinfo-style result list */
	const char		*name;	/* resolved name, stored in-allocation */
	uint8_t			flags;	/* b0 = has ipv4, b1 = has ipv6 */
	char			refcount;
	char			incomplete;	/* nonzero until all responses in */

	/* addrinfo, lws_sa46, then name overallocated here */
} lws_adns_cache_t;
/*
 * these objects are used while a query is ongoing...
 */

typedef struct lws_adns_q {
	lws_sorted_usec_list_t	sul;	/* per-query write retry timer */
	lws_sorted_usec_list_t	write_sul; /* fail if unable to write by this time */
	lws_dll2_t		list;
	lws_metrics_caliper_compose(metcal)
	lws_dll2_owner_t	wsi_adns;	/* wsi waiting on this query */
	lws_async_dns_cb_t	standalone_cb;	/* if not associated to wsi */
	struct lws_context	*context;
	void			*opaque;
	struct addrinfo		**last;	/* where to chain further results */
	lws_async_dns_t		*dns;
	lws_async_dns_server_t	*dsrv;	/* server this query is using */
	lws_adns_cache_t	*firstcache;	/* first partial result object */
	lws_async_dns_retcode_t	ret;
	uint16_t		tid[3];	/* last 3 sent tid */
	uint16_t		qtype;
	uint16_t		retry;
	uint8_t			tsi;

#if defined(LWS_WITH_IPV6)
	uint8_t			sent[2];	/* [0] = A, [1] = AAAA sent */
#else
	uint8_t			sent[1];
#endif
	uint8_t			asked;	/* bitmap of queries issued */
	uint8_t			responded;	/* bitmap of responses received */

	uint8_t			recursion;	/* CNAME re-resolution depth */
	uint8_t			tids;	/* count of tids allocated so far */
	uint8_t			go_nogo;

	uint8_t			is_retry:1;
	uint8_t			is_synthetic:1; /* test will deliver canned */

	/* name overallocated here */
} lws_adns_q_t;
/*
 * Evaluates to the most recently-issued TID slot of query _q (lvalue).
 *
 * Bugfix: the expansion referenced a variable literally named "q" instead
 * of the macro argument _q, so it only worked when the caller's variable
 * happened to be called q; also parenthesize the argument uses.
 */
#define LADNS_MOST_RECENT_TID(_q) \
		(_q)->tid[(int)((_q)->tids - 1) % (int)LWS_ARRAY_SIZE((_q)->tid)]
/* byte offsets of the 16-bit fields in the 12-byte DNS wire header */
enum {
	DHO_TID,		/* transaction id */
	DHO_FLAGS = 2,		/* flags / opcode / rcode */
	DHO_NQUERIES = 4,	/* question count */
	DHO_NANSWERS = 6,	/* answer RR count */
	DHO_NAUTH = 8,		/* authority RR count */
	DHO_NOTHER = 10,	/* additional RR count */

	DHO_SIZEOF = 12 /* last */
};
/* query / cache object lifecycle */

void
lws_adns_q_destroy(lws_adns_q_t *q);

/* sul callback that expires a cache entry when its ttl is reached */
void
sul_cb_expire(struct lws_sorted_usec_list *sul);

void
lws_adns_cache_destroy(lws_adns_cache_t *c);

/* deliver results to all waiting wsi / standalone callbacks */
int
lws_async_dns_complete(lws_adns_q_t *q, lws_adns_cache_t *c);

lws_adns_cache_t *
lws_adns_get_cache(lws_async_dns_t *dns, const char *name);

/* look up an ongoing query by tid or name */
lws_adns_q_t *
lws_adns_get_query(lws_async_dns_t *dns, adns_query_type_t qtype,
		   uint16_t tid, const char *name);

lws_adns_q_t *
lws_adns_get_query_srv(lws_async_dns_server_t *dsrv, adns_query_type_t qtype,
		       uint16_t tid, const char *name);

/* evict the oldest cache entry if over MAX_CACHE_ENTRIES */
void
lws_async_dns_trim_cache(lws_async_dns_t *dns);

int
lws_async_dns_get_new_tid(struct lws_context *context, lws_adns_q_t *q);

/* require: context lock on this set */

lws_async_dns_server_t *
__lws_async_dns_server_find(lws_async_dns_t *dns, const lws_sockaddr46 *sa46);
lws_async_dns_server_t *
__lws_async_dns_server_find_wsi(lws_async_dns_t *dns, struct lws *wsi);
lws_async_dns_server_t *
__lws_async_dns_server_add(lws_async_dns_t *dns, const lws_sockaddr46 *sa46);
void
__lws_async_dns_server_remove(lws_async_dns_t *dns, const lws_sockaddr46 *sa46);

#if defined(_DEBUG)
void
lws_adns_dump(lws_async_dns_t *dns);
#else
#define lws_adns_dump(_d)
#endif

#endif

View File

@ -0,0 +1,289 @@
/*
* LWA auth support for Secure Streams
*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2019 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
/* per-stream state for the LWA (Login With Amazon) auth secure stream */
typedef struct ss_api_amazon_auth {
	struct lws_ss_handle	*ss;
	void			*opaque_data;	/* holds the lws_context * */

	/* ... application specific state ... */

	struct lejp_ctx		jctx;	/* JSON parser for the auth response */
	size_t			pos;	/* tx offset into the identity blob */
	int			expires_secs;	/* token lifetime from "expires_in" */
} ss_api_amazon_auth_t;

/* JSON paths we extract from the auth response */
static const char * const lejp_tokens_lwa[] = {
	"access_token",
	"expires_in",
};

/* indexes into lejp_tokens_lwa, matched via ctx->path_match - 1 */
typedef enum {
	LSSPPT_ACCESS_TOKEN,
	LSSPPT_EXPIRES_IN,
} lejp_tokens_t;

/* auth blob slots used with LWS_SYSBLOB_TYPE_AUTH */
enum {
	AUTH_IDX_LWA,	/* the acquired LWA access token */
	AUTH_IDX_ROOT,	/* the LWA user / device identity token we send */
};
/*
 * Deferred sul callback: now that the auth stream close has completed and
 * its tls is freed, advance the system state machine to OPERATIONAL.
 */
static void
lws_ss_sys_auth_api_amazon_com_kick(lws_sorted_usec_list_t *sul)
{
	struct lws_context *cx = lws_container_of(sul, struct lws_context,
						  sul_api_amazon_com_kick);

	lws_state_transition_steps(&cx->mgr_system, LWS_SYSTATE_OPERATIONAL);
}
/*
 * Sul callback scheduled for when the current token expires: start a fresh
 * auth stream to renew it.
 */
static void
lws_ss_sys_auth_api_amazon_com_renew(lws_sorted_usec_list_t *sul)
{
	struct lws_context *cx = lws_container_of(sul, struct lws_context,
						  sul_api_amazon_com);

	lws_ss_sys_auth_api_amazon_com(cx);
}
/*
 * lejp callback for the LWA auth JSON response: append "access_token"
 * chunks into the AUTH_IDX_LWA system blob, and on "expires_in" schedule
 * token renewal that many seconds from now.  Returns -1 to abort the parse.
 */
static signed char
auth_api_amazon_com_parser_cb(struct lejp_ctx *ctx, char reason)
{
	ss_api_amazon_auth_t *m = (ss_api_amazon_auth_t *)ctx->user;
	struct lws_context *context = (struct lws_context *)m->opaque_data;
	lws_system_blob_t *blob;

	/* only interested in value callbacks on paths we listed */
	if (!(reason & LEJP_FLAG_CB_IS_VALUE) || !ctx->path_match)
		return 0;

	switch (ctx->path_match - 1) {
	case LSSPPT_ACCESS_TOKEN:
		if (!ctx->npos)
			break;

		blob = lws_system_get_blob(context, LWS_SYSBLOB_TYPE_AUTH,
					   AUTH_IDX_LWA);
		if (!blob)
			return -1;

		/* token may arrive across several value callbacks: append */
		if (lws_system_blob_heap_append(blob,
						(const uint8_t *)ctx->buf,
						ctx->npos)) {
			lwsl_err("%s: unable to store auth token\n", __func__);

			return -1;
		}
		break;
	case LSSPPT_EXPIRES_IN:
		m->expires_secs = atoi(ctx->buf);
		/* renew the token when it expires */
		lws_sul_schedule(context, 0, &context->sul_api_amazon_com,
				 lws_ss_sys_auth_api_amazon_com_renew,
				 (lws_usec_t)m->expires_secs * LWS_US_PER_SEC);
		break;
	}

	return 0;
}
/* secure streams payload interface */

/*
 * RX: stream the JSON auth response through lejp, accumulating the access
 * token into the AUTH_IDX_LWA system blob.  Always disconnects at EOM:
 * the system state transition is deferred to the close path.
 */
static lws_ss_state_return_t
ss_api_amazon_auth_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	ss_api_amazon_auth_t *m = (ss_api_amazon_auth_t *)userobj;
	struct lws_context *context = (struct lws_context *)m->opaque_data;
	lws_system_blob_t *ab;
#if !defined(LWS_WITH_NO_LOGS)
	size_t total;
#endif
	int n;

	ab = lws_system_get_blob(context, LWS_SYSBLOB_TYPE_AUTH, AUTH_IDX_LWA);
	/* coverity */
	if (!ab)
		return LWSSSSRET_DISCONNECT_ME;

	if (buf) {
		if (flags & LWSSS_FLAG_SOM) {
			/* first chunk: (re)start the parser, drop old token */
			lejp_construct(&m->jctx, auth_api_amazon_com_parser_cb,
				       m, lejp_tokens_lwa,
				       LWS_ARRAY_SIZE(lejp_tokens_lwa));
			lws_system_blob_heap_empty(ab);
		}

		n = lejp_parse(&m->jctx, buf, (int)len);
		if (n < 0) {
			/* bad JSON: discard any partially-stored token */
			lejp_destruct(&m->jctx);
			lws_system_blob_destroy(
				lws_system_get_blob(context,
						    LWS_SYSBLOB_TYPE_AUTH,
						    AUTH_IDX_LWA));

			return LWSSSSRET_DISCONNECT_ME;
		}
	}
	if (!(flags & LWSSS_FLAG_EOM))
		return LWSSSSRET_OK;

	/* we should have the auth token now */

#if !defined(LWS_WITH_NO_LOGS)
	total = lws_system_blob_get_size(ab);
	lwsl_notice("%s: acquired %u-byte api.amazon.com auth token, exp %ds\n",
		    __func__, (unsigned int)total, m->expires_secs);
#endif

	lejp_destruct(&m->jctx);

	/* we move the system state at auth connection close */

	return LWSSSSRET_DISCONNECT_ME;
}
/*
 * TX: send the LWA identity token (AUTH_IDX_ROOT blob) as the request body,
 * tracking the send offset in m->pos and flagging SOM / EOM appropriately.
 */
static lws_ss_state_return_t
ss_api_amazon_auth_tx(void *userobj, lws_ss_tx_ordinal_t ord, uint8_t *buf,
		      size_t *len, int *flags)
{
	ss_api_amazon_auth_t *m = (ss_api_amazon_auth_t *)userobj;
	struct lws_context *context = (struct lws_context *)m->opaque_data;
	lws_system_blob_t *ab;
	size_t total;
	int n;

	/*
	 * We send out auth slot AUTH_IDX_ROOT, it's the LWA user / device
	 * identity token
	 */

	ab = lws_system_get_blob(context, LWS_SYSBLOB_TYPE_AUTH, AUTH_IDX_ROOT);
	if (!ab)
		return LWSSSSRET_DESTROY_ME;

	total = lws_system_blob_get_size(ab);

	/* copy the next chunk from the blob at our current offset */
	n = lws_system_blob_get(ab, buf, len, m->pos);
	if (n < 0)
		return LWSSSSRET_TX_DONT_SEND;

	if (!m->pos)
		*flags |= LWSSS_FLAG_SOM;

	m->pos += *len;

	if (m->pos == total) {
		*flags |= LWSSS_FLAG_EOM;
		m->pos = 0; /* for next time */
	}

	return LWSSSSRET_OK;
}
/*
 * State callback for the auth stream: on CONNECTING, size the request to
 * the identity blob; on DISCONNECTED, defer the system-state kick via sul
 * (so the tls memory is recovered first) and destroy the stream.
 */
static lws_ss_state_return_t
ss_api_amazon_auth_state(void *userobj, void *sh, lws_ss_constate_t state,
			 lws_ss_tx_ordinal_t ack)
{
	ss_api_amazon_auth_t *m = (ss_api_amazon_auth_t *)userobj;
	struct lws_context *context = (struct lws_context *)m->opaque_data;
	lws_system_blob_t *ab;
	size_t s;

	lwsl_info("%s: %s, ord 0x%x\n", __func__, lws_ss_state_name((int)state),
		  (unsigned int)ack);

	ab = lws_system_get_blob(context, LWS_SYSBLOB_TYPE_AUTH, AUTH_IDX_ROOT);
	/* coverity */
	if (!ab)
		return LWSSSSRET_DESTROY_ME;

	switch (state) {
	case LWSSSCS_CREATING:
		//if (lws_ss_set_metadata(m->ss, "ctype", "application/json", 16))
		//	return LWSSSSRET_DESTROY_ME;
		/* fallthru */
	case LWSSSCS_CONNECTING:
		/* announce the exact body length we will send */
		s = lws_system_blob_get_size(ab);
		if (!s)
			lwsl_debug("%s: no auth blob\n", __func__);
		m->pos = 0;
		return lws_ss_request_tx_len(m->ss, (unsigned long)s);

	case LWSSSCS_DISCONNECTED:
		/*
		 * We defer moving the system state forward until we have
		 * closed our connection + tls for the auth action... this is
		 * because on small systems, we need that memory recovered
		 * before we can make another connection subsequently.
		 *
		 * At this point, we're ultimately being called from within
		 * the wsi close process, the tls tunnel is not freed yet.
		 * Use a sul to actually do it next time around the event loop
		 * when the close process for the auth wsi has completed and
		 * the related tls is already freed.
		 */
		s = lws_system_blob_get_size(ab);

		if (s && context->mgr_system.state != LWS_SYSTATE_OPERATIONAL)
			lws_sul_schedule(context, 0,
					 &context->sul_api_amazon_com_kick,
					 lws_ss_sys_auth_api_amazon_com_kick, 1);

		context->hss_auth = NULL;
		return LWSSSSRET_DESTROY_ME;

	default:
		break;
	}

	return LWSSSSRET_OK;
}
int
lws_ss_sys_auth_api_amazon_com(struct lws_context *context)
{
lws_ss_info_t ssi;
if (context->hss_auth) /* already exists */
return 0;
/* We're making an outgoing secure stream ourselves */
memset(&ssi, 0, sizeof(ssi));
ssi.handle_offset = offsetof(ss_api_amazon_auth_t, ss);
ssi.opaque_user_data_offset = offsetof(ss_api_amazon_auth_t, opaque_data);
ssi.rx = ss_api_amazon_auth_rx;
ssi.tx = ss_api_amazon_auth_tx;
ssi.state = ss_api_amazon_auth_state;
ssi.user_alloc = sizeof(ss_api_amazon_auth_t);
ssi.streamtype = "api_amazon_com_auth";
if (lws_ss_create(context, 0, &ssi, context, &context->hss_auth,
NULL, NULL)) {
lwsl_info("%s: Create LWA auth ss failed (policy?)\n", __func__);
return 1;
}
return 0;
}

View File

@ -0,0 +1,574 @@
/*
* Sigv4 support for Secure Streams
*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2020 Andy Green <andy@warmcat.com>
* securestreams-dev@amazon.com
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
/* one header (name includes trailing ':') that takes part in the signature */
struct sigv4_header {
	const char * name;
	const char * value;
};
/* maximum count of headers we are prepared to sign */
#define MAX_HEADER_NUM 8
/* working state for computing one sigv4 signature */
struct sigv4 {
	struct sigv4_header headers[MAX_HEADER_NUM]; /* sorted by name before use */
	uint8_t hnum;			/* valid entries in headers[] */
	char ymd[10]; /*YYYYMMDD*/
	const char *timestamp;		/* x-amz-date header value */
	const char *payload_hash;	/* x-amz-content-sha256 header value */
	const char *region;		/* eg "us-west-2", from stream metadata */
	const char *service;		/* eg "s3", from stream metadata */
};
/* maps policy auth->blob_index to the system blob group holding the creds */
static const uint8_t blob_idx[] = {
	LWS_SYSBLOB_TYPE_EXT_AUTH1,
	LWS_SYSBLOB_TYPE_EXT_AUTH2,
	LWS_SYSBLOB_TYPE_EXT_AUTH3,
	LWS_SYSBLOB_TYPE_EXT_AUTH4,
};
/* slots within a credential blob group */
enum {
	LWS_SS_SIGV4_KEYID,		/* the AWS access key id */
	LWS_SS_SIGV4_KEY,		/* the AWS secret access key */
	LWS_SS_SIGV4_BLOB_SLOTS
};
/*
 * Record one header name/value pair for signing; returns 0, or -1 if the
 * fixed header table is full.  As a side effect, captures the payload hash
 * and the timestamp (plus its YYYYMMDD prefix) when those headers appear.
 * Note the strncmp()s below are prefix matches, since header names carry a
 * trailing ':'.  s->ymd stays NUL-terminated because the caller zeroed the
 * whole struct sigv4 before use.
 */
static inline int add_header(struct sigv4 *s, const char *name, const char *value)
{
	if (s->hnum >= MAX_HEADER_NUM) {
		lwsl_err("%s too many sigv4 headers\n", __func__);
		return -1;
	}
	s->headers[s->hnum].name = name;
	s->headers[s->hnum].value = value;
	s->hnum++;
	if (!strncmp(name, "x-amz-content-sha256", strlen("x-amz-content-sha256")))
		s->payload_hash = value;
	if (!strncmp(name, "x-amz-date", strlen("x-amz-date"))) {
		s->timestamp = value;
		/* keep just the YYYYMMDD date part for the credential scope */
		strncpy(s->ymd, value, 8);
	}
	return 0;
}
static int
cmp_header(const void * a, const void * b)
{
return strcmp(((struct sigv4_header *)a)->name,
((struct sigv4_header *)b)->name);
}
/*
 * Populate struct sigv4 from the connection and the stream's metadata:
 * the host header, every metadata header starting "x-amz-", and the
 * region / service names the policy points at.  Headers are then sorted
 * by name as sigv4 canonicalization requires.  Returns 0, or -1 if too
 * many headers.  NOTE(review): assumes h->metadata[] parallels the policy
 * metadata list index-for-index — confirm against the ss metadata code.
 */
static int
init_sigv4(struct lws *wsi, struct lws_ss_handle *h, struct sigv4 *s)
{
	lws_ss_metadata_t *polmd = h->policy->metadata;
	int m = 0;
	add_header(s, "host:", lws_hdr_simple_ptr(wsi, _WSI_TOKEN_CLIENT_HOST));
	while (polmd) {
		if (polmd->value__may_own_heap &&
		    ((uint8_t *)polmd->value__may_own_heap)[0] &&
		    h->metadata[m].value__may_own_heap) {
			/* consider all headers start with "x-amz-" need to be signed */
			if (!strncmp(polmd->value__may_own_heap, "x-amz-",
				     strlen("x-amz-"))) {
				if (add_header(s, polmd->value__may_own_heap,
					       h->metadata[m].value__may_own_heap))
					return -1;
			}
		}
		/* the policy names which metadata items carry region / service */
		if (!strcmp(h->metadata[m].name, h->policy->aws_region) &&
		    h->metadata[m].value__may_own_heap)
			s->region = h->metadata[m].value__may_own_heap;
		if (!strcmp(h->metadata[m].name, h->policy->aws_service) &&
		    h->metadata[m].value__may_own_heap)
			s->service = h->metadata[m].value__may_own_heap;
		m++;
		polmd = polmd->next;
	}
	/* canonical request requires headers sorted by name */
	qsort(s->headers, s->hnum, sizeof(struct sigv4_header), cmp_header);
#if 0
	do {	/* debug dump of what we collected */
		int i;
		for (i= 0; i<s->hnum; i++)
			lwsl_debug("%s hdr %s %s\n", __func__,
					s->headers[i].name, s->headers[i].value);
		lwsl_debug("%s service: %s region: %s\n", __func__,
				s->service, s->region);
	} while(0);
#endif
	return 0;
}
/*
 * Render len bytes from in as 2 * len lowercase hex digits into out,
 * and NUL-terminate.  out must have room for (2 * len) + 1 chars.
 */
static void
bin2hex(uint8_t *in, size_t len, char *out)
{
	static const char digits[] = "0123456789abcdef";
	const uint8_t *p = in, *const e = in + len;

	while (p < e) {
		uint8_t b = *p++;

		*out++ = digits[b >> 4];
		*out++ = digits[b & 0xf];
	}
	*out = '\0';
}
/*
 * Compute HMAC-SHA256(key, txt) into digest (32 bytes) using the lws
 * generic-hmac wrapper.  Returns 0 on success, -1 on any failure (the
 * hmac context is destroyed on every path).
 */
static int
hmacsha256(const uint8_t *key, size_t keylen, const uint8_t *txt,
	    size_t txtlen, uint8_t *digest)
{
	struct lws_genhmac_ctx hmacctx;
	if (lws_genhmac_init(&hmacctx, LWS_GENHMAC_TYPE_SHA256,
			     key, keylen))
		return -1;
	if (lws_genhmac_update(&hmacctx, txt, txtlen)) {
		lwsl_err("%s: hmac computation failed\n", __func__);
		lws_genhmac_destroy(&hmacctx, NULL);
		return -1;
	}
	/* destroy with a result buffer finalizes into digest */
	if (lws_genhmac_destroy(&hmacctx, digest)) {
		lwsl_err("%s: problem destroying hmac\n", __func__);
		return -1;
	}
	return 0;
}
/*
 * Feed str into the hash, minus its final byte (used to drop the trailing
 * ':' from header names).  On error the hash ctx is destroyed and nonzero
 * is returned.
 */
/* cut the last byte of the str */
static inline int hash_update_bite_str(struct lws_genhash_ctx *ctx, const char * str)
{
	int ret = 0;
	if ((ret = lws_genhash_update(ctx, (void *)str, strlen(str)-1))) {
		lws_genhash_destroy(ctx, NULL);
		lwsl_err("%s err %d line \n", __func__, ret);
	}
	return ret;
}
/*
 * Feed the whole of str into the hash.  On error the hash ctx is destroyed
 * and nonzero is returned.
 */
static inline int hash_update_str(struct lws_genhash_ctx *ctx, const char * str)
{
	int ret = 0;
	if ((ret = lws_genhash_update(ctx, (void *)str, strlen(str)))) {
		lws_genhash_destroy(ctx, NULL);
		lwsl_err("%s err %d \n", __func__, ret);
	}
	return ret;
}
/*
 * Produce the sigv4 "string to sign" into buf:
 *
 *   1) hash the canonical request (method, uri, query, canonical headers,
 *      signed-header list, payload hash) with SHA256, then
 *   2) format "AWS4-HMAC-SHA256\n<timestamp>\n<scope>\n<hex hash>".
 *
 * Returns 0 on success, -1 on any hash failure.  The exact byte sequence
 * fed to the hash (including each '\n') is the AWS-specified canonical
 * form and must not be reordered.
 */
static int
build_sign_string(struct lws *wsi, char *buf, size_t bufsz,
		  struct lws_ss_handle *h, struct sigv4 *s)
{
	char hash[65], *end = &buf[bufsz - 1], *start;
	struct lws_genhash_ctx hash_ctx;
	uint8_t hash_bin[32];
	int i, ret = 0;
	start = buf;
	if ((ret = lws_genhash_init(&hash_ctx, LWS_GENHASH_TYPE_SHA256))) {
		lws_genhash_destroy(&hash_ctx, NULL);
		lwsl_err("%s genhash init err %d \n", __func__, ret);
		return -1;
	}
	/*
	 * hash canonical_request
	 */
	if (hash_update_str(&hash_ctx, h->policy->u.http.method) ||
	    hash_update_str(&hash_ctx, "\n"))
		return -1;
	if (hash_update_str(&hash_ctx, lws_hdr_simple_ptr(wsi, _WSI_TOKEN_CLIENT_URI)) ||
	    hash_update_str(&hash_ctx, "\n"))
		return -1;
	/* TODO, append query string */
	if (hash_update_str(&hash_ctx, "\n"))
		return -1;
	/* canonical headers: "<name>:<value>\n" pairs, already sorted */
	for (i = 0; i < s->hnum; i++) {
		if (hash_update_str(&hash_ctx, s->headers[i].name) ||
		    hash_update_str(&hash_ctx, s->headers[i].value) ||
		    hash_update_str(&hash_ctx, "\n"))
			return -1;
	}
	if (hash_update_str(&hash_ctx, "\n"))
		return -1;
	/* signed-header list: names minus trailing ':', ';'-separated */
	for (i = 0; i < s->hnum-1; i++) {
		if (hash_update_bite_str(&hash_ctx, s->headers[i].name) ||
		    hash_update_str(&hash_ctx, ";"))
			return -1;
	}
	if (hash_update_bite_str(&hash_ctx, s->headers[i].name) ||
	    hash_update_str(&hash_ctx, "\n") ||
	    hash_update_str(&hash_ctx, s->payload_hash))
		return -1;
	if ((ret = lws_genhash_destroy(&hash_ctx, hash_bin))) {
		lws_genhash_destroy(&hash_ctx, NULL);
		lwsl_err("%s lws_genhash error \n", __func__);
		return -1;
	}
	bin2hex(hash_bin, sizeof(hash_bin), hash);
	/*
	 * build sign string like the following
	 *
	 * "AWS4-HMAC-SHA256" + "\n" +
	 * timeStampISO8601Format + "\n" +
	 * date.Format(<YYYYMMDD>) + "/" + <region> + "/" + <service> + "/aws4_request" + "\n" +
	 * Hex(SHA256Hash(<CanonicalRequest>))
	 */
	buf = start;
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s\n",
			    "AWS4-HMAC-SHA256");
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s\n",
			    s->timestamp);
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s/%s/%s/%s\n",
			    s->ymd, s->region, s->service, "aws4_request");
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s", hash);
	*buf++ = '\0';
	assert(buf <= start + bufsz);
	return 0;
}
/*
* DateKey = HMAC-SHA256("AWS4"+"<SecretAccessKey>", "<YYYYMMDD>")
* DateRegionKey = HMAC-SHA256(<DateKey>, "<aws-region>")
* DateRegionServiceKey = HMAC-SHA256(<DateRegionKey>, "<aws-service>")
* SigningKey = HMAC-SHA256(<DateRegionServiceKey>, "aws4_request")
*/
/*
 * Derive the sigv4 signing key (32 bytes into sign_key) per the AWS chain:
 *
 *  DateKey		 = HMAC-SHA256("AWS4"+<SecretAccessKey>, <YYYYMMDD>)
 *  DateRegionKey	 = HMAC-SHA256(<DateKey>, <aws-region>)
 *  DateRegionServiceKey = HMAC-SHA256(<DateRegionKey>, <aws-service>)
 *  SigningKey		 = HMAC-SHA256(<DateRegionServiceKey>, "aws4_request")
 *
 * The secret key is fetched from the system blob slot the policy points at.
 * Returns 0 on success, -1 if the blob is missing or too big for the
 * on-stack buffer.
 */
static int
calc_signing_key(struct lws *wsi, struct lws_ss_handle *h,
		 struct sigv4 *s, uint8_t *sign_key)
{
	uint8_t key[128], date_key[32], and_region_key[32],
		and_service_key[32], *kb;
	lws_system_blob_t *ab;
	size_t keylen;
	int n;
	ab = lws_system_get_blob(wsi->a.context,
				 blob_idx[h->policy->auth->blob_index],
				 LWS_SS_SIGV4_KEY);
	if (!ab)
		return -1;
	/* the key material must be prefixed "AWS4" */
	kb = key;
	*kb++ = 'A';
	*kb++ = 'W';
	*kb++ = 'S';
	*kb++ = '4';
	keylen = sizeof(key) - 4;
	/* leave room for the NUL we append below */
	if (lws_system_blob_get_size(ab) > keylen - 1)
		return -1;
	n = lws_system_blob_get(ab, kb, &keylen, 0);
	if (n < 0)
		return -1;
	kb[keylen] = '\0';
	hmacsha256((const uint8_t *)key, strlen((const char *)key),
		   (const uint8_t *)s->ymd, strlen(s->ymd), date_key);
	hmacsha256(date_key, sizeof(date_key), (const uint8_t *)s->region,
		   strlen(s->region), and_region_key);
	hmacsha256(and_region_key, sizeof(and_region_key),
		   (const uint8_t *)s->service,
		   strlen(s->service), and_service_key);
	hmacsha256(and_service_key, sizeof(and_service_key),
		   (uint8_t *)"aws4_request",
		   strlen("aws4_request"), sign_key);
	return 0;
}
/* Sample auth string:
*
* 'Authorization: AWS4-HMAC-SHA256 Credential=AKIAVHWASOFE7TJ7ZUQY/20200731/us-west-2/s3/aws4_request,
* SignedHeaders=host;x-amz-content-sha256;x-amz-date, \
* Signature=ad9fb75ff3b46c7990e3e8f090abfdd6c01fd67761a517111694377e20698377'
*/
/* Sample auth string:
 *
 * 'Authorization: AWS4-HMAC-SHA256 Credential=AKIAVHWASOFE7TJ7ZUQY/20200731/us-west-2/s3/aws4_request,
 *  SignedHeaders=host;x-amz-content-sha256;x-amz-date, \
 *  Signature=ad9fb75ff3b46c7990e3e8f090abfdd6c01fd67761a517111694377e20698377'
 *
 * Format the Authorization header value into buf from the access key id
 * (system blob), the credential scope, the signed-header list and the
 * 32-byte binary signature.  Returns 0 on success, -1 if the keyid blob
 * is missing or unreadable.
 */
static int
build_auth_string(struct lws *wsi, char * buf, size_t bufsz,
		  struct lws_ss_handle *h, struct sigv4 *s,
		  uint8_t *signature_bin)
{
#if defined(_DEBUG)
	char *start = buf;
#endif
	char *end = &buf[bufsz - 1];
	char *c;
	lws_system_blob_t *ab;
	size_t keyidlen = 128; // max keyid len is 128
	int n;
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s",
			    "AWS4-HMAC-SHA256 ");
	ab = lws_system_get_blob(wsi->a.context,
				 blob_idx[h->policy->auth->blob_index],
				 LWS_SS_SIGV4_KEYID);
	if (!ab)
		return -1;
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s",
			    "Credential=");
	/* the access key id is copied raw into the output buffer */
	n = lws_system_blob_get(ab,(uint8_t *)buf, &keyidlen, 0);
	if (n < 0)
		return -1;
	buf += keyidlen;
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "/%s/%s/%s/%s, ",
			    s->ymd, s->region, s->service, "aws4_request");
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s",
			    "SignedHeaders=");
	for (n = 0; n < s->hnum; n++) {
		/* header names carry a trailing ':' we don't want here */
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
				    "%s",s->headers[n].name);
		buf--; /* remove ':' */
		*buf++ = ';';
	}
	c = buf - 1;
	*c = ','; /* overwrite ';' back to ',' */
	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
			    "%s", " Signature=");
	/* 32 binary bytes -> 64 hex chars + NUL */
	bin2hex(signature_bin, 32, buf);
#if defined(_DEBUG)
	assert(buf + 65 <= start + bufsz);
	lwsl_debug("%s %s\n", __func__, start);
#endif
	return 0;
}
/*
 * Compute and append the sigv4 Authorization header for the onward HTTP
 * request on wsi, advancing *p toward end.  Collects the headers to sign,
 * builds the string-to-sign, derives the signing key, signs, and formats
 * the final header.  Returns 0 on success, -1 on any failure.  buf is
 * reused: first for the sign string, then for the auth string.
 */
int
lws_ss_apply_sigv4(struct lws *wsi, struct lws_ss_handle *h,
		   unsigned char **p, unsigned char *end)
{
	uint8_t buf[512], sign_key[32], signature_bin[32], *bp;
	struct sigv4 s;
	memset(&s, 0, sizeof(s));
	bp = buf;
	init_sigv4(wsi, h, &s);
	/* both come from headers added in init_sigv4(); refuse without them */
	if (!s.timestamp || !s.payload_hash) {
		lwsl_err("%s missing headers\n", __func__);
		return -1;
	}
	if (build_sign_string(wsi, (char *)bp, sizeof(buf), h, &s))
		return -1;
	if (calc_signing_key(wsi, h, &s, sign_key))
		return -1;
	/* Signature = HMAC-SHA256(SigningKey, StringToSign) */
	hmacsha256(sign_key, sizeof(sign_key), (const uint8_t *)buf,
		   strlen((const char *)buf), signature_bin);
	bp = buf; /* reuse for auth_str */
	if (build_auth_string(wsi, (char *)bp, sizeof(buf), h, &s,
			      signature_bin))
		return -1;
	if (lws_add_http_header_by_name(wsi,
					(const uint8_t *)"Authorization:", buf,
					(int)strlen((const char*)buf), p, end))
		return -1;
	return 0;
}
/*
 * Store an AWS access key id / secret key pair into the system blob group
 * selected by idx (0 .. LWS_ARRAY_SIZE(blob_idx) - 1).  Any previous
 * contents of the two slots are discarded first.  Returns 0 on success,
 * -1 on bad idx, missing blob, or allocation failure.
 */
int
lws_ss_sigv4_set_aws_key(struct lws_context* context, uint8_t idx,
		         const char * keyid, const char * key)
{
	const char * s[] = { keyid, key };
	lws_system_blob_t *ab;
	int i;
	/*
	 * valid indexes are 0 .. LWS_ARRAY_SIZE(blob_idx) - 1; the previous
	 * '>' test let idx == LWS_ARRAY_SIZE(blob_idx) read past the end of
	 * blob_idx[]
	 */
	if (idx >= LWS_ARRAY_SIZE(blob_idx))
		return -1;
	for (i = 0; i < LWS_SS_SIGV4_BLOB_SLOTS; i++) {
		ab = lws_system_get_blob(context, blob_idx[idx], i);
		if (!ab)
			return -1;
		/* replace, don't append to, any existing credential */
		lws_system_blob_heap_empty(ab);
		if (lws_system_blob_heap_append(ab, (const uint8_t *)s[i],
						strlen(s[i]))) {
			lwsl_err("%s: can't store %d \n", __func__, i);
			return -1;
		}
	}
	return 0;
}
#if defined(__linux__) || defined(__APPLE__) || defined(WIN32) || \
defined(__FreeBSD__) || defined(__NetBSD__) || defined(__ANDROID__) || \
defined(__sun) || defined(__OpenBSD__) || defined(__NuttX__)
/* ie, if we have filesystem ops */
/*
 * Parse an AWS credentials file at path (leading '~' expanded via $HOME),
 * looking for lines "<ak>=<value>" and "<kid>=<value>".  On success both
 * *aws_keyid and *aws_key are heap-allocated copies (ownership passes to
 * the caller, who must free() them) and 0 is returned.  On any failure
 * both are freed/NULLed and -1 is returned.  Only the first match of each
 * key is taken; values are trimmed of leading spaces and trailing
 * space/CR/LF.
 */
int
lws_aws_filesystem_credentials_helper(const char *path, const char *kid,
				      const char *ak, char **aws_keyid,
				      char **aws_key)
{
	char *str = NULL, *val = NULL, *line = NULL, sth[128];
	size_t len = sizeof(sth);
	const char *home = "";
	int i, poff = 0;
	ssize_t rd;
	FILE *fp;
	*aws_keyid = *aws_key = NULL;
	if (path[0] == '~') {
		home = getenv("HOME");
		if (home && strlen(home) > sizeof(sth) - 1) /* coverity */
			return -1;
		else {
			if (!home)
				home = "";
			/* skip the '~' when gluing the path together */
			poff = 1;
		}
	}
	lws_snprintf(sth, sizeof(sth), "%s%s", home, path + poff);
	fp = fopen(sth, "r");
	if (!fp) {
		lwsl_err("%s can't open '%s'\n", __func__, sth);
		return -1;
	}
	/* getline() allocates / grows the line buffer as needed */
	while ((rd = getline(&line, &len, fp)) != -1) {
		/* i == 0 -> secret key (ak), i == 1 -> key id (kid) */
		for (i = 0; i < 2; i++) {
			size_t slen;
			if (strncmp(line, i ? kid : ak, strlen(i ? kid : ak)))
				continue;
			str = strchr(line, '=');
			if (!str)
				continue;
			str++;
			/* only read the first key for each */
			if (*(i ? aws_keyid : aws_key))
				continue;
			/*
			 * Trim whitespace from the start and end
			 */
			slen = (size_t)(rd - lws_ptr_diff(str, line));
			while (slen && *str == ' ') {
				str++;
				slen--;
			}
			while (slen && (str[slen - 1] == '\r' ||
					str[slen - 1] == '\n' ||
					str[slen - 1] == ' '))
				slen--;
			val = malloc(slen + 1);
			if (!val)
				goto bail;
			strncpy(val, str, slen);
			val[slen] = '\0';
			*(i ? aws_keyid : aws_key) = val;
		}
	}
bail:
	fclose(fp);
	if (line)
		free(line);
	/* require both to have been found, else clean up and fail */
	if (!*aws_keyid || !*aws_key) {
		if (*aws_keyid) {
			free(*aws_keyid);
			*aws_keyid = NULL;
		}
		if (*aws_key) {
			free(*aws_key);
			*aws_key = NULL;
		}
		lwsl_err("%s can't find aws credentials! \
				please check %s\n", __func__, path);
		return -1;
	}
	lwsl_info("%s: '%s' '%s'\n", __func__, *aws_keyid, *aws_key);
	return 0;
}
#endif

View File

@ -0,0 +1,98 @@
/*
* Captive portal detect for Secure Streams
*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2019 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
/* per-stream state for the captive portal detect Secure Stream */
typedef struct ss_cpd {
	struct lws_ss_handle 	*ss;
	void			*opaque_data;	/* the lws_context */
	/* ... application specific state ... */
	lws_sorted_usec_list_t	sul;
} ss_cpd_t;
/*
 * State callback for the captive portal detect stream: a successful remote
 * ACK means we have unfiltered internet; timeout / retries-exhausted /
 * disconnect means we don't.  Either way the stream destroys itself after
 * reporting the first result.
 */
static lws_ss_state_return_t
ss_cpd_state(void *userobj, void *sh, lws_ss_constate_t state,
	     lws_ss_tx_ordinal_t ack)
{
	ss_cpd_t *m = (ss_cpd_t *)userobj;
	struct lws_context *cx = (struct lws_context *)m->opaque_data;
	lwsl_ss_info(m->ss, "%s, ord 0x%x\n", lws_ss_state_name((int)state),
		     (unsigned int)ack);
	switch (state) {
	case LWSSSCS_CREATING:
		/* give the probe a 3s overall budget */
		lws_ss_start_timeout(m->ss, 3 * LWS_US_PER_SEC);
		return lws_ss_request_tx(m->ss);
	case LWSSSCS_QOS_ACK_REMOTE:
		lws_system_cpd_set(cx, LWS_CPD_INTERNET_OK);
		cx->ss_cpd = NULL;
		return LWSSSSRET_DESTROY_ME;
	case LWSSSCS_TIMEOUT:
	case LWSSSCS_ALL_RETRIES_FAILED:
	case LWSSSCS_DISCONNECTED:
		/*
		 * First result reported sticks... if nothing else, this will
		 * cover the situation we didn't connect to anything
		 */
		lws_system_cpd_set(cx, LWS_CPD_NO_INTERNET);
		cx->ss_cpd = NULL;
		return LWSSSSRET_DESTROY_ME;
	default:
		break;
	}
	return LWSSSSRET_OK;
}
/* stream description for the "captive_portal_detect" streamtype */
static const lws_ss_info_t ssi_cpd = {
	.handle_offset		= offsetof(ss_cpd_t, ss),
	.opaque_user_data_offset = offsetof(ss_cpd_t, opaque_data),
	.state			= ss_cpd_state,
	.user_alloc		= sizeof(ss_cpd_t),
	.streamtype		= "captive_portal_detect",
};
/*
 * Kick off a captive portal detection attempt.  At most one probe stream
 * exists per context; returns 0 if started (or already running), 1 if the
 * stream could not be created.
 */
int
lws_ss_sys_cpd(struct lws_context *cx)
{
	int failed;

	if (cx->ss_cpd) {
		lwsl_cx_notice(cx, "CPD already ongoing");
		return 0;
	}

	failed = lws_ss_create(cx, 0, &ssi_cpd, cx, &cx->ss_cpd,
			       NULL, NULL) != 0;
	if (failed)
		lwsl_cx_info(cx, "Create stream failed (policy?)");

	return failed ? 1 : 0;
}

View File

@ -0,0 +1,535 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* The protocol part of dhcp4 client
*/
#include "private-lib-core.h"
#include "private-lib-system-dhcpclient.h"
#define LDHC_OP_BOOTREQUEST 1
#define LDHC_OP_BOOTREPLY 2
/*
* IPv4... max total 576
*
* 0 1 2 3
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | op (1) | htype (1) | hlen (1) | hops (1) |
* +---------------+---------------+---------------+---------------+
* | +04 xid (4) |
* +-------------------------------+-------------------------------+
* | +08 secs (2) | +0a flags (2) |
* +-------------------------------+-------------------------------+
* | +0C ciaddr (4) client IP |
* +---------------------------------------------------------------+
* | +10 yiaddr (4) your IP |
* +---------------------------------------------------------------+
* | +14 siaddr (4) server IP |
* +---------------------------------------------------------------+
* | +18 giaddr (4) gateway IP |
* +---------------------------------------------------------------+
* | |
* | +1C chaddr (16) client HWADDR |
* +---------------------------------------------------------------+
* | |
* | +2C sname (64) |
* +---------------------------------------------------------------+
* | |
* | +6C file (128) |
* +---------------------------------------------------------------+
* | |
* | +EC options (variable) |
* +---------------------------------------------------------------+
*/
/*
 * Template IPv4 + UDP header for the broadcast DISCOVER/REQUEST: ttl 0x2e,
 * src 0.0.0.0 -> dst 255.255.255.255, udp sport 68 -> dport 67; length and
 * checksum fields left 0 to be filled when the packet is sent.
 */
static const uint8_t rawdisc4[] = {
	0x45, 0x00, 0, 0, 0, 0, 0x40, 0, 0x2e, IPPROTO_UDP,
	0, 0, 0, 0,		0, 0, 0, 0,	0xff, 0xff, 0xff, 0xff,
	0, 68, 0, 67, 0, 0, 0, 0
};
/* connection retry backoff, in ms */
static const uint32_t botable2[] = { 1500, 1750, 5000 /* in case dog slow */ };
static const lws_retry_bo_t bo2 = {
	botable2, LWS_ARRAY_SIZE(botable2), LWS_RETRY_CONCEAL_ALWAYS, 0, 0, 20 };
/*
 * Fill start (bufsiz bytes, zeroed first) with a BOOTP/DHCP payload for
 * message type op (LWSDHC4PDISCOVER or LWSDHC4PREQUEST), using the request
 * state in r.  Returns the payload length in bytes, or -1 if the interface
 * MAC cannot be resolved.  Offsets below refer to the RFC 2131 fixed BOOTP
 * layout documented above.
 */
static int
lws_dhcpc4_prep(uint8_t *start, unsigned int bufsiz, lws_dhcpc_req_t *r, int op)
{
	uint8_t *p = start;
	memset(start, 0, bufsiz);
	*p++ = 1;		/* op: BOOTREQUEST */
	*p++ = 1;		/* htype: ethernet */
	*p++ = 6; /* sizeof ethernet MAC */
	/* p is at +03 (hops, left 0); xid lives at +04 */
	memcpy(p + 1, r->xid, 4);
	//	p[7] = 0x80; /* broadcast flag */
	p += 0x1c - 3;		/* advance to chaddr at +1C */
	if (lws_plat_ifname_to_hwaddr(r->wsi_raw->desc.sockfd,
				      (const char *)&r[1], r->is.mac, 6) < 0)
		return -1;
	memcpy(p, r->is.mac, 6);
	/* skip rest of chaddr + sname + file, to options at +EC */
	p += 16 + 64 + 128;
	*p++ = 0x63;		/* RFC2132 Magic Cookie indicates start of options */
	*p++ = 0x82;
	*p++ = 0x53;
	*p++ = 0x63;
	*p++ = LWSDHC4POPT_MESSAGE_TYPE;
	*p++ = 1;	/* length */
	*p++ = (uint8_t)op;
	switch (op) {
	case LWSDHC4PDISCOVER:
		/* ask the server for the config items we care about */
		*p++ = LWSDHC4POPT_PARAM_REQ_LIST;
		*p++ = 4;	/* length */
		*p++ = LWSDHC4POPT_SUBNET_MASK;
		*p++ = LWSDHC4POPT_ROUTER;
		*p++ = LWSDHC4POPT_DNSERVER;
		*p++ = LWSDHC4POPT_DOMAIN_NAME;
		break;
	case LWSDHC4PREQUEST:
		/* only meaningful if the OFFER gave us an IPv4 address */
		if (r->is.sa46[LWSDH_SA46_IP].sa4.sin_family != AF_INET)
			break;
		*p++ = LWSDHC4POPT_REQUESTED_ADS;
		*p++ = 4;	/* length */
		lws_ser_wu32be(p, r->is.sa46[LWSDH_SA46_IP].sa4.sin_addr.s_addr);
		p += 4;
		*p++ = LWSDHC4POPT_SERVER_ID;
		*p++ = 4;	/* length */
		lws_ser_wu32be(p, r->is.sa46[LWSDH_SA46_DHCP_SERVER].sa4.sin_addr.s_addr);
		p += 4;
		break;
	}
	*p++ = LWSDHC4POPT_END_OPTIONS;
	return lws_ptr_diff(p, start);
}
/*
 * Protocol callback for the raw broadcast DHCPv4 socket.
 *
 * WRITEABLE sends DISCOVER (LDHC_INIT) or REQUEST (LDHC_REQUESTING) and
 * arms a rewrite sul in case the UDP is lost; RX parses the server reply
 * and, once acceptable, applies the configuration and closes the broadcast
 * wsi; CLOSE reschedules the whole connection attempt unless we are BOUND.
 */
static int
callback_dhcpc4(struct lws *wsi, enum lws_callback_reasons reason, void *user,
		void *in, size_t len)
{
	lws_dhcpc_req_t *r = (lws_dhcpc_req_t *)user;
	uint8_t pkt[LWS_PRE + 576], *p = pkt + LWS_PRE;
	int n, m;
	switch (reason) {
	case LWS_CALLBACK_RAW_ADOPT:
		lwsl_debug("%s: LWS_CALLBACK_RAW_ADOPT\n", __func__);
		lws_callback_on_writable(wsi);
		break;
	case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
		lwsl_err("%s: udp conn failed\n", __func__);
		/* fallthru */
	case LWS_CALLBACK_RAW_CLOSE:
		lwsl_debug("%s: LWS_CALLBACK_RAW_CLOSE\n", __func__);
		if (!r)
			break;
		r->wsi_raw = NULL;
		lws_sul_cancel(&r->sul_write);
		if (r->state != LDHC_BOUND) {
			/* negotiation didn't complete... retry from scratch */
			r->state = LDHC_INIT;
			lws_retry_sul_schedule(r->context, 0, &r->sul_conn,
					       &bo2, lws_dhcpc4_retry_conn,
					       &r->retry_count_conn);
		}
		break;
	case LWS_CALLBACK_RAW_RX:
		if (lws_dhcpc4_parse(r, in, len))
			break;
		/*
		 * that's it... commit to the configuration
		 */
		/* set up our network interface as offered */
		if (lws_plat_ifconfig(r->wsi_raw->desc.sockfd, &r->is))
			/*
			 * Problem setting the IP... maybe something
			 * transient like racing with NetworkManager?
			 * Since the sul retries are still around it
			 * will retry
			 */
			return -1;
		/* clear timeouts related to the broadcast socket */
		lws_sul_cancel(&r->sul_write);
		lws_sul_cancel(&r->sul_conn);
		lwsl_notice("%s: DHCP configured %s\n", __func__,
			    (const char *)&r[1]);
		r->state = LDHC_BOUND;
		lws_state_transition_steps(&wsi->a.context->mgr_system,
					   LWS_SYSTATE_OPERATIONAL);
		/* let the requester know we have an address now */
		r->cb(r->opaque, &r->is);
		r->wsi_raw = NULL;
		return -1; /* close the broadcast wsi */
	case LWS_CALLBACK_RAW_WRITEABLE:
		if (!r)
			break;
		/*
		 * UDP is not reliable, it can be locally dropped, or dropped
		 * by any intermediary or the remote peer.  So even though we
		 * will do the write in a moment, we schedule another request
		 * for rewrite according to the wsi retry policy.
		 *
		 * If the result came before, we'll cancel it in the close flow.
		 *
		 * If we have already reached the end of our concealed retries
		 * in the policy, just close without another write.
		 */
		if (lws_dll2_is_detached(&r->sul_write.list) &&
		    lws_retry_sul_schedule_retry_wsi(wsi, &r->sul_write,
						     lws_dhcpc_retry_write,
						     &r->retry_count_write)) {
			/* we have reached the end of our concealed retries */
			lwsl_warn("%s: concealed retries done, failing\n",
				  __func__);
			goto retry_conn;
		}
		switch (r->state) {
		case LDHC_INIT:
			n = LWSDHC4PDISCOVER;
			goto bcast;
		case LDHC_REQUESTING:
			n = LWSDHC4PREQUEST;
			/* fallthru */
bcast:
			/* +28 leaves room for the raw IP + UDP header */
			n = lws_dhcpc4_prep(p + 28, (unsigned int)
						(sizeof(pkt) - LWS_PRE - 28), r, n);
			if (n < 0) {
				lwsl_err("%s: failed to prep\n", __func__);
				break;
			}
			m = lws_plat_rawudp_broadcast(p, rawdisc4,
						      LWS_ARRAY_SIZE(rawdisc4),
						      (size_t)(n + 28),
						      r->wsi_raw->desc.sockfd,
						      (const char *)&r[1]);
			if (m < 0)
				lwsl_err("%s: Failed to write dhcp client req: "
					 "%d %d, errno %d\n", __func__,
					 n, m, LWS_ERRNO);
			break;
		default:
			break;
		}
		return 0;
retry_conn:
		lws_retry_sul_schedule(wsi->a.context, 0, &r->sul_conn, &bo2,
				       lws_dhcpc4_retry_conn,
				       &r->retry_count_conn);
		return -1;
	default:
		break;
	}
	return 0;
}
/* protocol binding for the dhcp4 client raw socket (rx buffer 128) */
struct lws_protocols lws_system_protocol_dhcpc4 =
	{ "lws-dhcp4client", callback_dhcpc4, 0, 128, 0, NULL, 0 };
/*
 * sul callback: (re)create the broadcast UDP socket used to talk to the
 * DHCP server for this request.  No-op if a socket already exists or a
 * retry is still pending; on socket creation failure, reschedules itself
 * per the backoff policy.
 */
void
lws_dhcpc4_retry_conn(struct lws_sorted_usec_list *sul)
{
	lws_dhcpc_req_t *r = lws_container_of(sul, lws_dhcpc_req_t, sul_conn);
	if (r->wsi_raw || !lws_dll2_is_detached(&r->sul_conn.list))
		return;
	/* create the UDP socket aimed at the server */
	r->retry_count_write = 0;
	r->wsi_raw = lws_create_adopt_udp(r->context->vhost_system, "0.0.0.0",
					  68, LWS_CAUDP_PF_PACKET |
					      LWS_CAUDP_BROADCAST,
					  "lws-dhcp4client", (const char *)&r[1],
					  NULL, NULL, &bo2, "dhcpc");
	lwsl_debug("%s: created wsi_raw: %s\n", __func__, lws_wsi_tag(r->wsi_raw));
	if (!r->wsi_raw) {
		lwsl_err("%s: unable to create udp skt\n", __func__);
		lws_retry_sul_schedule(r->context, 0, &r->sul_conn, &bo2,
				       lws_dhcpc4_retry_conn,
				       &r->retry_count_conn);
		return;
	}
	/* force the network if up */
	lws_plat_if_up((const char *)&r[1], r->wsi_raw->desc.sockfd, 0);
	lws_plat_if_up((const char *)&r[1], r->wsi_raw->desc.sockfd, 1);
	r->wsi_raw->user_space = r;
	r->wsi_raw->user_space_externally_allocated = 1;
	/* fresh transaction id for this negotiation */
	lws_get_random(r->wsi_raw->a.context, r->xid, 4);
}
/*
 * Store the 4 wire bytes at p as an AF_INET address in sa46 slot "which".
 * lws_ser_ru32be() converts the big-endian wire bytes to host order, then
 * ntohl() swaps again — net effect is s_addr holds the raw wire (network)
 * byte order, as sin_addr expects.  NOTE(review): on big-endian hosts both
 * steps are no-ops; confirm intent matches lws_ser_ru32be semantics.
 */
static void
lws_sa46_set_ipv4(lws_dhcpc_req_t *r, unsigned int which, uint8_t *p)
{
	r->is.sa46[which].sa4.sin_family = AF_INET;
	r->is.sa46[which].sa4.sin_addr.s_addr = ntohl(lws_ser_ru32be(p));
}
/*
 * Validate and parse a received DHCPv4 OFFER / ACK in "in" (len bytes,
 * including the 28-byte IP + UDP header we skip).  Returns 0 when a
 * complete, internally-consistent configuration has been accepted into
 * r->is (caller then applies it), 1 otherwise (ignore the packet).
 * Every field is treated as untrusted: xid, mac, magic cookie, option
 * lengths and overall plausibility are all checked before use.
 */
int
lws_dhcpc4_parse(lws_dhcpc_req_t *r, void *in, size_t len)
{
	uint8_t pkt[LWS_PRE + 576], *p = pkt + LWS_PRE, *end;
	int n, m;
	switch (r->state) {
	case LDHC_INIT:		/* expect DHCPOFFER */
	case LDHC_REQUESTING:	/* expect DHCPACK */
		/*
		 * We should check carefully if we like what we were
		 * sent... anything can spam us with crafted replies
		 */
		if (len < 0x100)
			break;
		p = (uint8_t *)in + 28; /* skip to UDP payload */
		/* op BOOTREPLY, htype ethernet, hlen 6 */
		if (p[0] != 2 || p[1] != 1 || p[2] != 6)
			break;
		if (memcmp(&p[4], r->xid, 4))	/* must be our xid */
			break;
		if (memcmp(&p[0x1c], r->is.mac, 6)) /* our netif mac? */
			break;
		/* the DHCP magic cookie must be in place */
		if (lws_ser_ru32be(&p[0xec]) != 0x63825363)
			break;
		/* "your" client IP address */
		lws_sa46_set_ipv4(r, LWSDH_SA46_IP, p + 0x10);
		/* IP of next server used in bootstrap */
		lws_sa46_set_ipv4(r, LWSDH_SA46_DHCP_SERVER, p + 0x14);
		/* it looks legit so far... look at the options */
		end = (uint8_t *)in + len;
		p += 0xec + 4;
		/* walk the TLV options, bounds-checking each length */
		while (p < end) {
			uint8_t c = *p++;
			uint8_t l = 0;
			if (c && c != 0xff) {
				/* pad 0 and EOT 0xff have no length */
				l = *p++;
				if (!l) {
					lwsl_err("%s: zero length\n",
							__func__);
					goto broken;
				}
				if (p + l > end) {
					/* ...nice try... */
					lwsl_err("%s: bad len\n",
						 __func__);
					goto broken;
				}
			}
			if (c == 0xff) /* end of options */
				break;
			m = 0;
			switch (c) {
			case LWSDHC4POPT_SUBNET_MASK:
				n = LWSDH_IPV4_SUBNET_MASK;
				goto get_ipv4;
			case LWSDHC4POPT_ROUTER:
				lws_sa46_set_ipv4(r, LWSDH_SA46_IPV4_ROUTER, p);
				break;
			case LWSDHC4POPT_TIME_SERVER:
				lws_sa46_set_ipv4(r, LWSDH_SA46_NTP_SERVER, p);
				break;
			case LWSDHC4POPT_BROADCAST_ADS:
				n = LWSDH_IPV4_BROADCAST;
				goto get_ipv4;
			case LWSDHC4POPT_LEASE_TIME:
				n = LWSDH_LEASE_SECS;
				goto get_ipv4;
			case LWSDHC4POPT_RENEWAL_TIME: /* AKA T1 */
				n = LWSDH_RENEWAL_SECS;
				goto get_ipv4;
			case LWSDHC4POPT_REBINDING_TIME: /* AKA T2 */
				n = LWSDH_REBINDING_SECS;
				goto get_ipv4;
			case LWSDHC4POPT_DNSERVER:
				/* length must be a whole number of IPv4s */
				if (l & 3)
					break;
				m = LWSDH_SA46_DNS_SRV_1;
				/* take up to 4 nameservers */
				while (l && m - LWSDH_SA46_DNS_SRV_1 < 4) {
					lws_sa46_set_ipv4(r, (unsigned int)m, p);
					lws_async_dns_server_add(r->context,
								 &r->is.sa46[m]);
					l = (uint8_t)(l - 4);
					p += 4;
					m++;
				}
				break;
			case LWSDHC4POPT_DOMAIN_NAME:
				m = l;
				if (m > (int)sizeof(r->is.domain) - 1)
					m = sizeof(r->is.domain) - 1;
				lws_strnncpy(r->is.domain, (const char *)p,
					     (unsigned int)m, sizeof(r->is.domain));
				break;
			case LWSDHC4POPT_MESSAGE_TYPE:
				/*
				 * Confirm this is the right message
				 * for the state of the negotiation
				 */
				if (r->state == LDHC_INIT && *p != LWSDHC4POFFER)
					goto broken;
				if (r->state == LDHC_REQUESTING &&
				    *p != LWSDHC4PACK)
					goto broken;
				break;
			default:
				break;
			}
			p += l;
			continue;
get_ipv4:
			/* store a 32-bit option value in host order */
			if (l >= 4)
				r->is.nums[n] = ntohl(lws_ser_ru32be(p));
			p += l;
			continue;
broken:
			/* discard anything we took from a bad packet */
			memset(r->is.sa46, 0, sizeof(r->is.sa46));
			break;
		}
#if defined(_DEBUG)
		/* dump what we have parsed out */
		for (n = 0; n < (int)_LWSDH_NUMS_COUNT; n++) {
			m = (int)ntohl(r->is.nums[n]);
			lwsl_info("%s: %d: 0x%x\n", __func__, n, m);
		}
		for (n = 0; n < (int)_LWSDH_SA46_COUNT; n++) {
			lws_sa46_write_numeric_address(&r->is.sa46[n],
						       (char *)pkt, 48);
			lwsl_info("%s: %d: %s\n", __func__, n, pkt);
		}
#endif
		/*
		 * Having seen everything in there... do we really feel
		 * we could use it?  Everything critical is there?
		 */
		if (!r->is.sa46[LWSDH_SA46_IP].sa4.sin_family ||
		    !r->is.sa46[LWSDH_SA46_DHCP_SERVER].sa4.sin_family ||
		    !r->is.sa46[LWSDH_SA46_IPV4_ROUTER].sa4.sin_family ||
		    !r->is.nums[LWSDH_IPV4_SUBNET_MASK] ||
		    !r->is.nums[LWSDH_LEASE_SECS] ||
		    !r->is.sa46[LWSDH_SA46_DNS_SRV_1].sa4.sin_family) {
			lwsl_notice("%s: rejecting on incomplete\n", __func__);
			memset(r->is.sa46, 0, sizeof(r->is.sa46));
			break;
		}
		/*
		 * Network layout has to be internally consistent...
		 * DHCP server has to be reachable by broadcast and
		 * default route has to be on same subnet
		 */
		if ((r->is.sa46[LWSDH_SA46_IP].sa4.sin_addr.s_addr &
		     r->is.nums[LWSDH_IPV4_SUBNET_MASK]) !=
		    (r->is.sa46[LWSDH_SA46_DHCP_SERVER].sa4.sin_addr.s_addr &
		     r->is.nums[LWSDH_IPV4_SUBNET_MASK])) {
			lwsl_notice("%s: rejecting on srv %x reachable on mask %x\n",
				    __func__, r->is.sa46[LWSDH_SA46_IP].sa4.sin_addr.s_addr,
				    r->is.nums[LWSDH_IPV4_SUBNET_MASK]);
			break;
		}
		if (r->state == LDHC_INIT) {
			/* OFFER accepted... move on to REQUEST it */
			lwsl_info("%s: moving to REQ\n", __func__);
			r->state = LDHC_REQUESTING;
			lws_callback_on_writable(r->wsi_raw);
			//break;
		}
		return 0;
	default:
		break;
	}
	return 1;
}

View File

@ -0,0 +1,156 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#include "private-lib-system-dhcpclient.h"
/* sul callback: ask for another WRITEABLE so the DHCP request is re-sent */
void
lws_dhcpc_retry_write(struct lws_sorted_usec_list *sul)
{
	lws_dhcpc_req_t *r = lws_container_of(sul, lws_dhcpc_req_t, sul_write);
	lwsl_debug("%s\n", __func__);
	if (r && r->wsi_raw)
		lws_callback_on_writable(r->wsi_raw);
}
/*
 * Tear down one dhcp client request object: stop all its timers, trigger
 * an async close of its raw socket wsi if any, unlink it and free it.
 * *pr itself is not NULLed for the caller.
 */
static void
lws_dhcpc_destroy(lws_dhcpc_req_t **pr)
{
	lws_dhcpc_req_t *req = *pr;

	/* no scheduled work of any kind may survive us */
	lws_sul_cancel(&req->sul_conn);
	lws_sul_cancel(&req->sul_write);
	lws_sul_cancel(&req->sul_renew);

	if (req->wsi_raw)
		/* ask the event loop to close the raw wsi asynchronously */
		lws_set_timeout(req->wsi_raw, 1, LWS_TO_KILL_ASYNC);

	lws_dll2_remove(&req->list);

	lws_free_set_NULL(req);
}
/*
 * Return 1 if any dhcp client instance on the context has reached BOUND
 * state, optionally copying out its first DNS server sockaddr into *sa46;
 * return 0 if nothing is bound yet.
 */
int
lws_dhcpc_status(struct lws_context *context, lws_sockaddr46 *sa46)
{
	lws_dhcpc_req_t *req;

	lws_start_foreach_dll(struct lws_dll2 *, d, context->dhcpc_owner.head) {
		req = (lws_dhcpc_req_t *)d;

		if (req->state == LDHC_BOUND) {
			if (sa46)
				memcpy(sa46, &req->is.sa46[LWSDH_SA46_DNS_SRV_1],
				       sizeof(*sa46));

			return 1;
		}
	} lws_end_foreach_dll(d);

	return 0;
}
/*
 * Find an existing dhcp client request matching both the interface name
 * and the address family, or NULL if we don't have one yet.
 */
static lws_dhcpc_req_t *
lws_dhcpc_find(struct lws_context *context, const char *iface, int af)
{
	lws_start_foreach_dll(struct lws_dll2 *, d, context->dhcpc_owner.head) {
		lws_dhcpc_req_t *c = (lws_dhcpc_req_t *)d;

		/* the iface name is overallocated directly after the struct */
		if (c->af == af && !strcmp((const char *)&c[1], iface))
			return c; /* already being looked after */
	} lws_end_foreach_dll(d);

	return NULL;
}
/*
* Create a persistent dhcp client entry for network interface "iface" and AF
* type "af"
*/
/*
 * Create a persistent dhcp client for network interface "iface" / address
 * family "af", unless one already exists.  cb / opaque are handed through
 * to the completion callback.  Returns 0 on success or already-exists,
 * 1 on OOM.
 */
int
lws_dhcpc_request(struct lws_context *context, const char *iface, int af,
		  dhcpc_cb_t cb, void *opaque)
{
	size_t nl = strlen(iface);
	lws_dhcpc_req_t *req;

	/* nothing to do if this af / iface combination is already handled */
	if (lws_dhcpc_find(context, iface, af))
		return 0;

	/* the interface name is overallocated just after the struct */
	req = lws_zalloc(sizeof(*req) + nl + 1u, __func__);
	if (!req)
		return 1;

	memcpy(&req[1], iface, nl + 1);

	req->af = (uint8_t)af;
	req->cb = cb;
	req->opaque = opaque;
	req->context = context;
	req->state = LDHC_INIT;

	lws_strncpy(req->is.ifname, iface, sizeof(req->is.ifname));

	/* track him on the context's list of dhcp clients */
	lws_dll2_add_head(&req->list, &context->dhcpc_owner);

	/* kick off the first connection attempt straight away */
	lws_dhcpc4_retry_conn(&req->sul_conn);

	return 0;
}
/*
* Destroy every DHCP client object related to interface "iface"
*/
/*
 * dll2 foreach callback: destroy this dhcp client when opaque is NULL
 * (destroy everything) or matches the client's interface name.
 */
static int
_remove_if(struct lws_dll2 *d, void *opaque)
{
	lws_dhcpc_req_t *req = lws_container_of(d, lws_dhcpc_req_t, list);
	const char *iface = (const char *)opaque;

	if (!iface || !strcmp((const char *)&req[1], iface))
		lws_dhcpc_destroy(&req);

	return 0;
}
/*
 * Destroy every DHCP client object bound to interface "iface" (or all of
 * them, if iface is NULL).  Always returns 0.
 */
int
lws_dhcpc_remove(struct lws_context *context, const char *iface)
{
	lws_dll2_foreach_safe(&context->dhcpc_owner, (void *)iface,
			      _remove_if);

	return 0;
}

View File

@ -0,0 +1,112 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* DHCPv4 client state machine states (cf. the RFC 2131 client states) */
typedef enum {
	LDHC_INIT_REBOOT,
	LDHC_REBOOTING,		/* jitterwait */
	LDHC_INIT,		/* issue DHCPDISCOVER */
	LDHC_SELECTING,
	LDHC_REQUESTING,
	LDHC_REBINDING,
	LDHC_BOUND,
	LDHC_RENEWING
} lws_dhcpc_state_t;
/*
 * DHCPv4 wire constants, all in one enum: first the message types
 * (DISCOVER..RELEASE, values 1..7, RFC 2131), then the option codes
 * (RFC 2132) -- note the value sequence deliberately restarts at
 * LWSDHC4POPT_PAD = 0
 */
enum {
	LWSDHC4PDISCOVER = 1,
	LWSDHC4POFFER,
	LWSDHC4PREQUEST,
	LWSDHC4PDECLINE,
	LWSDHC4PACK,
	LWSDHC4PNACK,
	LWSDHC4PRELEASE,

	/* DHCP option codes follow */
	LWSDHC4POPT_PAD = 0,
	LWSDHC4POPT_SUBNET_MASK = 1,
	LWSDHC4POPT_TIME_OFFSET = 2,
	LWSDHC4POPT_ROUTER = 3,
	LWSDHC4POPT_TIME_SERVER = 4,
	LWSDHC4POPT_NAME_SERVER = 5,
	LWSDHC4POPT_DNSERVER = 6,
	LWSDHC4POPT_LOG_SERVER = 7,
	LWSDHC4POPT_COOKIE_SERVER = 8,
	LWSDHC4POPT_LPR_SERVER = 9,
	LWSDHC4POPT_IMPRESS_SERVER = 10,
	LWSDHC4POPT_RESLOC_SERVER = 11,
	LWSDHC4POPT_HOST_NAME = 12,
	LWSDHC4POPT_BOOTFILE_SIZE = 13,
	LWSDHC4POPT_MERIT_DUMP_FILE = 14,
	LWSDHC4POPT_DOMAIN_NAME = 15,
	LWSDHC4POPT_SWAP_SERVER = 16,
	LWSDHC4POPT_ROOT_PATH = 17,
	LWSDHC4POPT_EXTENSIONS_PATH = 18,
	LWSDHC4POPT_BROADCAST_ADS = 28,
	LWSDHC4POPT_REQUESTED_ADS = 50,
	LWSDHC4POPT_LEASE_TIME = 51,
	LWSDHC4POPT_OPTION_OVERLOAD = 52,
	LWSDHC4POPT_MESSAGE_TYPE = 53,
	LWSDHC4POPT_SERVER_ID = 54,
	LWSDHC4POPT_PARAM_REQ_LIST = 55,
	LWSDHC4POPT_MESSAGE = 56,
	LWSDHC4POPT_MAX_DHCP_MSG_SIZE = 57,
	LWSDHC4POPT_RENEWAL_TIME = 58, /* AKA T1 */
	LWSDHC4POPT_REBINDING_TIME = 59, /* AKA T2 */
	LWSDHC4POPT_VENDOR_CLASS_ID = 60,
	LWSDHC4POPT_CLIENT_ID = 61,
	LWSDHC4POPT_END_OPTIONS = 255
};
/* one active dhcp client instance (one per iface / af combination) */
typedef struct lws_dhcpc_req {
	lws_dll2_t list;			/* on context->dhcpc_owner */
	struct lws_context *context;
	lws_sorted_usec_list_t sul_renew;	/* renew timer */
	lws_sorted_usec_list_t sul_conn;	/* (re)connect timer */
	lws_sorted_usec_list_t sul_write;	/* tx retry timer */
	dhcpc_cb_t cb;		/* cb on completion / failure */
	void *opaque;		/* ignored by lws, give to cb */

	/* these are separated so we can close the bcast one asynchronously */
	struct lws *wsi_raw;		/* for broadcast */
	lws_dhcpc_state_t state;	/* current lws_dhcpc_state_t */

	lws_dhcpc_ifstate_t is;		/* negotiated interface state */

	uint16_t retry_count_conn;
	uint16_t retry_count_write;
	uint8_t xid[4];			/* dhcp transaction id */
	uint8_t af;			/* address family */
} lws_dhcpc_req_t;
/* interface name is overallocated here */

void
lws_dhcpc4_retry_conn(struct lws_sorted_usec_list *sul);

int
lws_dhcpc4_parse(lws_dhcpc_req_t *r, void *in, size_t len);

void
lws_dhcpc_retry_write(struct lws_sorted_usec_list *sul);

View File

@ -0,0 +1,447 @@
/*
* lws System Fault Injection
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#include <assert.h>
/*
 * Find the fault-injection rule called "name" on this fi context, or NULL
 * if no rule of that name exists.
 */
static lws_fi_priv_t *
lws_fi_lookup(const lws_fi_ctx_t *fic, const char *name)
{
	lws_fi_priv_t *hit = NULL;

	lws_start_foreach_dll(struct lws_dll2 *, d, fic->fi_owner.head) {
		lws_fi_priv_t *pv = lws_container_of(d, lws_fi_priv_t, list);

		if (!hit && !strcmp(pv->fi.name, name))
			hit = pv;
	} lws_end_foreach_dll(d);

	return hit;
}
/*
 * Should we inject the fault called "name" this time?  Returns nonzero to
 * inject.
 *
 * The decision depends on the rule's type: ALWAYS fires every call,
 * DETERMINISTIC fires for "count" consecutive calls after "pre" calls,
 * PROBABILISTIC fires "pre" percent of the time using the context PRNG,
 * PATTERN / PATTERN_ALLOC walk a bitfield that repeats every "count" bits
 * and fire on set bits.  Unknown rule types never fire.
 */
int
lws_fi(const lws_fi_ctx_t *fic, const char *name)
{
	lws_fi_priv_t *pv;
	int n;

	pv = lws_fi_lookup(fic, name);

	if (!pv)
		/* no rule of that name -> never inject */
		return 0;

	switch (pv->fi.type) {
	case LWSFI_ALWAYS:
		goto inject;

	case LWSFI_DETERMINISTIC:
		/* fire on calls pre .. pre + count - 1 (times counts calls) */
		pv->fi.times++;
		if (pv->fi.times >= pv->fi.pre)
			if (pv->fi.times < pv->fi.pre + pv->fi.count)
				goto inject;
		return 0;

	case LWSFI_PROBABILISTIC:
		/* pre holds the percentage probability */
		if (lws_xos_percent((lws_xos_t *)&fic->xos, (int)pv->fi.pre))
			goto inject;
		return 0;

	case LWSFI_PATTERN:
	case LWSFI_PATTERN_ALLOC:
		/* index into the bit pattern, wrapping every count bits */
		n = (int)((pv->fi.times++) % pv->fi.count);
		if (pv->fi.pattern[n >> 3] & (1 << (n & 7)))
			goto inject;

		return 0;

	default:
		return 0;
	}

	return 0;

inject:
	lwsl_warn("%s: Injecting fault %s->%s\n", __func__,
		  fic->name ? fic->name : "unk", pv->fi.name);

	return 1;
}
/*
 * For a fault of type LWSFI_RANGE ("name(123..456)"), place a pseudo-random
 * value from [pre, count) into *result.
 *
 * Returns 0 on success, or 1 if no such fault exists or it is not a range.
 */
int
lws_fi_range(const lws_fi_ctx_t *fic, const char *name, uint64_t *result)
{
	lws_fi_priv_t *pv;
	uint64_t d;

	pv = lws_fi_lookup(fic, name);

	if (!pv)
		return 1;

	if (pv->fi.type != LWSFI_RANGE) {
		lwsl_err("%s: fault %s is not a 123..456 range\n",
			 __func__, name);
		return 1;
	}

	d = pv->fi.count - pv->fi.pre;
	if (!d) {
		/*
		 * Degenerate range (min == max): the deserializer only warns
		 * about this, so it can reach here... avoid the modulo-by-
		 * zero, the only representable value is the lower bound
		 */
		*result = pv->fi.pre;

		return 0;
	}

	*result = pv->fi.pre + (lws_xos((lws_xos_t *)&fic->xos) % d);

	return 0;
}
/* Fault-injection check scoped to a wsi's own fi context */
int
_lws_fi_user_wsi_fi(struct lws *wsi, const char *name)
{
	return lws_fi(&wsi->fic, name);
}
/* Fault-injection check scoped to the lws_context's fi context */
int
_lws_fi_user_context_fi(struct lws_context *ctx, const char *name)
{
	return lws_fi(&ctx->fic, name);
}
#if defined(LWS_WITH_SECURE_STREAMS)
/* Fault-injection check scoped to a secure stream handle's fi context */
int
_lws_fi_user_ss_fi(struct lws_ss_handle *h, const char *name)
{
	return lws_fi(&h->fic, name);
}

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
/* As above, but for the proxy-client (sspc) side handle */
int
_lws_fi_user_sspc_fi(struct lws_sspc_handle *h, const char *name)
{
	return lws_fi(&h->fic, name);
}
#endif
#endif
/*
 * Copy the caller's fault-injection rule into a private heap-allocated
 * entry (the name string is copied into an overallocation) and append it
 * to the context's rule list.  Returns 0 on success, 1 on OOM.
 */
int
lws_fi_add(lws_fi_ctx_t *fic, const lws_fi_t *fi)
{
	size_t nl = strlen(fi->name);
	lws_fi_priv_t *pv = lws_malloc(sizeof(*pv) + nl + 1, __func__);

	if (!pv)
		return 1;

	lws_dll2_clear(&pv->list);
	pv->fi = *fi;

	/* the name lives in the overallocation just after the struct */
	memcpy(&pv[1], fi->name, nl + 1);
	pv->fi.name = (const char *)&pv[1];

	lws_dll2_add_tail(&pv->list, &fic->fi_owner);

	return 0;
}
/*
 * Remove and free the fault-injection rule called "name" from this fi
 * context, if it exists; no-op otherwise.
 */
void
lws_fi_remove(lws_fi_ctx_t *fic, const char *name)
{
	lws_fi_priv_t *pv = lws_fi_lookup(fic, name);

	if (!pv)
		return;

	lws_dll2_remove(&pv->list);
	lws_free(pv);
}
/*
 * Move (not copy) every fault-injection rule from fic_src's list over to
 * fic_dest, and seed fic_dest's PRNG from fic_src's.
 */
void
lws_fi_import(lws_fi_ctx_t *fic_dest, const lws_fi_ctx_t *fic_src)
{
	/* inherit the PRNG seed for our context from source guy too */
	lws_xos_init(&fic_dest->xos, lws_xos((lws_xos_t *)&fic_src->xos));

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   fic_src->fi_owner.head) {
		lws_fi_priv_t *rule = lws_container_of(d, lws_fi_priv_t, list);

		/* unhook from source list, rehook on destination */
		lws_dll2_remove(&rule->list);
		lws_dll2_add_tail(&rule->list, &fic_dest->fi_owner);
	} lws_end_foreach_dll_safe(d, d1);
}
/*
 * Add a copy of *pfi to fic_dest, with the first "trim" chars of its name
 * (the matched scope prefix) chopped off.  PATTERN_ALLOC rules get their
 * own private copy of the pattern buffer; on pattern OOM the rule is
 * silently dropped.
 */
static void
do_inherit(lws_fi_ctx_t *fic_dest, lws_fi_t *pfi, size_t trim)
{
	lws_fi_t copy = *pfi;

	copy.name += trim;

	lwsl_info("%s: %s: %s inherited as %s\n", __func__, fic_dest->name,
		  pfi->name, copy.name);

	if (copy.type == LWSFI_PATTERN_ALLOC) {
		size_t bl = (size_t)((copy.count >> 3) + 1);
		uint8_t *np = lws_malloc(bl, __func__);

		if (!np)
			return;

		memcpy(np, pfi->pattern, bl);
		copy.pattern = np;
	}

	lws_fi_add(fic_dest, &copy);
}
/*
 * Copy into fic_dest each rule from fic_src that applies to us:
 *
 *  - with no scope, every rule is copied with its full name
 *  - with scope "vh", rules named "vh/xxx" are copied as "xxx"
 *  - with scope "vh" and value "foo", rules named "vh=foo/xxx" are copied
 *    as "xxx"
 */
void
lws_fi_inherit_copy(lws_fi_ctx_t *fic_dest, const lws_fi_ctx_t *fic_src,
		    const char *scope, const char *value)
{
	size_t sl = 0, vl = 0;

	if (scope)
		sl = strlen(scope);

	if (value)
		vl = strlen(value);

	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   fic_src->fi_owner.head) {
		lws_fi_priv_t *pv = lws_container_of(p, lws_fi_priv_t, list);
		size_t nl = strlen(pv->fi.name);

		if (!scope)
			/* unconditional: inherit with the full name */
			do_inherit(fic_dest, &pv->fi, 0);
		else
			/* "scope/name" -> inherit, trimming "scope/" */
			if (nl > sl + 2 &&
			    !strncmp(pv->fi.name, scope, sl) &&
			    pv->fi.name[sl] == '/')
				do_inherit(fic_dest, &pv->fi, sl + 1);
			else {
				/*
				 * "scope=value/name" -> trim that prefix.
				 * NOTE(review): this branch does not recheck
				 * the scope prefix itself with strncmp --
				 * confirm that is intended
				 */
				if (value && nl > sl + vl + 2 &&
				    pv->fi.name[sl] == '=' &&
				    !strncmp(pv->fi.name + sl + 1, value, vl) &&
				    pv->fi.name[sl + 1 + vl] == '/')
					do_inherit(fic_dest, &pv->fi, sl + vl + 2);
			}

	} lws_end_foreach_dll_safe(p, p1);
}
/*
 * Free every fault-injection rule on the context, including any
 * heap-allocated pattern buffers the rules own.
 */
void
lws_fi_destroy(const lws_fi_ctx_t *fic)
{
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   fic->fi_owner.head) {
		lws_fi_priv_t *rule = lws_container_of(d, lws_fi_priv_t, list);

		if (rule->fi.type == LWSFI_PATTERN_ALLOC && rule->fi.pattern) {
			/* we own the pattern allocation too */
			lws_free((void *)rule->fi.pattern);
			rule->fi.pattern = NULL;
		}

		lws_dll2_remove(&rule->list);
		lws_free(rule);
	} lws_end_foreach_dll_safe(d, d1);
}
/*
* We want to support these kinds of qualifier
*
* myfault true always
* myfault(10%) true 10% of the time
* myfault(....X X) true when X
* myfault2(20..3000) pick a number between 20 and 3000
*/
enum {
PARSE_NAME,
PARSE_WHEN,
PARSE_PC,
PARSE_ENDBR,
PARSE_COMMA
};
/*
 * Parse a comma-separated list of fault-injection rules, eg
 * "a,b(10%),c(..X.XX),d(20..3000)", creating each rule on fic via
 * lws_fi_add().  See the qualifier forms described in the comment above.
 * Malformed input aborts parsing, keeping any rules added so far.
 */
void
lws_fi_deserialize(lws_fi_ctx_t *fic, const char *sers)
{
	int state = PARSE_NAME, m;
	struct lws_tokenize ts;
	lws_fi_t fi;
	char nm[64];

	/*
	 * Go through the comma-separated list of faults
	 * creating them and adding to the lws_context info
	 */

	lws_tokenize_init(&ts, sers, LWS_TOKENIZE_F_DOT_NONTERM |
				     LWS_TOKENIZE_F_NO_INTEGERS |
				     LWS_TOKENIZE_F_NO_FLOATS |
				     LWS_TOKENIZE_F_EQUALS_NONTERM |
				     LWS_TOKENIZE_F_SLASH_NONTERM |
				     LWS_TOKENIZE_F_MINUS_NONTERM);
	ts.len = (unsigned int)strlen(sers);
	/* sanity-limit the config string length */
	if (ts.len < 1 || ts.len > 10240)
		return;

	do {
		ts.e = (int8_t)lws_tokenize(&ts);
		switch (ts.e) {
		case LWS_TOKZE_TOKEN:

			if (state == PARSE_NAME) {
				/*
				 * One fault to inject looks like, eg,
				 *
				 * vh=xxx/listenskt
				 */

				memset(&fi, 0, sizeof(fi));
				lws_strnncpy(nm, ts.token, ts.token_len,
					     sizeof(nm));
				fi.name = nm;
				fi.type = LWSFI_ALWAYS;

				lwsl_notice("%s: name %.*s\n", __func__,
					    (int)ts.token_len, ts.token);

				/* added later, potentially after (when) */
				break;
			}
			if (state == PARSE_WHEN) {
				/* it's either numeric (then % or ..num2), or
				 * .X pattern */

				lwsl_notice("%s: when\n", __func__);

				if (*ts.token == '.' || *ts.token == 'X') {
					uint8_t *pat;
					size_t n;

					/*
					 * pattern... we need to allocate it
					 */
					fi.type = LWSFI_PATTERN_ALLOC;
					pat = lws_zalloc((ts.token_len >> 3) + 1,
							 __func__);
					if (!pat)
						return;
					fi.pattern = pat;
					fi.count = (uint64_t)ts.token_len;

					/* set a bit for every 'X' position */
					for (n = 0; n < ts.token_len; n++)
						if (ts.token[n] == 'X')
							pat[n >> 3] = (uint8_t)(
								pat[n >> 3] |
								(1 << (n & 7)));

					lwsl_hexdump_notice(pat,
							(ts.token_len >> 3) + 1);

					state = PARSE_ENDBR;
					break;
				}

				fi.pre = (uint64_t)atoll(ts.token);

				/* scan past the leading decimal number */
				for (m = 0; m < (int)ts.token_len - 1; m++)
					if (ts.token[m] < '0' ||
					    ts.token[m] > '9')
						break;

				/*
				 * We can understand num% or num..num
				 */

				if (m != (int)ts.token_len &&
				    ts.token[m] == '.' &&
				    ts.token[m + 1] == '.') {
					fi.count = (uint64_t)atoll(
							&ts.token[m + 2]);
					fi.type = LWSFI_RANGE;
					state = PARSE_ENDBR;

					/* warn (but continue) on bad range */
					if (fi.pre >= fi.count) {
						lwsl_err("%s: range must have "
							 "smaller first!\n",
							 __func__);
					}

					lwsl_notice("%s: range %llx .."
						    "%llx\n", __func__,
						    (unsigned long long)fi.pre,
						    (unsigned long long)fi.count);

					break;
				}

				lwsl_notice("%s: prob %d%%\n", __func__,
					    (int)fi.pre);
				fi.type = LWSFI_PROBABILISTIC;
				state = PARSE_PC;
				break;
			}
			break;

		case LWS_TOKZE_DELIMITER:
			if (*ts.token == ',') {
				/* end of one fault declaration: commit it */
				lws_fi_add(fic, &fi);
				state = PARSE_NAME;
				break;
			}
			if (*ts.token == '(') {
				lwsl_notice("%s: (\n", __func__);

				if (state != PARSE_NAME) {
					lwsl_err("%s: misplaced (\n", __func__);
					return;
				}
				state = PARSE_WHEN;
				break;
			}
			if (*ts.token == ')') {
				if (state != PARSE_ENDBR) {
					lwsl_err("%s: misplaced )\n", __func__);
					return;
				}
				state = PARSE_NAME;
				break;
			}
			if (*ts.token == '%') {
				if (state != PARSE_PC) {
					lwsl_err("%s: misplaced %%\n", __func__);
					return;
				}
				state = PARSE_ENDBR;
				break;
			}
			break;

		case LWS_TOKZE_ENDED:
			/* commit the final fault being defined */
			lws_fi_add(fic, &fi);
			return;

		default:
			return;
		}
	} while (ts.e > 0);
}

View File

@ -0,0 +1,35 @@
/*
* lws System Message Distribution
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
typedef struct lws_fi_priv {
lws_dll2_t list;
lws_fi_t fi;
} lws_fi_priv_t;
void
lws_fi_import(lws_fi_ctx_t *fic_dest, const lws_fi_ctx_t *fic_src);
#endif

View File

@ -0,0 +1,170 @@
/*
* Policy fetching for Secure Streams
*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2019 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
/* per-stream object for the policy-fetching secure stream */
typedef struct ss_fetch_policy {
	struct lws_ss_handle *ss;	/* our secure stream handle */
	void *opaque_data;		/* set to the lws_context */

	/* ... application specific state ... */

	lws_sorted_usec_list_t sul;	/* used to defer policy_set() */

	uint8_t partway;	/* 0 = idle, 1 = parse begun, 2 = EOM seen */
} ss_fetch_policy_t;
/* secure streams payload interface */
/*
 * rx handler: feed each chunk of the downloaded policy into the policy
 * parser.  SOM begins a fresh parse, EOM marks completion so the remote
 * QOS-ack state handler can apply the new policy.
 */
static lws_ss_state_return_t
ss_fetch_policy_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
{
	ss_fetch_policy_t *m = (ss_fetch_policy_t *)userobj;
	struct lws_context *context = (struct lws_context *)m->opaque_data;

	if (flags & LWSSS_FLAG_SOM) {
		/*
		 * NOTE(review): a parse_begin failure returns OK without
		 * setting partway, silently dropping the update -- confirm
		 * that is the intended best-effort behavior
		 */
		if (lws_ss_policy_parse_begin(context, 0))
			return LWSSSSRET_OK;
		m->partway = 1;
	}

	/* parse errors likewise end the attempt without raising an error */
	if (len && lws_ss_policy_parse(context, buf, len) < 0)
		return LWSSSSRET_OK;

	if (flags & LWSSS_FLAG_EOM)
		/* whole policy received and parsed OK */
		m->partway = 2;

	return LWSSSSRET_OK;
}
/* tx handler: this stream only downloads, we never send any payload */
static lws_ss_state_return_t
ss_fetch_policy_tx(void *userobj, lws_ss_tx_ordinal_t ord, uint8_t *buf,
		   size_t *len, int *flags)
{
	return LWSSSSRET_TX_DONT_SEND;
}
/*
 * Deferred completion: scheduled to run after the fetch stream has closed
 * (so nothing still uses vhosts from the old policy), then installs the
 * freshly-parsed policy and steps the system state machine onward.
 */
static void
policy_set(lws_sorted_usec_list_t *sul)
{
	ss_fetch_policy_t *m = lws_container_of(sul, ss_fetch_policy_t, sul);
	struct lws_context *context = (struct lws_context *)m->opaque_data;

	/*
	 * We get called if the policy parse was successful, just after the
	 * ss connection close that was using the vhost from the old policy
	 */

	lws_ss_destroy(&m->ss);

	if (lws_ss_policy_set(context, "updated")) {
		lwsl_err("%s: policy set failed\n", __func__);

		return;
	}

	context->policy_updated = 1;
#if defined(LWS_WITH_SYS_STATE)
	lws_state_transition_steps(&context->mgr_system,
				   LWS_SYSTATE_OPERATIONAL);
#endif
}
/*
 * SS state callback: request tx at creation (starts the fetch); when the
 * remote end acks the transaction and the full policy was parsed
 * (partway == 2), defer policy_set() so it runs after this stream closes;
 * on disconnect mid-parse, abandon the partial parse.
 */
static lws_ss_state_return_t
ss_fetch_policy_state(void *userobj, void *sh, lws_ss_constate_t state,
		      lws_ss_tx_ordinal_t ack)
{
	ss_fetch_policy_t *m = (ss_fetch_policy_t *)userobj;
	struct lws_context *context = (struct lws_context *)m->opaque_data;

	lwsl_info("%s: %s, ord 0x%x\n", __func__, lws_ss_state_name((int)state),
		  (unsigned int)ack);

	switch (state) {
	case LWSSSCS_CREATING:
		return lws_ss_request_tx(m->ss);

	case LWSSSCS_CONNECTING:
		break;

	case LWSSSCS_QOS_ACK_REMOTE:
		switch (m->partway) {
		case 2:
			/* complete policy in hand: apply after close */
			lws_sul_schedule(context, 0, &m->sul, policy_set, 1);
			m->partway = 0;
			break;
		}
		break;

	case LWSSSCS_DISCONNECTED:
		if (m->partway == 1) {
			/* dropped mid-policy: discard the partial parse */
			lws_ss_policy_parse_abandon(context);
			break;
		}
		m->partway = 0;
		break;

	default:
		break;
	}

	return LWSSSSRET_OK;
}
/*
 * Start the "fetch_policy" secure stream, which downloads an updated SS
 * policy for the context.  Returns 1 if fetching is now ongoing, 0 if the
 * stream already exists or the streamtype is absent (eg, a proxied client
 * with only a stub policy -- not an error).
 */
int
lws_ss_sys_fetch_policy(struct lws_context *context)
{
	lws_ss_info_t ssi;

	if (context->hss_fetch_policy)
		/* already exists */
		return 0;

	/* We're making an outgoing secure stream ourselves */

	memset(&ssi, 0, sizeof(ssi));
	ssi.handle_offset	    = offsetof(ss_fetch_policy_t, ss);
	ssi.opaque_user_data_offset = offsetof(ss_fetch_policy_t, opaque_data);
	ssi.rx			    = ss_fetch_policy_rx;
	ssi.tx			    = ss_fetch_policy_tx;
	ssi.state		    = ss_fetch_policy_state;
	ssi.user_alloc		    = sizeof(ss_fetch_policy_t);
	ssi.streamtype		    = "fetch_policy";

	if (lws_ss_create(context, 0, &ssi, context, &context->hss_fetch_policy,
			  NULL, NULL)) {
		/*
		 * If there's no fetch_policy streamtype, it can just be we're
		 * running on a proxied client with no policy of its own,
		 * it's OK.
		 */
		lwsl_info("%s: Policy fetch ss failed (stub policy?)\n",
			  __func__);

		return 0;
	}

	lwsl_info("%s: policy fetching ongoing\n", __func__);

	/* fetching it is ongoing */

	return 1;
}

View File

@ -0,0 +1,10 @@
# Metrics subdirectory: contributes metrics.c to the build only when
# LWS_WITH_SYS_METRICS is enabled; SOURCES is exported to the parent scope

include_directories(.)

if (LWS_WITH_SYS_METRICS)
	list(APPEND SOURCES
		system/metrics/metrics.c
	)
endif()

exports_to_parent_scope()

View File

@ -0,0 +1,894 @@
/*
* lws Generic Metrics
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#include <assert.h>
/*
 * Add tag name="val" to the owner list, replacing any existing tag of the
 * same name.  The value is copied into the allocation; the name pointer
 * itself is stored, so it must outlive the tag.
 * Returns 0 on success, 1 on OOM.
 */
int
lws_metrics_tag_add(lws_dll2_owner_t *owner, const char *name, const char *val)
{
	size_t vl = strlen(val);
	lws_metrics_tag_t *tag;

	// lwsl_notice("%s: adding %s=%s\n", __func__, name, val);

	/*
	 * Remove (in order to replace) any existing tag of same name
	 */

	lws_start_foreach_dll(struct lws_dll2 *, d, owner->head) {
		tag = lws_container_of(d, lws_metrics_tag_t, list);

		if (!strcmp(name, tag->name)) {
			lws_dll2_remove(&tag->list);
			lws_free(tag);
			break;
		}

	} lws_end_foreach_dll(d);

	/*
	 * Create the new tag
	 */

	tag = lws_malloc(sizeof(*tag) + vl + 1, __func__);
	if (!tag)
		return 1;

	lws_dll2_clear(&tag->list);
	tag->name = name;		/* not copied: pointer must stay valid */
	memcpy(&tag[1], val, vl + 1);	/* value copied into overallocation */

	lws_dll2_add_tail(&tag->list, owner);

	return 0;
}
/*
 * Tag the wsi's connection accounting object, and also append "|val" to
 * the wsi's lifecycle tag so it appears in logging.
 */
int
lws_metrics_tag_wsi_add(struct lws *wsi, const char *name, const char *val)
{
	__lws_lc_tag(wsi->a.context, NULL, &wsi->lc, "|%s", val);

	return lws_metrics_tag_add(&wsi->cal_conn.mtags_owner, name, val);
}
#if defined(LWS_WITH_SECURE_STREAMS)
/* As lws_metrics_tag_wsi_add(), for a secure stream's transaction metrics */
int
lws_metrics_tag_ss_add(struct lws_ss_handle *ss, const char *name, const char *val)
{
	__lws_lc_tag(ss->context, NULL, &ss->lc, "|%s", val);

	return lws_metrics_tag_add(&ss->cal_txn.mtags_owner, name, val);
}

#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
/* As above, for the proxy-client (sspc) side handle */
int
lws_metrics_tag_sspc_add(struct lws_sspc_handle *sspc, const char *name,
			 const char *val)
{
	__lws_lc_tag(sspc->context, NULL, &sspc->lc, "|%s", val);

	return lws_metrics_tag_add(&sspc->cal_txn.mtags_owner, name, val);
}
#endif
#endif
/* Unlink and free every tag held on this owner list */
void
lws_metrics_tags_destroy(lws_dll2_owner_t *owner)
{
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, owner->head) {
		lws_metrics_tag_t *tag =
			lws_container_of(d, lws_metrics_tag_t, list);

		lws_dll2_remove(&tag->list);
		lws_free(tag);
	} lws_end_foreach_dll_safe(d, d1);
}
/*
 * Render the owner's tags as name="value"[,name="value"]... into buf,
 * always NUL-terminated, truncating as needed.  Returns the number of
 * chars used, excluding the NUL.
 */
size_t
lws_metrics_tags_serialize(lws_dll2_owner_t *owner, char *buf, size_t len)
{
	char *p = buf, *end;
	lws_metrics_tag_t *t;

	if (!len)
		/* no room even for the NUL: don't write anything at all */
		return 0;

	end = buf + len - 1;

	lws_start_foreach_dll(struct lws_dll2 *, d, owner->head) {
		t = lws_container_of(d, lws_metrics_tag_t, list);

		p += lws_snprintf(p, lws_ptr_diff_size_t(end, p),
				  "%s=\"%s\"", t->name, (const char *)&t[1]);

		/* comma-separate when another tag follows and room remains */
		if (d->next && p + 2 < end)
			*p++ = ',';

	} lws_end_foreach_dll(d);

	*p = '\0';

	return lws_ptr_diff_size_t(p, buf);
}
/* Return the value string of the tag called "name" on owner, or NULL */
const char *
lws_metrics_tag_get(lws_dll2_owner_t *owner, const char *name)
{
	const char *val = NULL;

	lws_start_foreach_dll(struct lws_dll2 *, d, owner->head) {
		lws_metrics_tag_t *tag =
			lws_container_of(d, lws_metrics_tag_t, list);

		/* the value is stored directly after the tag struct */
		if (!val && !strcmp(name, tag->name))
			val = (const char *)&tag[1];
	} lws_end_foreach_dll(d);

	return val;
}
static int
lws_metrics_dump_cb(lws_metric_pub_t *pub, void *user);

/*
 * Report this metric via the dump callback, but only if it has collected
 * something (us_first set) that was not already reported (us_last has
 * moved on from us_dumped).
 */
static void
lws_metrics_report_and_maybe_clear(struct lws_context *ctx, lws_metric_pub_t *pub)
{
	if (!pub->us_first || pub->us_last == pub->us_dumped)
		return;

	lws_metrics_dump_cb(pub, ctx);
}
/*
 * sul timer for a dynamic metrics policy: report every metric bound to
 * this policy, optionally announce the dump on SMD, then rearm ourselves
 * according to the policy's schedule period.
 */
static void
lws_metrics_periodic_cb(lws_sorted_usec_list_t *sul)
{
	lws_metric_policy_dyn_t *dmp = lws_container_of(sul,
					lws_metric_policy_dyn_t, sul);
	/* recover the context from the list owner embedded in it */
	struct lws_context *ctx = lws_container_of(dmp->list.owner,
					struct lws_context, owner_mtr_dynpol);

	/* without a report operation from lws_system, nothing we can do */
	if (!ctx->system_ops || !ctx->system_ops->metric_report)
		return;

	lws_start_foreach_dll(struct lws_dll2 *, d, dmp->owner.head) {
		lws_metric_t *mt = lws_container_of(d, lws_metric_t, list);
		lws_metric_pub_t *pub = lws_metrics_priv_to_pub(mt);

		lws_metrics_report_and_maybe_clear(ctx, pub);
	} lws_end_foreach_dll(d);

#if defined(LWS_WITH_SYS_SMD) && defined(LWS_WITH_SECURE_STREAMS)
	(void)lws_smd_msg_printf(ctx, LWSSMDCL_METRICS,
				 "{\"dump\":\"%s\",\"ts\":%lu}",
				 dmp->policy->name,
				 (long)ctx->last_policy);
#endif

	if (dmp->policy->us_schedule)
		/* rearm for the next periodic dump */
		lws_sul_schedule(ctx, 0, &dmp->sul,
				 lws_metrics_periodic_cb,
				 (lws_usec_t)dmp->policy->us_schedule);
}
/*
* Policies are in two pieces, a const policy and a dynamic part that contains
* lists and sul timers for the policy etc. This creates a dynmic part
* corresponding to the static part.
*
* Metrics can exist detached from being bound to any policy about how to
* report them, these are collected but not reported unless they later become
* bound to a reporting policy dynamically.
*/
lws_metric_policy_dyn_t *
lws_metrics_policy_dyn_create(struct lws_context *ctx,
			      const lws_metric_policy_t *po)
{
	lws_metric_policy_dyn_t *dyn = lws_zalloc(sizeof(*dyn), __func__);

	if (!dyn)
		return NULL;

	dyn->policy = po;
	lws_dll2_add_tail(&dyn->list, &ctx->owner_mtr_dynpol);

	/* policies with a schedule period get their periodic dump timer */
	if (po->us_schedule)
		lws_sul_schedule(ctx, 0, &dyn->sul,
				 lws_metrics_periodic_cb,
				 (lws_usec_t)po->us_schedule);

	return dyn;
}
/*
* Get a dynamic metrics policy from the const one, may return NULL if OOM
*/
lws_metric_policy_dyn_t *
lws_metrics_policy_get_dyn(struct lws_context *ctx,
const lws_metric_policy_t *po)
{
lws_start_foreach_dll(struct lws_dll2 *, d, ctx->owner_mtr_dynpol.head) {
lws_metric_policy_dyn_t *dm =
lws_container_of(d, lws_metric_policy_dyn_t, list);
if (dm->policy == po)
return dm;
} lws_end_foreach_dll(d);
/*
* no dyn policy part for this const policy --> create one
*
* We want a dynamic part for listing metrics that bound to the policy
*/
return lws_metrics_policy_dyn_create(ctx, po);
}
/*
 * Return 0 if metric "name" is matched (wildcards allowed) by any entry
 * of the policy's comma-separated "report" list, nonzero otherwise.
 */
static int
lws_metrics_check_in_policy(const char *polstring, const char *name)
{
	struct lws_tokenize ts;

	memset(&ts, 0, sizeof(ts));

	ts.start = polstring;
	ts.len = strlen(polstring);
	ts.flags = (uint16_t)(LWS_TOKENIZE_F_MINUS_NONTERM |
			      LWS_TOKENIZE_F_ASTERISK_NONTERM |
			      LWS_TOKENIZE_F_COMMA_SEP_LIST |
			      LWS_TOKENIZE_F_NO_FLOATS |
			      LWS_TOKENIZE_F_DOT_NONTERM);

	do {
		ts.e = (int8_t)lws_tokenize(&ts);

		if (ts.e == LWS_TOKZE_TOKEN) {
			if (!lws_strcmp_wildcard(ts.token, ts.token_len, name,
						 strlen(name)))
				/* yes, we are mentioned in this guy's policy */
				return 0;
		}
	} while (ts.e > 0);

	/* no, this policy doesn't apply to a metric with our name */

	return 1;
}
/*
 * Walk the context's metrics policies (falling back to the SS policy's
 * metrics list when none were given directly) and return the first policy
 * whose "report" list matches this metric name, or NULL.
 */
static const lws_metric_policy_t *
lws_metrics_find_policy(struct lws_context *ctx, const char *name)
{
	const lws_metric_policy_t *mp = ctx->metrics_policies;

#if defined(LWS_WITH_SECURE_STREAMS)
	if (!mp && ctx->pss_policies)
		mp = ctx->pss_policies->metrics;
#endif

	for (; mp; mp = mp->next)
		if (mp->report &&
		    !lws_metrics_check_in_policy(mp->report, name))
			return mp;

	return NULL;
}
/*
* Create a lws_metric_t, bind to a named policy if possible (or add to the
* context list of unbound metrics) and set its lws_system
* idx. The metrics objects themselves are typically composed into other
* objects and are well-known composed members of them.
*/
/*
 * Allocate a metric (private part + public part + name copy in a single
 * allocation), prefix its name with ctx->metrics_prefix if set, bind it
 * to a reporting policy when one matches, otherwise park it on the
 * context's no-policy list.  Returns the metric, or NULL on OOM.
 */
lws_metric_t *
lws_metric_create(struct lws_context *ctx, uint8_t flags, const char *name)
{
	const lws_metric_policy_t *po;
	lws_metric_policy_dyn_t *dmp;
	lws_metric_pub_t *pub;
	lws_metric_t *mt;
	char pname[32];
	size_t nl;

	if (ctx->metrics_prefix) {

		/*
		 * In multi-process case, we want to prefix metrics from this
		 * process / context with a string distinguishing which
		 * application they came from
		 */

		nl = (size_t)lws_snprintf(pname, sizeof(pname) - 1, "%s.%s",
					  ctx->metrics_prefix, name);
		name = pname;
	} else
		nl = strlen(name);

	mt = (lws_metric_t *)lws_zalloc(sizeof(*mt) /* private */ +
					sizeof(lws_metric_pub_t) +
					nl + 1 /* copy of metric name */,
					__func__);
	if (!mt)
		return NULL;

	pub = lws_metrics_priv_to_pub(mt);
	pub->name = (char *)pub + sizeof(lws_metric_pub_t);
	memcpy((char *)pub->name, name, nl + 1);
	pub->flags = flags;

	/* after these common members, we have to use the right type */

	if (!(flags & LWSMTFL_REPORT_HIST)) {
		/* anything is smaller or equal to this */
		pub->u.agg.min = ~(u_mt_t)0;
		pub->us_first = lws_now_usecs();
	}

	mt->ctx = ctx;

	/*
	 * Let's see if we can bind to a reporting policy straight away
	 */

	po = lws_metrics_find_policy(ctx, name);
	if (po) {
		dmp = lws_metrics_policy_get_dyn(ctx, po);
		if (dmp) {
			lwsl_notice("%s: metpol %s\n", __func__, name);
			lws_dll2_add_tail(&mt->list, &dmp->owner);

			/*
			 * Fixed: this successful path used to "return 0"
			 * (ie, NULL) from a pointer-returning function,
			 * making callers believe creation failed while the
			 * metric was left live on the policy's list
			 */
			return mt;
		}
	}

	/*
	 * If not, well, let's go on without and maybe later at runtime, he'll
	 * get interested in us and apply a reporting policy
	 */

	lws_dll2_add_tail(&mt->list, &ctx->owner_mtr_no_pol);

	return mt;
}
/*
 * If our metric is bound to a reporting policy, return a pointer to it,
 * otherwise NULL
 */
const lws_metric_policy_t *
lws_metric_get_policy(lws_metric_t *mt)
{
	lws_metric_policy_dyn_t *dp;

	/*
	 * Our metric must either be on the "no policy" context list or
	 * listed by the dynamic part of the policy it is bound to
	 */
	assert(mt->list.owner);

	/*
	 * Distinguish the two cases by address: if the owning dll2_owner
	 * lies inside the lws_context allocation itself, it must be the
	 * context's "no policy" list
	 */
	if ((char *)mt->list.owner >= (char *)mt->ctx &&
	    (char *)mt->list.owner < (char *)mt->ctx + sizeof(struct lws_context))
		/* we are on the "no policy" context list */
		return NULL;

	/* we are listed by a dynamic policy owner */

	dp = lws_container_of(mt->list.owner, lws_metric_policy_dyn_t, owner);

	/* return the const policy the dynamic policy represents */

	return dp->policy;
}
/*
 * Retry policy binding for every metric currently parked on the context's
 * "no policy" list, eg, after a new policy became available.
 */
void
lws_metric_rebind_policies(struct lws_context *ctx)
{
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   ctx->owner_mtr_no_pol.head) {
		lws_metric_t *mt = lws_container_of(d, lws_metric_t, list);
		lws_metric_pub_t *pub = lws_metrics_priv_to_pub(mt);
		const lws_metric_policy_t *po =
			lws_metrics_find_policy(ctx, pub->name);

		if (!po)
			lwsl_debug("%s: no pol for %s\n", __func__, pub->name);
		else {
			lws_metric_policy_dyn_t *dmp =
				lws_metrics_policy_get_dyn(ctx, po);

			if (dmp) {
				lwsl_info("%s: %s <- pol %s\n", __func__,
					  pub->name, po->name);
				/* move from the no-policy list to the policy */
				lws_dll2_remove(&mt->list);
				lws_dll2_add_tail(&mt->list, &dmp->owner);
			}
		}
	} lws_end_foreach_dll_safe(d, d1);
}
/*
 * Unlink a metric from whatever owns it; with keep set, park it on the
 * context's no-policy list (preserving collected data), otherwise free
 * it, including any histogram bucket chain.  NULLs *pmt when freed.
 */
int
lws_metric_destroy(lws_metric_t **pmt, int keep)
{
	lws_metric_t *mt = *pmt;
	lws_metric_pub_t *pub;

	if (!mt)
		return 0;

	pub = lws_metrics_priv_to_pub(mt);

	lws_dll2_remove(&mt->list);

	if (keep) {
		/* just detach from the policy, keep collecting */
		lws_dll2_add_tail(&mt->list, &mt->ctx->owner_mtr_no_pol);

		return 0;
	}

	if (pub->flags & LWSMTFL_REPORT_HIST) {
		/* walk and free the whole histogram bucket chain */
		lws_metric_bucket_t *b = pub->u.hist.head;

		pub->u.hist.head = NULL;

		while (b) {
			lws_metric_bucket_t *nx = b->next;

			lws_free(b);
			b = nx;
		}
	}

	lws_free(mt);
	*pmt = NULL;

	return 0;
}
/*
* Allow an existing metric to have its reporting policy changed at runtime
*/
int
lws_metric_switch_policy(lws_metric_t *mt, const char *polname)
{
	const lws_metric_policy_t *po =
		lws_metrics_find_policy(mt->ctx, polname);
	lws_metric_policy_dyn_t *dmp;

	if (!po)
		/* no const policy of that name exists */
		return 1;

	dmp = lws_metrics_policy_get_dyn(mt->ctx, po);
	if (!dmp)
		/* OOM creating the dynamic part */
		return 1;

	/* move our listing over to the new policy's dynamic part */
	lws_dll2_remove(&mt->list);
	lws_dll2_add_tail(&mt->list, &dmp->owner);

	return 0;
}
/*
 * Destroy one dynamic policy object.
 *
 * If keep is set, don't destroy existing metrics objects, just detach them
 * from the policy being deleted and keep track of them on ctx->
 * owner_mtr_no_pol (lws_metric_destroy() does the parking).
 *
 * Fix: the original called lws_sul_cancel(&dm->sul) a second time after the
 * metric loop; nothing in lws_metric_destroy() can re-arm dm->sul, so the
 * duplicate cancel was redundant and has been removed.
 */
void
lws_metric_policy_dyn_destroy(lws_metric_policy_dyn_t *dm, int keep)
{
	/* stop any periodic report scheduled against this policy */
	lws_sul_cancel(&dm->sul);

	/* destroy (or detach, if keep) every metric bound to this policy */
	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, dm->owner.head) {
		lws_metric_t *m = lws_container_of(d, lws_metric_t, list);

		lws_metric_destroy(&m, keep);

	} lws_end_foreach_dll_safe(d, d1);

	/* unlist from the context and free the dynamic part itself */
	lws_dll2_remove(&dm->list);
	lws_free(dm);
}
/*
 * Destroy all dynamic metrics policies, deinit any metrics still using them
 */
void
lws_metrics_destroy(struct lws_context *ctx)
{
	/* first take down every dynamic policy and everything it lists */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   ctx->owner_mtr_dynpol.head) {
		lws_metric_policy_dyn_t *dm =
			lws_container_of(p, lws_metric_policy_dyn_t, list);

		lws_metric_policy_dyn_destroy(dm, 0); /* don't keep */

	} lws_end_foreach_dll_safe(p, p1);

	/* then any metrics that never had, or lost, their policy */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   ctx->owner_mtr_no_pol.head) {
		lws_metric_t *mt = lws_container_of(p, lws_metric_t, list);

		lws_metric_destroy(&mt, 0); /* don't keep */

	} lws_end_foreach_dll_safe(p, p1);

	/* ... that's the whole allocated metrics footprint gone... */
}
/*
 * Bump (or create) the named bucket on a histogram metric.
 *
 * Buckets are stored as a singly-linked list, each allocated with a length
 * byte + name + NUL appended after the struct.  Returns 0 on success, 1 on
 * OOM creating a new bucket.  Asserts if pub is not a histogram metric, or
 * the name is 255 chars or longer.
 */
int
lws_metrics_hist_bump_(lws_metric_pub_t *pub, const char *name)
{
	size_t nl = strlen(name);
	lws_metric_bucket_t *buck;
	char *nm;

	if (!(pub->flags & LWSMTFL_REPORT_HIST)) {
		lwsl_err("%s: %s not histogram: flags %d\n", __func__,
			 pub->name, pub->flags);
		assert(0);
	}
	assert(nl < 255);

	/* track event timing window */
	pub->us_last = lws_now_usecs();
	if (!pub->us_first)
		pub->us_first = pub->us_last;

	/* do we already have a bucket of this name? */
	for (buck = pub->u.hist.head; buck; buck = buck->next)
		if (lws_metric_bucket_name_len(buck) == nl &&
		    !strcmp(name, lws_metric_bucket_name(buck))) {
			buck->count++;
			goto happy;
		}

	/* no... allocate struct + length byte + name + NUL in one piece */
	buck = lws_malloc(sizeof(*buck) + nl + 2, __func__);
	if (!buck)
		return 1;

	nm = (char *)buck + sizeof(*buck);
	/* length byte at beginning of name, avoid struct alignment overhead */
	*nm = (char)nl;
	memcpy(nm + 1, name, nl + 1);

	/* push the new bucket on the front of the list */
	buck->next = pub->u.hist.head;
	pub->u.hist.head = buck;

	buck->count = 1;
	pub->u.hist.list_size++;

happy:
	pub->u.hist.total_count++;

	return 0;
}
/*
 * Bump a histogram bucket whose name is `name` prefixed by a description of
 * the wsi: any bound Secure Stream's streamtype, the stash hostname (client
 * builds), and the numeric peer address.  Always returns 0.
 *
 * The buckets produced look like: ss="x",hostname="y",peer="z",name
 */
int
lws_metrics_hist_bump_describe_wsi(struct lws *wsi, lws_metric_pub_t *pub,
				   const char *name)
{
	char desc[192], d1[48], *p = desc, *end = desc + sizeof(desc);

#if defined(LWS_WITH_SECURE_STREAMS)
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
	/* client side of an sspc proxy link: use the sspc handle's type */
	if (wsi->client_bound_sspc) {
		lws_sspc_handle_t *h = (lws_sspc_handle_t *)wsi->a.opaque_user_data;

		if (h)
			p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), "ss=\"%s\",",
					  h->ssi.streamtype);
	} else
	/* proxy's onward connection: use the proxied ss's type */
	if (wsi->client_proxy_onward) {
		lws_ss_handle_t *h = (lws_ss_handle_t *)wsi->a.opaque_user_data;
		struct lws_sss_proxy_conn *conn = h->conn_if_sspc_onw;

		if (conn && conn->ss)
			p += lws_snprintf(p, lws_ptr_diff_size_t(end, p),
					  "ss=\"%s\",",
					  conn->ss->info.streamtype);
	} else
#endif
	/* plain ss-bound wsi */
	if (wsi->for_ss) {
		lws_ss_handle_t *h = (lws_ss_handle_t *)wsi->a.opaque_user_data;

		if (h)
			p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), "ss=\"%s\",",
					  h->info.streamtype);
	}
#endif
#if defined(LWS_WITH_CLIENT)
	/* connect-time stash may still hold the requested hostname */
	if (wsi->stash && wsi->stash->cis[CIS_HOST])
		p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), "hostname=\"%s\",",
				  wsi->stash->cis[CIS_HOST]);
#endif
	/* numeric peer address always goes in */
	lws_sa46_write_numeric_address(&wsi->sa46_peer, d1, sizeof(d1));
	p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), "peer=\"%s\",", d1);

	p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), "%s", name);

	lws_metrics_hist_bump_(pub, desc);

	return 0;
}
/*
 * Call cb once for every live metric on the context: first the metrics with
 * no bound policy, then each metric listed under every dynamic policy.
 *
 * A nonzero return from cb stops the walk immediately and is passed back to
 * the caller; otherwise returns 0 after visiting everything.
 */
int
lws_metrics_foreach(struct lws_context *ctx, void *user,
		    int (*cb)(lws_metric_pub_t *pub, void *user))
{
	int ret;

	/* ...metrics with no bound policy... */
	lws_start_foreach_dll_safe(struct lws_dll2 *, q, q1,
				   ctx->owner_mtr_no_pol.head) {
		lws_metric_t *mt = lws_container_of(q, lws_metric_t, list);

		ret = cb(lws_metrics_priv_to_pub(mt), user);
		if (ret)
			return ret;

	} lws_end_foreach_dll_safe(q, q1);

	/* ...then each dynamic policy's listed metrics */
	lws_start_foreach_dll_safe(struct lws_dll2 *, q2, q3,
				   ctx->owner_mtr_dynpol.head) {
		lws_metric_policy_dyn_t *dm =
			lws_container_of(q2, lws_metric_policy_dyn_t, list);

		lws_start_foreach_dll_safe(struct lws_dll2 *, w, w1,
					   dm->owner.head) {
			lws_metric_t *mt = lws_container_of(w, lws_metric_t,
							    list);

			ret = cb(lws_metrics_priv_to_pub(mt), user);
			if (ret)
				return ret;

		} lws_end_foreach_dll_safe(w, w1);

	} lws_end_foreach_dll_safe(q2, q3);

	return 0;
}
/*
 * lws_metrics_foreach() callback used by lws_metrics_dump(): report one
 * metric through system_ops->metric_report, then optionally reset it if the
 * report op asked us to by returning nonzero.  Always returns 0 so the walk
 * continues.
 */
static int
lws_metrics_dump_cb(lws_metric_pub_t *pub, void *user)
{
	struct lws_context *ctx = (struct lws_context *)user;
	int wants_reset;

	if (!ctx->system_ops || !ctx->system_ops->metric_report)
		return 0;

	/* nonzero from the report op means "reset the stats now" */
	wants_reset = ctx->system_ops->metric_report(pub);

	/* track when we dumped it... */

	pub->us_first = pub->us_dumped = lws_now_usecs();
	pub->us_last = 0;

	if (!wants_reset)
		return 0;

	/* ... and clear it back to 0 */

	if (!(pub->flags & LWSMTFL_REPORT_HIST)) {
		memset(&pub->u.agg, 0, sizeof(pub->u.agg));

		return 0;
	}

	/* histogram: free the whole bucket chain and zero the counters */
	{
		lws_metric_bucket_t *nx, *b = pub->u.hist.head;

		pub->u.hist.head = NULL;
		for (; b; b = nx) {
			nx = b->next;
			lws_free(b);
		}
	}

	pub->u.hist.total_count = 0;
	pub->u.hist.list_size = 0;

	return 0;
}
/*
 * Report every metric on the context through system_ops->metric_report,
 * resetting any the report op asks us to.
 */
void
lws_metrics_dump(struct lws_context *ctx)
{
	lws_metrics_foreach(ctx, ctx, lws_metrics_dump_cb);
}
/*
 * Format one aggregated side (gng: METRES_GO / METRES_NOGO) of a metric into
 * buf, returning the number of chars used.  now is the current monotonic us
 * time, used for duty-cycle percentage computation.
 *
 * Fix: the duty-cycle branch divided by (now - pub->us_first), which is zero
 * if the metric is formatted in the same microsecond as its first event; the
 * elapsed time is now clamped to a minimum of 1us.
 */
static int
_lws_metrics_format(lws_metric_pub_t *pub, lws_usec_t now, int gng,
		    char *buf, size_t len)
{
	const lws_humanize_unit_t *schema = humanize_schema_si;
	char *end = buf + len - 1, *obuf = buf;

	if (pub->flags & LWSMTFL_REPORT_DUTY_WALLCLOCK_US)
		schema = humanize_schema_us;

	if (!(pub->flags & LWSMTFL_REPORT_MEAN)) {
		/* only the sum is meaningful */
		if (pub->flags & LWSMTFL_REPORT_DUTY_WALLCLOCK_US) {
			/*
			 * wallclock duty cycle: us spent vs elapsed us; clamp
			 * elapsed to at least 1us so we cannot divide by zero
			 */
			lws_usec_t wall = now - pub->us_first;

			if (!wall)
				wall = 1;

			buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), " %u, ",
					    (unsigned int)pub->u.agg.count[gng]);
			buf += lws_humanize(buf, lws_ptr_diff_size_t(end, buf),
					    (uint64_t)pub->u.agg.sum[gng],
					    humanize_schema_us);
			buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), " / ");
			buf += lws_humanize(buf, lws_ptr_diff_size_t(end, buf),
					    (uint64_t)wall,
					    humanize_schema_us);

			buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
					    " (%d%%)", (int)((100 * pub->u.agg.sum[gng]) /
						(unsigned long)wall));
		} else {
			/* it's a monotonic ordinal, like total tx */
			buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "(%u) ",
					    (unsigned int)pub->u.agg.count[gng]);
			buf += lws_humanize(buf, lws_ptr_diff_size_t(end, buf),
					    (uint64_t)pub->u.agg.sum[gng],
					    humanize_schema_si);
		}
	} else {
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%u, mean: ",
				    (unsigned int)pub->u.agg.count[gng]);
		/* the average over the period is meaningful */
		buf += lws_humanize(buf, lws_ptr_diff_size_t(end, buf),
				    (uint64_t)(pub->u.agg.count[gng] ?
					pub->u.agg.sum[gng] / pub->u.agg.count[gng] : 0),
				    schema);
	}

	return lws_ptr_diff(buf, obuf);
}
/*
 * Format a metric into buf for human consumption.
 *
 * Histogram metrics emit one bucket per call via the *sub cursor (caller
 * initializes *sub to the head bucket): returns 1 while more calls are
 * needed, 0 once *sub is exhausted.  Aggregate metrics are formatted in one
 * call, returning the chars used (0 if there is nothing to report); *sub is
 * set to NULL on the aggregate path.
 *
 * Fix: the original rechecked `if (*sub)` immediately after returning on
 * `*sub == NULL`, and routed the hist path through a `happy:` label only to
 * return 1; the redundant check and label indirection are removed (behavior
 * unchanged).
 */
int
lws_metrics_format(lws_metric_pub_t *pub, lws_metric_bucket_t **sub, char *buf, size_t len)
{
	const lws_humanize_unit_t *schema = humanize_schema_si;
	char *end = buf + len - 1, *obuf = buf;
	lws_usec_t t = lws_now_usecs();

	if (pub->flags & LWSMTFL_REPORT_DUTY_WALLCLOCK_US)
		schema = humanize_schema_us;

	if (pub->flags & LWSMTFL_REPORT_HIST) {

		if (*sub == NULL)
			return 0;

		/* emit this bucket and advance the cursor */
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
				    "%s{%s} %llu", pub->name,
				    lws_metric_bucket_name(*sub),
				    (unsigned long long)(*sub)->count);
		*sub = (*sub)->next;

		/* caller should keep calling until we return 0 */
		return 1;
	}

	buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), "%s: ",
			    pub->name);

	if (!pub->u.agg.count[METRES_GO] && !pub->u.agg.count[METRES_NOGO])
		return 0;

	if (pub->u.agg.count[METRES_GO]) {
		if (!(pub->flags & LWSMTFL_REPORT_ONLY_GO))
			buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf),
					    "Go: ");
		buf += _lws_metrics_format(pub, t, METRES_GO, buf,
					   lws_ptr_diff_size_t(end, buf));
	}

	if (!(pub->flags & LWSMTFL_REPORT_ONLY_GO) && pub->u.agg.count[METRES_NOGO]) {
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), ", NoGo: ");
		buf += _lws_metrics_format(pub, t, METRES_NOGO, buf,
					   lws_ptr_diff_size_t(end, buf));
	}

	if (pub->flags & LWSMTFL_REPORT_MEAN) {
		/* the min / max envelope is also meaningful */
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), ", min: ");
		buf += lws_humanize(buf, lws_ptr_diff_size_t(end, buf), pub->u.agg.min,
				    schema);
		buf += lws_snprintf(buf, lws_ptr_diff_size_t(end, buf), ", max: ");
		buf += lws_humanize(buf, lws_ptr_diff_size_t(end, buf), pub->u.agg.max,
				    schema);
	}

	*sub = NULL;

	return lws_ptr_diff(buf, obuf);
}
/*
 * We want to, at least internally, record an event... depending on the policy,
 * that might cause us to call through to the lws_system apis, or just update
 * our local stats about it and dump at the next periodic chance (also set by
 * the policy)
 */
void
lws_metric_event(lws_metric_t *mt, char go_nogo, u_mt_t val)
{
	lws_metric_pub_t *pub;

	/* go_nogo must be 0 (METRES_GO) or 1 (METRES_NOGO) only */
	assert((go_nogo & 0xfe) == 0);

	/* NULL metric (eg, metrics disabled) is a silent no-op */
	if (!mt)
		return;

	pub = lws_metrics_priv_to_pub(mt);
	/* aggregation is not valid on histogram metrics */
	assert(!(pub->flags & LWSMTFL_REPORT_HIST));

	/* track first / latest event times */
	pub->us_last = lws_now_usecs();
	if (!pub->us_first)
		pub->us_first = pub->us_last;

	/* accumulate count and sum on the go / nogo side */
	pub->u.agg.count[(int)go_nogo]++;
	pub->u.agg.sum[(int)go_nogo] += val;

	/* track the min / max envelope across both sides */
	if (val > pub->u.agg.max)
		pub->u.agg.max = val;

	if (val < pub->u.agg.min)
		pub->u.agg.min = val;

	/* policy may want this reported out-of-band, immediately */
	if (pub->flags & LWSMTFL_REPORT_OOB)
		lws_metrics_report_and_maybe_clear(mt->ctx, pub);
}
/*
 * Bump a histogram bucket named by the serialized tags from tow (and,
 * optionally, tow2 appended after it).
 *
 * Fix: if the first serialize filled qual completely, `sizeof(qual) - p`
 * could underflow to a huge size_t for the second serialize; only append
 * the second tag set while there is genuinely room left.
 * (NOTE(review): assumes lws_metrics_tags_serialize() returns the chars it
 * used, bounded by the buffer size it was given -- confirm.)
 */
void
lws_metrics_hist_bump_priv_tagged(lws_metric_pub_t *mt, lws_dll2_owner_t *tow,
				  lws_dll2_owner_t *tow2)
{
	char qual[192];
	size_t p;

	p = lws_metrics_tags_serialize(tow, qual, sizeof(qual));
	if (tow2 && p < sizeof(qual) - 1)
		lws_metrics_tags_serialize(tow2, qual + p,
					   sizeof(qual) - p);

	lws_metrics_hist_bump(mt, qual);
}

View File

@ -0,0 +1,124 @@
/*
* lws System Metrics
*
* Copyright (C) 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/*
* Const struct that describes a policy for processing raw metrics to turn them
* into events.
*
* Typically although we want to monitor every event, the data produced can be
* too large, and many events that are "normal" just need to be counted as such;
* outliers or change-to-continuous outliers may deserve closer recording as
* events in their own right.
*
* Mean computation must "decay" as it ages, we do this by halving the sum and
* count after .us_decay_unit us.
*
* We don't acknowledge outliers until there are at least .min_contributors
* in the current mean (which is subject to decaying)
*
* We decide something is an outlier event if it deviates from the mean by
* .pc_outlier_deviation %.
*/
/*
* The dynamic counterpart for each static metric policy, this is on heap
* one per const lws_metric_policy_t. It's listed in context->owner_mtr_dynpol
*/
/* heap-allocated, one per registered const lws_metric_policy_t */
typedef struct lws_metric_policy_dyn {
	const lws_metric_policy_t	*policy;
	/**< the static part of the policy we belong to... can be NULL if no
	 * policy matches or the policy was invalidated */

	lws_dll2_owner_t		owner;
	/**< list of metrics that are using this policy */

	lws_dll2_t			list;
	/**< context owns us (on context->owner_mtr_dynpol) */

	lws_sorted_usec_list_t		sul;
	/**< schedule periodic reports for metrics using this policy */
} lws_metric_policy_dyn_t;
/*
 * A metrics private part, encapsulating the public part
 */
typedef struct lws_metric {

	lws_dll2_t			list;
	/**< owned by either 1) ctx.lws_metric_policy_dyn_t.owner, or
	 * 2) ctx.owner_mtr_no_pol */

	struct lws_context		*ctx;
	/**< the context this metric belongs to */

	/* public part overallocated after this struct: see
	 * lws_metrics_priv_to_pub() */
} lws_metric_t;
#if defined(LWS_WITH_SYS_METRICS)
/* bump histogram bucket _name on the private metric _mt */
#define lws_metrics_hist_bump_priv(_mt, _name) \
		lws_metrics_hist_bump_(lws_metrics_priv_to_pub(_mt), _name)
/* bump via the metric member _hist on the wsi's context */
#define lws_metrics_hist_bump_priv_wsi(_wsi, _hist, _name) \
		lws_metrics_hist_bump_(lws_metrics_priv_to_pub(_wsi->a.context->_hist), _name)
/* bump via the metric member _hist on the secure stream's context */
#define lws_metrics_hist_bump_priv_ss(_ss, _hist, _name) \
		lws_metrics_hist_bump_(lws_metrics_priv_to_pub(_ss->context->_hist), _name)
/* the public part is overallocated immediately after the private struct */
#define lws_metrics_priv_to_pub(_x) ((lws_metric_pub_t *)&(_x)[1])
#else
/* no-op stubs when metrics are disabled */
#define lws_metrics_hist_bump_priv(_mt, _name)
#define lws_metrics_hist_bump_priv_wsi(_wsi, _hist, _name)
#define lws_metrics_hist_bump_priv_ss(_ss, _hist, _name)
#define lws_metrics_priv_to_pub(_x) ((lws_metric_pub_t *)NULL)
#endif
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
/*
 * sspc-specific version that also appends the tag value to the lifecycle tag
 * used for logging the sspc identity
 */
int
lws_metrics_tag_sspc_add(struct lws_sspc_handle *ss, const char *name, const char *val);
#endif

/* register a set of const policies with the context */
int
lws_metrics_register_policy(struct lws_context *ctx,
			    const lws_metric_policy_t *head);

/* free every dynamic policy and metric the context owns */
void
lws_metrics_destroy(struct lws_context *ctx);

/* aggregate one go / nogo event of magnitude val into the metric */
void
lws_metric_event(lws_metric_t *mt, char go_nogo, u_mt_t val);

/* create a metric on the context */
lws_metric_t *
lws_metric_create(struct lws_context *ctx, uint8_t flags, const char *name);

/* destroy a metric; with keep set, detach it to the no-policy list instead */
int
lws_metric_destroy(lws_metric_t **mt, int keep);

/* destroy one dynamic policy, destroying or detaching its listed metrics */
void
lws_metric_policy_dyn_destroy(lws_metric_policy_dyn_t *dm, int keep);

/* rebind no-policy metrics to newly-available matching policies */
void
lws_metric_rebind_policies(struct lws_context *ctx);

View File

@ -0,0 +1,310 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"

/* NTP header field values for our client request */
#define LWSNTPC_LI_NONE			0	/* leap indicator: none */
#define LWSNTPC_VN_3			3	/* ntp protocol version 3 */
#define LWSNTPC_MODE_CLIENT		3	/* association mode: client */

/* per-vhost state for the ntp client protocol */
struct vhd_ntpc {
	struct lws_context		*context;
	struct lws_vhost		*vhost;
	const struct lws_protocols	*protocol;
	lws_sorted_usec_list_t		sul_conn;   /* outer conn / dns retry */
	lws_sorted_usec_list_t		sul_write; /* track write retries */
	const char			*ntp_server_ads;
	struct lws			*wsi_udp;  /* NULL while not connected */
	uint16_t			retry_count_conn;
	uint16_t			retry_count_write;
	char				set_time;  /* nonzero once clock set */
};
/*
 * Without a valid ntp we won't be able to do anything requiring client tls.
 *
 * We have our own outer backoff scheme that just keeps retrying dns lookup
 * and the transaction forever.
 */

/* outer policy intervals in ms; conceal every failure and keep trying */
static const uint32_t botable[] =
		{ 300, 500, 650, 800, 800, 900, 1000, 1100, 1500 };
static const lws_retry_bo_t bo = {
	botable, LWS_ARRAY_SIZE(botable), LWS_RETRY_CONCEAL_ALWAYS, 0, 0, 20 };

/*
 * Once we resolved the remote server (implying we should have network),
 * we use a different policy on the wsi itself that gives it a few tries before
 * failing the wsi and using to outer retry policy to get dns to a different
 * server in the pool and try fresh
 */

/* wsi-level policy: three tries, then fail back to the outer policy */
static const uint32_t botable2[] = { 1000, 1250, 5000 /* in case dog slow */ };
static const lws_retry_bo_t bo2 = {
	botable2, LWS_ARRAY_SIZE(botable2), LWS_ARRAY_SIZE(botable2),
	/* don't conceal after the last table entry */ 0, 0, 20 };
/*
 * sul callback (also called directly by lws_ntpc_trigger()): create the UDP
 * wsi aimed at the ntp server, rescheduling ourselves on the outer backoff
 * policy if that fails.
 */
static void
lws_ntpc_retry_conn(struct lws_sorted_usec_list *sul)
{
	struct vhd_ntpc *v = lws_container_of(sul, struct vhd_ntpc, sul_conn);

	lwsl_debug("%s: wsi_udp: %s\n", __func__, lws_wsi_tag(v->wsi_udp));

	/*
	 * Nothing to do if a connection already exists, or a retry is still
	 * scheduled (sul listed means a pending attempt is queued)
	 */
	if (v->wsi_udp || !lws_dll2_is_detached(&v->sul_conn.list))
		return;

	/* create the UDP socket aimed at the server */

	lwsl_notice("%s: server %s\n", __func__, v->ntp_server_ads);
	v->retry_count_write = 0;
	/* the wsi itself retries on the tighter bo2 policy */
	v->wsi_udp = lws_create_adopt_udp(v->vhost, v->ntp_server_ads, 123, 0,
					  v->protocol->name, NULL, NULL, NULL,
					  &bo2, "ntpclient");
	lwsl_debug("%s: created wsi_udp: %s\n", __func__, lws_wsi_tag(v->wsi_udp));
	if (!v->wsi_udp) {
		lwsl_err("%s: unable to create udp skt\n", __func__);

		/* try again later per the outer backoff policy */
		lws_retry_sul_schedule(v->context, 0, &v->sul_conn, &bo,
				       lws_ntpc_retry_conn, &v->retry_count_conn);
	}
}
/*
 * sul callback: request another WRITEABLE on the ntp UDP wsi, so the client
 * request gets retransmitted if no reply arrived yet.
 */
static void
lws_ntpc_retry_write(struct lws_sorted_usec_list *sul)
{
	struct vhd_ntpc *v = lws_container_of(sul, struct vhd_ntpc, sul_write);

	lwsl_debug("%s\n", __func__);

	if (!v || !v->wsi_udp)
		return;

	lws_callback_on_writable(v->wsi_udp);
}
/*
 * Protocol callback for the ntp client: creates the per-vhost vhd, manages
 * the UDP connection to the ntp server, sends the 48-byte NTPv3 client
 * request (with retransmits per the wsi retry policy) and applies the time
 * from the server's reply.
 *
 * Fix: the result of lws_protocol_vh_priv_zalloc() was not checked; on OOM
 * the following v-> member writes dereferenced NULL.  We now fail the init
 * cleanly in that case.
 */
static int
callback_ntpc(struct lws *wsi, enum lws_callback_reasons reason, void *user,
	      void *in, size_t len)
{
	struct vhd_ntpc *v = (struct vhd_ntpc *)
			lws_protocol_vh_priv_get(lws_get_vhost(wsi),
						 lws_get_protocol(wsi));
	uint8_t pkt[LWS_PRE + 48];
	struct timeval t1;
	int64_t delta_us;
	uint64_t ns;

	switch (reason) {

	case LWS_CALLBACK_PROTOCOL_INIT: /* per vhost */
		if (v)
			break;

		lwsl_debug("%s: LWS_CALLBACK_PROTOCOL_INIT:\n", __func__);
		lws_protocol_vh_priv_zalloc(wsi->a.vhost, wsi->a.protocol,
					    sizeof(*v));
		v = (struct vhd_ntpc *)lws_protocol_vh_priv_get(wsi->a.vhost,
								wsi->a.protocol);
		if (!v)
			/* the zalloc above must have failed (OOM) */
			return -1;

		v->context = lws_get_context(wsi);
		v->vhost = lws_get_vhost(wsi);
		v->protocol = lws_get_protocol(wsi);

		/* so lws_ntpc_trigger() can find us from the context */
		v->context->ntpclient_priv = v;

		if (!lws_system_get_ops(wsi->a.context) ||
		    !lws_system_get_ops(wsi->a.context)->set_clock) {
#if !defined(LWS_ESP_PLATFORM)
			lwsl_err("%s: set up system ops for set_clock\n",
				 __func__);
#endif
			// return -1;
		}

		/* register our lws_system notifier */

		v->ntp_server_ads = "pool.ntp.org";
		lws_plat_ntpclient_config(v->context);

		/* a system blob may override the default server */
		lws_system_blob_get_single_ptr(lws_system_get_blob(
					v->context, LWS_SYSBLOB_TYPE_NTP_SERVER, 0),
					(const uint8_t **)&v->ntp_server_ads);
		if (!v->ntp_server_ads || v->ntp_server_ads[0] == '\0')
			v->ntp_server_ads = "pool.ntp.org";

		lwsl_notice("%s: using ntp server %s\n", __func__,
			    v->ntp_server_ads);
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY: /* per vhost */
		if (!v)
			break;
		if (v->wsi_udp)
			lws_set_timeout(v->wsi_udp, 1, LWS_TO_KILL_ASYNC);
		v->wsi_udp = NULL;
		goto cancel_conn_timer;

	/* callbacks related to raw socket descriptor */

	case LWS_CALLBACK_RAW_ADOPT:
		lwsl_debug("%s: LWS_CALLBACK_RAW_ADOPT\n", __func__);
		lws_callback_on_writable(wsi);
		break;

	case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
		lwsl_info("%s: CONNECTION_ERROR\n", __func__);
		goto do_close;

	case LWS_CALLBACK_RAW_CLOSE:
		lwsl_debug("%s: LWS_CALLBACK_RAW_CLOSE\n", __func__);
do_close:
		v->wsi_udp = NULL;

		/* cancel any pending write retry */
		lws_sul_cancel(&v->sul_write);

		/* if we already got the time, we are done for good */
		if (v->set_time)
			goto cancel_conn_timer;

		lws_retry_sul_schedule(v->context, 0, &v->sul_conn, &bo,
				       lws_ntpc_retry_conn,
				       &v->retry_count_conn);
		break;

	case LWS_CALLBACK_RAW_RX:
		if (len != 48)
			return 0; /* ignore it */

		/*
		 * First get the seconds, corrected for the ntp epoch of 1900
		 * vs the unix epoch of 1970.  Then shift the seconds up by 1bn
		 * and add in the ns
		 */
		ns = (uint64_t)lws_ser_ru32be(((uint8_t *)in) + 40) - (uint64_t)2208988800;
		ns = (ns * 1000000000) + lws_ser_ru32be(((uint8_t *)in) + 44);

		/*
		 * Compute the step
		 */

		gettimeofday(&t1, NULL);
		delta_us = ((int64_t)ns / 1000) -
				((t1.tv_sec * LWS_US_PER_SEC) + t1.tv_usec);

		lwsl_notice("%s: Unix time: %llu, step: %lldus\n", __func__,
			    (unsigned long long)ns / 1000000000,
			    (long long)delta_us);

#if defined(LWS_PLAT_FREERTOS)
		{
			struct timeval t;

			t.tv_sec = (unsigned long long)ns / 1000000000;
			t.tv_usec = (ns % 1000000000) / 1000;

			lws_sul_nonmonotonic_adjust(wsi->a.context, delta_us);

			settimeofday(&t, NULL);
		}
#endif
		if (lws_system_get_ops(wsi->a.context) &&
		    lws_system_get_ops(wsi->a.context)->set_clock)
			lws_system_get_ops(wsi->a.context)->set_clock((int64_t)ns / 1000);

		v->set_time = 1;
		lws_state_transition_steps(&wsi->a.context->mgr_system,
					   LWS_SYSTATE_OPERATIONAL);

		/* close the wsi */
		return -1;

	case LWS_CALLBACK_RAW_WRITEABLE:
		/*
		 * UDP is not reliable, it can be locally dropped, or dropped
		 * by any intermediary or the remote peer.  So even though we
		 * will do the write in a moment, we schedule another request
		 * for rewrite according to the wsi retry policy.
		 *
		 * If the result came before, we'll cancel it in the close flow.
		 *
		 * If we have already reached the end of our concealed retries
		 * in the policy, just close without another write.
		 */
		if (lws_dll2_is_detached(&v->sul_write.list) &&
		    lws_retry_sul_schedule_retry_wsi(wsi, &v->sul_write,
						     lws_ntpc_retry_write,
						     &v->retry_count_write)) {
			/* we have reached the end of our concealed retries */
			lwsl_warn("%s: concealed retries done, failing\n", __func__);
			goto retry_conn;
		}

		/* NTPv3 client request: LI none, version 3, mode client */
		memset(pkt + LWS_PRE, 0, sizeof(pkt) - LWS_PRE);
		pkt[LWS_PRE] = (LWSNTPC_LI_NONE << 6) |
			       (LWSNTPC_VN_3 << 3) |
			       (LWSNTPC_MODE_CLIENT << 0);

		if (lws_write(wsi, pkt + LWS_PRE, sizeof(pkt) - LWS_PRE, 0) ==
						  sizeof(pkt) - LWS_PRE)
			break;

		lwsl_err("%s: Failed to write ntp client req\n", __func__);

retry_conn:
		lws_retry_sul_schedule(wsi->a.context, 0, &v->sul_conn, &bo,
				       lws_ntpc_retry_conn,
				       &v->retry_count_conn);

		return -1;

	default:
		break;
	}

	return 0;

cancel_conn_timer:
	lws_sul_cancel(&v->sul_conn);

	return 0;
}
/*
 * Kick off (or re-kick) an ntp time fetch, eg, after the network came up.
 *
 * Fix: guard against ctx->ntpclient_priv being NULL (the protocol was never
 * initialized on any vhost), which would otherwise be dereferenced.
 */
void
lws_ntpc_trigger(struct lws_context *ctx)
{
	struct vhd_ntpc *v = (struct vhd_ntpc *)ctx->ntpclient_priv;

	lwsl_notice("%s\n", __func__);

	if (!v)
		/* ntp client protocol not initialized: nothing to trigger */
		return;

	v->retry_count_conn = 0;
	lws_ntpc_retry_conn(&v->sul_conn);
}
/* the vhost protocol for the ntp client; 128B rx covers the 48B reply */
struct lws_protocols lws_system_protocol_ntpc =
	{ "lws-ntpclient", callback_ntpc, 0, 128, 0, NULL, 0 };

View File

@ -0,0 +1,735 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2022 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Secure Streams / OTA
*
* In the interests of minimizing heap usage, OTA SS is only existing during
* update checks, update bulk data download, and OTA storage. Checks are
* initiated by cx->sul_ota_periodic which is triggered at OPERATIONAL and then
* periodically as set in system_ops->ota_ops->ota_periodic_check_secs.
*/
#include "private-lib-core.h"

/* public JWK used to validate the JWS-signed update manifest */
static const char * const ota_pub_jwk = LWS_OTA_PUBLIC_JWK;

/* This is a string that is unique to the build type / application... we use
 * it to make sure that we are updating to the same kind of build... */
const char *lws_ota_variant = LWS_OTA_VARIANT;
/*
 * sul callback that stages inflated update data into g->buf and queues it
 * for asynchronous write to OTA storage.  Also reused while in
 * LWSOS_AWAITING_MODAL to retry entering the system's modal update state.
 */
static void
ota_write_sul_cb(lws_sorted_usec_list_t *sul)
{
	lws_ota_t *g = lws_container_of(sul, lws_ota_t, sul_drain);

	/* we use this to retry entering modal */

	if (g->state == LWSOS_AWAITING_MODAL) {
		const lws_ota_ops_t *ota_ops = &g->cx->system_ops->ota_ops;

		/*
		 * Ask the user code to move to AWAITING_MODAL_UPDATING which it
		 * should agree to... and then MODAL_UPDATING where it may choose
		 * to indicate it can't stop what it's doing right now.
		 */
		lws_state_transition(&g->cx->mgr_system,
				     LWS_SYSTATE_AWAITING_MODAL_UPDATING);
		lws_state_transition(&g->cx->mgr_system,
				     LWS_SYSTATE_MODAL_UPDATING);

		if (g->cx->mgr_system.state != LWS_SYSTATE_MODAL_UPDATING) {
			/*
			 * Something decided we can't do the update right now, eg,
			 * he's busy rendering something that would exhaust the heap
			 * if we also tried to get on with the update.
			 *
			 * Let's try again in 1s, up to a timeout.
			 */
			lwsl_ss_warn(g->ss, "Scheduling update mode retry");
			lws_sul_schedule(g->cx, 0, &g->sul_drain,
					 ota_write_sul_cb, LWS_US_PER_SEC);

			return;
		}

		/* we can go ahead now, the system is in the update mode */

		g->state = LWSOS_FETCHING;

		/* prep the gzip stream decompression */

		g->inflate = lws_upng_inflator_create(&g->outring,
					&g->outringlen, &g->opl, &g->cl);
		if (!g->inflate) {
			lwsl_err("%s: zlib init failed\n", __func__);
			goto update_impossible;
		}
		g->state = LWSOS_FETCHING_INITED_GZ;

		/* prep the hash computation of the decompressed data */

		if (lws_genhash_init(&g->ctx, LWS_GENHASH_TYPE_SHA512)) {
			lwsl_err("%s: hash init failed\n", __func__);
			goto update_impossible;
		}
		g->state = LWSOS_FETCHING_INITED_GZ_HASH;

		/* we don't want to create a dupe of ourselves while
		 * we're busy doing the OTA */
		lws_sul_cancel(&g->cx->sul_ota_periodic);

		lwsl_warn("%s: platform ota start\n", __func__);

		/* continues asynchronously */

		if (ota_ops->ota_start(g)) {
			lwsl_err("%s: ota_start failed\n", __func__);
			goto update_impossible;
		}

		return;

update_impossible:
		/* mark failed; the short SS timeout will tear us down */
		g->state = LWSOS_FAILED;
		lws_ss_start_timeout(g->ss, 1);

		return;
	}

	if (*((volatile lws_ota_async_t *)&g->async_last)) {
		/*
		 * The task is busy, we can't start anything atm.  When it
		 * is finished, the write completion will come back here.
		 */
		// lwsl_notice("%s: async_last busy\n", __func__);
		return;
	}

	/*
	 * We have a chance to write the next chunk... let's stage g->buf with
	 * as much inflated data as we can with what we have to hand, and set it
	 * writing
	 */

	g->buf_len = 0;

	while (g->buf_len < sizeof(g->buf) - 8 &&
	       g->seen + g->buf_len < g->expected_size) {
		lws_stateful_ret_t sr = 0;
		size_t os, part;

		/* inflator pauses for WANT_OUTPUT after this many bytes out */
		g->inflate->bypl = sizeof(g->buf) - g->buf_len - 1;

		if (*g->opl == *g->cl) {
			/* No output pending.. do we have unused input left? */

			if (g->flow.len) {
				/*
				 * There's some input already available,
				 * let's process that and see if it helped
				 */
				sr = lws_upng_inflate_data(g->inflate, NULL, 0);
				if (sr & LWS_SRET_FATAL) {
					lwsl_ss_err(g->ss, "inflate error 1");
					goto fail;
				}
				/* remaining unconsumed input */
				g->flow.len = g->inflate->inlen - (g->inflate->bp >> 3);
			}

			if (*g->opl == *g->cl) {
				/*
				 * Still no output available... let's
				 * attempt to move to the next
				 */
				lws_flow_req(&g->flow);
				if (!g->flow.len)
					break;

				sr = lws_upng_inflate_data(g->inflate,
							   g->flow.data, g->flow.len);
				g->flow.len = g->inflate->inlen -
						(g->inflate->bp >> 3);
			}
		} /* there is already output pending */

		if (sr & LWS_SRET_FATAL) {
			lwsl_ss_err(g->ss, "inflate error %d", sr & 0xff);
			goto fail;
		}

		/* how much inflated output is available in the ring */
		os = ((*g->opl - g->old_op) % g->outringlen);
		if (os > sizeof(g->buf) - g->buf_len)
			os = sizeof(g->buf) - g->buf_len;
		if (!os) {
			lwsl_err("%s: Nothing to compose in\n", __func__);
			break;
		}

		/* copy out of the ring in up to two pieces around the wrap */
		part = os;
		if (*g->opl % g->outringlen < g->old_op)
			part = g->outringlen - g->old_op;

		memcpy(g->buf + g->buf_len, g->outring + g->old_op, part);
		g->buf_len += part;
		if (part != os) {
			memcpy(g->buf + g->buf_len, g->outring, os - part);
			g->buf_len += os - part;
		}

		g->old_op = *g->opl % g->outringlen;
		*g->cl += os;

	} /* while try to fill the staging buffer */

	if (!g->buf_len)
		/* no ammo to work with... we will come back next time we
		 * get some rx */
		return;

	g->seen += g->buf_len;
	if (g->seen > g->expected_size) {
		lwsl_ss_err(g->ss, "oversize payload");
		goto fail;
	}

	/* let's track the hash as we get it */

	if (lws_genhash_update(&g->ctx, g->buf, g->buf_len)) {
		lwsl_ss_err(g->ss, "hash update failed");
		goto fail;
	}

	if (g->seen == g->expected_size) {
		char temp[64];

		/* whole payload in hand: confirm the SHA512 matches */
		lws_upng_inflator_destroy(&g->inflate);
		lws_genhash_destroy(&g->ctx, temp);

		if (memcmp(temp, g->sha512, sizeof(temp))) {
			lwsl_err("%s: payload hash differs\n", __func__);
			goto fail;
		}
	}

	/* queue the staged chunk for asynchronous write to storage */
	g->cx->system_ops->ota_ops.ota_queue(g, LWS_OTA_ASYNC_WRITE);

	return;

fail:
	g->flow.state = LWSDLOFLOW_STATE_READ_FAILED;
	lws_ss_cx_from_user(g)->system_ops->ota_ops.ota_queue(g,
						LWS_OTA_ASYNC_ABORT);
}
/*
 * Completion handler for LWS_OTA_ASYNC_START: on success, reconnect the SS
 * to begin fetching the image and kick the write drain; on failure, queue
 * an abort.
 */
static void
ota_completion_start(lws_ota_t *g)
{
	if (g->async_r != LWSOTARET_OK) {
		lwsl_ss_err(g->ss, "OTA START FAILED r %d", g->async_r);
		g->flow.state = LWSDLOFLOW_STATE_READ_FAILED;
		lws_ss_cx_from_user(g)->system_ops->ota_ops.ota_queue(g,
							LWS_OTA_ASYNC_ABORT);
		return;
	}

	/* we can start writing now */

	g->ota_start_done = 1;
	g->state = LWSOS_STARTED;

	/* reconnect the SS to fetch the image payload */
	if (lws_ss_client_connect(lws_ss_from_user(g)))
		lwsl_ss_warn(g->ss, "reconn failed");

	/* start draining any data we already buffered */
	lws_sul_schedule(g->cx, 0, &g->sul_drain, ota_write_sul_cb, 1);
}
/*
 * Completion handler for LWS_OTA_ASYNC_WRITE: account the chunk, emit
 * percentage progress, and either stage the next chunk or finalize when the
 * whole expected size has been written.
 */
static void
ota_completion_write(lws_ota_t *g)
{
	const lws_ota_ops_t *ota_ops = &g->cx->system_ops->ota_ops;
	uint8_t pc;

	if (g->async_r != LWSOTARET_OK) {
		lwsl_ss_err(g->ss, "r %d", g->async_r);
		g->flow.state = LWSDLOFLOW_STATE_READ_FAILED;
		lws_ss_cx_from_user(g)->system_ops->ota_ops.ota_queue(g,
							LWS_OTA_ASYNC_ABORT);
		return;
	}

	g->written += g->buf_len;
	/* NOTE(review): assumes expected_size is nonzero by the time write
	 * completions arrive -- confirm against manifest handling */
	pc = (uint8_t)((g->written * 100) / g->expected_size);
	if (pc != g->last_pc) {
		g->last_pc = pc;
		lwsl_notice("%s: %u%%\n", __func__, pc);
		if (ota_ops->ota_progress)
			g->cx->system_ops->ota_ops.ota_progress(LWSOTARET_PROGRESS, pc);
	}

	if (g->written != g->expected_size) {
		/* more to do... stage the next chunk */
		lws_sul_schedule(g->cx, 0, &g->sul_drain, ota_write_sul_cb, 1);

		return;
	}

	/* We have completed writing the last part */

	lwsl_warn("%s: finalizing good ota\n", __func__);
	g->cx->system_ops->ota_ops.ota_queue(g, LWS_OTA_ASYNC_FINALIZE);
}
/*
 * Completion handler for LWS_OTA_ASYNC_FINALIZE: reboot into the new image
 * on success.  NOTE(review): assumes system_ops->reboot is provided when
 * OTA ops are configured -- confirm against platform integration.
 */
static void
ota_completion_finalize(lws_ota_t *g)
{
	lwsl_notice("%s: %d\n", __func__, g->async_r);

	if (g->async_r)
		return;

	g->cx->system_ops->reboot();
}
/*
 * Completion handler for LWS_OTA_ASYNC_ABORT: leave the modal update state,
 * time this SS out, and reschedule the next periodic update check.
 */
static void
ota_completion_abort(lws_ota_t *g)
{
	int secs = 0;

	if (g->cx->system_ops && g->cx->system_ops->ota_ops.ota_periodic_check_secs)
		secs = g->cx->system_ops->ota_ops.ota_periodic_check_secs;

	/* return from modal update state */
	lws_state_transition(&g->cx->mgr_system, LWS_SYSTATE_OPERATIONAL);

	/* we've had it */
	lws_ss_start_timeout(g->ss, 1);

	/* next check per the platform's period, defaulting to daily */
	lws_sul_schedule(g->cx, 0, &g->cx->sul_ota_periodic, lws_ota_periodic_cb,
			 secs ? secs * LWS_US_PER_SEC : 24 * 3600 * LWS_US_PER_SEC);
}
static lws_ss_state_return_t
ota_rx(void *userobj, const uint8_t *in, size_t len, int flags)
{
lws_ss_state_return_t r = LWSSSSRET_DISCONNECT_ME;
lws_ota_t *g = (lws_ota_t *)userobj;
const lws_ota_ops_t *ota_ops = &lws_ss_cx_from_user(g)->system_ops->ota_ops;
struct lws_jws_map map;
struct lws_jwk jwk;
uint64_t fw_last;
char temp[1024];
int temp_len = sizeof(temp);
const char *p;
size_t alen;
int n;
if (g->state >= LWSOS_FETCHING) {
lwsl_info("%s: fetching %u, fl 0x%02X\n", __func__, (unsigned int)len, flags);
/*
* We are decompressing, checking and flashing the image.
*
* g->flow and its buflist is managing COMPRESSED data from the
* network according to g->flow.window limit. Rx events are
* tiggered by tx credit manipulation from, and coming to
* service g->flow / buflist state ONLY and do not know or care
* about direct inflator state (it makes itself felt by using
* g->flow data in the write completion).
*
* The inflator may not need any g->flow data to produce output,
* or it may need all of it and more before it can produce
* output, or somewhere in the middle. At the output side, we
* have a fixed-size staging buffer so we may need to come back
* to issue more inflated data without any network event
* triggering it.
*/
if (flags & LWSSS_FLAG_SOM) {
g->state = LWSOS_WRITING;
g->flow.state = LWSDLOFLOW_STATE_READ;
g->flow.h = g->ss;
g->flow.window = 4096;
if (ota_ops->ota_progress)
ota_ops->ota_progress(LWSOTARET_PROGRESS, 0);
}
if (len &&
lws_buflist_append_segment(&g->flow.bl, in, len) < 0) {
lwsl_ss_err(g->ss, "OOM");
goto fetch_fail;
}
lws_sul_schedule(g->cx, 0, &g->sul_drain, ota_write_sul_cb, 1);
if (flags & LWSSS_FLAG_EOM)
/*
* This was the last part, so there is no more new data
* in flight
*/
g->flow.state = (uint8_t)LWSDLOFLOW_STATE_READ_COMPLETED;
return LWSSSSRET_OK;
fetch_fail:
g->flow.state = LWSDLOFLOW_STATE_READ_FAILED;
return LWSSSSRET_DISCONNECT_ME;
}
/* we are collecting the manifest... */
if (g->pos + len > sizeof(g->buf))
return LWSSSSRET_DISCONNECT_ME;
memcpy(g->buf + g->pos, in, len);
g->pos += len;
if ((flags & LWSSS_FLAG_EOM) != LWSSS_FLAG_EOM)
return LWSSSSRET_OK;
/* we want to validate the JWS manifest against our public JWK */
if (lws_jwk_import(&jwk, NULL, NULL, ota_pub_jwk, strlen(ota_pub_jwk))) {
lwsl_err("%s: unable to import jwk\n", __func__);
return LWSSSSRET_DISCONNECT_ME;
}
/* Step 1... is the JWS signed by the required key? */
if (lws_jws_sig_confirm_compact_b64(g->buf, g->pos, &map, &jwk,
lws_ss_cx_from_user(g), temp,
&temp_len)) {
lwsl_err("%s: manifest failed sig check\n", __func__);
goto bail;
}
/* finished with the jwk */
lws_jwk_destroy(&jwk);
/* Step 2... the JOSE and payload sections are there, right? */
if (!map.buf[LJWS_JOSE] || !map.buf[LJWS_PYLD]) {
lwsl_err("%s: no JOSE block\n", __func__);
goto bail1;
}
/* Step 3... do we agree the signing alg is secure enough? */
p = lws_json_simple_find(map.buf[LJWS_JOSE], map.len[LJWS_JOSE],
"\"alg\":", &alen);
if (!p) {
lwsl_err("%s: no alg\n", __func__);
goto bail1;
}
if (strncmp("ES512", p, alen)) {
lwsl_err("%s: bad alg %.*s %d\n", __func__, (int)alen, p, (int)alen);
goto bail1;
}
/*
* We trust that the manifest was robustly signed by the key we like,
* let's parse out the pieces we care about and validate the firmware is
* the same variant build as we're currently running, and, eg, we're not
* being given a validly-signed real firmware from the wrong variant,
* that will brick us.
*/
lwsl_hexdump_notice(map.buf[LJWS_PYLD], map.len[LJWS_PYLD]);
lwsl_notice("%s: JWS validated okay\n", __func__);
p = lws_json_simple_find(map.buf[LJWS_PYLD], map.len[LJWS_PYLD],
"\"variant\":", &alen);
if (!p || strncmp(lws_ota_variant, p, alen)) {
lwsl_err("%s: wrong variant %.*s\n", __func__, (int)alen, p);
goto bail1;
}
/*
* We liked the manifest, prepare to go again targeting the payload
* that the manifest described to us.
*/
p = lws_json_simple_find(map.buf[LJWS_PYLD], map.len[LJWS_PYLD],
"\"path\":", &alen);
if (!p) {
lwsl_err("%s: no path\n", __func__);
goto bail1;
}
lws_strnncpy(g->file, p, alen, sizeof(g->file));
if (lws_ss_set_metadata(lws_ss_from_user(g), "file", g->file, alen)) {
lwsl_err("%s: failed to set firmware file %s\n", __func__,
LWS_OTA_VARIANT);
return LWSSSSRET_DISCONNECT_ME;
}
p = lws_json_simple_find(map.buf[LJWS_PYLD], map.len[LJWS_PYLD],
"\"size\":", &alen);
if (!p) {
lwsl_err("%s: no size\n", __func__);
goto bail1;
}
g->expected_size = (size_t)atoll(p);
p = lws_json_simple_find(map.buf[LJWS_PYLD], map.len[LJWS_PYLD],
"\"unixtime\":", &alen);
if (!p) {
lwsl_err("%s: no unxitime\n", __func__);
goto bail1;
}
g->unixtime = (uint64_t)atoll(p);
p = lws_json_simple_find(map.buf[LJWS_PYLD], map.len[LJWS_PYLD],
"\"sha512\":", &alen);
if (!p) {
lwsl_err("%s: no hash\n", __func__);
goto bail1;
}
n = lws_hex_len_to_byte_array(p, alen, g->sha512, sizeof(g->sha512));
if (n != sizeof(g->sha512)) {
lwsl_err("%s: bad hash %d %u %s\n", __func__, n, (unsigned int)alen, p);
goto bail1;
}
/*
* So... is it newer?
*/
if (!ota_ops->ota_get_last_fw_unixtime(&fw_last) &&
g->unixtime <= fw_last) {
/*
* We don't actually want this...
*/
lwsl_ss_warn(g->ss, "Latest update is not newer");
return LWSSSSRET_DISCONNECT_ME;
}
/* ... this is something that we like the look of... schedule trying
* to enter LWS_SYSTATE_MODAL_UPDATING state after this, and retry if
* we don't get there immediately */
g->state = LWSOS_AWAITING_MODAL;
lws_sul_schedule(g->cx, 0, &g->sul_drain, ota_write_sul_cb, 1);
/* on the other hand, don't let it keep trying forever */
lws_ss_start_timeout(g->ss, 30000);
/*
* We will DISCONNECT shortly, we won't proceed to the update image
* download unless we can agree with the user code to enter MODAL_
* UPDATING within a timeout. Otherwise we will give up and retry
* after 24h or whatever.
*/
return LWSSSSRET_OK;
bail:
lws_jwk_destroy(&jwk);
bail1:
return r;
}
/*
 * Secure Stream state callback for the "ota" stream.
 *
 * Drives the lifecycle for both the manifest check connection and the
 * subsequent firmware fetch, and dispatches completions of async platform
 * OTA operations that are signalled by cancelling the event wait.
 */
static lws_ss_state_return_t
ota_state(void *userobj, void *h_src, lws_ss_constate_t state,
	  lws_ss_tx_ordinal_t ack)
{
	lws_ota_t *g = (lws_ota_t *)userobj;
	int n;

	switch ((int)state) {

	case LWSSSCS_CREATING: /* start the transaction as soon as we exist */

		g->cx = lws_ss_cx_from_user(g);
		g->cx->ota_ss = g->ss;
		g->state = LWSOS_CHECKING;

		/* identify our firmware variant to the update server */
		if (lws_ss_set_metadata(lws_ss_from_user(g),
					"ota_variant", LWS_OTA_VARIANT,
					strlen(LWS_OTA_VARIANT))) {
			lwsl_err("%s: failed to set ota_variant %s\n", __func__,
				 LWS_OTA_VARIANT);
			return LWSSSSRET_DISCONNECT_ME;
		}

		/* the first fetch is always the signed manifest */
		if (lws_ss_set_metadata(lws_ss_from_user(g),
					"file", "manifest.jws", 12)) {
			lwsl_err("%s: failed to set ota_variant %s\n", __func__,
				 LWS_OTA_VARIANT);
			return LWSSSSRET_DISCONNECT_ME;
		}

		return lws_ss_client_connect(lws_ss_from_user(g));

	case LWSSSCS_DISCONNECTED:
		/*
		 * We have two kinds of connection that may disconnect, the
		 * manifest fetch, and the firmware fetch.
		 */
		switch (g->state) {
		case LWSOS_FETCHING_INITED_GZ_HASH:
		case LWSOS_FETCHING:
			return LWSSSSRET_OK;

		case LWSOS_WRITING:
			/*
			 * The network part of fetching the update image is
			 * over.  If it didn't fail, we need to stick around and
			 * let it either finish / writing and finalizing, or
			 * timeout.
			 */
			lwsl_notice("%s: draining\n", __func__);
			lws_ss_start_timeout(g->ss, 45000);
			return LWSSSSRET_OK;

		case LWSOS_AWAITING_MODAL:
			/*
			 * We might have to wait a bit to find a good moment to
			 * enter the update mode.  If we disconnect
			 * inbetweentimes, it's OK.
			 */
			return LWSSSSRET_OK;

		default:
			lwsl_notice("%s: state %d, DESTROYING\n", __func__, g->state);
			return LWSSSSRET_DESTROY_ME;
		}

	case LWSSSCS_DESTROYING:
		/* we only live for one ota check / fetch */
		lws_ss_cx_from_user(g)->ota_ss = NULL;
		lws_buflist_destroy_all_segments(&g->flow.bl);
		lws_sul_cancel(&g->sul_drain);
		if (g->state == LWSOS_FETCHING_INITED_GZ_HASH)
			lws_genhash_destroy(&g->ctx, NULL);
		/* the inflator only exists between GZ init and finalizing */
		if (g->state >= LWSOS_FETCHING_INITED_GZ &&
		    g->state < LWSOS_FINALIZING)
			lws_upng_inflator_destroy(&g->inflate);
		return LWSSSSRET_OK;

	case LWSSSCS_TIMEOUT:
		lwsl_err("%s: timeout\n", __func__);
		return LWSSSSRET_DESTROY_ME;

	case LWSSSCS_EVENT_WAIT_CANCELLED:
		/* We may have a completion */
		if (g->async_completed) {
			g->async_completed = 0;
			n = g->async_last;
			/* volatile store: async_last is presumably also
			 * touched from the platform op context -- confirm */
			*((volatile lws_ota_async_t *)&g->async_last) = 0;

			switch (n) {
			case LWS_OTA_ASYNC_START:
				ota_completion_start(g);
				break;
			case LWS_OTA_ASYNC_WRITE:
				ota_completion_write(g);
				break;

			/* EVENT_WAIT_CANCELLED doesn't deal with returns */

			case LWS_OTA_ASYNC_ABORT:
				/* let's forget about it then */
				lws_ss_start_timeout(g->ss, 1);
				ota_completion_abort(g);
				break;
			case LWS_OTA_ASYNC_FINALIZE:
				lws_ss_start_timeout(g->ss, 5000);
				ota_completion_finalize(g);
				break;
			}
		}
		break;
	}

	return LWSSSSRET_OK;
}
/*
 * Secure Stream descriptor for the "ota" streamtype: rx / state callbacks
 * above, initial tx credit sized to the manifest collection buffer.
 */
static LWS_SS_INFO("ota", lws_ota_t)
	.rx = ota_rx,
	.state = ota_state,
	.manual_initial_tx_credit = sizeof(((lws_ota_t *)NULL)->buf),
};
/*
 * Creates the SS and kicks off the manifest check.
 *
 * Scheduled on cx->sul_ota_periodic; always reschedules itself at the
 * ops-provided period, or every 24h if none is set.
 */
void
lws_ota_periodic_cb(lws_sorted_usec_list_t *sul)
{
	struct lws_context *cx = lws_container_of(sul, struct lws_context,
						  sul_ota_periodic);
	int secs = 0;

	if (cx->system_ops && cx->system_ops->ota_ops.ota_periodic_check_secs)
		secs = cx->system_ops->ota_ops.ota_periodic_check_secs;

	lwsl_notice("%s\n", __func__);

	/* the SS destroys itself when the check / fetch completes */
	if (lws_ss_create(cx, 0, &ssi_lws_ota_t, NULL, NULL, NULL, NULL))
		lwsl_cx_warn(cx, "failed to create ota SS");

	/* set up coming back again at (usually long) periods */
	lws_sul_schedule(cx, 0, sul, lws_ota_periodic_cb,
			 secs ? secs * LWS_US_PER_SEC : 24 * 3600 * LWS_US_PER_SEC);
}
/* Returns the build-time firmware variant name used for OTA matching */
const char *
lws_ota_variant_name(void)
{
	return lws_ota_variant;
}

View File

@ -0,0 +1,8 @@
include_directories(.)
list(APPEND SOURCES
system/smd/smd.c
)
exports_to_parent_scope()

View File

@ -0,0 +1,282 @@
# LWS System Message Distribution
## Overview
Independent pieces of a system may need to become aware of events and state
changes in the other pieces quickly, along with the new state if it is small.
These messages are local to inside a system, although they may be triggered by
events outside of it. Examples include keypresses, or networking state changes.
Individual OSes and frameworks typically have their own fragmented APIs for
message-passing, but the lws APIs operate the same across all platforms,
including, eg, Windows and RTOS, and allow cross-platform code to be written once.
Message payloads are short, less than 384 bytes, below system limits for atomic
pipe or UDS datagrams and consistent with heap usage on smaller systems, but
large enough to carry JSON usefully. Messages are typically low duty cycle.
![SMD message](/doc-assets/smd-message.png)
Messages may be sent by any registered participant, they are allocated on heap
in a linked-list, and delivered to all other registered participants for that
message class no sooner than next time around the event loop. This retains the
ability to handle multiple event queuing in one event loop trip while
guaranteeing message handling is nonrecursive and so with modest stack usage.
Messages are passed to all other registered participants before being destroyed.
Messages are delivered to all participants on the same lws_context by default.
![SMD message](/doc-assets/smd-single-process.png)
`lws_smd` apis allow publication and subscription of message objects between
participants that are in a single process and are informed by callback from lws
service thread context.
SMD messages can also be broadcast between participants in different lws_contexts in
different processes, using existing Secure Streams proxying. In this way
different application processes can intercommunicate and all observe any system
smd messages they are interested in.
![SMD message](/doc-assets/smd-proxy.png)
Registering as a participant and sending messages are threadsafe APIs.
## Message Class
Message class is a bitfield messages use to indicate their general type, eg,
network status, or UI event like a keypress. Participants set a bitmask to
filter what kind of messages they care about, classes that are 0 in the peer's
filter are never delivered to the peer. A message usually indicates it is a
single class, but it's possible to set multiple class bits and match on any. If
so, care must be taken the payload can be parsed by readers expecting any of the
indicated classes, eg, by using JSON.
`lws_smd` tracks a global union mask for all participants' class mask. Requests
to allocate a message of a class that no participant listens for are rejected,
not at distribution-time but at message allocation-time, so no heap or cpu is
wasted on things that are not currently interesting; but such messages start to
appear as soon as a participant appears that wants them. The message generation
action should be bypassed without error in the case lws_smd_msg_alloc()
returns NULL.
Various well-known high level classes are defined but also a bit index
`LWSSMDCL_USER_BASE_BITNUM`, which can be used by user code to define up to 8
private classes, with class bit values `(1 << LWSSMDCL_USER_BASE_BITNUM)` thru
`(1 << (LWSSMDCL_USER_BASE_BITNUM + 7))`
## Messaging guarantees
Sent messages are delivered to all registered participants whose class mask
indicates they want it, including the sender. The send apis are threadsafe.
Locally-delivered message delivery callbacks occur from lws event loop thread
context 0 (the only one in the default case `LWS_MAX_SMP` = 1). Clients in
different processes receive callbacks from the thread context of their UDS
networking thread.
The message payload may be destroyed immediately when you return from the
callback, you can't store references to it or expect it to be there later.
Messages are timestamped with a systemwide monotonic timestamp. When
participants are on the lws event loop, messages are delivered in-order. When
participants are on different threads, delivery order depends on platform lock
acquisition. External process participants are connected by the Unix Domain
Socket capability of Secure Streams, and may be delivered out-of-order;
receivers that care must consult the message creation timestamps.
## Message Refcounting
To avoid keeping a list of the length of the number of participants for each
message, a refcount is used in the message, computed at the time the message
arrived considering the number of active participants that indicated a desire to
receive messages of that class.
Since peers may detach / close their link asynchronously, the logical peer
objects at the distributor defer destroying themselves until there is no more
possibility of messages arriving timestamped with the period they were active.
A grace period (default 2s) is used to ensure departing peers correctly account
for message refcounts before being destroyed.
## Message creation
Messages may contain arbitrary text or binary data depending on the class. JSON
is recommended since lws_smd messages are small and low duty cycle but have
open-ended content: JSON is maintainable, extensible, debuggable and self-
documenting and avoids, eg, fragile dependencies on header versions shared
between teams. To simplify issuing JSON, a threadsafe api to create and send
messages in one step using format strings is provided:
```
int
lws_smd_msg_printf(struct lws_context *ctx, lws_smd_class_t _class,
const char *format, ...);
```
## Secure Streams `lws_smd` streamtype
When built with LWS_WITH_SECURE_STREAMS, lws_smd exposes a built-in streamtype
`_lws_smd` which user Secure Streams may use to interoperate with lws_smd using
SS payload semantics.
When using `_lws_smd`, the SS info struct member `manual_initial_tx_credit`
provided by the user when creating the Secure Stream is overloaded to be used as
the RX class mask for the SMD connection associated with the Secure Stream.
Both RX and TX payloads have a 16-byte binary header before the actual payload.
For TX, although the header is 16-bytes, only the first 64-bit class bitfield
needs setting, the timestamp is fetched and added by lws.
- MSB-first 64-bit class bitfield (currently only 32 least-sig in use)
- MSB-First Order 64-bit us-resolution timestamp
A helper `lws_smd_ss_msg_printf()` is provided to format, create and send an
SMD message from the SS tx() callback in one step, using the same api layout
as for direct messages via `lws_smd_msg_printf()`.
```
int
lws_smd_ss_msg_printf(const char *tag, uint8_t *buf, size_t *len,
lws_smd_class_t _class, const char *format, ...);
```
## Well-known message schema
Class|Schema
---|---
LWSSMDCL_INTERACTION|lws_button events
LWSSMDCL_NETWORK|captive portal detection requests and results
LWSSMDCL_SYSTEM_STATE|lws_system state progression
### User interaction Button events
Class: `LWSSMDCL_INTERACTION`
Produced by lws_button when a user interacts with a defined button.
Click-related events are produced alongside up and down related events, the
participant can choose which to attend to according to the meaning of the
interaction.
Both kinds of event go through sophisticated filtering before being issued, see
`./lib/drivers/button/README.md` for details.
#### SMD Button interaction event
Schema:
```
{
"type": "button",
"src": "<controller-name>/<button-name>",
"event": "<event-name>"
}
```
For example, `{"type":"button","src":"bc/user","event":"doubleclick"}`
Event name|Meaning
---|---
down|The button passes a filter for being down, useful for duration-based response
up|The button has come up, useful for duration-based response
click|The button activity resulted in a classification as a single-click
longclick|The button activity resulted in a classification as a long-click
doubleclick|The button activity resulted in a classification as a double-click
### Routing Table Change
Class: `LWSSMDCL_NETWORK`
If able to subscribe to OS routing table changes (eg, by rtnetlink on Linux
which is supported), lws announces there have been changes using SMD.
If Captive Portal Detect is enabled, and routing tables changes can be seen,
then a new CPD is requested automatically and the results will be seen over SMD
when that completes.
Schema:
```
{
"rt": "add|del", "add" if being added
}
```
When the context / pts are created, if linux then lws attempts to get the
routing table sent, which requires root. This is done before the permissions
are dropped after protocols init.
Lws maintains a cache of the routing table in each pt. Upon changes, existing
connections are reassessed to see if their peer can still be routed to, if not
the connection is closed.
If a gateway route changes, `{"trigger":"cpdcheck","src":"gw-change"}` is
issued on SMD as well.
### Captive Portal Detection
Class: `LWSSMDCL_NETWORK`
Actively detects if the network can reach the internet or if it is
intercepted by a captive portal. The detection steps are programmable
via the Secure Streams Policy for a streamtype `captive_portal_detect`, eg
```
"captive_portal_detect": {
"endpoint": "connectivitycheck.android.com",
"http_url": "generate_204",
"port": 80,
"protocol": "h1",
"http_method": "GET",
"opportunistic": true,
"http_expect": 204,
"http_fail_redirect": true
}
```
#### SMD Report Result
Schema: `{"type": "cpd", "result":"<result>"}`
result|meaning
---|---
OK|Internet is reachable
Captive|Internet is behind a captive portal
No internet|There is no connectivity
#### SMD Request re-detection
Schema: `{"trigger": "cpdcheck"}`
### lws_system state progression
Class: `LWSSMDCL_SYSTEM_STATE`
Lws system state changes are forwarded to lws_smd messages so participants not
on the lws event loop directly can be aware of progress. Code registering a
lws_system notifier callback, on the main lws loop, can synchronously veto state
changes and hook proposed state changes, lws_smd events are asynchronous
notifications of state changes after they were decided only... however they are
available over the whole system.
It's not possible to make validated TLS connections until the system has
acquired the date as well as acquired an IP on a non-captive portal connection,
for that reason user code will usually be dependent on the system reaching
"OPERATIONAL" state if lws is responsible for managing the boot process.
#### System state event
Schema: `{"state":"<state>"}"`
State|Meaning
---|---
CONTEXT_CREATED|We're creating the lws_context
INITIALIZED|Initial vhosts and protocols initialized
IFACE_COLDPLUG|Network interfaces discovered
DHCP|DHCP acquired
CPD_PRE_TIME|Captive portal detect hook before we have system time
TIME_VALID|Ntpclient has run
CPD_POST_TIME|Captive portal detect hook after system time (tls-based check)
POLICY_VALID|The system policy has been acquired and parsed
REGISTERED|This device is registered with an authority
AUTH1|We acquired auth1 from the authority using our registration info
AUTH2|We acquired auth2 from the authority using our registration info
OPERATIONAL|We are active and able to make authenticated tls connections
POLICY_INVALID|The policy is being changed

View File

@ -0,0 +1,94 @@
/*
* lws System Message Distribution
*
* Copyright (C) 2019 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#if defined(LWS_WITH_SECURE_STREAMS)
#define LWS_SMD_SS_RX_HEADER_LEN_EFF (LWS_SMD_SS_RX_HEADER_LEN)
#else
#define LWS_SMD_SS_RX_HEADER_LEN_EFF (0)
#endif
struct lws_smd_peer;
/*
 * One queued SMD message.  The payload is over-allocated directly after this
 * struct, with a gap of LWS_SMD_SS_RX_HEADER_LEN_EFF bytes when Secure
 * Streams support is built in.
 */
typedef struct lws_smd_msg {
	lws_dll2_t			list;		/* on lws_smd.owner_messages */

	struct lws_smd_peer		*exc;		/* peer to skip on delivery, or NULL */

	lws_usec_t			timestamp;	/* creation time, us resolution */
	lws_smd_class_t			_class;		/* class bitfield of this message */
	uint16_t			length;		/* payload length in bytes */
	uint16_t			refcount;	/* interested peers yet to consume */

	/* message itself is over-allocated after this */
} lws_smd_msg_t;
/* A registered participant in SMD message distribution */
typedef struct lws_smd_peer {
	lws_dll2_t			list;		/* on lws_smd.owner_peers */

#if defined(LWS_WITH_SECURE_STREAMS)
	lws_ss_handle_t			*ss_handle;	/* LSMDT_SECURE_STREAMS */
#endif

	lws_smd_notification_cb_t	cb;		/* LSMDT_<other> */
	struct lws_context		*ctx;		/* context peer registered on */
	void				*opaque;	/* user pointer handed to cb */

	/* NULL, or next message we will handle */
	lws_smd_msg_t			*tail;

	lws_smd_class_t			_class_filter;	/* classes this peer wants */
} lws_smd_peer_t;
/*
 * Manages message distribution
 *
 * There is one of these in the lws_context, but the distribution action also
 * gets involved in delivering to pt event loops individually for SMP case
 */
typedef struct lws_smd {
	lws_dll2_owner_t		owner_messages;	/* lws_smd_msg_t */
	lws_mutex_t			lock_messages;	/* guards owner_messages */
	lws_dll2_owner_t		owner_peers;	/* lws_smd_peer_t */
	lws_mutex_t			lock_peers;	/* guards owner_peers */

	/* union of peer class filters, suppress creation of msg classes not set */
	lws_smd_class_t			_class_filter;

	char				delivering;	/* nonzero while distributing */
} lws_smd_t;
/* check if this tsi has pending messages to deliver */
int
lws_smd_message_pending(struct lws_context *ctx);

/* distribute queued messages to the peers interested in them */
int
lws_smd_msg_distribute(struct lws_context *ctx);

/* internal teardown of smd state at context destruction */
int
_lws_smd_destroy(struct lws_context *ctx);

View File

@ -0,0 +1,803 @@
/*
* lws System Message Distribution
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#include <assert.h>
/* comment me to remove extra debug and sanity checks */
// #define LWS_SMD_DEBUG
#if defined(LWS_SMD_DEBUG)
#define lwsl_smd lwsl_notice
#else
#define lwsl_smd(_s, ...)
#endif
/*
 * Allocate an SMD message body of the requested class and length.
 *
 * Returns a pointer to the payload area the caller should fill, or NULL if
 * no registered participant is interested in this class (a normal, silent
 * outcome) or on OOM.
 */
void *
lws_smd_msg_alloc(struct lws_context *ctx, lws_smd_class_t _class, size_t len)
{
	lws_smd_msg_t *m;

	/* only allow it if someone wants to consume this class of event */

	if (!(ctx->smd._class_filter & _class)) {
		lwsl_cx_info(ctx, "rejecting class 0x%x as no participant wants",
			     (unsigned int)_class);
		return NULL;
	}

	assert(len <= LWS_SMD_MAX_PAYLOAD);

	/*
	 * If SS configured, over-allocate LWS_SMD_SS_RX_HEADER_LEN behind
	 * payload, ie,  msg_t (gap LWS_SMD_SS_RX_HEADER_LEN) payload
	 */
	m = lws_malloc(sizeof(*m) + LWS_SMD_SS_RX_HEADER_LEN_EFF + len,
		       __func__);
	if (!m)
		return NULL;

	memset(m, 0, sizeof(*m));
	m->_class = _class;
	m->length = (uint16_t)len;
	m->timestamp = lws_now_usecs();

	/* hand back the payload area, after the (optional) SS header gap */
	return ((uint8_t *)&m[1]) + LWS_SMD_SS_RX_HEADER_LEN_EFF;
}
/*
 * Free a message payload pointer obtained from lws_smd_msg_alloc(), and
 * NULL the caller's pointer.
 */
void
lws_smd_msg_free(void **ppay)
{
	uint8_t *pay = (uint8_t *)*ppay;
	lws_smd_msg_t *m;

	/* if SS configured, actual alloc is LWS_SMD_SS_RX_HEADER_LEN behind */
	m = (lws_smd_msg_t *)(pay - LWS_SMD_SS_RX_HEADER_LEN_EFF - sizeof(*m));

	lws_free(m);
	*ppay = NULL;
}
#if defined(LWS_SMD_DEBUG)
/*
 * Debug-only dump of the queued messages and registered peers.
 *
 * Fix: the class and length arguments were passed in the wrong order for the
 * "cls: 0x%x, len %u" format specifiers, printing the length as the class
 * and vice versa.
 */
static void
lws_smd_dump(lws_smd_t *smd)
{
	int n = 1;

	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   smd->owner_messages.head) {
		lws_smd_msg_t *msg = lws_container_of(p, lws_smd_msg_t, list);

		lwsl_info(" msg %d: %p: ref %d, lat %dms, cls: 0x%x, len %u: '%s'\n",
			  n++, msg, msg->refcount,
			  (unsigned int)((lws_now_usecs() - msg->timestamp) / 1000),
			  (unsigned int)msg->_class, msg->length,
			  (const char *)&msg[1] + LWS_SMD_SS_RX_HEADER_LEN_EFF);

	} lws_end_foreach_dll_safe(p, p1);

	n = 1;
	lws_start_foreach_dll(struct lws_dll2 *, p, smd->owner_peers.head) {
		lws_smd_peer_t *pr = lws_container_of(p, lws_smd_peer_t, list);

		lwsl_info(" peer %d: %p: tail: %p, filt 0x%x\n",
			  n++, pr, pr->tail, pr->_class_filter);
	} lws_end_foreach_dll(p);
}
#endif
/* nonzero if the peer's class filter admits this message's class */
static int
_lws_smd_msg_peer_interested_in_msg(lws_smd_peer_t *pr, lws_smd_msg_t *msg)
{
	return (msg->_class & pr->_class_filter) != 0;
}
/*
 * Figure out what to set the initial refcount for the message to
 */
static int
_lws_smd_msg_assess_peers_interested(lws_smd_t *smd, lws_smd_msg_t *msg,
				     struct lws_smd_peer *exc)
{
	struct lws_context *ctx = lws_container_of(smd, struct lws_context, smd);
	int count = 0;

	/* count every registered peer (other than exc) whose filter wants it */
	lws_start_foreach_dll(struct lws_dll2 *, p, ctx->smd.owner_peers.head) {
		lws_smd_peer_t *peer = lws_container_of(p, lws_smd_peer_t, list);

		if (peer != exc &&
		    _lws_smd_msg_peer_interested_in_msg(peer, msg))
			count++;

	} lws_end_foreach_dll(p);

	return count;
}
/* recompute smd->_class_filter as the union of every peer's class filter */
static int
_lws_smd_class_mask_union(lws_smd_t *smd)
{
	uint32_t union_mask = 0;

	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   smd->owner_peers.head) {
		lws_smd_peer_t *peer = lws_container_of(p, lws_smd_peer_t, list);

		union_mask |= peer->_class_filter;

	} lws_end_foreach_dll_safe(p, p1);

	smd->_class_filter = union_mask;

	return 0;
}
/* Call with message lock held */
static void
_lws_smd_msg_destroy(struct lws_context *cx, lws_smd_t *smd, lws_smd_msg_t *msg)
{
	/*
	 * We think we gave the message to everyone and can destroy it.
	 * Sanity check that no peer holds a pointer to this guy
	 */

	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   smd->owner_peers.head) {
		lws_smd_peer_t *xpr = lws_container_of(p, lws_smd_peer_t, list);

		if (xpr->tail == msg) {
			lwsl_cx_err(cx, "peer %p has msg %p "
					"we are about to destroy as tail", xpr, msg);
#if !defined(LWS_PLAT_FREERTOS)
			/* on larger platforms treat this as a fatal logic error */
			assert(0);
#endif
		}

	} lws_end_foreach_dll_safe(p, p1);

	/*
	 * We have fully delivered the message now, it
	 * can be unlinked and destroyed
	 */
	lwsl_cx_info(cx, "destroy msg %p", msg);
	lws_dll2_remove(&msg->list);
	lws_free(msg);
}
/*
 * This is wanting to be threadsafe, limiting the apis we can call
 *
 * Queue the message whose payload pointer is pay for distribution to every
 * interested peer except exc.  Takes ownership of the message on the success
 * path (refcounted and queued, or freed if nobody is interested).
 * Returns 0 on success, 1 on rejection.
 */
int
_lws_smd_msg_send(struct lws_context *ctx, void *pay, struct lws_smd_peer *exc)
{
	/* recover the message header sitting behind the payload pointer */
	lws_smd_msg_t *msg = (lws_smd_msg_t *)(((uint8_t *)pay) -
				LWS_SMD_SS_RX_HEADER_LEN_EFF - sizeof(*msg));

	/* NOTE(review): depth is checked before taking lock_messages --
	 * racy under concurrent senders, confirm intended as best-effort */
	if (ctx->smd.owner_messages.count >= ctx->smd_queue_depth) {
		lwsl_cx_warn(ctx, "rejecting message on queue depth %d",
			     (int)ctx->smd.owner_messages.count);
		/* reject the message due to max queue depth reached */
		return 1;
	}

	/* when called from inside distribution, lock_peers is already held */
	if (!ctx->smd.delivering &&
	    lws_mutex_lock(ctx->smd.lock_peers)) /* +++++++++++++++ peers */
		return 1; /* For Coverity */

	/* NOTE(review): on this lock failure the msg is neither queued nor
	 * freed, and we return 0 -- confirm the leak/return is intended */
	if (lws_mutex_lock(ctx->smd.lock_messages)) /* +++++++++++++++++ messages */
		goto bail;

	/* initial refcount = number of peers that will consume this */
	msg->refcount = (uint16_t)_lws_smd_msg_assess_peers_interested(
							&ctx->smd, msg, exc);

	if (!msg->refcount) {
		/* possible, considering exc and no other participants */
		lws_mutex_unlock(ctx->smd.lock_messages); /* --------------- messages */
		lws_free(msg);
		if (!ctx->smd.delivering)
			lws_mutex_unlock(ctx->smd.lock_peers); /* ------------- peers */

		return 0;
	}

	msg->exc = exc;

	/* let's add him on the queue... */

	lws_dll2_add_tail(&msg->list, &ctx->smd.owner_messages);

	/*
	 * Any peer with no active tail needs to check our class to see if we
	 * should become his tail
	 */

	lws_start_foreach_dll(struct lws_dll2 *, p, ctx->smd.owner_peers.head) {
		lws_smd_peer_t *pr = lws_container_of(p, lws_smd_peer_t, list);

		if (pr != exc &&
		    !pr->tail && _lws_smd_msg_peer_interested_in_msg(pr, msg)) {
			pr->tail = msg;
			/* tail message has to actually be of interest to the peer */
			assert(!pr->tail || (pr->tail->_class & pr->_class_filter));
		}

	} lws_end_foreach_dll(p);

#if defined(LWS_SMD_DEBUG)
	lwsl_smd("%s: added %p (refc %u) depth now %d\n", __func__,
		 msg, msg->refcount, ctx->smd.owner_messages.count);
	lws_smd_dump(&ctx->smd);
#endif

	lws_mutex_unlock(ctx->smd.lock_messages); /* --------------- messages */

bail:
	if (!ctx->smd.delivering)
		lws_mutex_unlock(ctx->smd.lock_peers); /* ------------- peers */

	/* we may be happening from another thread context */
	lws_cancel_service(ctx);

	return 0;
}
/*
 * This is wanting to be threadsafe, limiting the apis we can call
 */
int
lws_smd_msg_send(struct lws_context *ctx, void *pay)
{
	/* public entry: distribute to all interested peers, excluding nobody */
	return _lws_smd_msg_send(ctx, pay, NULL);
}
/*
 * This is wanting to be threadsafe, limiting the apis we can call
 *
 * Format and send an SMD message of the given class in one step.  Silently
 * succeeds (returns 0) when nobody is interested in the class; returns 1 on
 * format error, oversize payload, OOM, or send failure.
 *
 * Fix: vsnprintf() may return a negative value on encoding error; that was
 * previously passed unchecked into the size arithmetic below.
 */
int
lws_smd_msg_printf(struct lws_context *ctx, lws_smd_class_t _class,
		   const char *format, ...)
{
	lws_smd_msg_t *msg;
	va_list ap;
	void *p;
	int n;

	if (!(ctx->smd._class_filter & _class))
		/*
		 * There's nobody interested in messages of this class atm.
		 * Don't bother generating it, and act like all is well.
		 */
		return 0;

	/* measure the formatted length first */
	va_start(ap, format);
	n = vsnprintf(NULL, 0, format, ap);
	va_end(ap);
	if (n < 0 || n > LWS_SMD_MAX_PAYLOAD)
		/* encoding error, or too large to send */
		return 1;

	p = lws_smd_msg_alloc(ctx, _class, (size_t)n + 2);
	if (!p)
		return 1;
	msg = (lws_smd_msg_t *)(((uint8_t *)p) - LWS_SMD_SS_RX_HEADER_LEN_EFF -
				sizeof(*msg));
	msg->length = (uint16_t)n;
	va_start(ap, format);
	vsnprintf((char *)p, (unsigned int)n + 2, format, ap);
	va_end(ap);

	/*
	 * locks taken and released in here
	 */

	if (lws_smd_msg_send(ctx, p)) {
		/* ownership was not taken; free it ourselves */
		lws_smd_msg_free(&p);
		return 1;
	}

	return 0;
}
#if defined(LWS_WITH_SECURE_STREAMS)
/*
 * Format an SMD message directly into an SS tx buffer: writes the 16-byte
 * SMD header (class, then a zeroed timestamp slot) followed by the formatted
 * payload.
 *
 * *len is in/out: buffer size on entry, bytes used on success.
 * Returns 0 on success, 1 if the buffer is too small or the payload too big.
 */
int
lws_smd_ss_msg_printf(const char *tag, uint8_t *buf, size_t *len,
		      lws_smd_class_t _class, const char *format, ...)
{
	char *content = (char *)buf + LWS_SMD_SS_RX_HEADER_LEN;
	va_list ap;
	int n;

	if (*len < LWS_SMD_SS_RX_HEADER_LEN)
		return 1;

	lws_ser_wu64be(buf, _class);
	lws_ser_wu64be(buf + 8, 0); /* valgrind notices uninitialized if left */

	va_start(ap, format);
	n = vsnprintf(content, (*len) - LWS_SMD_SS_RX_HEADER_LEN, format, ap);
	va_end(ap);

	/* a negative (error) n also trips this check via the unsigned cast */
	if (n > LWS_SMD_MAX_PAYLOAD ||
	    (unsigned int)n > (*len) - LWS_SMD_SS_RX_HEADER_LEN)
		/* too large to send */
		return 1;

	*len = LWS_SMD_SS_RX_HEADER_LEN + (unsigned int)n;

	lwsl_info("%s: %s send cl 0x%x, len %u\n", __func__, tag, (unsigned int)_class,
		  (unsigned int)n);

	return 0;
}
/*
 * This is a helper that user rx handler for LWS_SMD_STREAMTYPENAME SS can
 * call through to with the payload it received from the proxy.  It will then
 * forward the received SMD message to all local (same-context) participants
 * that are interested in that class (except ones with callback skip_cb, so
 * we don't loop).
 */
static int
_lws_smd_ss_rx_forward(struct lws_context *ctx, const char *tag,
		       struct lws_smd_peer *pr, const uint8_t *buf, size_t len)
{
	lws_smd_class_t _class;
	lws_smd_msg_t *msg;
	void *p;

	/* must at least carry the 16-byte class + timestamp header */
	if (len < LWS_SMD_SS_RX_HEADER_LEN_EFF)
		return 1;

	if (len >= LWS_SMD_MAX_PAYLOAD + LWS_SMD_SS_RX_HEADER_LEN_EFF)
		return 1;

	_class = (lws_smd_class_t)lws_ser_ru64be(buf);

	//if (_class == LWSSMDCL_METRICS) {

	//}

	/* only locally forward messages that we care about in this process */

	if (!(ctx->smd._class_filter & _class))
		/*
		 * There's nobody interested in messages of this class atm.
		 * Don't bother generating it, and act like all is well.
		 */
		return 0;

	p = lws_smd_msg_alloc(ctx, _class, len);
	if (!p)
		return 1;

	msg = (lws_smd_msg_t *)(((uint8_t *)p) - LWS_SMD_SS_RX_HEADER_LEN_EFF -
				sizeof(*msg));
	msg->length = (uint16_t)(len - LWS_SMD_SS_RX_HEADER_LEN_EFF);
	/* adopt the original source timestamp, not time we forwarded it */
	msg->timestamp = (lws_usec_t)lws_ser_ru64be(buf + 8);

	/* copy the message payload in */
	memcpy(p, buf + LWS_SMD_SS_RX_HEADER_LEN_EFF, msg->length);

	/*
	 * locks taken and released in here
	 */

	if (_lws_smd_msg_send(ctx, p, pr)) {
		/* we couldn't send it after all that... */
		lws_smd_msg_free(&p);

		return 1;
	}

	lwsl_info("%s: %s send cl 0x%x, len %u, ts %llu\n", __func__,
		  tag, (unsigned int)_class, msg->length,
		  (unsigned long long)msg->timestamp);

	return 0;
}
/*
 * Public entry for SS rx handlers: recovers the ss handle from its userdata
 * pointer (the userdata region sits immediately after the handle, hence the
 * subtraction) and forwards the received proxied SMD message into the local
 * context, excluding the handle's own smd peer from redelivery.
 */
int
lws_smd_ss_rx_forward(void *ss_user, const uint8_t *buf, size_t len)
{
	struct lws_ss_handle *h = (struct lws_ss_handle *)
					(((char *)ss_user) - sizeof(*h));
	struct lws_context *ctx = lws_ss_get_context(h);
	return _lws_smd_ss_rx_forward(ctx, lws_ss_tag(h), h->u.smd.smd_peer, buf, len);
}
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
/*
 * As lws_smd_ss_rx_forward() but for the proxied-client (sspc) side:
 * recovers the sspc handle from its userdata pointer and forwards the
 * received SMD message into the local context.  No peer is excluded from
 * delivery here (NULL passed as the exclusion).
 */
int
lws_smd_sspc_rx_forward(void *ss_user, const uint8_t *buf, size_t len)
{
	struct lws_sspc_handle *h = (struct lws_sspc_handle *)
					(((char *)ss_user) - sizeof(*h));
	struct lws_context *ctx = lws_sspc_get_context(h);
	return _lws_smd_ss_rx_forward(ctx, lws_sspc_tag(h), NULL, buf, len);
}
#endif
#endif
/*
 * Peers that deregister need to adjust the refcount of messages they would
 * have been interested in, but didn't take delivery of yet.
 *
 * Requires the peers lock held by the caller; takes / releases the messages
 * lock itself.  Frees pr.
 */
static void
_lws_smd_peer_destroy(lws_smd_peer_t *pr)
{
	lws_smd_t *smd = lws_container_of(pr->list.owner, lws_smd_t,
					  owner_peers);
	if (lws_mutex_lock(smd->lock_messages)) /* +++++++++ messages */
		return; /* For Coverity */
	lws_dll2_remove(&pr->list);
	/*
	 * We take the approach to adjust the refcount of every would-have-been
	 * delivered message we were interested in
	 */
	while (pr->tail) {
		/* note the next queued message before we may destroy this one;
		 * NOTE(review): m1 is NULL when list.next is NULL only if
		 * .list is the first member of lws_smd_msg_t -- confirm */
		lws_smd_msg_t *m1 = lws_container_of(pr->tail->list.next,
						     lws_smd_msg_t, list);
		if (_lws_smd_msg_peer_interested_in_msg(pr, pr->tail)) {
			/* drop our interest; last interested peer destroys */
			if (!--pr->tail->refcount)
				_lws_smd_msg_destroy(pr->ctx, smd, pr->tail);
		}
		pr->tail = m1;
	}
	lws_free(pr);
	lws_mutex_unlock(smd->lock_messages); /* messages ------- */
}
/*
 * Starting after pr's current tail, return the next queued message this peer
 * should see: one not excluded for pr (msg->exc) and matching pr's class
 * filter.  NULL if none remain.
 */
static lws_smd_msg_t *
_lws_smd_msg_next_matching_filter(lws_smd_peer_t *pr)
{
	lws_dll2_t *d;

	for (d = pr->tail->list.next; d; d = d->next) {
		lws_smd_msg_t *cand = lws_container_of(d, lws_smd_msg_t, list);

		if (cand->exc != pr &&
		    _lws_smd_msg_peer_interested_in_msg(pr, cand))
			return cand;
	}

	return NULL;
}
/*
 * Delivers only one message to the peer and advances the tail, or sets to NULL
 * if no more filtered queued messages. Returns nonzero if tail non-NULL.
 *
 * For Proxied SS, only asks for writeable and does not advance or change the
 * tail.
 *
 * This is done so if multiple messages queued, we don't get a situation where
 * one participant gets them all spammed, then the next etc. Instead they are
 * delivered round-robin.
 *
 * Requires peer lock, may take message lock
 */
static int
_lws_smd_msg_deliver_peer(struct lws_context *ctx, lws_smd_peer_t *pr)
{
	lws_smd_msg_t *msg;
	if (!pr->tail)
		return 0;
	/* NOTE(review): pr->tail is already an lws_smd_msg_t *; this
	 * container_of is an identity only if .list is the first member --
	 * confirm */
	msg = lws_container_of(pr->tail, lws_smd_msg_t, list);
	lwsl_cx_info(ctx, "deliver cl 0x%x, len %d, to peer %p",
		     (unsigned int)msg->_class, (int)msg->length,
		     pr);
	/* the payload starts after the effective ss rx header area that
	 * follows the msg header */
	pr->cb(pr->opaque, msg->_class, msg->timestamp,
	       ((uint8_t *)&msg[1]) + LWS_SMD_SS_RX_HEADER_LEN_EFF,
	       (size_t)msg->length);
#if !defined(__COVERITY__)
	assert(msg->refcount);
#endif
	/*
	 * If there is one, move forward to the next queued
	 * message that meets the filters of this peer
	 */
	pr->tail = _lws_smd_msg_next_matching_filter(pr);
	/* tail message has to actually be of interest to the peer */
	assert(!pr->tail || (pr->tail->_class & pr->_class_filter));
	if (lws_mutex_lock(ctx->smd.lock_messages)) /* +++++++++ messages */
		return 1; /* For Coverity */
	/* we consumed our interest; destroy once no peer still wants it */
	if (!--msg->refcount)
		_lws_smd_msg_destroy(ctx, &ctx->smd, msg);
	lws_mutex_unlock(ctx->smd.lock_messages); /* messages ------- */
	return !!pr->tail;
}
/*
 * Called when the event loop could deliver messages synchronously, eg, on
 * entry to idle.
 *
 * Gives each registered peer at most one message per pass (round-robin) and
 * repeats until nobody has anything left queued.  Returns 0 normally.
 */
int
lws_smd_msg_distribute(struct lws_context *ctx)
{
	char more;
	/* commonly, no messages and nothing to do... */
	if (!ctx->smd.owner_messages.count)
		return 0;
	/* flag we're delivering, so (un)register calls skip the peers lock */
	ctx->smd.delivering = 1;
	do {
		more = 0;
		if (lws_mutex_lock(ctx->smd.lock_peers)) /* +++++++++++++++ peers */
			/* NOTE(review): this Coverity-only failure path leaves
			 * .delivering set -- confirm acceptable */
			return 1; /* For Coverity */
		lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
					   ctx->smd.owner_peers.head) {
			lws_smd_peer_t *pr = lws_container_of(p, lws_smd_peer_t, list);
			/* more stays set while any peer still has a tail */
			more = (char)(more | !!_lws_smd_msg_deliver_peer(ctx, pr));
		} lws_end_foreach_dll_safe(p, p1);
		lws_mutex_unlock(ctx->smd.lock_peers); /* ------------- peers */
	} while (more);
	ctx->smd.delivering = 0;
	return 0;
}
/*
 * Register a new SMD participant: cb is called with opaque for each message
 * matching _class_filter.  Returns the peer handle, or NULL on OOM / lock
 * failure.  If called from within message delivery (smd.delivering set), the
 * peers lock is already held by the distribute loop and is not retaken.
 * Note: flags is not used in this function body.
 */
struct lws_smd_peer *
lws_smd_register(struct lws_context *ctx, void *opaque, int flags,
		 lws_smd_class_t _class_filter, lws_smd_notification_cb_t cb)
{
	lws_smd_peer_t *pr = lws_zalloc(sizeof(*pr), __func__);
	if (!pr)
		return NULL;
	pr->cb = cb;
	pr->opaque = opaque;
	pr->_class_filter = _class_filter;
	pr->ctx = ctx;
	if (!ctx->smd.delivering &&
	    lws_mutex_lock(ctx->smd.lock_peers)) { /* +++++++++++++++ peers */
		lws_free(pr);
		return NULL; /* For Coverity */
	}
	/*
	 * Let's lock the message list before adding this peer... because...
	 */
	if (lws_mutex_lock(ctx->smd.lock_messages)) { /* +++++++++ messages */
		lws_free(pr);
		pr = NULL;
		goto bail1; /* For Coverity */
	}
	lws_dll2_add_tail(&pr->list, &ctx->smd.owner_peers);
	/* update the global class mask union to account for new peer mask */
	_lws_smd_class_mask_union(&ctx->smd);
	/*
	 * Now there's a new peer added, any messages we have stashed will try
	 * to deliver to this guy too, if he's interested in that class.  So we
	 * have to update the message refcounts for queued messages-he's-
	 * interested-in accordingly.
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   ctx->smd.owner_messages.head) {
		lws_smd_msg_t *msg = lws_container_of(p, lws_smd_msg_t, list);
		if (_lws_smd_msg_peer_interested_in_msg(pr, msg))
			msg->refcount++;
	} lws_end_foreach_dll_safe(p, p1);
	/* ... ok we are done adding the peer */
	lws_mutex_unlock(ctx->smd.lock_messages); /* messages ------- */
	lwsl_cx_info(ctx, "peer %p (count %u) registered", pr,
			(unsigned int)ctx->smd.owner_peers.count);
bail1:
	if (!ctx->smd.delivering)
		lws_mutex_unlock(ctx->smd.lock_peers); /* ------------- peers */
	return pr;
}
/*
 * Deregister and destroy an SMD participant.  If called from within message
 * delivery (delivering set), the peers lock is already held by the
 * distribute loop and is not retaken here.
 */
void
lws_smd_unregister(struct lws_smd_peer *pr)
{
	lws_smd_t *smd = lws_container_of(pr->list.owner, lws_smd_t, owner_peers);
	if (!smd->delivering &&
	    lws_mutex_lock(smd->lock_peers)) /* +++++++++++++++++++ peers */
		return; /* For Coverity */
	lwsl_cx_notice(pr->ctx, "destroying peer %p", pr);
	_lws_smd_peer_destroy(pr);
	if (!smd->delivering)
		lws_mutex_unlock(smd->lock_peers); /* ----------------- peers */
}
/*
 * Returns nonzero if any peer still has a queued message to take delivery
 * of.  As a side-effect, expires queued messages older than the context's
 * smd_ttl_us, repointing any peer tails that referenced them first.
 */
int
lws_smd_message_pending(struct lws_context *ctx)
{
	int ret = 1;
	/*
	 * First cheaply check the common case no messages pending, so there's
	 * definitely nothing for this tsi or anything else
	 */
	if (!ctx->smd.owner_messages.count)
		return 0;
	/*
	 * If there are any messages, check their age and expire ones that
	 * have been hanging around too long
	 */
	if (lws_mutex_lock(ctx->smd.lock_peers)) /* +++++++++++++++++++++++ peers */
		return 1; /* For Coverity */
	if (lws_mutex_lock(ctx->smd.lock_messages)) /* +++++++++++++++++ messages */
		goto bail; /* For Coverity */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   ctx->smd.owner_messages.head) {
		lws_smd_msg_t *msg = lws_container_of(p, lws_smd_msg_t, list);
		if ((lws_now_usecs() - msg->timestamp) > ctx->smd_ttl_us) {
			lwsl_cx_warn(ctx, "timing out queued message %p",
					msg);
			/*
			 * We're forcibly yanking this guy, we can expect that
			 * there might be peers that point to it as their tail.
			 *
			 * In that case, move their tails on to the next guy
			 * they are interested in, if any.
			 */
			lws_start_foreach_dll_safe(struct lws_dll2 *, pp, pp1,
						   ctx->smd.owner_peers.head) {
				lws_smd_peer_t *pr = lws_container_of(pp,
							lws_smd_peer_t, list);
				if (pr->tail == msg)
					pr->tail = _lws_smd_msg_next_matching_filter(pr);
			} lws_end_foreach_dll_safe(pp, pp1);
			/*
			 * No peer should fall foul of the peer tail checks
			 * when destroying the message now.
			 */
			_lws_smd_msg_destroy(ctx, &ctx->smd, msg);
		}
	} lws_end_foreach_dll_safe(p, p1);
	lws_mutex_unlock(ctx->smd.lock_messages); /* --------------- messages */
	/*
	 * Walk the peer list: any remaining tail means delivery is pending
	 */
	lws_start_foreach_dll(struct lws_dll2 *, p, ctx->smd.owner_peers.head) {
		lws_smd_peer_t *pr = lws_container_of(p, lws_smd_peer_t, list);
		if (pr->tail)
			goto bail;
	} lws_end_foreach_dll(p);
	/*
	 * There's no message pending that we need to handle
	 */
	ret = 0;
bail:
	lws_mutex_unlock(ctx->smd.lock_peers); /* --------------------- peers */
	return ret;
}
/*
 * Context teardown helper: free every queued message directly, destroy every
 * peer (peer destroy takes / releases the messages lock itself), then
 * destroy the two smd mutexes.  Always returns 0.
 */
int
_lws_smd_destroy(struct lws_context *ctx)
{
	/* stop any message creation */
	ctx->smd._class_filter = 0;
	/*
	 * Walk the message list, destroying them
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   ctx->smd.owner_messages.head) {
		lws_smd_msg_t *msg = lws_container_of(p, lws_smd_msg_t, list);
		lws_dll2_remove(&msg->list);
		lws_free(msg);
	} lws_end_foreach_dll_safe(p, p1);
	/*
	 * Walk the peer list, destroying them
	 */
	lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
				   ctx->smd.owner_peers.head) {
		lws_smd_peer_t *pr = lws_container_of(p, lws_smd_peer_t, list);
		pr->tail = NULL; /* we just nuked all the messages, ignore */
		_lws_smd_peer_destroy(pr);
	} lws_end_foreach_dll_safe(p, p1);
	lws_mutex_destroy(ctx->smd.lock_messages);
	lws_mutex_destroy(ctx->smd.lock_peers);
	return 0;
}

/*
 * NOTE(review): web-diff-view extraction residue ("View File" and a hunk
 * header) replaced by this comment; the content below is a separate source
 * file (apparently lib/system/system.c) concatenated during extraction.
 */
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <private-lib-core.h>
/*
* It's either a buflist (.is_direct = 0) or
* a direct pointer + len (.is_direct = 1)
*/
/* Accessor: return the lws_system_ops_t the context was configured with */
const lws_system_ops_t *
lws_system_get_ops(struct lws_context *context)
{
	return context->system_ops;
}
/*
 * Point blob b directly at caller-owned memory ptr / len.  Nothing is
 * copied and lws_system_blob_destroy() will not free it, so ptr must remain
 * valid for as long as the blob is used.
 */
void
lws_system_blob_direct_set(lws_system_blob_t *b, const uint8_t *ptr, size_t len)
{
	b->is_direct = 1;
	b->u.direct.ptr = ptr;
	b->u.direct.len = len;
}
/*
 * Put blob b into heap (buflist) mode and free any buflist segments it is
 * currently holding.
 */
void
lws_system_blob_heap_empty(lws_system_blob_t *b)
{
	b->is_direct = 0;
	lws_buflist_destroy_all_segments(&b->u.bl);
}
/*
 * Append a copy of buf / len as a new segment on heap-mode blob b.
 * Returns 0 on success, -1 on allocation failure.
 */
int
lws_system_blob_heap_append(lws_system_blob_t *b, const uint8_t *buf, size_t len)
{
	/* only heap-mode (buflist) blobs can be appended to */
	assert(!b->is_direct);

	lwsl_debug("%s: blob %p\n", __func__, b);

	return lws_buflist_append_segment(&b->u.bl, buf, len) < 0 ? -1 : 0;
}
/*
 * Total byte count held by blob b: direct blobs know their length, heap
 * blobs sum their buflist segments.
 */
size_t
lws_system_blob_get_size(lws_system_blob_t *b)
{
	return b->is_direct ? b->u.direct.len :
			      lws_buflist_total_len(&b->u.bl);
}
/*
 * Copy up to *len bytes of blob b starting at offset ofs into buf, updating
 * *len to the amount actually copied.  Returns 0 on success, 1 when ofs is
 * past the end of a direct blob, -2 on a buflist copy failure.
 */
int
lws_system_blob_get(lws_system_blob_t *b, uint8_t *buf, size_t *len, size_t ofs)
{
	int copied;

	if (!b->is_direct) {
		/* heap mode: let the buflist do the linear copy */
		copied = lws_buflist_linear_copy(&b->u.bl, ofs, buf, *len);
		if (copied < 0)
			return -2;

		*len = (unsigned int)copied;

		return 0;
	}

	/* direct mode: bounded memcpy out of the single region */
	assert(b->u.direct.ptr);

	if (ofs >= b->u.direct.len) {
		/* nothing available at that offset */
		*len = 0;

		return 1;
	}

	if (*len > b->u.direct.len - ofs)
		*len = b->u.direct.len - ofs; /* clamp to what remains */

	memcpy(buf, b->u.direct.ptr + ofs, *len);

	return 0;
}
/*
 * Obtain a single contiguous pointer to blob b's whole content, when that is
 * possible: always for direct blobs, and for heap blobs only when exactly
 * one buflist segment exists.  Returns 0 with *ptr set, else -1.
 */
int
lws_system_blob_get_single_ptr(lws_system_blob_t *b, const uint8_t **ptr)
{
	if (b->is_direct) {
		*ptr = b->u.direct.ptr;

		return 0;
	}

	if (!b->u.bl || b->u.bl->next)
		return -1; /* empty, or multipart: no single pointer to it all */

	/* payload sits after the segment header, beyond the LWS_PRE gap */
	*ptr = (const uint8_t *)&b->u.bl[1] + LWS_PRE;

	return 0;
}
/*
 * Release heap storage held by blob b.  NULL is tolerated; direct blobs own
 * no heap storage so nothing is freed for them.
 */
void
lws_system_blob_destroy(lws_system_blob_t *b)
{
	if (b && !b->is_direct)
		lws_buflist_destroy_all_segments(&b->u.bl);
}
/*
 * Return the blob slot for (type, idx), or NULL if out of range.
 *
 * The slots for a given type start at array index type, so the combined
 * index type + idx must be validated against the array size -- checking idx
 * alone still permitted an out-of-bounds read for nonzero type.
 */
lws_system_blob_t *
lws_system_get_blob(struct lws_context *context, lws_system_blob_item_t type,
		    int idx)
{
	if (idx < 0 ||
	    (int)type + idx >= (int)LWS_ARRAY_SIZE(context->system_blobs))
		return NULL;

	return &context->system_blobs[type + (unsigned int)idx];
}
#if defined(LWS_WITH_NETWORK)
/*
 * Dual-purpose helper: with get == NULL, queue a new attach item (cb +
 * opaque) on pt tsi, to be called back once the system reaches the given
 * state; with get non-NULL, detach and hand back the first queued item
 * whose state requirement is already met (caller must lws_free() *get).
 *
 * Caller must protect the whole call with system-specific locking
 */
int
__lws_system_attach(struct lws_context *context, int tsi, lws_attach_cb_t cb,
		    lws_system_states_t state, void *opaque,
		    struct lws_attach_item **get)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_attach_item *item;
	if (!get) {
		/*
		 * allocate and add to the head of the pt's attach list
		 */
		item = lws_zalloc(sizeof(*item), __func__);
		if (!item)
			return 1;
		item->cb = cb;
		item->opaque = opaque;
		item->state = state;
		lws_dll2_add_head(&item->list, &pt->attach_owner);
		/* poke the event loop so it can action the new item */
		lws_cancel_service(context);
		return 0;
	}
	*get = NULL;
#if defined(LWS_WITH_SYS_STATE)
	if (!pt->attach_owner.count)
		return 0;
	/*
	 * If any, return the first guy whose state requirement matches
	 */
	lws_start_foreach_dll(struct lws_dll2 *, d,
			      lws_dll2_get_head(&pt->attach_owner)) {
		item = lws_container_of(d, lws_attach_item_t, list);
		if (pt->context->mgr_system.state >= (int)item->state) {
			*get = item;
			lws_dll2_remove(d);
			/*
			 * We detached it, but the caller now has the
			 * responsibility to lws_free() *get.
			 */
			return 0;
		}
	} lws_end_foreach_dll(d);
#endif
	/* nobody ready to go... leave *get as NULL and return cleanly */
	return 0;
}
/*
 * Drain the pt's attach list from the event loop thread: repeatedly ask the
 * system (*attach) op to detach one ready item, run its callback, and free
 * it.  Returns 0 when the list is drained or nothing is ready yet, 1 on a
 * missing (*attach) op or a detach failure.
 */
int
lws_system_do_attach(struct lws_context_per_thread *pt)
{
	/*
	 * If nothing to do, we just return immediately
	 */
	while (pt->attach_owner.count) {
		struct lws_attach_item *item;
		/*
		 * If anybody used the attach apis, there must be an
		 * implementation of the (*attach) lws_system op function
		 */
		assert(pt->context->system_ops->attach);
		if (!pt->context->system_ops->attach) {
			lwsl_err("%s: define (*attach)\n", __func__);
			return 1;
		}
		/*
		 * System locking is applied only around this next call, while
		 * we detach and get a pointer to the tail attach item.  We
		 * become responsible to free what we have detached.
		 */
		if (pt->context->system_ops->attach(pt->context, pt->tid, NULL,
						    0, NULL, &item)) {
			lwsl_err("%s: attach problem\n", __func__);
			return 1;
		}
		if (!item)
			/* there's nothing more to do at the moment */
			return 0;
		/*
		 * Do the callback from the lws event loop thread
		 */
		item->cb(pt->context, pt->tid, item->opaque);
		/* it's done, destroy the item */
		lws_free(item);
	}
	return 0;
}
#endif