Update Files

2025-01-22 17:22:38 +01:00
parent 89b9349629
commit 4c5e729485
5132 changed files with 1195369 additions and 0 deletions


@@ -0,0 +1,85 @@
#
# libwebsockets - small server side websockets and web server implementation
#
# Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
include_directories(.)
list(APPEND SOURCES
core-net/dummy-callback.c
core-net/output.c
core-net/close.c
core-net/network.c
core-net/vhost.c
core-net/pollfd.c
core-net/service.c
core-net/sorted-usec-list.c
core-net/wsi.c
core-net/wsi-timeout.c
core-net/adopt.c
roles/pipe/ops-pipe.c
)
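# The LWS_WITH_* conditionals below correspond to cmake options chosen at
# configure time, eg, "cmake .. -DLWS_WITH_CLIENT=1" (illustrative
# invocation); each one pulls in the sources for that optional feature.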
if (LWS_WITH_SYS_STATE)
list(APPEND SOURCES
core-net/state.c
)
endif()
if (LWS_WITH_NETLINK)
list(APPEND SOURCES
core-net/route.c
)
endif()
if (LWS_WITH_LWS_DSH)
list(APPEND SOURCES
core-net/lws-dsh.c)
endif()
if (LWS_WITH_WOL)
list(APPEND SOURCES
core-net/wol.c)
endif()
if (LWS_WITH_CLIENT)
list(APPEND SOURCES
core-net/client/client.c
core-net/client/connect.c
core-net/client/connect2.c
core-net/client/connect3.c
core-net/client/connect4.c
core-net/client/sort-dns.c
)
if (LWS_WITH_CONMON)
list(APPEND SOURCES
core-net/client/conmon.c
)
endif()
endif()
if (LWS_WITH_SOCKS5 AND NOT LWS_WITHOUT_CLIENT)
list(APPEND SOURCES
core-net/socks5-client.c)
endif()
exports_to_parent_scope()


@@ -0,0 +1,58 @@
# Implementation background
## Client connection Queueing
By default lws treats each client connection as completely separate, and each is
made from scratch with its own independent network connection.
If the user code sets the `LCCSCF_PIPELINE` bit on `info.ssl_connection` when
creating the client connection, though, lws attempts to optimize multiple client
connections to the same place by sharing any existing connection and its tls
tunnel where possible.
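For example, a minimal sketch of opting a client connection into pipelining
(assumes an already-created `context`; the endpoint values are illustrative):
```c
struct lws_client_connect_info i;

memset(&i, 0, sizeof(i));
i.context        = context;	/* assumption: an existing lws_context */
i.address        = "example.com";
i.port           = 443;
i.path           = "/";
i.host           = i.address;
i.origin         = i.address;
i.method         = "GET";
i.ssl_connection = LCCSCF_USE_SSL | LCCSCF_PIPELINE;

if (!lws_client_connect_via_info(&i))
	/* the connect attempt failed early */;
```
Subsequent connections created with the same flag to the same endpoint may then
share the first connection's transport.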
There are two basic approaches: for h1, additional connections of the same type
and endpoint queue on a "leader" and happen sequentially.
For muxed protocols like h2, they may also queue if the initial connection is
not up yet, but subsequently they will all join the existing connection
simultaneously, "broadside".
## h1 queueing
The initial wsi to start the network connection becomes the "leader" that
subsequent connection attempts will queue against. Each vhost has a dll2_owner
`vhost->dll_cli_active_conns_owner` that "leaders" who are actually making
network connections themselves register on as "active client connections".
Other client wsi being created that find there is already a leader on the
vhost's active client connection list can join their dll2 `wsi->dll2_cli_txn_queue`
to the leader's `wsi->dll2_cli_txn_queue_owner` to "queue" on the leader.
The user code does not know which wsi was first or is queued; it just waits for
things to happen the same way in either case.
When the "leader" wsi connects, it performs its client transaction as normal,
and at the end arrives at `lws_http_transaction_completed_client()`. Here, it
calls through to the lws_mux `_lws_generic_transaction_completed_active_conn()`
helper. This helper sees if anything else is queued, and if so, migrates assets
like the SSL *, the socket fd, and any remaining queue from the original leader
to the head of the list, which replaces the old leader as the "active client
connection" any subsequent connects would queue on.
It has to be done this way so that user code which may know each client wsi by
its wsi, or have marked it with an opaque_user_data pointer, is getting its
specific request handled by the wsi it expects it to be handled by.
As a side effect of this, and in order to be able to handle POSTs cleanly, lws
does not attempt to send the headers for the next queued child before the
previous child has finished.
The process of moving the SSL context, fd and so on between the queued wsi
continues until the whole queue has been handled.
## muxed protocol queueing and stream binding
h2 connections act the same as h1 before the initial connection has been made,
but once it is up, all the queued connections join it immediately as child mux
streams, "broadside", binding each stream to the existing network connection.


@@ -0,0 +1,947 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#include "private-lib-async-dns.h"
static int
lws_get_idlest_tsi(struct lws_context *context)
{
unsigned int lowest = ~0u;
int n = 0, hit = -1;
for (; n < context->count_threads; n++) {
lwsl_cx_debug(context, "%d %d\n", context->pt[n].fds_count,
context->fd_limit_per_thread - 1);
if ((unsigned int)context->pt[n].fds_count !=
context->fd_limit_per_thread - 1 &&
(unsigned int)context->pt[n].fds_count < lowest) {
lowest = context->pt[n].fds_count;
hit = n;
}
}
return hit;
}
struct lws *
lws_create_new_server_wsi(struct lws_vhost *vhost, int fixed_tsi, int group,
const char *desc)
{
struct lws *new_wsi;
int n = fixed_tsi;
if (n < 0)
n = lws_get_idlest_tsi(vhost->context);
if (n < 0) {
lwsl_vhost_err(vhost, "no space for new conn");
return NULL;
}
lws_context_lock(vhost->context, __func__);
new_wsi = __lws_wsi_create_with_role(vhost->context, n, NULL,
vhost->lc.log_cx);
lws_context_unlock(vhost->context);
if (new_wsi == NULL) {
lwsl_vhost_err(vhost, "OOM");
return NULL;
}
lws_wsi_fault_timedclose(new_wsi);
__lws_lc_tag(vhost->context, &vhost->context->lcg[group],
&new_wsi->lc, "%s|%s", vhost->name, desc);
new_wsi->wsistate |= LWSIFR_SERVER;
new_wsi->tsi = (char)n;
lwsl_wsi_debug(new_wsi, "joining vh %s, tsi %d",
vhost->name, new_wsi->tsi);
lws_vhost_bind_wsi(vhost, new_wsi);
new_wsi->rxflow_change_to = LWS_RXFLOW_ALLOW;
new_wsi->retry_policy = vhost->retry_policy;
/* initialize the instance struct */
lwsi_set_state(new_wsi, LRS_UNCONNECTED);
new_wsi->hdr_parsing_completed = 0;
#ifdef LWS_WITH_TLS
new_wsi->tls.use_ssl = LWS_SSL_ENABLED(vhost);
#endif
/*
* these can only be set once the protocol is known
* we set an un-established connection's protocol pointer
* to the start of the supported list, so it can look
* for matching ones during the handshake
*/
new_wsi->a.protocol = vhost->protocols;
new_wsi->user_space = NULL;
/*
* outermost create notification for wsi
* no user_space because no protocol selection
*/
vhost->protocols[0].callback(new_wsi, LWS_CALLBACK_WSI_CREATE, NULL,
NULL, 0);
return new_wsi;
}
/* if not a socket, it's a raw, non-ssl file descriptor
* req cx lock, acq pt lock, acq vh lock
*/
static struct lws *
__lws_adopt_descriptor_vhost1(struct lws_vhost *vh, lws_adoption_type type,
const char *vh_prot_name, struct lws *parent,
void *opaque, const char *fi_wsi_name)
{
struct lws_context *context;
struct lws_context_per_thread *pt;
struct lws *new_wsi;
int n;
/*
* Notice that in SMP case, the wsi may be being created on an
* entirely different pt / tsi for load balancing. In that case as
* we initialize it, it may become "live" concurrently unexpectedly...
*/
if (!vh)
return NULL;
context = vh->context;
lws_context_assert_lock_held(vh->context);
n = -1;
if (parent)
n = parent->tsi;
new_wsi = lws_create_new_server_wsi(vh, n, LWSLCG_WSI_SERVER, fi_wsi_name);
if (!new_wsi)
return NULL;
/* bring in specific fault injection rules early */
lws_fi_inherit_copy(&new_wsi->fic, &context->fic, "wsi", fi_wsi_name);
if (lws_fi(&new_wsi->fic, "createfail")) {
lws_fi_destroy(&new_wsi->fic);
return NULL;
}
new_wsi->a.opaque_user_data = opaque;
pt = &context->pt[(int)new_wsi->tsi];
lws_pt_lock(pt, __func__);
if (parent) {
new_wsi->parent = parent;
new_wsi->sibling_list = parent->child_list;
parent->child_list = new_wsi;
}
if (vh_prot_name) {
new_wsi->a.protocol = lws_vhost_name_to_protocol(new_wsi->a.vhost,
vh_prot_name);
if (!new_wsi->a.protocol) {
lwsl_vhost_err(new_wsi->a.vhost, "Protocol %s not enabled",
vh_prot_name);
goto bail;
}
if (lws_ensure_user_space(new_wsi)) {
lwsl_wsi_notice(new_wsi, "OOM");
goto bail;
}
}
if (!LWS_SSL_ENABLED(new_wsi->a.vhost) ||
!(type & LWS_ADOPT_SOCKET))
type &= (unsigned int)~LWS_ADOPT_ALLOW_SSL;
if (lws_role_call_adoption_bind(new_wsi, (int)type, vh_prot_name)) {
lwsl_wsi_err(new_wsi, "no role for desc type 0x%x", type);
goto bail;
}
#if defined(LWS_WITH_SERVER)
if (new_wsi->role_ops) {
lws_metrics_tag_wsi_add(new_wsi, "role", new_wsi->role_ops->name);
}
#endif
lws_pt_unlock(pt);
/*
* he's an allocated wsi, but he's not on any fds list or child list,
* join him to the vhost's list of these kinds of incomplete wsi until
* he gets another identity (he may do async dns now...)
*/
lws_vhost_lock(new_wsi->a.vhost);
lws_dll2_add_head(&new_wsi->vh_awaiting_socket,
&new_wsi->a.vhost->vh_awaiting_socket_owner);
lws_vhost_unlock(new_wsi->a.vhost);
return new_wsi;
bail:
lwsl_wsi_notice(new_wsi, "exiting on bail");
if (parent)
parent->child_list = new_wsi->sibling_list;
if (new_wsi->user_space)
lws_free(new_wsi->user_space);
lws_fi_destroy(&new_wsi->fic);
lws_pt_unlock(pt);
__lws_vhost_unbind_wsi(new_wsi); /* req cx, acq vh lock */
lws_free(new_wsi);
return NULL;
}
#if defined(LWS_WITH_SERVER) && defined(LWS_WITH_SECURE_STREAMS)
/*
* If the incoming wsi is bound to a vhost that is a ss server, this creates
* an accepted ss bound to the wsi.
*
* For h1 or raw, we can do the binding here, but for muxed protocols like h2
* or mqtt we have to do it not on the nwsi but on the stream. And for h2 we
* start off bound to h1 role, since we don't know if we will upgrade to h2
* until we meet the server.
*
* 1) No tls is assumed to mean no muxed protocol so can do it at adopt.
*
* 2) After alpn if not muxed we can do it.
*
* 3) For muxed, do it at the nwsi migration and on new stream
*/
int
lws_adopt_ss_server_accept(struct lws *new_wsi)
{
struct lws_context_per_thread *pt =
&new_wsi->a.context->pt[(int)new_wsi->tsi];
lws_ss_handle_t *h;
void *pv, **ppv;
if (!new_wsi->a.vhost->ss_handle)
return 0;
pv = (char *)&new_wsi->a.vhost->ss_handle[1];
/*
* Yes... the vhost is pointing to its secure stream representing the
* server... we want to create an accepted SS and bind it to new_wsi,
* the info/ssi from the server SS (so the SS callbacks defined there),
* the opaque_user_data of the server object and the policy of it.
*/
ppv = (void **)((char *)pv +
new_wsi->a.vhost->ss_handle->info.opaque_user_data_offset);
/*
* indicate we are an accepted connection referencing the
* server object
*/
new_wsi->a.vhost->ss_handle->info.flags |= LWSSSINFLAGS_SERVER;
if (lws_ss_create(new_wsi->a.context, new_wsi->tsi,
&new_wsi->a.vhost->ss_handle->info,
*ppv, &h, NULL, NULL)) {
lwsl_wsi_err(new_wsi, "accept ss creation failed");
goto fail1;
}
/*
* We made a fresh accepted SS conn from the server pieces,
* now bind the wsi... the problem is, this is the nwsi if it's
* h2.
*/
h->wsi = new_wsi;
new_wsi->a.opaque_user_data = h;
h->info.flags |= LWSSSINFLAGS_ACCEPTED;
/* indicate wsi should invalidate any ss link to it on close */
new_wsi->for_ss = 1;
// lwsl_wsi_notice(new_wsi, "%s: opaq %p, role %s",
// new_wsi->a.opaque_user_data,
// new_wsi->role_ops->name);
h->policy = new_wsi->a.vhost->ss_handle->policy;
/* apply requested socket options */
if (lws_plat_set_socket_options_ip(new_wsi->desc.sockfd,
h->policy->priority,
(LCCSCF_IP_LOW_LATENCY *
!!(h->policy->flags & LWSSSPOLF_ATTR_LOW_LATENCY)) |
(LCCSCF_IP_HIGH_THROUGHPUT *
!!(h->policy->flags & LWSSSPOLF_ATTR_HIGH_THROUGHPUT)) |
(LCCSCF_IP_HIGH_RELIABILITY *
!!(h->policy->flags & LWSSSPOLF_ATTR_HIGH_RELIABILITY)) |
(LCCSCF_IP_LOW_COST *
!!(h->policy->flags & LWSSSPOLF_ATTR_LOW_COST))))
lwsl_wsi_warn(new_wsi, "unable to set ip options");
/*
* add us to the list of clients that came in from the server
*/
lws_pt_lock(pt, __func__);
lws_dll2_add_tail(&h->cli_list, &new_wsi->a.vhost->ss_handle->src_list);
lws_pt_unlock(pt);
/*
* Let's give it appropriate state notifications
*/
if (lws_ss_event_helper(h, LWSSSCS_CREATING))
goto fail;
if (lws_ss_event_helper(h, LWSSSCS_CONNECTING))
goto fail;
/* defer CONNECTED until we see if he is upgrading */
// if (lws_ss_event_helper(h, LWSSSCS_CONNECTED))
// goto fail;
// lwsl_notice("%s: accepted ss complete, pcol %s\n", __func__,
// new_wsi->a.protocol->name);
return 0;
fail:
lws_ss_destroy(&h);
fail1:
return 1;
}
#endif
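/*
 * Second half of adoption: force the fd nonblocking, insert it into the
 * pt's fds table (or begin tls negotiation), and only then tell the
 * protocol via the role's adoption callback, so that
 * lws_callback_on_writable() already works from inside the callback.
 */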
static struct lws *
lws_adopt_descriptor_vhost2(struct lws *new_wsi, lws_adoption_type type,
lws_sock_file_fd_type fd)
{
struct lws_context_per_thread *pt =
&new_wsi->a.context->pt[(int)new_wsi->tsi];
int n;
/* enforce that every fd is nonblocking */
if (type & LWS_ADOPT_SOCKET) {
if (lws_plat_set_nonblocking(fd.sockfd)) {
lwsl_wsi_err(new_wsi, "unable to set sockfd %d nonblocking",
fd.sockfd);
goto fail;
}
}
#if !defined(WIN32)
else
if (lws_plat_set_nonblocking(fd.filefd)) {
lwsl_wsi_err(new_wsi, "unable to set filefd nonblocking");
goto fail;
}
#endif
new_wsi->desc = fd;
if (!LWS_SSL_ENABLED(new_wsi->a.vhost) ||
!(type & LWS_ADOPT_SOCKET))
type &= (unsigned int)~LWS_ADOPT_ALLOW_SSL;
/*
* A new connection was accepted. Give the user a chance to
* set properties of the newly created wsi. There's no protocol
* selected yet so we issue this to the vhost's default protocol,
* itself by default protocols[0]
*/
new_wsi->wsistate |= LWSIFR_SERVER;
n = LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED;
if (new_wsi->role_ops->adoption_cb[lwsi_role_server(new_wsi)])
n = new_wsi->role_ops->adoption_cb[lwsi_role_server(new_wsi)];
if (new_wsi->a.context->event_loop_ops->sock_accept)
if (new_wsi->a.context->event_loop_ops->sock_accept(new_wsi))
goto fail;
#if LWS_MAX_SMP > 1
/*
* Caution: after this point the wsi is live on its service thread
* which may be concurrent to this. We mark the wsi as still undergoing
* init in another pt so the assigned pt leaves it alone.
*/
new_wsi->undergoing_init_from_other_pt = 1;
#endif
if (!(type & LWS_ADOPT_ALLOW_SSL)) {
lws_pt_lock(pt, __func__);
if (__insert_wsi_socket_into_fds(new_wsi->a.context, new_wsi)) {
lws_pt_unlock(pt);
lwsl_wsi_err(new_wsi, "fail inserting socket");
goto fail;
}
lws_pt_unlock(pt);
}
#if defined(LWS_WITH_SERVER)
else
if (lws_server_socket_service_ssl(new_wsi, fd.sockfd, 0)) {
lwsl_wsi_info(new_wsi, "fail ssl negotiation");
goto fail;
}
#endif
lws_vhost_lock(new_wsi->a.vhost);
/* he has fds visibility now, remove from vhost orphan list */
lws_dll2_remove(&new_wsi->vh_awaiting_socket);
lws_vhost_unlock(new_wsi->a.vhost);
/*
* by deferring callback to this point, after insertion to fds,
* lws_callback_on_writable() can work from the callback
*/
if ((new_wsi->a.protocol->callback)(new_wsi, (enum lws_callback_reasons)n, new_wsi->user_space,
NULL, 0))
goto fail;
/* role may need to do something after all adoption completed */
lws_role_call_adoption_bind(new_wsi, (int)type | _LWS_ADOPT_FINISH,
new_wsi->a.protocol->name);
#if defined(LWS_WITH_SERVER) && defined(LWS_WITH_SECURE_STREAMS)
/*
* Did we come from an accepted client connection to a ss server?
*
* !!! For mux protocols, this will cause an additional inactive ss
* representing the nwsi. Doing that allows us to support both h1
* (here) and h2 (at __lws_wsi_server_new())
*/
lwsl_wsi_info(new_wsi, "vhost %s", new_wsi->a.vhost->lc.gutag);
if (lws_adopt_ss_server_accept(new_wsi))
goto fail;
#endif
#if LWS_MAX_SMP > 1
/* its actual pt can service it now */
new_wsi->undergoing_init_from_other_pt = 0;
#endif
lws_cancel_service_pt(new_wsi);
return new_wsi;
fail:
if (type & LWS_ADOPT_SOCKET)
lws_close_free_wsi(new_wsi, LWS_CLOSE_STATUS_NOSTATUS,
"adopt skt fail");
return NULL;
}
/* if not a socket, it's a raw, non-ssl file descriptor */
struct lws *
lws_adopt_descriptor_vhost(struct lws_vhost *vh, lws_adoption_type type,
lws_sock_file_fd_type fd, const char *vh_prot_name,
struct lws *parent)
{
lws_adopt_desc_t info;
memset(&info, 0, sizeof(info));
info.vh = vh;
info.type = type;
info.fd = fd;
info.vh_prot_name = vh_prot_name;
info.parent = parent;
return lws_adopt_descriptor_vhost_via_info(&info);
}
struct lws *
lws_adopt_descriptor_vhost_via_info(const lws_adopt_desc_t *info)
{
socklen_t slen = sizeof(lws_sockaddr46);
struct lws *new_wsi;
#if defined(LWS_WITH_PEER_LIMITS)
struct lws_peer *peer = NULL;
if (info->type & LWS_ADOPT_SOCKET) {
peer = lws_get_or_create_peer(info->vh, info->fd.sockfd);
if (peer && info->vh->context->ip_limit_wsi &&
peer->count_wsi >= info->vh->context->ip_limit_wsi) {
lwsl_info("Peer reached wsi limit %d\n",
info->vh->context->ip_limit_wsi);
if (info->vh->context->pl_notify_cb)
info->vh->context->pl_notify_cb(
info->vh->context,
info->fd.sockfd,
&peer->sa46);
compatible_close(info->fd.sockfd);
return NULL;
}
}
#endif
lws_context_lock(info->vh->context, __func__);
new_wsi = __lws_adopt_descriptor_vhost1(info->vh, info->type,
info->vh_prot_name, info->parent,
info->opaque, info->fi_wsi_name);
if (!new_wsi) {
if (info->type & LWS_ADOPT_SOCKET)
compatible_close(info->fd.sockfd);
goto bail;
}
if (info->type & LWS_ADOPT_SOCKET &&
getpeername(info->fd.sockfd, (struct sockaddr *)&new_wsi->sa46_peer,
&slen) < 0)
lwsl_info("%s: getpeername failed\n", __func__);
#if defined(LWS_WITH_PEER_LIMITS)
if (peer)
lws_peer_add_wsi(info->vh->context, peer, new_wsi);
#endif
new_wsi = lws_adopt_descriptor_vhost2(new_wsi, info->type, info->fd);
bail:
lws_context_unlock(info->vh->context);
return new_wsi;
}
struct lws *
lws_adopt_socket_vhost(struct lws_vhost *vh, lws_sockfd_type accept_fd)
{
lws_sock_file_fd_type fd;
fd.sockfd = accept_fd;
return lws_adopt_descriptor_vhost(vh, LWS_ADOPT_SOCKET |
LWS_ADOPT_HTTP | LWS_ADOPT_ALLOW_SSL, fd, NULL, NULL);
}
struct lws *
lws_adopt_socket(struct lws_context *context, lws_sockfd_type accept_fd)
{
return lws_adopt_socket_vhost(context->vhost_list, accept_fd);
}
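/*
 * Usage sketch (illustrative, not part of lws itself): adopting a socket
 * that was accept()ed outside of lws, so that lws services it from now
 * on.  "listen_fd" and "vhost" are assumed to exist in the caller.
 *
 *	lws_sockfd_type fd = accept(listen_fd, NULL, NULL);
 *
 *	if (fd != LWS_SOCK_INVALID && !lws_adopt_socket_vhost(vhost, fd))
 *		lwsl_err("adoption failed (fd already closed by lws)\n");
 */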
/* Common read-buffer adoption for lws_adopt_*_readbuf */
static struct lws*
adopt_socket_readbuf(struct lws *wsi, const char *readbuf, size_t len)
{
struct lws_context_per_thread *pt;
struct lws_pollfd *pfd;
int n;
if (!wsi)
return NULL;
if (!readbuf || len == 0)
return wsi;
if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
return wsi;
pt = &wsi->a.context->pt[(int)wsi->tsi];
n = lws_buflist_append_segment(&wsi->buflist, (const uint8_t *)readbuf,
len);
if (n < 0)
goto bail;
if (n)
lws_dll2_add_head(&wsi->dll_buflist, &pt->dll_buflist_owner);
/*
* we can't process the initial read data until we can attach an ah.
*
* if one is available, get it and place the data in his ah rxbuf...
* wsi with ah that have pending rxbuf get auto-POLLIN service.
*
* no autoservice because we didn't get a chance to attach the
* readbuf data to wsi or ah yet, and we will do it next if we get
* the ah.
*/
if (wsi->http.ah || !lws_header_table_attach(wsi, 0)) {
lwsl_notice("%s: calling service on readbuf ah\n", __func__);
/*
* unlike a normal connect, we have the headers already
* (or the first part of them anyway).
* libuv won't come back and service us without a network
* event, so we need to do the header service right here.
*/
pfd = &pt->fds[wsi->position_in_fds_table];
pfd->revents |= LWS_POLLIN;
lwsl_err("%s: calling service\n", __func__);
if (lws_service_fd_tsi(wsi->a.context, pfd, wsi->tsi))
/* service closed us */
return NULL;
return wsi;
}
lwsl_err("%s: deferring handling ah\n", __func__);
return wsi;
bail:
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"adopt skt readbuf fail");
return NULL;
}
#if defined(LWS_WITH_UDP)
#if defined(LWS_WITH_CLIENT)
/*
* This is the ASYNC_DNS callback target for udp client, it's analogous to
* connect3()
*/
static struct lws *
lws_create_adopt_udp2(struct lws *wsi, const char *ads,
const struct addrinfo *r, int n, void *opaque)
{
lws_sock_file_fd_type sock;
int bc = 1, m;
assert(wsi);
if (ads && (n < 0 || !r)) {
/*
* DNS lookup failed: there are no usable results. Fail the
* overall connection request.
*/
lwsl_notice("%s: bad: n %d, r %p\n", __func__, n, r);
goto bail;
}
m = lws_sort_dns(wsi, r);
#if defined(LWS_WITH_SYS_ASYNC_DNS)
lws_async_dns_freeaddrinfo(&r);
#else
freeaddrinfo((struct addrinfo *)r);
#endif
if (m)
goto bail;
while (lws_dll2_get_head(&wsi->dns_sorted_list)) {
lws_dns_sort_t *s = lws_container_of(
lws_dll2_get_head(&wsi->dns_sorted_list),
lws_dns_sort_t, list);
/*
* Remove it from the head, but don't free it yet... we are
* taking responsibility to free it
*/
lws_dll2_remove(&s->list);
/*
* We have done the dns lookup, identify the result we want
* if any, and then complete the adoption by binding wsi to
* socket opened on it.
*
* Ignore the weak assumptions about protocol driven by port
* number and force to DGRAM / UDP since that's what this
* function is for.
*/
#if !defined(__linux__)
sock.sockfd = socket(s->dest.sa4.sin_family,
SOCK_DGRAM, IPPROTO_UDP);
#else
/* PF_PACKET is linux-only */
sock.sockfd = socket(wsi->pf_packet ? PF_PACKET :
s->dest.sa4.sin_family,
SOCK_DGRAM, wsi->pf_packet ?
htons(0x800) : IPPROTO_UDP);
#endif
if (sock.sockfd == LWS_SOCK_INVALID)
goto resume;
/* ipv6 udp!!! */
if (s->af == AF_INET)
s->dest.sa4.sin_port = htons(wsi->c_port);
#if defined(LWS_WITH_IPV6)
else
s->dest.sa6.sin6_port = htons(wsi->c_port);
#endif
if (setsockopt(sock.sockfd, SOL_SOCKET, SO_REUSEADDR,
(const char *)&bc, sizeof(bc)) < 0)
lwsl_err("%s: failed to set reuse\n", __func__);
if (wsi->do_broadcast &&
setsockopt(sock.sockfd, SOL_SOCKET, SO_BROADCAST,
(const char *)&bc, sizeof(bc)) < 0)
lwsl_err("%s: failed to set broadcast\n", __func__);
/* Bind the udp socket to a particular network interface */
if (opaque &&
lws_plat_BINDTODEVICE(sock.sockfd, (const char *)opaque))
goto resume;
if (wsi->do_bind &&
bind(sock.sockfd, sa46_sockaddr(&s->dest),
#if defined(_WIN32)
(int)sa46_socklen(&s->dest)
#else
sizeof(struct sockaddr)
#endif
) == -1) {
lwsl_err("%s: bind failed\n", __func__);
goto resume;
}
if (!wsi->do_bind && !wsi->pf_packet) {
#if !defined(__APPLE__)
if (connect(sock.sockfd, sa46_sockaddr(&s->dest),
sa46_socklen(&s->dest)) == -1 &&
errno != EADDRNOTAVAIL /* openbsd */ ) {
lwsl_err("%s: conn fd %d fam %d %s:%u failed "
"errno %d\n", __func__, sock.sockfd,
s->dest.sa4.sin_family,
ads ? ads : "null", wsi->c_port,
LWS_ERRNO);
compatible_close(sock.sockfd);
goto resume;
}
#endif
}
if (wsi->udp)
wsi->udp->sa46 = s->dest;
wsi->sa46_peer = s->dest;
/* we connected: complete the udp socket adoption flow */
#if defined(LWS_WITH_SYS_ASYNC_DNS)
{
lws_async_dns_server_t *asds =
__lws_async_dns_server_find_wsi(
&wsi->a.context->async_dns, wsi);
if (asds)
asds->dns_server_connected = 1;
}
#endif
lws_free(s);
lws_addrinfo_clean(wsi);
return lws_adopt_descriptor_vhost2(wsi,
LWS_ADOPT_RAW_SOCKET_UDP, sock);
resume:
lws_free(s);
}
lwsl_err("%s: unable to create INET socket %d\n", __func__, LWS_ERRNO);
lws_addrinfo_clean(wsi);
#if defined(LWS_WITH_SYS_ASYNC_DNS)
{
lws_async_dns_server_t *asds = __lws_async_dns_server_find_wsi(
&wsi->a.context->async_dns, wsi);
if (asds)
lws_async_dns_drop_server(asds);
}
#endif
bail:
/* caller must close */
return NULL;
}
struct lws *
lws_create_adopt_udp(struct lws_vhost *vhost, const char *ads, int port,
int flags, const char *protocol_name, const char *ifname,
struct lws *parent_wsi, void *opaque,
const lws_retry_bo_t *retry_policy, const char *fi_wsi_name)
{
#if !defined(LWS_PLAT_OPTEE)
struct lws *wsi;
int n;
lwsl_info("%s: %s:%u\n", __func__, ads ? ads : "null", port);
/* create the logical wsi without any valid fd */
lws_context_lock(vhost->context, __func__);
wsi = __lws_adopt_descriptor_vhost1(vhost, LWS_ADOPT_SOCKET |
LWS_ADOPT_RAW_SOCKET_UDP,
protocol_name, parent_wsi, opaque,
fi_wsi_name);
lws_context_unlock(vhost->context);
if (!wsi) {
lwsl_err("%s: udp wsi creation failed\n", __func__);
goto bail;
}
// lwsl_notice("%s: role %s\n", __func__, wsi->role_ops->name);
wsi->do_bind = !!(flags & LWS_CAUDP_BIND);
wsi->do_broadcast = !!(flags & LWS_CAUDP_BROADCAST);
wsi->pf_packet = !!(flags & LWS_CAUDP_PF_PACKET);
wsi->c_port = (uint16_t)(unsigned int)port;
if (retry_policy)
wsi->retry_policy = retry_policy;
else
wsi->retry_policy = vhost->retry_policy;
#if !defined(LWS_WITH_SYS_ASYNC_DNS)
{
struct addrinfo *r, h;
char buf[16];
memset(&h, 0, sizeof(h));
h.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
h.ai_socktype = SOCK_DGRAM;
h.ai_protocol = IPPROTO_UDP;
#if defined(AI_PASSIVE)
h.ai_flags = AI_PASSIVE;
#endif
#ifdef AI_ADDRCONFIG
h.ai_flags |= AI_ADDRCONFIG;
#endif
/* if the dns lookup is synchronous, do the whole thing now */
lws_snprintf(buf, sizeof(buf), "%u", port);
n = getaddrinfo(ads, buf, &h, &r);
if (n) {
#if !defined(LWS_PLAT_FREERTOS)
lwsl_cx_info(vhost->context, "getaddrinfo error: %d", n);
#else
#if (_LWS_ENABLED_LOGS & LLL_INFO)
char t16[16];
lwsl_cx_info(vhost->context, "getaddrinfo error: %s",
lws_errno_describe(LWS_ERRNO, t16, sizeof(t16)));
#endif
#endif
//freeaddrinfo(r);
goto bail1;
}
/*
* With synchronous dns, complete it immediately after the
* blocking dns lookup finished... free r when connect either
* completed or failed
*/
wsi = lws_create_adopt_udp2(wsi, ads, r, 0, NULL);
return wsi;
}
#else
if (ads) {
/*
* with async dns, use the wsi as the point about which to do
* the dns lookup and have it call the second part when it's
* done.
*
* Keep a refcount on the results and free it when we connected
* or definitively failed.
*
* Notice wsi has no socket at this point (we don't know what
* kind to ask for until we get the dns back). But it is bound
* to a vhost and can be cleaned up from that at vhost destroy.
*/
n = lws_async_dns_query(vhost->context, 0, ads,
LWS_ADNS_RECORD_A,
lws_create_adopt_udp2, wsi,
(void *)ifname, NULL);
// lwsl_notice("%s: dns query returned %d\n", __func__, n);
if (n == LADNS_RET_FAILED) {
lwsl_err("%s: async dns failed\n", __func__);
wsi = NULL;
/*
* It was already closed by calling callback with error
* from lws_async_dns_query()
*/
goto bail;
}
} else {
lwsl_debug("%s: udp adopt has no ads\n", __func__);
wsi = lws_create_adopt_udp2(wsi, ads, NULL, 0, (void *)ifname);
}
/* dns lookup is happening asynchronously */
// lwsl_notice("%s: returning wsi %p\n", __func__, wsi);
return wsi;
#endif
#if !defined(LWS_WITH_SYS_ASYNC_DNS)
bail1:
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "adopt udp2 fail");
wsi = NULL;
#endif
bail:
return wsi;
#else
return NULL;
#endif
}
#endif
#endif
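/*
 * The wrappers below adopt an already-accepted socket when some bytes
 * have already been read from it (eg, by a protocol sniffer): the fd is
 * adopted as usual and then readbuf / len are replayed into the wsi by
 * adopt_socket_readbuf() above.
 */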
struct lws *
lws_adopt_socket_readbuf(struct lws_context *context, lws_sockfd_type accept_fd,
const char *readbuf, size_t len)
{
return adopt_socket_readbuf(lws_adopt_socket(context, accept_fd),
readbuf, len);
}
struct lws *
lws_adopt_socket_vhost_readbuf(struct lws_vhost *vhost,
lws_sockfd_type accept_fd,
const char *readbuf, size_t len)
{
return adopt_socket_readbuf(lws_adopt_socket_vhost(vhost, accept_fd),
readbuf, len);
}


@@ -0,0 +1,121 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#if defined(LWS_CLIENT_HTTP_PROXYING)
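/*
 * Set the vhost's http proxy from a string like
 * "user:password@proxyhost:8080" or "[::1]:8080" (illustrative values):
 * any "user:password@" prefix becomes a basic auth token, a redundant
 * leading "http://" is tolerated, and [] quoting protects an ipv6
 * address from the port separator.
 */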
int
lws_set_proxy(struct lws_vhost *vhost, const char *proxy)
{
char authstring[96];
int brackets = 0;
char *p;
if (!proxy)
return -1;
/* we have to deal with a possible redundant leading http:// */
if (!strncmp(proxy, "http://", 7))
proxy += 7;
p = strrchr(proxy, '@');
if (p) { /* auth is around */
if (lws_ptr_diff_size_t(p, proxy) > sizeof(authstring) - 1)
goto auth_too_long;
lws_strncpy(authstring, proxy, lws_ptr_diff_size_t(p, proxy) + 1);
// null termination not needed on input
if (lws_b64_encode_string(authstring, lws_ptr_diff(p, proxy),
vhost->proxy_basic_auth_token,
sizeof vhost->proxy_basic_auth_token) < 0)
goto auth_too_long;
lwsl_vhost_info(vhost, " Proxy auth in use");
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
proxy = p + 1;
#endif
} else
vhost->proxy_basic_auth_token[0] = '\0';
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
#if defined(LWS_WITH_IPV6)
/*
* isolating the address / port is complicated by IPv6 overloading
* the meaning of : in the address. The convention to solve it is to
* put [] around the ipv6 address part, eg, "[::1]:443". This must be
* parsed to "::1" as the address and the port as 443.
*
* IPv4 addresses like myproxy:443 continue to be parsed as normal.
*/
if (proxy[0] == '[')
brackets = 1;
#endif
lws_strncpy(vhost->http.http_proxy_address, proxy + brackets,
sizeof(vhost->http.http_proxy_address));
p = vhost->http.http_proxy_address;
#if defined(LWS_WITH_IPV6)
if (brackets) {
/* original is IPv6 format "[::1]:443" */
p = strchr(vhost->http.http_proxy_address, ']');
if (!p) {
lwsl_vhost_err(vhost, "malformed proxy '%s'", proxy);
return -1;
}
*p++ = '\0';
}
#endif
p = strchr(p, ':');
if (!p && !vhost->http.http_proxy_port) {
lwsl_vhost_err(vhost, "http_proxy needs to be ads:port");
return -1;
}
if (p) {
*p = '\0';
vhost->http.http_proxy_port = (unsigned int)atoi(p + 1);
}
lwsl_vhost_info(vhost, " Proxy %s:%u", vhost->http.http_proxy_address,
vhost->http.http_proxy_port);
#endif
return 0;
auth_too_long:
lwsl_vhost_err(vhost, "proxy auth too long");
return -1;
}
#endif


@@ -0,0 +1,155 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Client Connection Latency and DNS reporting
*/
/*
* We want to allocate copies for and append DNS results that we don't already
* have. We take this approach because a) we may be getting duplicated results
* from multiple DNS servers, and b) we may be getting results staccato over
* time.
*
* We capture DNS results from either getaddrinfo or ASYNC_DNS the same here,
* before they are sorted and filtered.
*
* Because this is relatively expensive, we only do it on client wsi that
* explicitly indicated that they want it with the LCCSCF_CONMON flag.
*/
#include <private-lib-core.h>
int
lws_conmon_append_copy_new_dns_results(struct lws *wsi,
const struct addrinfo *cai)
{
if (!(wsi->flags & LCCSCF_CONMON))
return 0;
/*
* Let's go through the incoming guys, seeing if we already have them,
* or if we want to take a copy
*/
while (cai) {
struct addrinfo *ai = wsi->conmon.dns_results_copy;
char skip = 0;
/* do we already have this guy? */
while (ai) {
if (ai->ai_family == cai->ai_family &&
    ai->ai_addrlen == cai->ai_addrlen &&
    ai->ai_protocol == cai->ai_protocol &&
    ai->ai_socktype == cai->ai_socktype &&
/* either ipv4 or v6 address must match */
((ai->ai_family == AF_INET &&
((struct sockaddr_in *)ai->ai_addr)->
sin_addr.s_addr ==
((struct sockaddr_in *)cai->ai_addr)->
sin_addr.s_addr)
#if defined(LWS_WITH_IPV6)
||
(ai->ai_family == AF_INET6 &&
!memcmp(((struct sockaddr_in6 *)ai->ai_addr)->
sin6_addr.s6_addr,
((struct sockaddr_in6 *)cai->ai_addr)->
sin6_addr.s6_addr, 16))
#endif
)) {
/* yes, we already got a copy then */
skip = 1;
break;
}
ai = ai->ai_next;
}
if (!skip) {
/*
* No we don't already have a copy of this one, let's
* allocate and append it then
*/
size_t al = sizeof(struct addrinfo) +
(size_t)cai->ai_addrlen;
size_t cl = cai->ai_canonname ?
strlen(cai->ai_canonname) + 1 : 0;
ai = lws_malloc(al + cl + 1, __func__);
if (!ai) {
lwsl_wsi_warn(wsi, "OOM");
return 1;
}
*ai = *cai;
ai->ai_addr = (struct sockaddr *)&ai[1];
memcpy(ai->ai_addr, cai->ai_addr, (size_t)cai->ai_addrlen);
if (cl) {
ai->ai_canonname = ((char *)ai->ai_addr) +
cai->ai_addrlen;
memcpy(ai->ai_canonname, cai->ai_canonname,
cl);
ai->ai_canonname[cl] = '\0';
}
ai->ai_next = wsi->conmon.dns_results_copy;
wsi->conmon.dns_results_copy = ai;
}
cai = cai->ai_next;
}
return 0;
}
void
lws_conmon_addrinfo_destroy(struct addrinfo *ai)
{
while (ai) {
struct addrinfo *ai1 = ai->ai_next;
lws_free(ai);
ai = ai1;
}
}
void
lws_conmon_wsi_take(struct lws *wsi, struct lws_conmon *dest)
{
memcpy(dest, &wsi->conmon, sizeof(*dest));
dest->peer46 = wsi->sa46_peer;
/* wsi no longer has to free it... */
wsi->conmon.dns_results_copy = NULL;
wsi->perf_done = 1;
}
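/*
 * Typical consumption (a sketch; the exact callback depends on the
 * role): from the client's CLOSED or CONNECTION_ERROR callback, take
 * ownership of the collected results, inspect them, then release them:
 *
 *	struct lws_conmon cm;
 *
 *	lws_conmon_wsi_take(wsi, &cm);
 *	...look at cm.ciu_dns, cm.dns_disposition, cm.dns_results_copy...
 *	lws_conmon_release(&cm);
 */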
void
lws_conmon_release(struct lws_conmon *conmon)
{
if (!conmon)
return;
lws_conmon_addrinfo_destroy(conmon->dns_results_copy);
conmon->dns_results_copy = NULL;
}


@@ -0,0 +1,560 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
static const uint8_t hnames[] = {
_WSI_TOKEN_CLIENT_PEER_ADDRESS,
_WSI_TOKEN_CLIENT_URI,
_WSI_TOKEN_CLIENT_HOST,
_WSI_TOKEN_CLIENT_ORIGIN,
_WSI_TOKEN_CLIENT_SENT_PROTOCOLS,
_WSI_TOKEN_CLIENT_METHOD,
_WSI_TOKEN_CLIENT_IFACE,
_WSI_TOKEN_CLIENT_ALPN
};
struct lws *
lws_http_client_connect_via_info2(struct lws *wsi)
{
struct client_info_stash *stash = wsi->stash;
int n;
lwsl_wsi_debug(wsi, "stash %p", stash);
if (!stash)
return wsi;
wsi->a.opaque_user_data = wsi->stash->opaque_user_data;
if (stash->cis[CIS_METHOD] && (!strcmp(stash->cis[CIS_METHOD], "RAW") ||
!strcmp(stash->cis[CIS_METHOD], "MQTT")))
goto no_ah;
/*
 * we're not necessarily in a position to action these right away;
 * stash them... we only need them during the connect phase, so they
 * go into a temporarily allocated stash
 */
for (n = 0; n < (int)LWS_ARRAY_SIZE(hnames); n++)
if (hnames[n] && stash->cis[n] &&
lws_hdr_simple_create(wsi, hnames[n], stash->cis[n]))
goto bail;
#if defined(LWS_WITH_SOCKS5)
if (!wsi->a.vhost->socks_proxy_port)
lws_free_set_NULL(wsi->stash);
#endif
no_ah:
return lws_client_connect_2_dnsreq(wsi);
bail:
#if defined(LWS_WITH_SOCKS5)
if (!wsi->a.vhost->socks_proxy_port)
lws_free_set_NULL(wsi->stash);
#endif
lws_free_set_NULL(wsi->stash);
return NULL;
}
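/*
 * Copy the connect-time string args into a single allocation, so they
 * stay valid after the caller's lws_client_connect_info goes out of
 * scope; a '/' is prepended to the path if it is missing one.
 */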
int
lws_client_stash_create(struct lws *wsi, const char **cisin)
{
size_t size;
char *pc;
int n;
size = sizeof(*wsi->stash) + 1;
/*
* Let's overallocate the stash object with space for all the args
* in one hit.
*/
for (n = 0; n < CIS_COUNT; n++)
if (cisin[n])
size += strlen(cisin[n]) + 1;
if (wsi->stash)
lws_free_set_NULL(wsi->stash);
wsi->stash = lws_malloc(size, "client stash");
if (!wsi->stash)
return 1;
/* all the pointers default to NULL, but no need to zero the args */
memset(wsi->stash, 0, sizeof(*wsi->stash));
pc = (char *)&wsi->stash[1];
for (n = 0; n < CIS_COUNT; n++)
if (cisin[n]) {
size_t mm;
wsi->stash->cis[n] = pc;
if (n == CIS_PATH && cisin[n][0] != '/')
*pc++ = '/';
mm = strlen(cisin[n]) + 1;
memcpy(pc, cisin[n], mm);
pc += mm;
}
return 0;
}
struct lws *
lws_client_connect_via_info(const struct lws_client_connect_info *i)
{
const char *local = i->protocol;
struct lws *wsi, *safe = NULL;
const struct lws_protocols *p;
const char *cisin[CIS_COUNT];
char buf_localport[8];
struct lws_vhost *vh;
int tsi;
if (i->context->requested_stop_internal_loops)
return NULL;
if (!i->context->protocol_init_done)
if (lws_protocol_init(i->context))
return NULL;
/*
* If we have .local_protocol_name, use it to select the local protocol
* handler to bind to. Otherwise use .protocol if http[s].
*/
if (i->local_protocol_name)
local = i->local_protocol_name;
lws_context_lock(i->context, __func__);
/*
* PHASE 1: if SMP, find out the tsi related to current service thread
*/
tsi = lws_pthread_self_to_tsi(i->context);
assert(tsi >= 0);
/* PHASE 2: create a bare wsi */
wsi = __lws_wsi_create_with_role(i->context, tsi, NULL, i->log_cx);
lws_context_unlock(i->context);
if (wsi == NULL)
return NULL;
vh = i->vhost;
if (!vh) {
#if defined(LWS_WITH_TLS_JIT_TRUST)
if (lws_tls_jit_trust_vhost_bind(i->context, i->address, &vh))
#endif
{
vh = lws_get_vhost_by_name(i->context, "default");
if (!vh) {
vh = i->context->vhost_list;
if (!vh) { /* coverity */
lwsl_cx_err(i->context, "no vhost");
goto bail;
}
if (!strcmp(vh->name, "system"))
vh = vh->vhost_next;
}
}
}
#if defined(LWS_WITH_SECURE_STREAMS)
/* any of these imply we are a client wsi bound to an SS, which
* implies our opaque user ptr is the ss (or sspc if PROXY_LINK) handle
*/
wsi->for_ss = !!(i->ssl_connection & (LCCSCF_SECSTREAM_CLIENT | LCCSCF_SECSTREAM_PROXY_LINK | LCCSCF_SECSTREAM_PROXY_ONWARD));
wsi->client_bound_sspc = !!(i->ssl_connection & LCCSCF_SECSTREAM_PROXY_LINK); /* so wsi close understands need to remove sspc ptr to wsi */
wsi->client_proxy_onward = !!(i->ssl_connection & LCCSCF_SECSTREAM_PROXY_ONWARD);
#endif
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
wsi->fic.name = "wsi";
if (i->fic.fi_owner.count)
/*
* This moves all the lws_fi_t from i->fi to the vhost fi,
* leaving it empty
*/
lws_fi_import(&wsi->fic, &i->fic);
lws_fi_inherit_copy(&wsi->fic, &i->context->fic, "wsi", i->fi_wsi_name);
if (lws_fi(&wsi->fic, "createfail"))
goto bail;
#if defined(LWS_WITH_SECURE_STREAMS)
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
if (wsi->client_bound_sspc) {
lws_sspc_handle_t *fih = (lws_sspc_handle_t *)i->opaque_user_data;
lws_fi_inherit_copy(&wsi->fic, &fih->fic, "wsi", NULL);
}
#endif
if (wsi->for_ss) {
lws_ss_handle_t *fih = (lws_ss_handle_t *)i->opaque_user_data;
lws_fi_inherit_copy(&wsi->fic, &fih->fic, "wsi", NULL);
}
#endif
#endif
lws_wsi_fault_timedclose(wsi);
/*
* Until we exit, we can report connection failure directly to the
* caller without needing to call through to protocol CONNECTION_ERROR.
*/
wsi->client_suppress_CONNECTION_ERROR = 1;
if (i->keep_warm_secs)
wsi->keep_warm_secs = i->keep_warm_secs;
else
wsi->keep_warm_secs = 5;
wsi->flags = i->ssl_connection;
wsi->c_pri = i->priority;
if (i->retry_and_idle_policy)
wsi->retry_policy = i->retry_and_idle_policy;
else
wsi->retry_policy = &i->context->default_retry;
if (i->ssl_connection & LCCSCF_WAKE_SUSPEND__VALIDITY)
wsi->conn_validity_wakesuspend = 1;
lws_vhost_bind_wsi(vh, wsi);
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
/* additionally inherit from the vhost we bound to */
lws_fi_inherit_copy(&wsi->fic, &vh->fic, "wsi", i->fi_wsi_name);
#endif
if (!wsi->a.vhost) {
lwsl_wsi_err(wsi, "No vhost in the context");
goto bail;
}
/*
* PHASE 3: Choose an initial role for the wsi and do role-specific init
*
* Note the initial role may not reflect the final role, eg,
* we may want ws, but first we have to go through h1 to get that
*/
if (lws_role_call_client_bind(wsi, i) < 0) {
lwsl_wsi_err(wsi, "unable to bind to role");
goto bail;
}
lwsl_wsi_info(wsi, "role binding to %s", wsi->role_ops->name);
/*
* PHASE 4: fill up the wsi with stuff from the connect_info as far as
* it can go. It's uncertain because not only is our connection
* going to complete asynchronously, we might have bound to h1 and not
* even be able to get ahold of an ah immediately.
*/
wsi->user_space = NULL;
wsi->pending_timeout = NO_PENDING_TIMEOUT;
wsi->position_in_fds_table = LWS_NO_FDS_POS;
wsi->ocport = wsi->c_port = (uint16_t)(unsigned int)i->port;
wsi->sys_tls_client_cert = i->sys_tls_client_cert;
#if defined(LWS_ROLE_H2)
wsi->txc.manual_initial_tx_credit =
(int32_t)i->manual_initial_tx_credit;
#endif
wsi->a.protocol = &wsi->a.vhost->protocols[0];
wsi->client_pipeline = !!(i->ssl_connection & LCCSCF_PIPELINE);
wsi->client_no_follow_redirect = !!(i->ssl_connection &
LCCSCF_HTTP_NO_FOLLOW_REDIRECT);
/*
* PHASE 5: handle external user_space now, generic alloc is done in
* role finalization
*/
if (i->userdata) {
wsi->user_space_externally_allocated = 1;
wsi->user_space = i->userdata;
}
if (local) {
lwsl_wsi_info(wsi, "vh %s protocol binding to %s\n",
wsi->a.vhost->name, local);
p = lws_vhost_name_to_protocol(wsi->a.vhost, local);
if (p)
lws_bind_protocol(wsi, p, __func__);
else
lwsl_wsi_info(wsi, "unknown protocol %s", local);
lwsl_wsi_info(wsi, "%s: %s %s entry",
lws_wsi_tag(wsi), wsi->role_ops->name,
wsi->a.protocol ? wsi->a.protocol->name : "none");
}
/*
 * PHASE 5 redux: re-assert any external user_space, in case the
 * protocol bind above replaced it
 */
if (!wsi->user_space && i->userdata) {
wsi->user_space_externally_allocated = 1;
wsi->user_space = i->userdata;
}
#if defined(LWS_WITH_TLS)
wsi->tls.use_ssl = (unsigned int)i->ssl_connection;
#else
if (i->ssl_connection & LCCSCF_USE_SSL) {
lwsl_wsi_err(wsi, "lws not configured for tls");
goto bail;
}
#endif
/*
* PHASE 6: stash the things from connect_info that we can't process
* right now, eg, if http binding, without an ah. If h1 and no ah, we
* will go on the ah waiting list and process those things later (after
* the connect_info and maybe the things pointed to have gone out of
* scope)
*
* However these things are stashed in a generic way at this point,
* with no relationship to http or ah
*/
cisin[CIS_ADDRESS] = i->address;
cisin[CIS_PATH] = i->path;
cisin[CIS_HOST] = i->host;
cisin[CIS_ORIGIN] = i->origin;
cisin[CIS_PROTOCOL] = i->protocol;
cisin[CIS_METHOD] = i->method;
cisin[CIS_IFACE] = i->iface;
lws_snprintf(buf_localport, sizeof(buf_localport), "%u", i->local_port);
cisin[CIS_LOCALPORT] = buf_localport;
cisin[CIS_ALPN] = i->alpn;
cisin[CIS_USERNAME] = i->auth_username;
cisin[CIS_PASSWORD] = i->auth_password;
if (lws_client_stash_create(wsi, cisin))
goto bail;
#if defined(LWS_WITH_TLS)
if (i->alpn)
lws_strncpy(wsi->alpn, i->alpn, sizeof(wsi->alpn));
#endif
wsi->a.opaque_user_data = wsi->stash->opaque_user_data =
i->opaque_user_data;
#if defined(LWS_WITH_SECURE_STREAMS)
if (wsi->for_ss) {
/* it's related to ss... the options are
*
* LCCSCF_SECSTREAM_PROXY_LINK : client SSPC link to proxy
* LCCSCF_SECSTREAM_PROXY_ONWARD: proxy's onward connection
*/
__lws_lc_tag(i->context, &i->context->lcg[
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
i->ssl_connection & LCCSCF_SECSTREAM_PROXY_LINK ? LWSLCG_WSI_SSP_CLIENT :
#if defined(LWS_WITH_SERVER)
(i->ssl_connection & LCCSCF_SECSTREAM_PROXY_ONWARD ? LWSLCG_WSI_SSP_ONWARD :
#endif
LWSLCG_WSI_CLIENT
#if defined(LWS_WITH_SERVER)
)
#endif
],
#else
LWSLCG_WSI_CLIENT],
#endif
&wsi->lc, "%s/%s/%s/(%s)", i->method ? i->method : "WS",
wsi->role_ops->name, i->address,
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API)
wsi->client_bound_sspc ?
lws_sspc_tag((lws_sspc_handle_t *)i->opaque_user_data) :
#endif
lws_ss_tag(((lws_ss_handle_t *)i->opaque_user_data)));
} else
#endif
__lws_lc_tag(i->context, &i->context->lcg[LWSLCG_WSI_CLIENT], &wsi->lc,
"%s/%s/%s/%s", i->method ? i->method : "WS",
wsi->role_ops->name ? wsi->role_ops->name : "novh", vh->name, i->address);
lws_metrics_tag_wsi_add(wsi, "vh", wsi->a.vhost->name);
/*
* at this point user callbacks like
* LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER will be interested to
* know the parent... eg for proxying we can grab extra headers from
* the parent's incoming ah and add them to the child client handshake
*/
if (i->parent_wsi) {
lwsl_wsi_info(wsi, "created as child %s",
lws_wsi_tag(i->parent_wsi));
wsi->parent = i->parent_wsi;
safe = wsi->sibling_list = i->parent_wsi->child_list;
i->parent_wsi->child_list = wsi;
}
/*
* PHASE 7: Do any role-specific finalization processing. We can still
* see important info things via wsi->stash
*/
if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_client_bind)) {
int n = lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_client_bind).
client_bind(wsi, NULL);
if (n && i->parent_wsi)
/* unpick from parent */
i->parent_wsi->child_list = safe;
if (n < 0)
/* we didn't survive, wsi is freed */
goto bail2;
if (n)
/* something else failed, wsi needs freeing */
goto bail;
}
/* let the caller's optional wsi storage have the wsi we created */
if (i->pwsi)
*i->pwsi = wsi;
if (!wsi->a.protocol)
/* we must have one protocol or another bound by this point */
goto bail;
/* PHASE 8: notify protocol with role-specific connected callback */
/* raw socket per se doesn't want this... raw socket proxy wants it... */
if (wsi->role_ops != &role_ops_raw_skt ||
(i->local_protocol_name &&
!strcmp(i->local_protocol_name, "raw-proxy"))) {
lwsl_wsi_debug(wsi, "adoption cb %d to %s %s",
wsi->role_ops->adoption_cb[0],
wsi->role_ops->name, wsi->a.protocol->name);
wsi->a.protocol->callback(wsi, wsi->role_ops->adoption_cb[0],
wsi->user_space, NULL, 0);
}
#if defined(LWS_WITH_HUBBUB)
if (i->uri_replace_to)
wsi->http.rw = lws_rewrite_create(wsi, html_parser_cb,
i->uri_replace_from,
i->uri_replace_to);
#endif
if (i->method && (!strcmp(i->method, "RAW") // ||
// !strcmp(i->method, "MQTT")
)) {
/*
* Not for MQTT here, since we don't know if we will
* pipeline it or not...
*/
#if defined(LWS_WITH_TLS)
wsi->tls.ssl = NULL;
if (wsi->role_ops != &role_ops_raw_skt && (wsi->tls.use_ssl & LCCSCF_USE_SSL)) {
const char *cce = NULL;
switch (
#if !defined(LWS_WITH_SYS_ASYNC_DNS)
lws_client_create_tls(wsi, &cce, 1)
#else
lws_client_create_tls(wsi, &cce, 0)
#endif
) {
case 1:
return wsi;
case 0:
break;
default:
goto bail3;
}
}
#endif
/* fallthru */
wsi = lws_http_client_connect_via_info2(wsi);
}
if (wsi)
/*
* If it subsequently fails, report CONNECTION_ERROR,
* because we're going to return a non-error return now.
*/
wsi->client_suppress_CONNECTION_ERROR = 0;
return wsi;
#if defined(LWS_WITH_TLS)
bail3:
lwsl_wsi_info(wsi, "tls start fail");
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "tls start fail");
if (i->pwsi)
*i->pwsi = NULL;
return NULL;
#endif
bail:
#if defined(LWS_WITH_TLS)
if (wsi->tls.ssl)
lws_tls_restrict_return(wsi);
#endif
lws_free_set_NULL(wsi->stash);
lws_fi_destroy(&wsi->fic);
lws_free(wsi);
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
bail2:
#endif
if (i->pwsi)
*i->pwsi = NULL;
return NULL;
}


@@ -0,0 +1,395 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#if !defined(WIN32)
#include <netdb.h>
#endif
#ifndef AI_V4MAPPED
#define AI_V4MAPPED 0
#endif
#if !defined(LWS_WITH_SYS_ASYNC_DNS)
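/*
 * Blocking DNS helper: wrap getaddrinfo(), timing the lookup for conmon,
 * classifying "dns server unreachable" style failures, and reporting the
 * outcome to the metrics layer.
 */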
static int
lws_getaddrinfo46(struct lws *wsi, const char *ads, struct addrinfo **result)
{
lws_metrics_caliper_declare(cal, wsi->a.context->mt_conn_dns);
struct addrinfo hints;
#if defined(LWS_WITH_SYS_METRICS)
char buckname[32];
#endif
int n;
memset(&hints, 0, sizeof(hints));
*result = NULL;
hints.ai_socktype = SOCK_STREAM;
#ifdef LWS_WITH_IPV6
if (wsi->ipv6) {
#if !defined(__ANDROID__)
hints.ai_family = AF_UNSPEC;
hints.ai_flags = AI_V4MAPPED;
#endif
} else
#endif
{
hints.ai_family = PF_UNSPEC;
}
#if defined(LWS_WITH_CONMON)
wsi->conmon_datum = lws_now_usecs();
#endif
wsi->dns_reachability = 0;
if (lws_fi(&wsi->fic, "dnsfail"))
n = EAI_FAIL;
else
n = getaddrinfo(ads, NULL, &hints, result);
#if defined(LWS_WITH_CONMON)
wsi->conmon.ciu_dns = (lws_conmon_interval_us_t)
(lws_now_usecs() - wsi->conmon_datum);
#endif
/*
 * Which EAI_* codes are available, and what they mean, are highly
 * platform-dependent; even different linux distros differ.
 */
if (0
#if defined(EAI_SYSTEM)
|| n == EAI_SYSTEM
#endif
#if defined(EAI_NODATA)
|| n == EAI_NODATA
#endif
#if defined(EAI_FAIL)
|| n == EAI_FAIL
#endif
#if defined(EAI_AGAIN)
|| n == EAI_AGAIN
#endif
) {
wsi->dns_reachability = 1;
lws_metrics_caliper_report(cal, METRES_NOGO);
#if defined(LWS_WITH_SYS_METRICS)
lws_snprintf(buckname, sizeof(buckname), "dns=\"unreachable %d\"", n);
lws_metrics_hist_bump_priv_wsi(wsi, mth_conn_failures, buckname);
#endif
#if defined(LWS_WITH_CONMON)
wsi->conmon.dns_disposition = LWSCONMON_DNS_SERVER_UNREACHABLE;
#endif
#if 0
lwsl_wsi_debug(wsi, "asking to recheck CPD in 1s");
lws_system_cpd_start_defer(wsi->a.context, LWS_US_PER_SEC);
#endif
}
lwsl_wsi_info(wsi, "getaddrinfo '%s' says %d", ads, n);
#if defined(LWS_WITH_SYS_METRICS)
if (n < 0) {
lws_snprintf(buckname, sizeof(buckname), "dns=\"nores %d\"", n);
lws_metrics_hist_bump_priv_wsi(wsi, mth_conn_failures, buckname);
}
#endif
#if defined(LWS_WITH_CONMON)
wsi->conmon.dns_disposition = n < 0 ? LWSCONMON_DNS_NO_RESULT :
LWSCONMON_DNS_OK;
#endif
lws_metrics_caliper_report(cal, n >= 0 ? METRES_GO : METRES_NOGO);
return n;
}
#endif
#if !defined(LWS_WITH_SYS_ASYNC_DNS) && defined(EAI_NONAME)
static const char * const dns_nxdomain = "DNS NXDOMAIN";
#endif
struct lws *
lws_client_connect_2_dnsreq(struct lws *wsi)
{
struct addrinfo *result = NULL;
const char *meth = NULL;
#if defined(LWS_WITH_IPV6)
struct sockaddr_in addr;
const char *iface;
#endif
const char *adsin;
int n, port = 0;
struct lws *w;
if (lwsi_state(wsi) == LRS_WAITING_DNS ||
lwsi_state(wsi) == LRS_WAITING_CONNECT) {
lwsl_wsi_info(wsi, "LRS_WAITING_DNS / CONNECT");
return wsi;
}
/*
* clients who will create their own fresh connection keep a copy of
* the hostname they originally connected to, in case other connections
* want to use it too
*/
if (!wsi->cli_hostname_copy) {
const char *pa = lws_wsi_client_stash_item(wsi, CIS_HOST,
_WSI_TOKEN_CLIENT_PEER_ADDRESS);
if (pa)
wsi->cli_hostname_copy = lws_strdup(pa);
}
/*
* The first job is figure out if we want to pipeline on or just join
* an existing "active connection" to the same place
*/
meth = lws_wsi_client_stash_item(wsi, CIS_METHOD,
_WSI_TOKEN_CLIENT_METHOD);
/* consult active connections to find out disposition */
adsin = lws_wsi_client_stash_item(wsi, CIS_ADDRESS,
_WSI_TOKEN_CLIENT_PEER_ADDRESS);
/* we only pipeline connections that said it was okay */
if (!wsi->client_pipeline) {
lwsl_wsi_debug(wsi, "new conn on no pipeline flag");
goto solo;
}
if (wsi->keepalive_rejected) {
lwsl_notice("defeating pipelining due to no "
"keepalive on server\n");
goto solo;
}
/* only pipeline things we associate with being a stream */
if (meth && !_lws_is_http_method(meth) && strcmp(meth, "RAW") &&
strcmp(meth, "UDP") && strcmp(meth, "MQTT"))
goto solo;
if (!adsin)
/*
* This cannot happen, since user code must provide the client
* address to get this far; it's here to satisfy Coverity
*/
return NULL;
switch (lws_vhost_active_conns(wsi, &w, adsin)) {
case ACTIVE_CONNS_SOLO:
break;
case ACTIVE_CONNS_MUXED:
lwsl_wsi_notice(wsi, "ACTIVE_CONNS_MUXED");
if (lwsi_role_h2(wsi)) {
if (wsi->a.protocol->callback(wsi,
LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP,
wsi->user_space, NULL, 0))
goto failed1;
//lwsi_set_state(wsi, LRS_H1C_ISSUE_HANDSHAKE2);
//lwsi_set_state(w, LRS_ESTABLISHED);
lws_callback_on_writable(wsi);
}
return wsi;
case ACTIVE_CONNS_QUEUED:
lwsl_wsi_debug(wsi, "ACTIVE_CONNS_QUEUED st 0x%x: ",
lwsi_state(wsi));
if (lwsi_state(wsi) == LRS_UNCONNECTED) {
if (lwsi_role_h2(w))
lwsi_set_state(wsi,
LRS_H2_WAITING_TO_SEND_HEADERS);
else
lwsi_set_state(wsi, LRS_H1C_ISSUE_HANDSHAKE2);
}
return lws_client_connect_4_established(wsi, w, 0);
}
solo:
/*
* If we made our own connection, and we're doing a method that can
* take a pipeline, we are an "active client connection".
*
* Add ourselves to the vhost list of those so that others can
* piggyback on our transaction queue
*/
if (meth && (!strcmp(meth, "RAW") || _lws_is_http_method(meth) ||
!strcmp(meth, "MQTT")) &&
lws_dll2_is_detached(&wsi->dll2_cli_txn_queue) &&
lws_dll2_is_detached(&wsi->dll_cli_active_conns)) {
lws_context_lock(wsi->a.context, __func__);
lws_vhost_lock(wsi->a.vhost);
lwsl_wsi_info(wsi, "adding as active conn");
/* caution... we will have to unpick this on oom4 path */
lws_dll2_add_head(&wsi->dll_cli_active_conns,
&wsi->a.vhost->dll_cli_active_conns_owner);
lws_vhost_unlock(wsi->a.vhost);
lws_context_unlock(wsi->a.context);
}
/*
* Since the address must be given at client creation, this should not
* be possible; the check is here to satisfy Coverity
*/
if (!adsin)
return NULL;
#if defined(LWS_WITH_UNIX_SOCK)
/*
* unix socket destination?
*/
if (*adsin == '+') {
wsi->unix_skt = 1;
n = 0;
goto next_step;
}
#endif
/*
* start off allowing ipv6 on connection if vhost allows it
*/
wsi->ipv6 = LWS_IPV6_ENABLED(wsi->a.vhost);
#ifdef LWS_WITH_IPV6
if (wsi->stash)
iface = wsi->stash->cis[CIS_IFACE];
else
iface = lws_hdr_simple_ptr(wsi, _WSI_TOKEN_CLIENT_IFACE);
if (wsi->ipv6 && iface &&
inet_pton(AF_INET, iface, &addr.sin_addr) == 1) {
lwsl_wsi_notice(wsi, "client connection forced to IPv4");
wsi->ipv6 = 0;
}
#endif
#if defined(LWS_CLIENT_HTTP_PROXYING) && \
(defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2))
/* Decide what it is we need to connect to:
*
* Priority 1: connect to http proxy */
if (wsi->a.vhost->http.http_proxy_port) {
adsin = wsi->a.vhost->http.http_proxy_address;
port = (int)wsi->a.vhost->http.http_proxy_port;
#else
if (0) {
#endif
#if defined(LWS_WITH_SOCKS5)
/* Priority 2: Connect to SOCKS5 proxy */
} else if (wsi->a.vhost->socks_proxy_port) {
lwsl_wsi_client(wsi, "Sending SOCKS Greeting");
adsin = wsi->a.vhost->socks_proxy_address;
port = (int)wsi->a.vhost->socks_proxy_port;
#endif
} else {
/* Priority 3: Connect directly */
/* ads already set */
port = wsi->c_port;
}
/*
* prepare the actual connection
* to whatever we decided to connect to
*/
lwsi_set_state(wsi, LRS_WAITING_DNS);
lwsl_wsi_info(wsi, "lookup %s:%u", adsin, port);
wsi->conn_port = (uint16_t)port;
#if !defined(LWS_WITH_SYS_ASYNC_DNS)
n = 0;
if (!wsi->dns_sorted_list.count) {
/*
* blocking dns resolution
*/
n = lws_getaddrinfo46(wsi, adsin, &result);
#if defined(EAI_NONAME)
if (n == EAI_NONAME) {
/*
* The DNS server responded with NXDOMAIN... even
* though this is still in the client creation call,
* we need to make a CCE, otherwise there won't be
* any user indication of what went wrong
*/
wsi->client_suppress_CONNECTION_ERROR = 0;
lws_inform_client_conn_fail(wsi, (void *)dns_nxdomain,
strlen(dns_nxdomain));
goto failed1;
}
#endif
}
#else
/* this is either FAILED, CONTINUING, or already called connect_4 */
if (lws_fi(&wsi->fic, "dnsfail"))
return lws_client_connect_3_connect(wsi, NULL, NULL, -4, NULL);
else
n = lws_async_dns_query(wsi->a.context, wsi->tsi, adsin,
LWS_ADNS_RECORD_A, lws_client_connect_3_connect,
wsi, NULL, NULL);
if (n == LADNS_RET_FAILED_WSI_CLOSED)
return NULL;
if (n == LADNS_RET_FAILED)
goto failed1;
return wsi;
#endif
#if defined(LWS_WITH_UNIX_SOCK)
next_step:
#endif
return lws_client_connect_3_connect(wsi, adsin, result, n, NULL);
//#if defined(LWS_WITH_SYS_ASYNC_DNS)
failed1:
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "client_connect2");
return NULL;
//#endif
}
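/*
 * Illustrative sketch (not part of this file): user code opts in to the
 * pipelining / queueing behaviour above when creating the client
 * connection, by setting the LCCSCF_PIPELINE bit in the connect info, eg
 *
 *	struct lws_client_connect_info i;
 *
 *	memset(&i, 0, sizeof(i));
 *	i.ssl_connection = LCCSCF_USE_SSL | LCCSCF_PIPELINE;
 *	... fill in context, address, port, path etc as usual ...
 *	lws_client_connect_via_info(&i);
 */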

View File

@ -0,0 +1,808 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#if defined(WIN32)
/*
* Windows doesn't offer a Posix connect() event... we use a sul
* to check the connection status periodically while a connection
* is ongoing.
*
* Leaving this to POLLOUT to retry which is the way for Posix
* platforms instead on win32 causes event-loop busywaiting
* so for win32 we manage the retry interval directly with the sul.
*/
void
lws_client_win32_conn_async_check(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws,
win32_sul_connect_async_check);
lwsl_wsi_debug(wsi, "checking ongoing connection attempt");
lws_client_connect_3_connect(wsi, NULL, NULL, 0, NULL);
}
#endif
void
lws_client_conn_wait_timeout(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws,
sul_connect_timeout);
/*
* This is used to constrain the time we're willing to wait for a
* connection before giving up on it and retrying.
*/
lwsl_wsi_info(wsi, "connect wait timeout has fired");
lws_client_connect_3_connect(wsi, NULL, NULL, 0, NULL);
}
void
lws_client_dns_retry_timeout(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws,
sul_connect_timeout);
/*
* This limits the number of dns lookups we will try before
* giving up and failing... it reuses sul_connect_timeout, which
* isn't officially used until we have connected somewhere.
*/
lwsl_wsi_info(wsi, "dns retry");
if (!lws_client_connect_2_dnsreq(wsi))
lwsl_wsi_notice(wsi, "DNS lookup failed");
}
/*
* Figure out if an ongoing connect() has arrived at a final disposition or not
*
* We can check using getsockopt if our connect actually completed.
* Posix allows a nonblocking socket to redo the connect() to
* find out if it succeeded.
*/
typedef enum {
LCCCR_CONNECTED = 1,
LCCCR_CONTINUE = 0,
LCCCR_FAILED = -1,
} lcccr_t;
static lcccr_t
lws_client_connect_check(struct lws *wsi, int *real_errno)
{
#if !defined(LWS_WITH_NO_LOGS)
char t16[16];
#endif
int en = 0;
#if !defined(WIN32)
int e;
socklen_t sl = sizeof(e);
#endif
(void)en;
/*
* This resets SO_ERROR after reading it. If there's an error
* condition, the connect definitively failed.
*/
#if !defined(WIN32)
if (!getsockopt(wsi->desc.sockfd, SOL_SOCKET, SO_ERROR, &e, &sl)) {
en = LWS_ERRNO;
if (!e) {
lwsl_wsi_debug(wsi, "getsockopt: conn OK errno %s",
lws_errno_describe(en, t16, sizeof(t16)));
return LCCCR_CONNECTED;
}
lwsl_wsi_notice(wsi, "getsockopt fd %d says %s", wsi->desc.sockfd,
lws_errno_describe(e, t16, sizeof(t16)));
*real_errno = e;
return LCCCR_FAILED;
}
#else
fd_set write_set, except_set;
struct timeval tv;
int ret;
FD_ZERO(&write_set);
FD_ZERO(&except_set);
FD_SET(wsi->desc.sockfd, &write_set);
FD_SET(wsi->desc.sockfd, &except_set);
tv.tv_sec = 0;
tv.tv_usec = 0;
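/* zero timeout: poll the connection status without blocking */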
ret = select((int)wsi->desc.sockfd + 1, NULL, &write_set, &except_set, &tv);
if (FD_ISSET(wsi->desc.sockfd, &write_set)) {
/* actually connected */
lwsl_wsi_debug(wsi, "select write fd set, conn OK");
return LCCCR_CONNECTED;
}
if (FD_ISSET(wsi->desc.sockfd, &except_set)) {
/* Failed to connect */
lwsl_wsi_notice(wsi, "connect failed, select exception fd set");
return LCCCR_FAILED;
}
if (!ret) {
lwsl_wsi_debug(wsi, "select timeout");
return LCCCR_CONTINUE;
}
en = LWS_ERRNO;
#endif
lwsl_wsi_notice(wsi, "connection check FAILED: %s",
lws_errno_describe(*real_errno ? *real_errno : en, t16, sizeof(t16)));
return LCCCR_FAILED;
}
/*
* We come here to fire off a connect, and to check its disposition later.
*
* If it did not complete before the individual attempt timeout, we will try to
* connect again with the next dns result.
*/
struct lws *
lws_client_connect_3_connect(struct lws *wsi, const char *ads,
const struct addrinfo *result, int n, void *opaque)
{
#if defined(LWS_WITH_UNIX_SOCK)
struct sockaddr_un sau;
#endif
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
const char *cce = "Unable to connect", *iface, *local_port;
const struct sockaddr *psa = NULL;
uint16_t port = wsi->conn_port;
char dcce[48], t16[16];
lws_dns_sort_t *curr;
ssize_t plen = 0;
lws_dll2_t *d;
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
int cfail;
#endif
int m, af = 0, en;
/*
* If we come here with result set, we need to convert getaddrinfo
* results to a lws_dns_sort_t list one time and free the results.
*
* We use this pattern because ASYNC_DNS will call back here with the
* results when it gets them (and may come here more than once, eg, for
* AAAA then A or vice-versa)
*/
if (result) {
lws_sul_cancel(&wsi->sul_connect_timeout);
#if defined(LWS_WITH_CONMON)
/* append a copy from before the sorting */
lws_conmon_append_copy_new_dns_results(wsi, result);
#endif
lws_sort_dns(wsi, result);
#if defined(LWS_WITH_SYS_ASYNC_DNS)
lws_async_dns_freeaddrinfo(&result);
#else
freeaddrinfo((struct addrinfo *)result);
#endif
result = NULL;
}
#if defined(LWS_WITH_UNIX_SOCK)
memset(&sau, 0, sizeof(sau));
#endif
/*
* async dns calls back here for everybody who cares when it gets a
* result... but if we are piggybacking, we do not want to connect
* ourselves
*/
if (!lws_dll2_is_detached(&wsi->dll2_cli_txn_queue))
return wsi;
if (n && /* calling back with a problem */
!wsi->dns_sorted_list.count && /* there's no results */
!lws_socket_is_valid(wsi->desc.sockfd) && /* no attempt ongoing */
!wsi->speculative_connect_owner.count /* no spec attempt */ ) {
lwsl_wsi_notice(wsi, "dns lookup failed %d", n);
/*
* DNS lookup itself failed... let's try again until we
* timeout
*/
lwsi_set_state(wsi, LRS_UNCONNECTED);
lws_sul_schedule(wsi->a.context, wsi->tsi, &wsi->sul_connect_timeout,
lws_client_dns_retry_timeout,
LWS_USEC_PER_SEC);
return wsi;
// cce = "dns lookup failed";
// goto oom4;
}
/*
* We come back here again when we think the connect() may have
* completed one way or the other, we can't proceed until we know we
* actually connected.
*/
if (lwsi_state(wsi) == LRS_WAITING_CONNECT &&
lws_socket_is_valid(wsi->desc.sockfd)) {
if (!wsi->dns_sorted_list.count &&
!wsi->sul_connect_timeout.list.owner)
/* no dns results and no ongoing timeout for one */
goto connect_to;
/*
* If the connection failed, the OS-level errno may be
* something like EINPROGRESS rather than the actual problem
* that prevented a connection. This value will represent the
* “real” problem that we should report to the caller.
*/
int real_errno = 0;
switch (lws_client_connect_check(wsi, &real_errno)) {
case LCCCR_CONNECTED:
/*
* Oh, it has happened...
*/
goto conn_good;
case LCCCR_CONTINUE:
#if defined(WIN32)
lws_sul_schedule(wsi->a.context, 0, &wsi->win32_sul_connect_async_check,
lws_client_win32_conn_async_check,
wsi->a.context->win32_connect_check_interval_usec);
#endif
return NULL;
default:
if (!real_errno)
real_errno = LWS_ERRNO;
lws_snprintf(dcce, sizeof(dcce), "conn fail: %s",
lws_errno_describe(real_errno, t16, sizeof(t16)));
cce = dcce;
lwsl_wsi_debug(wsi, "%s", dcce);
lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO);
goto try_next_dns_result_fds;
}
}
#if defined(LWS_WITH_UNIX_SOCK)
if (ads && *ads == '+') {
ads++;
memset(&wsi->sa46_peer, 0, sizeof(wsi->sa46_peer));
af = sau.sun_family = AF_UNIX;
strncpy(sau.sun_path, ads, sizeof(sau.sun_path));
sau.sun_path[sizeof(sau.sun_path) - 1] = '\0';
lwsl_wsi_info(wsi, "Unix skt: %s", ads);
if (sau.sun_path[0] == '@')
sau.sun_path[0] = '\0';
goto ads_known;
}
#endif
#if defined(LWS_WITH_SYS_ASYNC_DNS)
if (n == LADNS_RET_FAILED) {
lwsl_wsi_notice(wsi, "adns failed %s", ads);
/*
* Caller that is giving us LADNS_RET_FAILED will deal
* with cleanup
*/
return NULL;
}
#endif
/*
* Let's try directly connecting to each of the results in turn until
* one works, or we run out of results...
*
* We have a sorted dll2 list with the head one most preferable
*/
next_dns_result:
cce = "Unable to connect";
if (!wsi->dns_sorted_list.count)
goto failed1;
/*
* Copy the wsi head sorted dns result into the wsi->sa46_peer, and
* remove and free the original from the sorted list
*/
d = lws_dll2_get_head(&wsi->dns_sorted_list);
curr = lws_container_of(d, lws_dns_sort_t, list);
lws_dll2_remove(&curr->list);
wsi->sa46_peer = curr->dest;
#if defined(LWS_WITH_NETLINK)
wsi->peer_route_uidx = curr->uidx;
lwsl_wsi_info(wsi, "peer_route_uidx %d", wsi->peer_route_uidx);
#endif
lws_free(curr);
sa46_sockport(&wsi->sa46_peer, htons(port));
psa = sa46_sockaddr(&wsi->sa46_peer);
n = (int)sa46_socklen(&wsi->sa46_peer);
#if defined(LWS_WITH_UNIX_SOCK)
ads_known:
#endif
/*
* Now we prepared psa, if not already connecting, create the related
* socket and add to the fds
*/
if (!lws_socket_is_valid(wsi->desc.sockfd)) {
if (wsi->a.context->event_loop_ops->check_client_connect_ok &&
wsi->a.context->event_loop_ops->check_client_connect_ok(wsi)
) {
cce = "waiting for event loop watcher to close";
goto oom4;
}
#if defined(LWS_WITH_UNIX_SOCK)
af = 0;
if (wsi->unix_skt) {
af = AF_UNIX;
wsi->desc.sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
}
else
#endif
{
af = wsi->sa46_peer.sa4.sin_family;
wsi->desc.sockfd = socket(wsi->sa46_peer.sa4.sin_family,
SOCK_STREAM, 0);
}
if (!lws_socket_is_valid(wsi->desc.sockfd)) {
en = LWS_ERRNO;
lws_snprintf(dcce, sizeof(dcce),
"conn fail: skt creation: %s",
lws_errno_describe(en, t16, sizeof(t16)));
cce = dcce;
lwsl_wsi_warn(wsi, "%s", dcce);
goto try_next_dns_result;
}
if (lws_plat_set_socket_options(wsi->a.vhost, wsi->desc.sockfd,
#if defined(LWS_WITH_UNIX_SOCK)
wsi->unix_skt)) {
#else
0)) {
#endif
en = LWS_ERRNO;
lws_snprintf(dcce, sizeof(dcce),
"conn fail: skt options: %s",
lws_errno_describe(en, t16, sizeof(t16)));
cce = dcce;
lwsl_wsi_warn(wsi, "%s", dcce);
goto try_next_dns_result_closesock;
}
/* apply requested socket options */
if (lws_plat_set_socket_options_ip(wsi->desc.sockfd,
wsi->c_pri, wsi->flags))
lwsl_wsi_warn(wsi, "unable to set ip options");
lwsl_wsi_debug(wsi, "WAITING_CONNECT");
lwsi_set_state(wsi, LRS_WAITING_CONNECT);
if (wsi->a.context->event_loop_ops->sock_accept)
if (wsi->a.context->event_loop_ops->sock_accept(wsi)) {
lws_snprintf(dcce, sizeof(dcce),
"conn fail: sock accept");
cce = dcce;
lwsl_wsi_warn(wsi, "%s", dcce);
goto try_next_dns_result_closesock;
}
lws_pt_lock(pt, __func__);
if (__insert_wsi_socket_into_fds(wsi->a.context, wsi)) {
lws_snprintf(dcce, sizeof(dcce),
"conn fail: insert fd");
cce = dcce;
lws_pt_unlock(pt);
goto try_next_dns_result_closesock;
}
lws_pt_unlock(pt);
/*
* The fd + wsi combination is entered into the wsi tables
* at this point, with a pollfd
*
* Past here, we can't simply free the structs for error
* handling the way oom4 does.
*
* We can run the whole close flow, or unpick the fds inclusion
* and anything else we have done.
*/
if (lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
lws_snprintf(dcce, sizeof(dcce),
"conn fail: change pollfd");
cce = dcce;
goto try_next_dns_result_fds;
}
if (!wsi->a.protocol)
wsi->a.protocol = &wsi->a.vhost->protocols[0];
lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_CONNECT_RESPONSE,
wsi->a.vhost->connect_timeout_secs);
iface = lws_wsi_client_stash_item(wsi, CIS_IFACE,
_WSI_TOKEN_CLIENT_IFACE);
local_port = lws_wsi_client_stash_item(wsi, CIS_LOCALPORT,
_WSI_TOKEN_CLIENT_LOCALPORT);
if ((iface && *iface) || (local_port && atoi(local_port))) {
m = lws_socket_bind(wsi->a.vhost, wsi, wsi->desc.sockfd,
(local_port ? atoi(local_port) : 0), iface, af);
if (m < 0) {
lws_snprintf(dcce, sizeof(dcce),
"conn fail: socket bind");
cce = dcce;
goto try_next_dns_result_fds;
}
}
}
#if defined(LWS_WITH_UNIX_SOCK)
if (wsi->unix_skt) {
psa = (const struct sockaddr *)&sau;
if (sau.sun_path[0])
n = (int)(sizeof(uint16_t) + strlen(sau.sun_path));
else
n = (int)(sizeof(uint16_t) +
strlen(&sau.sun_path[1]) + 1);
} else
#endif
if (!psa) /* coverity */
goto try_next_dns_result_fds;
/*
* The actual connection attempt
*/
#if defined(LWS_ESP_PLATFORM)
errno = 0;
#endif
/* grab a copy for peer tracking */
#if defined(LWS_WITH_UNIX_SOCK)
if (!wsi->unix_skt)
#endif
memmove(&wsi->sa46_peer, psa, (unsigned int)n);
/*
* Finally, make the actual connection attempt
*/
#if defined(LWS_WITH_SYS_METRICS)
if (wsi->cal_conn.mt) {
lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO);
}
lws_metrics_caliper_bind(wsi->cal_conn, wsi->a.context->mt_conn_tcp);
#endif
wsi->socket_is_permanently_unusable = 0;
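/*
 * Give user code a chance to hear about, and veto, the imminent
 * connect attempt (also a fault-injection point, "conn_cb_rej")
 */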
if (lws_fi(&wsi->fic, "conn_cb_rej") ||
user_callback_handle_rxflow(wsi->a.protocol->callback, wsi,
LWS_CALLBACK_CONNECTING, wsi->user_space,
(void *)(intptr_t)wsi->desc.sockfd, 0)) {
lwsl_wsi_info(wsi, "CONNECTION CB closed");
goto failed1;
}
{
char buf[64];
lws_sa46_write_numeric_address((lws_sockaddr46 *)psa, buf, sizeof(buf));
lwsl_wsi_notice(wsi, "trying %s", buf);
}
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
cfail = lws_fi(&wsi->fic, "connfail");
if (cfail)
m = -1;
else
#endif
m = connect(wsi->desc.sockfd, (const struct sockaddr *)psa,
(socklen_t)n);
#if defined(LWS_WITH_CONMON)
wsi->conmon_datum = lws_now_usecs();
wsi->conmon.ciu_sockconn = 0;
#endif
if (m == -1) {
/*
* Since we're nonblocking, connect not having completed is not
* necessarily indicating any problem... we have to look at
* either errno or the socket to understand if we actually
* failed already...
*/
int errno_copy = LWS_ERRNO;
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
if (cfail)
/* fake an abnormal, fatal situation */
errno_copy = 999;
#endif
lwsl_wsi_debug(wsi, "connect: fd %d, %s",
wsi->desc.sockfd,
lws_errno_describe(errno_copy, t16, sizeof(t16)));
if (errno_copy &&
errno_copy != LWS_EALREADY &&
errno_copy != LWS_EINPROGRESS &&
errno_copy != LWS_EWOULDBLOCK
#ifdef _WIN32
&& errno_copy != WSAEINVAL
&& errno_copy != WSAEISCONN
#endif
) {
/*
* The connect() failed immediately...
*/
#if defined(LWS_WITH_CONMON)
wsi->conmon.ciu_sockconn = (lws_conmon_interval_us_t)
(lws_now_usecs() - wsi->conmon_datum);
#endif
lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO);
#if defined(_DEBUG)
#if defined(LWS_WITH_UNIX_SOCK)
if (!wsi->unix_skt) {
#endif
char nads[48];
lws_sa46_write_numeric_address(&wsi->sa46_peer, nads,
sizeof(nads));
lws_snprintf(dcce, sizeof(dcce),
"conn fail: %s: %s:%d",
lws_errno_describe(errno_copy, t16, sizeof(t16)),
nads, port);
cce = dcce;
wsi->sa46_peer.sa4.sin_family = 0;
lwsl_wsi_info(wsi, "%s", cce);
#if defined(LWS_WITH_UNIX_SOCK)
} else {
lws_snprintf(dcce, sizeof(dcce),
"conn fail: %s: UDS %s",
lws_errno_describe(errno_copy, t16, sizeof(t16)), ads);
cce = dcce;
}
#endif
#endif
goto try_next_dns_result_fds;
}
#if defined(WIN32)
if (lws_plat_check_connection_error(wsi))
goto try_next_dns_result_fds;
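/* WSAEISCONN from a retried connect() means it already completed */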
if (errno_copy == WSAEISCONN)
goto conn_good;
#endif
/*
* The connection attempt is ongoing asynchronously... let's set
* a specialized timeout for this connect attempt completion, it
* uses wsi->sul_connect_timeout just for this purpose
*/
lws_sul_schedule(wsi->a.context, wsi->tsi, &wsi->sul_connect_timeout,
lws_client_conn_wait_timeout,
wsi->a.context->timeout_secs *
LWS_USEC_PER_SEC);
#if defined(WIN32)
/*
* Windows is not properly POSIX, so we have to manually schedule
* a callback to poll the connection status
*/
lws_sul_schedule(wsi->a.context, 0, &wsi->win32_sul_connect_async_check,
lws_client_win32_conn_async_check,
wsi->a.context->win32_connect_check_interval_usec
);
#else
/*
* POSIX platforms must do specifically a POLLOUT poll to hear
* about the connect completion as a POLLOUT event
*/
if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
goto try_next_dns_result_fds;
#endif
return wsi;
}
conn_good:
/*
* The connection has happened
*/
#if defined(LWS_WITH_CONMON)
wsi->conmon.ciu_sockconn = (lws_conmon_interval_us_t)
(lws_now_usecs() - wsi->conmon_datum);
#endif
#if !defined(LWS_PLAT_OPTEE)
{
socklen_t salen = sizeof(wsi->sa46_local);
#if defined(_DEBUG)
char buf[64];
#endif
if (getsockname((int)wsi->desc.sockfd,
(struct sockaddr *)&wsi->sa46_local,
&salen) == -1) {
en = LWS_ERRNO;
lwsl_warn("getsockname: %s\n", lws_errno_describe(en, t16, sizeof(t16)));
}
#if defined(_DEBUG)
#if defined(LWS_WITH_UNIX_SOCK)
if (wsi->unix_skt)
buf[0] = '\0';
else
#endif
lws_sa46_write_numeric_address(&wsi->sa46_local, buf, sizeof(buf));
lwsl_wsi_info(wsi, "source ads %s", buf);
#endif
}
#endif
lws_sul_cancel(&wsi->sul_connect_timeout);
#if defined(WIN32)
lws_sul_cancel(&wsi->win32_sul_connect_async_check);
#endif
lws_metrics_caliper_report(wsi->cal_conn, METRES_GO);
lws_addrinfo_clean(wsi);
if (wsi->a.protocol)
wsi->a.protocol->callback(wsi, LWS_CALLBACK_WSI_CREATE,
wsi->user_space, NULL, 0);
lwsl_wsi_debug(wsi, "going into connect_4");
return lws_client_connect_4_established(wsi, NULL, plen);
oom4:
/*
* We get here if we're trying to clean up a connection attempt that
* didn't make it as far as getting inserted into the wsi / fd tables
*/
if (lwsi_role_client(wsi) && wsi->a.protocol
/* && lwsi_state_est(wsi) */)
lws_inform_client_conn_fail(wsi, (void *)cce, strlen(cce));
/* take care that we might be inserted in fds already */
if (wsi->position_in_fds_table != LWS_NO_FDS_POS)
/* do the full wsi close flow */
goto failed1;
lws_metrics_caliper_report(wsi->cal_conn, METRES_NOGO);
/*
* We can't be an active client connection any more, if we thought
* that was what we were going to be doing.  If we are failing by the
* oom4 path, we are still inside the call to
* lws_client_connect_via_info() and will return NULL to it,
* so nobody else should have had a chance to queue on us.
*/
{
struct lws_vhost *vhost = wsi->a.vhost;
lws_sockfd_type sfd = wsi->desc.sockfd;
//lws_vhost_lock(vhost);
__lws_free_wsi(wsi); /* acquires vhost lock in wsi reset */
//lws_vhost_unlock(vhost);
sanity_assert_no_wsi_traces(vhost->context, wsi);
sanity_assert_no_sockfd_traces(vhost->context, sfd);
}
return NULL;
connect_to:
/*
* It looks like the sul_connect_timeout fired
*/
lwsl_wsi_info(wsi, "abandoning connect due to timeout");
try_next_dns_result_fds:
lws_pt_lock(pt, __func__);
__remove_wsi_socket_from_fds(wsi);
lws_pt_unlock(pt);
try_next_dns_result_closesock:
/*
* We are killing the socket but leaving the wsi up to retry with the
* next DNS result
*/
compatible_close(wsi->desc.sockfd);
wsi->desc.sockfd = LWS_SOCK_INVALID;
try_next_dns_result:
lws_sul_cancel(&wsi->sul_connect_timeout);
#if defined(WIN32)
lws_sul_cancel(&wsi->win32_sul_connect_async_check);
#endif
if (lws_dll2_get_head(&wsi->dns_sorted_list))
goto next_dns_result;
lws_addrinfo_clean(wsi);
lws_inform_client_conn_fail(wsi, (void *)cce, strlen(cce));
failed1:
lws_sul_cancel(&wsi->sul_connect_timeout);
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "client_connect3");
return NULL;
}

View File

@ -0,0 +1,338 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
struct lws *
lws_client_connect_4_established(struct lws *wsi, struct lws *wsi_piggyback,
ssize_t plen)
{
#if defined(LWS_CLIENT_HTTP_PROXYING)
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
#endif
const char *meth;
struct lws_pollfd pfd;
const char *cce = "";
int n, m, rawish = 0;
meth = lws_wsi_client_stash_item(wsi, CIS_METHOD,
_WSI_TOKEN_CLIENT_METHOD);
if (meth && (!strcmp(meth, "RAW")
#if defined(LWS_ROLE_MQTT)
|| !strcmp(meth, "MQTT")
#endif
))
rawish = 1;
if (wsi_piggyback)
goto send_hs;
#if defined(LWS_CLIENT_HTTP_PROXYING)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
/* we are connected to server, or proxy */
/* http proxy */
if (wsi->a.vhost->http.http_proxy_port) {
const char *cpa;
cpa = lws_wsi_client_stash_item(wsi, CIS_ADDRESS,
_WSI_TOKEN_CLIENT_PEER_ADDRESS);
if (!cpa)
goto failed;
lwsl_wsi_info(wsi, "going via proxy");
plen = lws_snprintf((char *)pt->serv_buf, 256,
"CONNECT %s:%u HTTP/1.1\x0d\x0a"
"Host: %s:%u\x0d\x0a"
"User-agent: lws\x0d\x0a", cpa, wsi->ocport,
cpa, wsi->ocport);
#if defined(LWS_WITH_HTTP_BASIC_AUTH)
if (wsi->a.vhost->proxy_basic_auth_token[0])
plen += lws_snprintf((char *)pt->serv_buf + plen, 256,
"Proxy-authorization: basic %s\x0d\x0a",
wsi->a.vhost->proxy_basic_auth_token);
#endif
plen += lws_snprintf((char *)pt->serv_buf + plen, 5,
"\x0d\x0a");
/* lwsl_hexdump_notice(pt->serv_buf, plen); */
/*
* OK from now on we talk via the proxy, so connect to that
*/
if (wsi->stash)
wsi->stash->cis[CIS_ADDRESS] =
wsi->a.vhost->http.http_proxy_address;
else
if (lws_hdr_simple_create(wsi,
_WSI_TOKEN_CLIENT_PEER_ADDRESS,
wsi->a.vhost->http.http_proxy_address))
goto failed;
wsi->c_port = (uint16_t)wsi->a.vhost->http.http_proxy_port;
n = (int)send(wsi->desc.sockfd, (char *)pt->serv_buf,
(unsigned int)plen,
MSG_NOSIGNAL);
if (n < 0) {
lwsl_wsi_debug(wsi, "ERROR writing to proxy socket");
cce = "proxy write failed";
goto failed;
}
lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_PROXY_RESPONSE,
(int)wsi->a.context->timeout_secs);
wsi->conn_port = wsi->c_port;
lwsi_set_state(wsi, LRS_WAITING_PROXY_REPLY);
return wsi;
}
#endif
#endif
/* coverity */
if (!wsi->a.protocol)
return NULL;
#if defined(LWS_WITH_SOCKS5)
if (lwsi_state(wsi) != LRS_ESTABLISHED)
switch (lws_socks5c_greet(wsi, &cce)) {
case -1:
goto failed;
case 1:
return wsi;
default:
break;
}
#endif
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
send_hs:
if (wsi_piggyback &&
!lws_dll2_is_detached(&wsi->dll2_cli_txn_queue)) {
/*
* We are pipelining on an already-established connection...
* we can skip tls establishment.
*
* Set these queued guys to a state where they won't actually
* send their headers until we decide later.
*/
lwsi_set_state(wsi, LRS_H2_WAITING_TO_SEND_HEADERS);
/*
* we can't send our headers directly, because they have to
* be sent when the parent is writeable. The parent will check
* for anybody on his client transaction queue that is in
* LRS_H1C_ISSUE_HANDSHAKE2, and let them write.
*
* If we are trying to do this too early, before the network
* connection has written his own headers, then it will just
* wait in the queue until it's possible to send them.
*/
lws_callback_on_writable(wsi_piggyback);
lwsl_wsi_info(wsi, "waiting to send hdrs (par state 0x%x)",
lwsi_state(wsi_piggyback));
} else {
lwsl_wsi_info(wsi, "%s %s client created own conn "
"(raw %d) vh %s st 0x%x",
wsi->role_ops->name, wsi->a.protocol->name, rawish,
wsi->a.vhost->name, lwsi_state(wsi));
/* we are making our own connection */
if (!rawish
#if defined(LWS_WITH_TLS)
// && (!(wsi->tls.use_ssl & LCCSCF_USE_SSL) || wsi->tls.ssl)
#endif
) {
if (lwsi_state(wsi) != LRS_H1C_ISSUE_HANDSHAKE2)
lwsi_set_state(wsi, LRS_H1C_ISSUE_HANDSHAKE);
} else {
/* for a method = "RAW" connection, this makes us
* established */
#if defined(LWS_WITH_TLS)// && !defined(LWS_WITH_MBEDTLS)
/* we have connected if we got here */
if (lwsi_state(wsi) == LRS_WAITING_CONNECT &&
(wsi->tls.use_ssl & LCCSCF_USE_SSL)) {
int result;
//lwsi_set_state(wsi, LRS_WAITING_SSL);
/*
* We can retry this... just cook the SSL BIO
* the first time
*/
result = lws_client_create_tls(wsi, &cce, 1);
switch (result) {
case CCTLS_RETURN_DONE:
break;
case CCTLS_RETURN_RETRY:
lwsl_wsi_debug(wsi, "create_tls RETRY");
return wsi;
default:
lwsl_wsi_debug(wsi, "create_tls FAIL");
goto failed;
}
/*
* We succeeded to negotiate a new client tls
* tunnel. If it's h2 alpn, we have arranged
* to send the h2 prefix and set our state to
* LRS_H2_WAITING_TO_SEND_HEADERS already.
*/
lwsl_wsi_notice(wsi, "tls established st 0x%x, "
"client_h2_alpn %d", lwsi_state(wsi),
wsi->client_h2_alpn);
if (lwsi_state(wsi) !=
LRS_H2_WAITING_TO_SEND_HEADERS)
lwsi_set_state(wsi,
LRS_H1C_ISSUE_HANDSHAKE2);
lws_set_timeout(wsi,
PENDING_TIMEOUT_AWAITING_CLIENT_HS_SEND,
(int)wsi->a.context->timeout_secs);
#if 0
/* ensure pollin enabled */
if (lws_change_pollfd(wsi, 0, LWS_POLLIN))
lwsl_wsi_notice(wsi,
"unable to set POLLIN");
#endif
goto provoke_service;
}
#endif
/* clear his established timeout */
lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
m = wsi->role_ops->adoption_cb[0];
if (m) {
n = user_callback_handle_rxflow(
wsi->a.protocol->callback, wsi,
(enum lws_callback_reasons)m,
wsi->user_space, NULL, 0);
if (n < 0) {
lwsl_wsi_info(wsi, "RAW_PROXY_CLI_ADOPT err");
goto failed;
}
}
/* service.c pollout processing wants this */
wsi->hdr_parsing_completed = 1;
#if defined(LWS_ROLE_MQTT)
if (meth && !strcmp(meth, "MQTT")) {
#if defined(LWS_WITH_TLS)
if (wsi->tls.use_ssl & LCCSCF_USE_SSL) {
lwsi_set_state(wsi, LRS_WAITING_SSL);
return wsi;
}
#endif
lwsl_wsi_info(wsi, "settings LRS_MQTTC_IDLE");
lwsi_set_state(wsi, LRS_MQTTC_IDLE);
/*
* provoke service to issue the CONNECT
* directly.
*/
lws_set_timeout(wsi,
PENDING_TIMEOUT_SENT_CLIENT_HANDSHAKE,
(int)wsi->a.context->timeout_secs);
assert(lws_socket_is_valid(wsi->desc.sockfd));
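/*
 * synthesize a pollfd with POLLOUT already in revents, so the
 * service call below issues the MQTT CONNECT immediately
 */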
pfd.fd = wsi->desc.sockfd;
pfd.events = LWS_POLLIN;
pfd.revents = LWS_POLLOUT;
lwsl_wsi_info(wsi, "going to service fd");
n = lws_service_fd_tsi(wsi->a.context, &pfd, wsi->tsi);
if (n < 0) {
cce = "first service failed";
goto failed;
}
if (n)
/* returns 1 on fail after close wsi */
return NULL;
return wsi;
}
#endif
lwsl_wsi_info(wsi, "setting ESTABLISHED");
lwsi_set_state(wsi, LRS_ESTABLISHED);
return wsi;
}
/*
* provoke service to issue the handshake directly.
*
* we need to do it this way because in the proxy case, this is
* the next state and executed only if and when we get a good
* proxy response inside the state machine... but notice in
* SSL case this may not have sent anything yet with 0 return,
* and won't until many retries from main loop. To stop that
* becoming endless, cover with a timeout.
*/
#if defined(LWS_WITH_TLS) //&& !defined(LWS_WITH_MBEDTLS)
provoke_service:
#endif
lws_set_timeout(wsi, PENDING_TIMEOUT_SENT_CLIENT_HANDSHAKE,
(int)wsi->a.context->timeout_secs);
assert(lws_socket_is_valid(wsi->desc.sockfd));
pfd.fd = wsi->desc.sockfd;
pfd.events = LWS_POLLIN;
pfd.revents = LWS_POLLIN;
n = lws_service_fd_tsi(wsi->a.context, &pfd, wsi->tsi);
if (n < 0) {
cce = "first service failed";
goto failed;
}
if (n) /* returns 1 on failure after closing wsi */
return NULL;
}
#endif
return wsi;
failed:
lws_inform_client_conn_fail(wsi, (void *)cce, strlen(cce));
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "client_connect4");
return NULL;
}

View File

@ -0,0 +1,778 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*
* Either the libc getaddrinfo() or ASYNC_DNS provides a chain of addrinfo,
* we use lws_sort_dns() to convert it to an lws_dll2 of lws_dns_sort_t, after
* which the addrinfo results are freed.
*
* If the system has no routing table info (from, eg, NETLINK), then that's
* it: the sorted results are bound to the wsi and used.
*
* If the system has routing table info, we study the routing table and the
* DNS results in order to sort the lws_dns_sort_t result linked-list into
* most desirable at the head, and strip results we can't see a way to route.
*/
#include "private-lib-core.h"
#if defined(__linux__)
#include <linux/if_addr.h>
#endif
#if defined(__FreeBSD__)
#include <net/if.h>
#include <netinet6/in6_var.h>
#endif
#if defined(LWS_WITH_IPV6) && defined(LWS_WITH_NETLINK)
/*
* RFC6724 default policy table
*
* Prefix Precedence Label
* ::1/128 50 0
* ::/0 40 1
* ::ffff:0:0/96 35 4 (override prec to 100 to prefer ipv4)
* 2002::/16 30 2
* 2001::/32 5 5
* fc00::/7 3 13
* ::/96 1 3
* fec0::/10 1 11
* 3ffe::/16 1 12
*
* implemented using offsets into a combined 40-byte table below
*/
static const uint8_t ma[] = {
/* 0 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
/* 16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff,
/* 28 */ 0x20, 0x02,
/* 30 */ 0x20, 0x01, 0x00, 0x00,
/* 34 */ 0xfc, 0x00,
/* 36 */ 0xfe, 0xc0,
/* 38 */ 0x3f, 0xfe
};
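/* frac[n]: byte mask keeping only the top n bits, for sub-byte compares */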
static const uint8_t frac[] = {
0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe
};
/* 9 x 4 byte = 36 byte policy index table */
static const struct score_policy {
uint8_t ma_ofs;
uint8_t prefix;
lws_dns_score_t score;
} rfc6724_policy[] = {
{ 0, 128, { 50, 0 } }, /* ::1/128 */
{ 0, 0, { 40, 1 } }, /* ::0 */
#if 1
/* favour ipv6 as a general policy */
{ 16, 96, { 35, 4 } }, /* ::ffff:0:0/96 */
#else
/* favour ipv4 as a general policy */
{ 16, 96, { 100, 4 } }, /* ::ffff:0:0/96 */
#endif
{ 28, 16, { 30, 2 } }, /* 2002::/16 */
{ 30, 32, { 5, 5 } }, /* 2001::/32 */
{ 34, 7, { 3, 13 } }, /* fc00::/7 */
{ 0, 96, { 1, 3 } }, /* ::/96 */
{ 36, 10, { 1, 11 } }, /* fec0::/10 */
{ 38, 16, { 1, 12 } }, /* 3ffe::/16 */
};
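/*
 * CommonPrefixLen(): count matching leading bits of two ipv6 addresses,
 * eg, 2001:db8:4000:: vs 2001:db8:7000:: -> 34
 */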
static int
lws_ipv6_prefix_match_len(const struct sockaddr_in6 *a,
const struct sockaddr_in6 *b)
{
const uint8_t *ads_a = (uint8_t *)&a->sin6_addr,
*ads_b = (uint8_t *)&b->sin6_addr;
int n = 0, match = 0;
for (n = 0; n < 16; n++) {
if (ads_a[n] == ads_b[n])
match += 8;
else
break;
}
if (match != 128) {
int m;
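/* count further matching leading bits inside the first differing byte */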
for (m = 1; m < 8; m++) {
if ((ads_a[n] & frac[m]) == (ads_b[n] & frac[m]))
match++;
else
break;
}
}
return match;
}
static int
lws_ipv6_unicast_scope(const struct sockaddr_in6 *sa)
{
uint64_t *u;
u = (uint64_t *)&sa->sin6_addr;
if (*u == 0xfe80000000000000ull)
return 2; /* link-local */
return 0xe;
}
static int
lws_sort_dns_scope(lws_sockaddr46 *sa46)
{
if (sa46->sa4.sin_family == AF_INET) {
uint8_t *p = (uint8_t *)&sa46->sa4.sin_addr;
/* RFC6724 3.2 */
if (p[0] == 127 || (p[0] == 169 && p[1] == 254))
return 2; /* link-local */
return 0xe; /* global */
}
return lws_ipv6_unicast_scope(&sa46->sa6);
}
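/*
 * Worked example: a v4 dest like 1.2.3.4 is first mapped to
 * ::ffff:1.2.3.4, matching the ::ffff:0:0/96 policy row, so it scores
 * { prec 35, label 4 } with the default (v6-favouring) table above
 */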
static int
lws_sort_dns_classify(lws_sockaddr46 *sa46, lws_dns_score_t *score)
{
const struct score_policy *pol = rfc6724_policy;
const uint8_t *p, *po;
lws_sockaddr46 s;
int n, m;
memset(score, 0, sizeof(*score));
if (sa46->sa4.sin_family == AF_INET) {
memset(&s, 0, sizeof(s));
s.sa6.sin6_family = AF_INET6;
lws_4to6((uint8_t *)s.sa6.sin6_addr.s6_addr,
(const uint8_t *)&sa46->sa4.sin_addr);
/* use the v6 version of the v4 address */
sa46 = &s;
}
for (n = 0; n < (int)LWS_ARRAY_SIZE(rfc6724_policy); n++) {
po = (uint8_t *)&sa46->sa6.sin6_addr.s6_addr;
p = &ma[pol->ma_ofs];
for (m = 0; m < pol->prefix >> 3; m++)
if (*p++ != *po++)
goto next;
if ((pol->prefix & 7) && (*p & frac[pol->prefix & 7]) !=
(*po & frac[pol->prefix & 7]))
goto next;
*score = pol->score;
return 0;
next:
pol++;
}
return 1;
}
enum {
SAS_PREFER_A = 1,
SAS_SAME = 0,
SAS_PREFER_B = -1
};
/* ifa is laid out with types for ipv4; if it's AF_INET6, cast to sockaddr_in6 */
#define to_v6_sa(x) ((struct sockaddr_in6 *)x)
#define to_sa46_sa(x) ((lws_sockaddr46 *)x)
/*
* The source address selection algorithm produces as output a single
* source address for use with a given destination address. This
* algorithm only applies to IPv6 destination addresses, not IPv4
* addresses.
*
* This implements RFC6724 Section 5.
*
* Either or both sa and sb can be dest or gateway routes
*/
static int
lws_sort_dns_scomp(struct lws_context_per_thread *pt, const lws_route_t *sa,
const lws_route_t *sb, const struct sockaddr_in6 *dst)
{
const struct sockaddr_in6 *sa6 = to_v6_sa(&sa->dest),
*sb6 = to_v6_sa(&sb->dest);
lws_dns_score_t scorea, scoreb, scoredst;
int scopea, scopeb, scoped, mla, mlb;
lws_route_t *rd;
if (!sa->dest.sa4.sin_family)
sa6 = to_v6_sa(&sa->gateway);
if (!sb->dest.sa4.sin_family)
sb6 = to_v6_sa(&sb->gateway);
/*
* We shouldn't come here unless sa and sb both have AF_INET6 addresses
*/
assert(sa6->sin6_family == AF_INET6);
assert(sb6->sin6_family == AF_INET6);
/*
* Rule 1: Prefer same address.
* If SA = D, then prefer SA. Similarly, if SB = D, then prefer SB.
*/
if (!memcmp(&sa6->sin6_addr, &dst->sin6_addr, 16))
return SAS_PREFER_A;
if (!memcmp(&sb6->sin6_addr, &dst->sin6_addr, 16))
return SAS_PREFER_B;
/*
* Rule 2: Prefer appropriate scope.
* If Scope(SA) < Scope(SB): If Scope(SA) < Scope(D), then prefer SB
* and otherwise prefer SA.
*
* Similarly, if Scope(SB) < Scope(SA): If Scope(SB) < Scope(D), then
* prefer SA and otherwise prefer SB.
*/
scopea = lws_sort_dns_scope(to_sa46_sa(sa6));
scopeb = lws_sort_dns_scope(to_sa46_sa(sb6));
scoped = lws_sort_dns_scope(to_sa46_sa(dst));
if (scopea < scopeb)
return scopea < scoped ? SAS_PREFER_B : SAS_PREFER_A;
if (scopeb < scopea)
return scopeb < scoped ? SAS_PREFER_A : SAS_PREFER_B;
/*
* Rule 3: Avoid deprecated addresses.
* If one of the two source addresses is "preferred" and one of them
* is "deprecated" (in the RFC 4862 sense), then prefer the one that
* is "preferred".
*/
if (!(sa->ifa_flags & IFA_F_DEPRECATED) &&
(sb->ifa_flags & IFA_F_DEPRECATED))
return SAS_PREFER_A;
if ( (sa->ifa_flags & IFA_F_DEPRECATED) &&
!(sb->ifa_flags & IFA_F_DEPRECATED))
return SAS_PREFER_B;
/*
* Rule 4: Prefer home addresses.
* If SA is simultaneously a home address and care-of address and SB is
* not, then prefer SA. Similarly, if SB is simultaneously a home
* address and care-of address and SA is not, then prefer SB. If SA is
* just a home address and SB is just a care-of address, then prefer SA.
* Similarly, if SB is just a home address and SA is just a care-of
* address, then prefer SB.
*
* !!! not sure how to determine if care-of address
*/
if ( (sa->ifa_flags & IFA_F_HOMEADDRESS) &&
!(sb->ifa_flags & IFA_F_HOMEADDRESS))
return SAS_PREFER_A;
if (!(sa->ifa_flags & IFA_F_HOMEADDRESS) &&
(sb->ifa_flags & IFA_F_HOMEADDRESS))
return SAS_PREFER_B;
/*
* Rule 5: Prefer outgoing interface.
* If SA is assigned to the interface that will be used to send to D
* and SB is assigned to a different interface, then prefer SA.
* Similarly, if SB is assigned to the interface that will be used
* to send to D and SA is assigned to a different interface, then
* prefer SB.
*/
rd = _lws_route_est_outgoing(pt, (lws_sockaddr46 *)dst);
if (rd) {
if (rd->if_idx == sa->if_idx)
return SAS_PREFER_A;
if (rd->if_idx == sb->if_idx)
return SAS_PREFER_B;
}
/*
* Rule 6: Prefer matching label.
* If Label(SA) = Label(D) and Label(SB) <> Label(D), then prefer SA.
* Similarly, if Label(SB) = Label(D) and Label(SA) <> Label(D), then
* prefer SB.
*/
lws_sort_dns_classify(to_sa46_sa(sa6), &scorea);
lws_sort_dns_classify(to_sa46_sa(sb6), &scoreb);
lws_sort_dns_classify(to_sa46_sa(dst), &scoredst);
if (scorea.label == scoredst.label && scoreb.label != scoredst.label)
return SAS_PREFER_A;
if (scoreb.label == scoredst.label && scorea.label != scoredst.label)
return SAS_PREFER_B;
/*
* Rule 7: Prefer temporary addresses.
* If SA is a temporary address and SB is a public address, then
* prefer SA. Similarly, if SB is a temporary address and SA is a
* public address, then prefer SB.
*/
if ( (sa->ifa_flags & IFA_F_TEMPORARY) &&
!(sb->ifa_flags & IFA_F_TEMPORARY))
return SAS_PREFER_A;
if (!(sa->ifa_flags & IFA_F_TEMPORARY) &&
(sb->ifa_flags & IFA_F_TEMPORARY))
return SAS_PREFER_B;
/*
* Rule 8: Use longest matching prefix.
* If CommonPrefixLen(SA, D) > CommonPrefixLen(SB, D), then prefer SA.
* Similarly, if CommonPrefixLen(SB, D) > CommonPrefixLen(SA, D), then
* prefer SB.
*/
mla = lws_ipv6_prefix_match_len(sa6, dst);
mlb = lws_ipv6_prefix_match_len(sb6, dst);
if (mla > mlb)
return SAS_PREFER_A;
return SAS_SAME;
}
/*
* Given two possible source addresses and the destination address, we attempt
* to pick which one is "better".
*
* This implements RFC6724 Section 6.
*/
static int
lws_sort_dns_dcomp(const lws_dns_sort_t *da, const lws_dns_sort_t *db)
{
int scopea, scopeb, scope_srca, scope_srcb, cpla, cplb;
const uint8_t *da_ads = (const uint8_t *)&da->dest.sa6.sin6_addr,
*db_ads = (const uint8_t *)&db->dest.sa6.sin6_addr;
lws_dns_score_t score_srca, score_srcb;
/*
* Rule 1: Avoid unusable destinations
*
* We already strip destinations with no usable source
*/
/*
* Rule 2: Prefer matching scope
*
* If Scope(DA) = Scope(Source(DA)) and Scope(DB) <> Scope(Source(DB)),
* then prefer DA. Similarly, if Scope(DA) <> Scope(Source(DA)) and
* Scope(DB) = Scope(Source(DB)), then prefer DB.
*/
scopea = lws_ipv6_unicast_scope(to_v6_sa(&da->dest));
scopeb = lws_ipv6_unicast_scope(to_v6_sa(&db->dest));
scope_srca = lws_ipv6_unicast_scope(to_v6_sa(&da->source));
scope_srcb = lws_ipv6_unicast_scope(to_v6_sa(&db->source));
if (scopea == scope_srca && scopeb != scope_srcb)
return SAS_PREFER_A;
if (scopea != scope_srca && scopeb == scope_srcb)
return SAS_PREFER_B;
#if defined(IFA_F_DEPRECATED)
/*
* Rule 3: Avoid deprecated addresses.
*
* If Source(DA) is deprecated and Source(DB) is not, then prefer DB.
* Similarly, if Source(DA) is not deprecated and Source(DB) is
* deprecated, then prefer DA.
*/
if (!(da->ifa_flags & IFA_F_DEPRECATED) &&
(db->ifa_flags & IFA_F_DEPRECATED))
return SAS_PREFER_A;
if ( (da->ifa_flags & IFA_F_DEPRECATED) &&
!(db->ifa_flags & IFA_F_DEPRECATED))
return SAS_PREFER_B;
#endif
/*
* Rule 4: Prefer home addresses.
*
* If Source(DA) is simultaneously a home address and care-of address
* and Source(DB) is not, then prefer DA. Similarly, if Source(DB) is
* simultaneously a home address and care-of address and Source(DA) is
* not, then prefer DB.
*
* If Source(DA) is just a home address and Source(DB) is just a care-of
* address, then prefer DA. Similarly, if Source(DA) is just a care-of
* address and Source(DB) is just a home address, then prefer DB.
*
* !!! not sure how to determine if care-of address
*/
if ( (da->ifa_flags & IFA_F_HOMEADDRESS) &&
!(db->ifa_flags & IFA_F_HOMEADDRESS))
return SAS_PREFER_A;
if (!(da->ifa_flags & IFA_F_HOMEADDRESS) &&
(db->ifa_flags & IFA_F_HOMEADDRESS))
return SAS_PREFER_B;
/*
* Rule 5: Prefer matching label.
*
* If Label(Source(DA)) = Label(DA) and Label(Source(DB)) <> Label(DB),
* then prefer DA. Similarly, if Label(Source(DA)) <> Label(DA) and
* Label(Source(DB)) = Label(DB), then prefer DB
*/
if (!da->source)
return SAS_PREFER_B;
if (!db->source)
return SAS_PREFER_A;
lws_sort_dns_classify(&da->source->dest, &score_srca);
lws_sort_dns_classify(&db->source->dest, &score_srcb);
if (score_srca.label == da->score.label &&
score_srcb.label != db->score.label)
return SAS_PREFER_A;
if (score_srca.label != da->score.label &&
score_srcb.label == db->score.label)
return SAS_PREFER_B;
/*
* Rule 6: Prefer higher precedence.
*
* If Precedence(DA) > Precedence(DB), then prefer DA. Similarly, if
* Precedence(DA) < Precedence(DB), then prefer DB.
*/
if (da->score.precedence > db->score.precedence)
return SAS_PREFER_A;
if (da->score.precedence < db->score.precedence)
return SAS_PREFER_B;
/*
* Rule 7: Prefer native transport.
* If DA is reached via an encapsulating transition mechanism (e.g.,
* IPv6 in IPv4) and DB is not, then prefer DB. Similarly, if DB is
* reached via encapsulation and DA is not, then prefer DA.
*/
if (!memcmp(&ma[16], da_ads, 12) && memcmp(&ma[16], db_ads, 12))
return SAS_PREFER_B;
if (memcmp(&ma[16], da_ads, 12) && !memcmp(&ma[16], db_ads, 12))
return SAS_PREFER_A;
/*
* Rule 8: Prefer smaller scope.
* If Scope(DA) < Scope(DB), then prefer DA. Similarly, if Scope(DA) >
* Scope(DB), then prefer DB.
*/
if (scopea < scopeb)
return SAS_PREFER_A;
if (scopea > scopeb)
return SAS_PREFER_B;
/*
* Rule 9: Use longest matching prefix.
* When DA and DB belong to the same address family (both are IPv6 or
* both are IPv4): If CommonPrefixLen(Source(DA), DA) >
* CommonPrefixLen(Source(DB), DB), then prefer DA. Similarly, if
* CommonPrefixLen(Source(DA), DA) < CommonPrefixLen(Source(DB), DB),
* then prefer DB.
*/
cpla = lws_ipv6_prefix_match_len(&da->source->dest.sa6, &da->dest.sa6);
cplb = lws_ipv6_prefix_match_len(&db->source->dest.sa6, &db->dest.sa6);
if (cpla > cplb)
return SAS_PREFER_A;
if (cpla < cplb)
return SAS_PREFER_B;
/*
* Rule 10: Otherwise, leave the order unchanged.
*/
return SAS_SAME;
}
static int
lws_sort_dns_compare(const lws_dll2_t *a, const lws_dll2_t *b)
{
const lws_dns_sort_t *sa = lws_container_of(a, lws_dns_sort_t, list),
*sb = lws_container_of(b, lws_dns_sort_t, list);
return lws_sort_dns_dcomp(sa, sb);
}
#endif /* ipv6 + netlink */
#if defined(_DEBUG)
static void
lws_sort_dns_dump(struct lws *wsi)
{
int n = 1;
(void)n; /* nologs */
if (!lws_dll2_get_head(&wsi->dns_sorted_list))
lwsl_wsi_notice(wsi, "empty");
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&wsi->dns_sorted_list)) {
lws_dns_sort_t *s = lws_container_of(d, lws_dns_sort_t, list);
char dest[48], gw[48];
lws_sa46_write_numeric_address(&s->dest, dest, sizeof(dest));
lws_sa46_write_numeric_address(&s->gateway, gw, sizeof(gw));
lwsl_wsi_info(wsi, "%d: (%d)%s, gw (%d)%s, idi: %d, "
"lbl: %d, prec: %d", n++,
s->dest.sa4.sin_family, dest,
s->gateway.sa4.sin_family, gw,
s->if_idx, s->score.label, s->score.precedence);
} lws_end_foreach_dll(d);
}
#endif
int
lws_sort_dns(struct lws *wsi, const struct addrinfo *result)
{
#if defined(LWS_WITH_NETLINK)
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
#endif
const struct addrinfo *ai = result;
lwsl_wsi_info(wsi, "sort_dns: %p", result);
/*
* We're going to take the dns results and produce our own linked-list
* of them, sorted, if we can, into descending preferability order, and
* possibly filtered.
*
* First let's just convert the addrinfo list into our expanded
* lws_dns_sort_t list, we can discard the addrinfo list then
*/
while (ai) {
#if defined(LWS_WITH_NETLINK) || \
(defined(LWS_WITH_NETLINK) && defined(LWS_WITH_IPV6))
lws_route_t
#if defined(LWS_WITH_NETLINK)
*estr = NULL
#endif
#if defined(LWS_WITH_NETLINK) && defined(LWS_WITH_IPV6)
, *bestsrc = NULL
#endif
;
#endif
lws_dns_sort_t *ds;
char afip[48];
/*
* Only transfer address families we can cope with
*/
if ((int)ai->ai_addrlen > (int)sizeof(lws_sockaddr46) ||
(ai->ai_family != AF_INET && ai->ai_family != AF_INET6))
goto next;
ds = lws_zalloc(sizeof(*ds), __func__);
if (!ds)
return 1;
memcpy(&ds->dest, ai->ai_addr, (size_t)ai->ai_addrlen);
ds->dest.sa4.sin_family = (sa_family_t)ai->ai_family;
lws_sa46_write_numeric_address(&ds->dest, afip, sizeof(afip));
lwsl_wsi_info(wsi, "unsorted entry (af %d) %s",
ds->dest.sa4.sin_family, afip);
#if defined(LWS_WITH_NETLINK)
/*
* Let's assess this DNS result in terms of route
* selection, eg, if no usable net route or gateway for it,
* we don't have a way to use it if we listed it
*/
if (pt->context->routing_table.count) {
estr = _lws_route_est_outgoing(pt, &ds->dest);
if (!estr) {
lws_free(ds);
lwsl_wsi_notice(wsi, "%s has no route out\n",
afip);
/*
* There's no outbound route for this, it's
* unusable, so don't add it to the list
*/
goto next;
}
ds->if_idx = estr->if_idx;
ds->uidx = estr->uidx;
/*
* ...evidently, there's a way for it to go out...
*/
}
#endif
#if defined(LWS_WITH_NETLINK) && defined(LWS_WITH_IPV6)
/*
* These sorting rules only apply to ipv6. If we have ipv4
* dest and estimate we will use an ipv4 source address to
* route it, then skip this.
*
* However if we have ipv4 dest and estimate we will use an
* ipv6 source address to route it, because of ipv6-only
* egress, then promote it to ipv6 and sort it
*/
if (ds->dest.sa4.sin_family == AF_INET) {
if (!estr ||
estr->dest.sa4.sin_family == AF_INET ||
estr->gateway.sa4.sin_family == AF_INET)
/*
* No estimated route, or v4 estimated route,
* just add it to sorted list
*/
goto just_add;
/*
* v4 dest on estimated v6 source ads route, because
* eg, there's no active v4 source ads just ipv6...
* promote v4 -> v6 address using ::ffff:xx:yy
*/
lwsl_wsi_info(wsi, "promoting v4->v6");
lws_sa46_4to6(&ds->dest,
(uint8_t *)&ds->dest.sa4.sin_addr, 0);
}
/* first, classify this destination ads */
lws_sort_dns_classify(&ds->dest, &ds->score);
/*
* RFC6724 Section 5: Source Address Selection
*
* Go through the source options choosing the best for this
* destination... this can only operate on ipv6 destination
* address
*/
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&pt->context->routing_table)) {
lws_route_t *r = lws_container_of(d, lws_route_t, list);
/* gateway routes are skipped here */
if (ds->dest.sa6.sin6_family == AF_INET6 &&
r->dest.sa4.sin_family == AF_INET6 && (!bestsrc ||
lws_sort_dns_scomp(pt, bestsrc, r, &ds->dest.sa6) ==
SAS_PREFER_B))
bestsrc = r;
} lws_end_foreach_dll(d);
/* bestsrc is the best source route, or NULL if none */
if (!bestsrc && pt->context->routing_table.count) {
/* drop it, no usable source route */
lws_free(ds);
goto next;
}
just_add:
if (!bestsrc) {
lws_dll2_add_tail(&ds->list, &wsi->dns_sorted_list);
goto next;
}
ds->source = bestsrc;
/*
* RFC6724 Section 6: Destination Address Selection
*
* Insert the destination into the list at a position reflecting
* its preferability, so the head entry is the most preferred
*/
lws_dll2_add_sorted(&ds->list, &wsi->dns_sorted_list,
lws_sort_dns_compare);
#else
/*
* We don't have the routing table + source address details in
* order to sort the DNS results... simply make entries in the
* order of the addrinfo results
*/
lws_dll2_add_tail(&ds->list, &wsi->dns_sorted_list);
#endif
next:
ai = ai->ai_next;
}
//lwsl_notice("%s: sorted table: %d\n", __func__,
// wsi->dns_sorted_list.count);
#if defined(_DEBUG)
lws_sort_dns_dump(wsi);
#endif
return !wsi->dns_sorted_list.count;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,869 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
/* max individual proxied header payload size */
#define MAXHDRVAL 1024
#if defined(LWS_WITH_HTTP_PROXY)
static int
proxy_header(struct lws *wsi, struct lws *par, unsigned char *temp,
int temp_len, int index, unsigned char **p, unsigned char *end)
{
int n = lws_hdr_total_length(par, (enum lws_token_indexes)index);
if (n < 1) {
lwsl_wsi_debug(wsi, "no index %d:", index);
return 0;
}
if (lws_hdr_copy(par, (char *)temp, temp_len, (enum lws_token_indexes)index) < 0) {
lwsl_wsi_notice(wsi, "unable to copy par hdr idx %d (len %d)",
index, n);
return -1;
}
lwsl_wsi_debug(wsi, "index %d: %s", index, (char *)temp);
if (lws_add_http_header_by_token(wsi, (enum lws_token_indexes)index, temp, n, p, end)) {
lwsl_wsi_notice(wsi, "unable to append par hdr idx %d (len %d)",
index, n);
return -1;
}
return 0;
}
static int
stream_close(struct lws *wsi)
{
char buf[LWS_PRE + 6], *out = buf + LWS_PRE;
if (wsi->http.did_stream_close)
return 0;
wsi->http.did_stream_close = 1;
if (wsi->mux_substream) {
if (lws_write(wsi, (unsigned char *)buf + LWS_PRE, 0,
LWS_WRITE_HTTP_FINAL) < 0)
goto bail;
return 0;
}
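/* h1: send the zero-length terminal chunk, "0\x0d\x0a\x0d\x0a" */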
*out++ = '0';
*out++ = '\x0d';
*out++ = '\x0a';
*out++ = '\x0d';
*out++ = '\x0a';
if (lws_write(wsi, (unsigned char *)buf + LWS_PRE, 5,
LWS_WRITE_HTTP_FINAL) < 0)
goto bail;
return 0;
bail:
lwsl_wsi_info(wsi, "h2 fin wr failed");
return -1;
}
#endif
struct lws_proxy_pkt {
struct lws_dll2 pkt_list;
size_t len;
char binary;
char first;
char final;
/* data follows */
};
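/*
 * Each pkt is a single allocation: this struct, then LWS_PRE bytes of
 * headroom for lws_write(), then the payload at &pkt[1] + LWS_PRE
 */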
#if defined(LWS_WITH_HTTP_PROXY) && defined(LWS_ROLE_WS)
int
lws_callback_ws_proxy(struct lws *wsi, enum lws_callback_reasons reason,
void *user, void *in, size_t len)
{
struct lws_proxy_pkt *pkt;
struct lws_dll2 *dll;
switch (reason) {
/* h1 ws proxying... child / client / onward */
case LWS_CALLBACK_CLIENT_ESTABLISHED:
if (!wsi->h1_ws_proxied || !wsi->parent)
break;
if (lws_process_ws_upgrade2(wsi->parent))
return -1;
#if defined(LWS_WITH_HTTP2)
if (wsi->parent->mux_substream)
lwsl_wsi_info(wsi, "proxied h2 -> h1 ws established");
#endif
break;
case LWS_CALLBACK_CLIENT_CONFIRM_EXTENSION_SUPPORTED:
return 1;
case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
case LWS_CALLBACK_CLIENT_CLOSED:
lwsl_wsi_info(wsi, "client closed: parent %s",
lws_wsi_tag(wsi->parent));
if (wsi->parent)
lws_set_timeout(wsi->parent, 1, LWS_TO_KILL_ASYNC);
break;
case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER:
{
unsigned char **p = (unsigned char **)in, *end = (*p) + len,
tmp[MAXHDRVAL];
proxy_header(wsi, wsi->parent, tmp, sizeof(tmp),
WSI_TOKEN_HTTP_ACCEPT_LANGUAGE, p, end);
proxy_header(wsi, wsi->parent, tmp, sizeof(tmp),
WSI_TOKEN_HTTP_COOKIE, p, end);
proxy_header(wsi, wsi->parent, tmp, sizeof(tmp),
WSI_TOKEN_HTTP_SET_COOKIE, p, end);
break;
}
case LWS_CALLBACK_CLIENT_RECEIVE:
wsi->parent->ws->proxy_buffered += len;
if (wsi->parent->ws->proxy_buffered > 10 * 1024 * 1024) {
lwsl_wsi_err(wsi, "proxied ws connection "
"excessive buffering: dropping");
return -1;
}
pkt = lws_zalloc(sizeof(*pkt) + LWS_PRE + len, __func__);
if (!pkt)
return -1;
pkt->len = len;
pkt->first = (char)lws_is_first_fragment(wsi);
pkt->final = (char)lws_is_final_fragment(wsi);
pkt->binary = (char)lws_frame_is_binary(wsi);
memcpy(((uint8_t *)&pkt[1]) + LWS_PRE, in, len);
lws_dll2_add_tail(&pkt->pkt_list, &wsi->parent->ws->proxy_owner);
lws_callback_on_writable(wsi->parent);
break;
case LWS_CALLBACK_CLIENT_WRITEABLE:
dll = lws_dll2_get_head(&wsi->ws->proxy_owner);
if (!dll)
break;
pkt = (struct lws_proxy_pkt *)dll;
if (lws_write(wsi, ((unsigned char *)&pkt[1]) +
LWS_PRE, pkt->len, (enum lws_write_protocol)lws_write_ws_flags(
pkt->binary ? LWS_WRITE_BINARY : LWS_WRITE_TEXT,
pkt->first, pkt->final)) < 0)
return -1;
lws_dll2_remove(dll);
lws_free(pkt);
if (lws_dll2_get_head(&wsi->ws->proxy_owner))
lws_callback_on_writable(wsi);
break;
/* h1 ws proxying... parent / server / incoming */
case LWS_CALLBACK_CONFIRM_EXTENSION_OKAY:
return 1;
case LWS_CALLBACK_CLOSED:
lwsl_wsi_info(wsi, "closed");
return -1;
case LWS_CALLBACK_RECEIVE:
pkt = lws_zalloc(sizeof(*pkt) + LWS_PRE + len, __func__);
if (!pkt)
return -1;
pkt->len = len;
pkt->first = (char)lws_is_first_fragment(wsi);
pkt->final = (char)lws_is_final_fragment(wsi);
pkt->binary = (char)lws_frame_is_binary(wsi);
memcpy(((uint8_t *)&pkt[1]) + LWS_PRE, in, len);
lws_dll2_add_tail(&pkt->pkt_list, &wsi->child_list->ws->proxy_owner);
lws_callback_on_writable(wsi->child_list);
break;
case LWS_CALLBACK_SERVER_WRITEABLE:
dll = lws_dll2_get_head(&wsi->ws->proxy_owner);
if (!dll)
break;
pkt = (struct lws_proxy_pkt *)dll;
if (lws_write(wsi, ((unsigned char *)&pkt[1]) +
LWS_PRE, pkt->len, (enum lws_write_protocol)lws_write_ws_flags(
pkt->binary ? LWS_WRITE_BINARY : LWS_WRITE_TEXT,
pkt->first, pkt->final)) < 0)
return -1;
wsi->ws->proxy_buffered -= pkt->len;
lws_dll2_remove(dll);
lws_free(pkt);
if (lws_dll2_get_head(&wsi->ws->proxy_owner))
lws_callback_on_writable(wsi);
break;
default:
return 0;
}
return 0;
}
const struct lws_protocols lws_ws_proxy = {
"lws-ws-proxy",
lws_callback_ws_proxy,
0,
8192,
8192, NULL, 0
};
#endif
int
lws_callback_http_dummy(struct lws *wsi, enum lws_callback_reasons reason,
void *user, void *in, size_t len)
{
struct lws_ssl_info *si;
#ifdef LWS_WITH_CGI
struct lws_cgi_args *args;
#endif
#if defined(LWS_WITH_CGI) || defined(LWS_WITH_HTTP_PROXY)
char buf[LWS_PRE + 32 + 8192];
int n;
#endif
#if defined(LWS_WITH_HTTP_PROXY)
unsigned char **p, *end;
struct lws *parent;
#endif
switch (reason) {
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
case LWS_CALLBACK_HTTP:
#if defined(LWS_WITH_SERVER)
if (lws_return_http_status(wsi, HTTP_STATUS_NOT_FOUND, NULL))
return -1;
if (lws_http_transaction_completed(wsi))
#endif
return -1;
break;
#if defined(LWS_WITH_SERVER)
case LWS_CALLBACK_HTTP_BODY_COMPLETION:
#if defined(LWS_WITH_HTTP_PROXY)
if (wsi->child_list) {
lwsl_wsi_info(wsi, "HTTP_BODY_COMPLETION: %d",
(int)len);
lws_callback_on_writable(wsi->child_list);
break;
}
#endif
if (lws_return_http_status(wsi, 200, NULL))
return -1;
break;
case LWS_CALLBACK_HTTP_FILE_COMPLETION:
if (lws_http_transaction_completed(wsi))
return -1;
break;
#endif
#if defined(LWS_WITH_HTTP_PROXY)
case LWS_CALLBACK_HTTP_BODY:
if (wsi->child_list) {
lwsl_wsi_info(wsi, "HTTP_BODY: stashing %d", (int)len);
if (lws_buflist_append_segment(
&wsi->http.buflist_post_body, in, len) < 0)
return -1;
lws_client_http_body_pending(wsi->child_list, 1);
lws_callback_on_writable(wsi->child_list);
}
break;
#endif
case LWS_CALLBACK_HTTP_WRITEABLE:
// lwsl_err("%s: LWS_CALLBACK_HTTP_WRITEABLE\n", __func__);
#ifdef LWS_WITH_CGI
if (wsi->reason_bf & (LWS_CB_REASON_AUX_BF__CGI_HEADERS |
LWS_CB_REASON_AUX_BF__CGI)) {
n = lws_cgi_write_split_stdout_headers(wsi);
if (n < 0) {
lwsl_wsi_debug(wsi, "AUX_BF__CGI forcing close");
return -1;
}
if (!n && wsi->http.cgi && wsi->http.cgi->lsp &&
wsi->http.cgi->lsp->stdwsi[LWS_STDOUT])
lws_rx_flow_control(
wsi->http.cgi->lsp->stdwsi[LWS_STDOUT], 1);
if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_HEADERS)
wsi->reason_bf &=
(char)~LWS_CB_REASON_AUX_BF__CGI_HEADERS;
else
wsi->reason_bf &= (char)~LWS_CB_REASON_AUX_BF__CGI;
if (wsi->http.cgi && wsi->http.cgi->cgi_transaction_over) {
lwsl_wsi_info(wsi, "txn over");
return -1;
}
break;
}
if ((wsi->http.cgi && wsi->http.cgi->cgi_transaction_over) ||
(wsi->reason_bf & LWS_CB_REASON_AUX_BF__CGI_CHUNK_END)) {
if (!wsi->mux_substream) {
memcpy(buf + LWS_PRE, "0\x0d\x0a\x0d\x0a", 5);
lwsl_wsi_debug(wsi, "wr chunk term and exiting");
lws_write(wsi, (unsigned char *)buf +
LWS_PRE, 5, LWS_WRITE_HTTP);
} else
lws_write(wsi, (unsigned char *)buf +
LWS_PRE, 0,
LWS_WRITE_HTTP_FINAL);
/* always close after sending it */
if (lws_http_transaction_completed(wsi))
return -1;
return 0;
}
#endif
#if defined(LWS_WITH_HTTP_PROXY)
if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY_HEADERS) {
wsi->reason_bf &=
(char)~LWS_CB_REASON_AUX_BF__PROXY_HEADERS;
n = LWS_WRITE_HTTP_HEADERS;
if (!wsi->http.prh_content_length)
n |= LWS_WRITE_H2_STREAM_END;
lwsl_wsi_debug(wsi, "issuing proxy headers: clen %d",
(int)wsi->http.prh_content_length);
n = lws_write(wsi, wsi->http.pending_return_headers +
LWS_PRE,
wsi->http.pending_return_headers_len,
(enum lws_write_protocol)n);
lws_free_set_NULL(wsi->http.pending_return_headers);
if (n < 0) {
lwsl_wsi_err(wsi, "EST_CLIENT_HTTP: wr failed");
return -1;
}
lws_callback_on_writable(wsi);
break;
}
if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY) {
char *px = buf + LWS_PRE;
int lenx = sizeof(buf) - LWS_PRE - 32;
/*
* our sink is writeable and our source has something
* to read. So read a lump of source material of
* suitable size to send or what's available, whichever
* is the smaller.
*/
wsi->reason_bf &= (char)~LWS_CB_REASON_AUX_BF__PROXY;
if (!lws_get_child(wsi))
break;
/* this causes LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ */
if (lws_http_client_read(lws_get_child(wsi), &px,
&lenx) < 0) {
lwsl_wsi_info(wsi, "LWS_CB_REASON_AUX_BF__PROXY: "
"client closed");
stream_close(wsi);
return -1;
}
break;
}
if (wsi->reason_bf & LWS_CB_REASON_AUX_BF__PROXY_TRANS_END) {
lwsl_wsi_info(wsi, "PROXY_TRANS_END");
wsi->reason_bf &= (char)~LWS_CB_REASON_AUX_BF__PROXY_TRANS_END;
if (stream_close(wsi))
return -1;
if (lws_http_transaction_completed(wsi))
return -1;
}
#endif
break;
#if defined(LWS_WITH_HTTP_PROXY)
case LWS_CALLBACK_RECEIVE_CLIENT_HTTP:
assert(lws_get_parent(wsi));
if (!lws_get_parent(wsi))
break;
lws_get_parent(wsi)->reason_bf |= LWS_CB_REASON_AUX_BF__PROXY;
lws_callback_on_writable(lws_get_parent(wsi));
break;
case LWS_CALLBACK_RECEIVE_CLIENT_HTTP_READ: {
char *out = buf + LWS_PRE;
assert(lws_get_parent(wsi));
if (wsi->http.proxy_parent_chunked) {
if (len > sizeof(buf) - LWS_PRE - 16) {
lwsl_wsi_err(wsi, "oversize buf %d %d", (int)len,
(int)sizeof(buf) - LWS_PRE - 16);
return -1;
}
/*
* this only needs dealing with on http/1.1 to allow
* pipelining
*/
n = lws_snprintf(out, 14, "%X\x0d\x0a", (int)len);
out += n;
memcpy(out, in, len);
out += len;
*out++ = '\x0d';
*out++ = '\x0a';
n = lws_write(lws_get_parent(wsi),
(unsigned char *)buf + LWS_PRE,
(size_t)(unsigned int)(len + (unsigned int)n + 2), LWS_WRITE_HTTP);
} else
n = lws_write(lws_get_parent(wsi), (unsigned char *)in,
len, LWS_WRITE_HTTP);
if (n < 0)
return -1;
break; }
/* h1 http proxying... */
case LWS_CALLBACK_ESTABLISHED_CLIENT_HTTP: {
unsigned char *start, *p, *end;
/*
* We want to proxy these headers, but we are being called
* at the point the onward client was established, which is
* unrelated to the state or writability of our proxy
* connection.
*
* Therefore produce the headers using the onward client ah
* while we have it, and stick them on the output buflist to be
* written on the proxy connection as soon as convenient.
*/
parent = lws_get_parent(wsi);
if (!parent)
return 0;
start = p = (unsigned char *)buf + LWS_PRE;
end = p + sizeof(buf) - LWS_PRE - MAXHDRVAL;
if (lws_add_http_header_status(lws_get_parent(wsi),
lws_http_client_http_response(wsi), &p, end))
return 1;
/*
* copy these headers from the client connection to the parent
*/
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_CONTENT_LENGTH, &p, end);
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_CONTENT_TYPE, &p, end);
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_ETAG, &p, end);
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_ACCEPT_LANGUAGE, &p, end);
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_CONTENT_ENCODING, &p, end);
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_CACHE_CONTROL, &p, end);
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_SET_COOKIE, &p, end);
proxy_header(parent, wsi, end, MAXHDRVAL,
WSI_TOKEN_HTTP_LOCATION, &p, end);
if (!parent->mux_substream)
if (lws_add_http_header_by_token(parent,
WSI_TOKEN_CONNECTION, (unsigned char *)"close",
5, &p, end))
return -1;
/*
* We proxy using h1 only atm, and strip any chunking so it
* can go back out on h2 just fine.
*
* However if we are actually going out on h1, we need to add
* our own chunking since we still don't know the size.
*/
if (!parent->mux_substream &&
!lws_hdr_total_length(wsi, WSI_TOKEN_HTTP_CONTENT_LENGTH)) {
lwsl_wsi_debug(wsi, "downstream parent chunked");
if (lws_add_http_header_by_token(parent,
WSI_TOKEN_HTTP_TRANSFER_ENCODING,
(unsigned char *)"chunked", 7, &p, end))
return -1;
wsi->http.proxy_parent_chunked = 1;
}
if (lws_finalize_http_header(parent, &p, end))
return 1;
parent->http.prh_content_length = (size_t)-1;
if (lws_hdr_simple_ptr(wsi, WSI_TOKEN_HTTP_CONTENT_LENGTH))
parent->http.prh_content_length = (size_t)atoll(
lws_hdr_simple_ptr(wsi,
WSI_TOKEN_HTTP_CONTENT_LENGTH));
parent->http.pending_return_headers_len = lws_ptr_diff_size_t(p, start);
parent->http.pending_return_headers =
lws_malloc(parent->http.pending_return_headers_len +
LWS_PRE, "return proxy headers");
if (!parent->http.pending_return_headers)
return -1;
memcpy(parent->http.pending_return_headers + LWS_PRE, start,
parent->http.pending_return_headers_len);
parent->reason_bf |= LWS_CB_REASON_AUX_BF__PROXY_HEADERS;
lwsl_wsi_debug(wsi, "ESTABLISHED_CLIENT_HTTP: "
"prepared %d headers (len %d)",
lws_http_client_http_response(wsi),
(int)parent->http.prh_content_length);
/*
* so at this point, the onward client connection can bear
* traffic. We might be doing a POST and have pending cached
* inbound stuff to send, it can go now.
*/
lws_callback_on_writable(parent);
break; }
case LWS_CALLBACK_COMPLETED_CLIENT_HTTP:
lwsl_wsi_info(wsi, "COMPLETED_CLIENT_HTTP: (parent %s)",
lws_wsi_tag(lws_get_parent(wsi)));
if (!lws_get_parent(wsi))
break;
lws_get_parent(wsi)->reason_bf |=
LWS_CB_REASON_AUX_BF__PROXY_TRANS_END;
lws_callback_on_writable(lws_get_parent(wsi));
break;
case LWS_CALLBACK_CLOSED_CLIENT_HTTP:
if (!lws_get_parent(wsi))
break;
// lwsl_err("%s: LWS_CALLBACK_CLOSED_CLIENT_HTTP\n", __func__);
lws_set_timeout(lws_get_parent(wsi),
(enum pending_timeout)LWS_TO_KILL_ASYNC,
(int)PENDING_TIMEOUT_KILLED_BY_PROXY_CLIENT_CLOSE);
break;
case LWS_CALLBACK_CLIENT_APPEND_HANDSHAKE_HEADER:
parent = lws_get_parent(wsi);
if (!parent)
break;
p = (unsigned char **)in;
end = (*p) + len;
/*
* copy these headers from the parent request to the client
* connection's request
*/
proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
WSI_TOKEN_HTTP_ETAG, p, end);
proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
WSI_TOKEN_HTTP_IF_MODIFIED_SINCE, p, end);
proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
WSI_TOKEN_HTTP_ACCEPT_LANGUAGE, p, end);
proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
WSI_TOKEN_HTTP_ACCEPT_ENCODING, p, end);
proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
WSI_TOKEN_HTTP_CACHE_CONTROL, p, end);
proxy_header(wsi, parent, (unsigned char *)buf, sizeof(buf),
WSI_TOKEN_HTTP_COOKIE, p, end);
buf[0] = '\0';
lws_get_peer_simple(parent, buf, sizeof(buf));
if (lws_add_http_header_by_token(wsi, WSI_TOKEN_X_FORWARDED_FOR,
(unsigned char *)buf, (int)strlen(buf), p, end))
return -1;
break;
#endif
#ifdef LWS_WITH_CGI
/* CGI IO events (POLLIN/OUT) appear here, our default policy is:
*
* - POST data goes on subprocess stdin
* - subprocess stdout goes on http via writeable callback
* - subprocess stderr goes to the logs
*/
case LWS_CALLBACK_CGI:
args = (struct lws_cgi_args *)in;
switch (args->ch) { /* which of stdin/out/err ? */
case LWS_STDIN:
/* TBD stdin rx flow control */
break;
case LWS_STDOUT:
if (args->stdwsi[LWS_STDOUT])
/* quench POLLIN on STDOUT until MASTER got writeable */
lws_rx_flow_control(args->stdwsi[LWS_STDOUT], 0);
wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI;
/* when writing to MASTER would not block */
lws_callback_on_writable(wsi);
break;
case LWS_STDERR:
n = lws_get_socket_fd(args->stdwsi[LWS_STDERR]);
if (n < 0)
break;
n = (int)read(n, buf, sizeof(buf) - 2);
if (n > 0) {
if (buf[n - 1] != '\n')
buf[n++] = '\n';
buf[n] = '\0';
lwsl_wsi_notice(wsi, "CGI-stderr: %s", buf);
}
break;
}
break;
case LWS_CALLBACK_CGI_TERMINATED:
if (wsi->http.cgi) {
lwsl_wsi_debug(wsi, "CGI_TERMINATED: %d %" PRIu64,
wsi->http.cgi->explicitly_chunked,
(uint64_t)wsi->http.cgi->content_length);
if (!(wsi->http.cgi->explicitly_chunked && wsi->mux_substream) &&
!wsi->http.cgi->content_length) {
/* send terminating chunk */
lwsl_wsi_debug(wsi, "LWS_CALLBACK_CGI_TERMINATED: ending");
wsi->reason_bf |= LWS_CB_REASON_AUX_BF__CGI_CHUNK_END;
lws_callback_on_writable(wsi);
lws_set_timeout(wsi, PENDING_TIMEOUT_CGI, 3);
break;
}
if (wsi->mux_substream && !wsi->cgi_stdout_zero_length)
lws_write(wsi, (unsigned char *)buf + LWS_PRE, 0,
LWS_WRITE_HTTP_FINAL);
}
#if defined(LWS_WITH_SERVER)
if (lws_http_transaction_completed(wsi))
return -1;
#endif
return 0;
case LWS_CALLBACK_CGI_STDIN_DATA: /* POST body for stdin */
args = (struct lws_cgi_args *)in;
args->data[args->len] = '\0';
if (!args->stdwsi[LWS_STDIN])
return -1;
n = lws_get_socket_fd(args->stdwsi[LWS_STDIN]);
if (n < 0)
return -1;
#if defined(LWS_WITH_ZLIB)
if (wsi->http.cgi->gzip_inflate) {
/* gzip handling */
if (!wsi->http.cgi->gzip_init) {
lwsl_wsi_info(wsi, "inflating gzip");
memset(&wsi->http.cgi->inflate, 0,
sizeof(wsi->http.cgi->inflate));
if (inflateInit2(&wsi->http.cgi->inflate,
16 + 15) != Z_OK) {
lwsl_wsi_err(wsi, "iniflateInit fail");
return -1;
}
wsi->http.cgi->gzip_init = 1;
}
wsi->http.cgi->inflate.next_in = args->data;
wsi->http.cgi->inflate.avail_in = (unsigned int)args->len;
do {
wsi->http.cgi->inflate.next_out =
wsi->http.cgi->inflate_buf;
wsi->http.cgi->inflate.avail_out =
sizeof(wsi->http.cgi->inflate_buf);
n = inflate(&wsi->http.cgi->inflate,
Z_SYNC_FLUSH);
switch (n) {
case Z_NEED_DICT:
case Z_STREAM_ERROR:
case Z_DATA_ERROR:
case Z_MEM_ERROR:
inflateEnd(&wsi->http.cgi->inflate);
wsi->http.cgi->gzip_init = 0;
lwsl_wsi_err(wsi, "zlib err inflate %d", n);
return -1;
}
if (wsi->http.cgi->inflate.avail_out !=
sizeof(wsi->http.cgi->inflate_buf)) {
int written;
written = (int)write(args->stdwsi[LWS_STDIN]->desc.filefd,
wsi->http.cgi->inflate_buf,
sizeof(wsi->http.cgi->inflate_buf) -
wsi->http.cgi->inflate.avail_out);
if (written != (int)(
sizeof(wsi->http.cgi->inflate_buf) -
wsi->http.cgi->inflate.avail_out)) {
lwsl_wsi_notice(wsi,
"CGI_STDIN_DATA: "
"sent %d only %d went",
						(int)(sizeof(wsi->http.cgi->inflate_buf) -
						      wsi->http.cgi->inflate.avail_out),
						written);
}
if (n == Z_STREAM_END) {
lwsl_wsi_err(wsi,
"gzip inflate end");
inflateEnd(&wsi->http.cgi->inflate);
wsi->http.cgi->gzip_init = 0;
break;
}
} else
break;
if (wsi->http.cgi->inflate.avail_out)
break;
} while (1);
return args->len;
}
#endif /* WITH_ZLIB */
n = (int)write(n, args->data, (unsigned int)args->len);
// lwsl_hexdump_notice(args->data, args->len);
if (n < args->len)
lwsl_wsi_notice(wsi, "CGI_STDIN_DATA: "
"sent %d only %d went", n, args->len);
lwsl_wsi_info(wsi, "proxied %d bytes", n);
if (wsi->http.cgi->post_in_expected && args->stdwsi[LWS_STDIN] &&
args->stdwsi[LWS_STDIN]->desc.filefd > 0) {
wsi->http.cgi->post_in_expected -= (unsigned int)n;
if (!wsi->http.cgi->post_in_expected) {
struct lws *siwsi = args->stdwsi[LWS_STDIN];
/*
* The situation here is that we finished
* proxying the incoming body from the net to
* the STDIN stdwsi... and we want to close it
* so it can understand we are done (necessary
* if no content-length)...
*/
lwsl_wsi_info(siwsi, "expected POST in end: "
"closing stdin fd %d",
siwsi->desc.sockfd);
/*
* We don't want the child / parent relationship
* to be handled in close, since we want the
* rest of the cgi and children to stay up
*/
lws_remove_child_from_any_parent(siwsi);
lws_wsi_close(siwsi, LWS_TO_KILL_ASYNC);
wsi->http.cgi->lsp->stdwsi[LWS_STDIN] = NULL;
lws_spawn_stdwsi_closed(wsi->http.cgi->lsp, siwsi);
}
}
return n;
#endif /* WITH_CGI */
#endif /* LWS_ROLE_H1 / LWS_ROLE_H2 */
case LWS_CALLBACK_SSL_INFO:
si = in;
(void)si;
lwsl_wsi_notice(wsi, "SSL_INFO: where: 0x%x, ret: 0x%x",
si->where, si->ret);
break;
#if LWS_MAX_SMP > 1
case LWS_CALLBACK_GET_THREAD_ID:
#ifdef __PTW32_H
/* If we use implementation of PThreads for Win that is
* distributed by VCPKG */
return (int)(lws_intptr_t)(pthread_self()).p;
#else
return (int)(lws_intptr_t)pthread_self();
#endif // __PTW32_H
#endif
default:
break;
}
return 0;
}
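
/*
 * Usage sketch, illustrative only: user protocol callbacks commonly
 * delegate any reason they do not handle themselves to
 * lws_callback_http_dummy(), so the http / proxy / CGI plumbing above
 * keeps working. The callback name is hypothetical.
 *
 *	static int
 *	my_callback(struct lws *wsi, enum lws_callback_reasons reason,
 *		    void *user, void *in, size_t len)
 *	{
 *		switch (reason) {
 *		case LWS_CALLBACK_HTTP:
 *			// ... serve our own URLs here ...
 *			break;
 *		default:
 *			break;
 *		}
 *
 *		return lws_callback_http_dummy(wsi, reason, user, in, len);
 *	}
 */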


@ -0,0 +1,647 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#if defined(STANDALONE)
#undef lws_malloc
#define lws_malloc(a, b) malloc(a)
#undef lws_free
#define lws_free(a) free(a)
#undef lws_free_set_NULL
#define lws_free_set_NULL(a) { if (a) { free(a); a = NULL; }}
#endif
struct lws_dsh_search {
size_t required;
ssize_t natural_required;
int kind;
lws_dsh_obj_t *best;
lws_dsh_t *dsh;
lws_dsh_obj_t *tail_obj;
void *natural; /* coalesce address against last tail */
lws_dsh_t *already_checked;
lws_dsh_t *this_dsh;
char coalesce;
};
static int
_lws_dsh_alloc_tail(lws_dsh_t *dsh, int kind, const void *src1, size_t size1,
const void *src2, size_t size2, lws_dll2_t *replace);
static size_t
lws_dsh_align(size_t length)
{
size_t align = sizeof(int *);
if (length & (align - 1))
length += align - (length & (align - 1));
return length;
}
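
/*
 * Worked example: with sizeof(int *) == 8, lws_dsh_align(13) == 16 and
 * lws_dsh_align(16) == 16, so object starts and sizes always keep pointer
 * alignment inside the dsh buffer.
 */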
void
lws_dsh_empty(struct lws_dsh *dsh)
{
lws_dsh_obj_t *obj;
size_t oha_len;
int n;
if (!dsh)
return;
oha_len = sizeof(lws_dsh_obj_head_t) * (unsigned int)dsh->count_kinds;
/* clear down the obj heads array */
memset(dsh->oha, 0, oha_len);
for (n = 0; n < dsh->count_kinds; n++) {
dsh->oha[n].kind = n;
dsh->oha[n].total_size = 0;
}
/* initially the whole buffer is on the free kind (0) list */
obj = (lws_dsh_obj_t *)dsh->buf;
memset(obj, 0, sizeof(*obj));
obj->asize = dsh->buffer_size - sizeof(*obj);
lws_dll2_add_head(&obj->list, &dsh->oha[0].owner);
dsh->locally_free = obj->asize;
dsh->locally_in_use = 0;
}
lws_dsh_t *
lws_dsh_create(lws_dll2_owner_t *owner, size_t buf_len, int _count_kinds)
{
int count_kinds = _count_kinds & 0xff;
lws_dsh_t *dsh;
size_t oha_len;
oha_len = sizeof(lws_dsh_obj_head_t) * (unsigned int)(++count_kinds);
assert(buf_len);
assert(count_kinds > 1);
assert(buf_len > sizeof(lws_dsh_t) + oha_len);
buf_len += 64;
dsh = lws_malloc(sizeof(lws_dsh_t) + buf_len + oha_len, __func__);
if (!dsh)
return NULL;
/* set convenience pointers to the overallocated parts */
lws_dll2_clear(&dsh->list);
dsh->oha = (lws_dsh_obj_head_t *)&dsh[1];
dsh->buf = ((uint8_t *)dsh->oha) + oha_len;
dsh->count_kinds = count_kinds;
dsh->buffer_size = buf_len;
dsh->being_destroyed = 0;
dsh->splitat = 0;
dsh->flags = (unsigned int)_count_kinds & 0xff000000u;
lws_dsh_empty(dsh);
if (owner)
lws_dll2_add_head(&dsh->list, owner);
// lws_dsh_describe(dsh, "post-init");
return dsh;
}
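
/*
 * Minimal usage sketch, illustrative only: create a dsh with one user
 * kind, buffer a fragment on it, then drain and destroy it. The sizes
 * are arbitrary.
 *
 *	lws_dsh_t *dsh = lws_dsh_create(NULL, 16384, 1);
 *	void *obj;
 *	size_t size;
 *
 *	if (!dsh)
 *		return;
 *	if (!lws_dsh_alloc_tail(dsh, 0, "hello", 5, NULL, 0) &&
 *	    !lws_dsh_get_head(dsh, 0, &obj, &size))
 *		lws_dsh_free(&obj);	// done with the head object
 *	lws_dsh_destroy(&dsh);
 */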
/*
* We're flicking through the hole list... if we find a suitable hole starting
* right after the current tail, it means we can coalesce against the current
* tail, that overrides all other considerations
*/
static int
search_best_free(struct lws_dll2 *d, void *user)
{
struct lws_dsh_search *s = (struct lws_dsh_search *)user;
lws_dsh_obj_t *obj = lws_container_of(d, lws_dsh_obj_t, list);
// lwsl_debug("%s: obj %p, asize %zu (req %zu)\n", __func__, obj,
// obj->asize, s->required);
// if (s->tail_obj)
// lwsl_notice("%s: tail est %d, splitat %d\n", __func__,
// (int)(s->tail_obj->asize + (size_t)s->natural_required), (int)s->dsh->splitat);
if (s->dsh->flags & LWS_DSHFLAG_ENABLE_COALESCE) {
if (obj == s->natural && s->tail_obj &&
(int)obj->asize >= s->natural_required
&&
(!s->dsh->splitat ||
(size_t)(s->tail_obj->asize +
(size_t)s->natural_required) <= s->dsh->splitat)
) {
// lwsl_user("%s: found natural\n", __func__);
s->dsh = s->this_dsh;
s->best = obj;
s->coalesce = 1;
}
if (s->coalesce)
return 0;
}
if (obj->asize >= s->required &&
(!s->best || obj->asize < s->best->asize)) {
s->best = obj;
s->dsh = s->this_dsh;
}
return 0;
}
static int
buf_compare(const lws_dll2_t *d, const lws_dll2_t *i)
{
return (int)lws_ptr_diff(d, i);
}
void
lws_dsh_destroy(lws_dsh_t **pdsh)
{
lws_dsh_t *dsh = *pdsh;
if (!dsh)
return;
dsh->being_destroyed = 1;
lws_dll2_remove(&dsh->list);
lws_dsh_empty(dsh);
/* everything else is in one heap allocation */
lws_free_set_NULL(*pdsh);
}
size_t
lws_dsh_get_size(struct lws_dsh *dsh, int kind)
{
kind++;
assert(kind < dsh->count_kinds);
return dsh->oha[kind].total_size;
}
static int
_lws_dsh_alloc_tail(lws_dsh_t *dsh, int kind, const void *src1, size_t size1,
const void *src2, size_t size2, lws_dll2_t *replace)
{
size_t asize = sizeof(lws_dsh_obj_t) + lws_dsh_align(size1 + size2);
struct lws_dsh_search s;
assert(kind >= 0);
kind++;
assert(!dsh || kind < dsh->count_kinds);
/*
* Search our free list looking for the smallest guy who will fit
* what we want to allocate
*/
s.dsh = dsh;
s.required = asize;
s.kind = kind;
s.best = NULL;
s.already_checked = NULL;
s.this_dsh = dsh;
s.natural = NULL;
s.coalesce = 0;
s.natural_required = 0;
/* list is at the very start, so we can cast */
s.tail_obj = (lws_dsh_obj_t *)dsh->oha[kind].owner.tail;
if (s.tail_obj) {
assert(s.tail_obj->kind == kind);
/*
* there's a tail... precompute where a natural hole would
* have to start to be coalescable
*/
s.natural = (uint8_t *)s.tail_obj + s.tail_obj->asize;
/*
* ... and precompute the needed hole extent (including its
* obj part we would no longer need if we coalesced, and
		 * accounting for any unused / alignment part in the tail)
*/
s.natural_required = (ssize_t)(lws_dsh_align(s.tail_obj->size + size1 + size2) -
s.tail_obj->asize + sizeof(lws_dsh_obj_t));
// lwsl_notice("%s: natural %p, tail len %d, nreq %d, splitat %d\n", __func__, s.natural,
// (int)s.tail_obj->size, (int)s.natural_required, (int)dsh->splitat);
}
if (dsh && !dsh->being_destroyed)
lws_dll2_foreach_safe(&dsh->oha[0].owner, &s, search_best_free);
if (!s.best) {
//lwsl_notice("%s: no buffer has space for %lu\n",
// __func__, (unsigned long)asize);
return 1;
}
if (s.coalesce) {
uint8_t *nf = (uint8_t *)&s.tail_obj[1] + s.tail_obj->size,
*e = (uint8_t *)s.best + s.best->asize, *ce;
lws_dsh_obj_t *rh;
size_t le;
// lwsl_notice("%s: coalescing\n", __func__);
/*
* logically remove the free list entry we're taking over the
* memory footprint of
*/
lws_dll2_remove(&s.best->list);
s.dsh->locally_free -= s.best->asize;
if (s.dsh->oha[kind].total_size < s.tail_obj->asize) {
lwsl_err("%s: total_size %d, asize %d, hdr size %d\n", __func__,
(int)s.dsh->oha[kind].total_size,
(int)s.tail_obj->asize, (int)sizeof(lws_dsh_obj_t));
assert(0);
}
s.dsh->oha[kind].total_size -= s.tail_obj->asize;
s.dsh->locally_in_use -= s.tail_obj->asize;
if (size1) {
memcpy(nf, src1, size1);
nf += size1;
}
if (size2) {
memcpy(nf, src2, size2);
nf += size2;
}
/*
* adjust the tail guy's sizes to account for the coalesced
* data and alignment for the end point
*/
s.tail_obj->size = s.tail_obj->size + size1 + size2;
s.tail_obj->asize = sizeof(lws_dsh_obj_t) +
lws_dsh_align(s.tail_obj->size);
ce = (uint8_t *)s.tail_obj + s.tail_obj->asize;
assert(ce <= e);
le = lws_ptr_diff_size_t(e, ce);
/*
* Now we have to decide what to do with any leftovers...
*/
if (le < 64)
/*
* just absorb it into the coalesced guy as spare, and
* no need for a replacement hole
*/
s.tail_obj->asize += le;
else {
rh = (lws_dsh_obj_t *)ce;
memset(rh, 0, sizeof(*rh));
rh->asize = le;
lws_dll2_add_sorted(&rh->list, &s.dsh->oha[0].owner,
buf_compare);
s.dsh->locally_free += rh->asize;
}
s.dsh->oha[kind].total_size += s.tail_obj->asize;
s.dsh->locally_in_use += s.tail_obj->asize;
return 0;
}
/* anything coming out of here must be aligned */
assert(!(((size_t)(intptr_t)s.best) & (sizeof(int *) - 1)));
if (s.best->asize < asize + (2 * sizeof(*s.best))) {
// lwsl_notice("%s: exact\n", __func__);
/*
* Exact fit, or close enough we can't / don't want to have to
* track the little bit of free area that would be left.
*
* Move the object from the free list to the oha of the
* desired kind
*/
lws_dll2_remove(&s.best->list);
s.best->dsh = s.dsh;
s.best->kind = kind;
s.best->size = size1 + size2;
memcpy(&s.best[1], src1, size1);
if (src2)
memcpy((uint8_t *)&s.best[1] + size1, src2, size2);
if (replace) {
s.best->list.prev = replace->prev;
s.best->list.next = replace->next;
s.best->list.owner = replace->owner;
if (replace->prev)
replace->prev->next = &s.best->list;
if (replace->next)
replace->next->prev = &s.best->list;
} else
if (dsh) {
assert(!(((unsigned long)(intptr_t)(s.best)) &
(sizeof(int *) - 1)));
lws_dll2_add_tail(&s.best->list,
&dsh->oha[kind].owner);
}
assert(s.dsh->locally_free >= s.best->asize);
s.dsh->locally_free -= s.best->asize;
s.dsh->locally_in_use += s.best->asize;
dsh->oha[kind].total_size += s.best->asize;
assert(s.dsh->locally_in_use <= s.dsh->buffer_size);
} else {
lws_dsh_obj_t *nf;
#if defined(_DEBUG)
uint8_t *e = ((uint8_t *)s.best) + s.best->asize;
#endif
/*
* Free area was oversize enough that we need to split it.
*
* Unlink the free area and move its header forward to account
* for our usage of its start area. It's like this so that we
* can coalesce sequential objects.
*/
//lwsl_notice("%s: splitting... free reduce %zu -> %zu\n",
// __func__, s.best->asize, s.best->asize - asize);
assert(s.best->asize >= asize);
/* unlink the entire original hole object at s.best */
lws_dll2_remove(&s.best->list);
s.dsh->locally_free -= s.best->asize;
s.dsh->locally_in_use += asize;
/* latter part becomes new hole object */
nf = (lws_dsh_obj_t *)(((uint8_t *)s.best) + asize);
assert((uint8_t *)nf < e);
memset(nf, 0, sizeof(*nf));
nf->asize = s.best->asize - asize; /* rump free part only */
assert(((uint8_t *)nf) + nf->asize <= e);
lws_dll2_add_sorted(&nf->list, &s.dsh->oha[0].owner, buf_compare);
		s.dsh->locally_free += nf->asize; /* only the rump stays free */
/* take over s.best as the new allocated object, fill it in */
s.best->dsh = s.dsh;
s.best->kind = kind;
s.best->size = size1 + size2;
s.best->asize = asize;
// lwsl_notice("%s: split off kind %d\n", __func__, kind);
assert((uint8_t *)s.best + s.best->asize < e);
assert((uint8_t *)s.best + s.best->asize <= (uint8_t *)nf);
if (size1)
memcpy(&s.best[1], src1, size1);
if (src2)
memcpy((uint8_t *)&s.best[1] + size1, src2, size2);
if (replace) {
s.best->list.prev = replace->prev;
s.best->list.next = replace->next;
s.best->list.owner = replace->owner;
if (replace->prev)
replace->prev->next = &s.best->list;
if (replace->next)
replace->next->prev = &s.best->list;
} else
if (dsh) {
assert(!(((unsigned long)(intptr_t)(s.best)) &
(sizeof(int *) - 1)));
lws_dll2_add_tail(&s.best->list,
&dsh->oha[kind].owner);
}
assert(s.dsh->locally_free >= asize);
dsh->oha[kind].total_size += asize;
assert(s.dsh->locally_in_use <= s.dsh->buffer_size);
}
// lws_dsh_describe(dsh, "post-alloc");
return 0;
}
int
lws_dsh_alloc_tail(lws_dsh_t *dsh, int kind, const void *src1, size_t size1,
const void *src2, size_t size2)
{
int r;
do {
size_t s1 = size1, s2 = size2;
if (!dsh->splitat || !(dsh->flags & LWS_DSHFLAG_ENABLE_SPLIT)) {
s1 = size1;
s2 = size2;
} else
if (s1 > dsh->splitat) {
s1 = dsh->splitat;
s2 = 0;
} else {
if (s1 + s2 > dsh->splitat)
s2 = dsh->splitat - s1;
}
r = _lws_dsh_alloc_tail(dsh, kind, src1, s1, src2, s2, NULL);
if (r)
return r;
src1 = (void *)((uint8_t *)src1 + s1);
src2 = (void *)((uint8_t *)src2 + s2);
size1 -= s1;
size2 -= s2;
} while (size1 + size2);
return 0;
}
void
lws_dsh_consume(struct lws_dsh *dsh, int kind, size_t len)
{
lws_dsh_obj_t *h = (lws_dsh_obj_t *)dsh->oha[kind + 1].owner.head;
assert(len <= h->size);
assert(h->pos + len <= h->size);
	if (len == h->size || h->pos + len == h->size) {
		void *p = (void *)&h[1];

		/* lws_dsh_free() expects a pointer to the payload, not to
		 * the lws_dsh_obj_t header itself */
		lws_dsh_free(&p);
		return;
	}
	assert(0); /* partial consume is not implemented */
h->pos += len;
}
void
lws_dsh_free(void **pobj)
{
lws_dsh_obj_t *_o = (lws_dsh_obj_t *)((uint8_t *)(*pobj) - sizeof(*_o)),
*_o2;
lws_dsh_t *dsh = _o->dsh;
/* anything coming out of here must be aligned */
assert(!(((size_t)(intptr_t)_o) & (sizeof(int *) - 1)));
/*
* Remove the object from its list and place on the free list of the
* dsh the buffer space belongs to
*/
lws_dll2_remove(&_o->list);
*pobj = NULL;
assert(dsh->locally_in_use >= _o->asize);
dsh->locally_free += _o->asize;
dsh->locally_in_use -= _o->asize;
assert(dsh->oha[_o->kind].total_size >= _o->asize);
dsh->oha[_o->kind].total_size -= _o->asize; /* account for usage by kind */
assert(dsh->locally_in_use <= dsh->buffer_size);
/*
* The free space list is sorted in buffer address order, so detecting
* coalescing opportunities is cheap. Because the free list should be
* continuously tending to reduce by coalescing, the sorting should not
* be expensive to maintain.
*/
_o->size = 0; /* not meaningful when on free list */
lws_dll2_add_sorted(&_o->list, &_o->dsh->oha[0].owner, buf_compare);
/* First check for already-free block at the end we can subsume.
* Because the free list is sorted, if there is such a guy he is
* already our list.next */
_o2 = (lws_dsh_obj_t *)_o->list.next;
if (_o2 && (uint8_t *)_o + _o->asize == (uint8_t *)_o2) {
/*
* since we are freeing _obj, we can coalesce with a
* free area immediately ahead of it
*
* [ _o (being freed) ][ _o2 (free) ] -> [ larger _o ]
*/
_o->asize += _o2->asize;
/* guy next to us was absorbed into us */
lws_dll2_remove(&_o2->list);
}
/* Then check if we can be subsumed by a free block behind us.
* Because the free list is sorted, if there is such a guy he is
* already our list.prev */
_o2 = (lws_dsh_obj_t *)_o->list.prev;
if (_o2 && (uint8_t *)_o2 + _o2->asize == (uint8_t *)_o) {
/*
* since we are freeing obj, we can coalesce it with
* the previous free area that abuts it
*
* [ _o2 (free) ][ _o (being freed) ] -> [ larger _o2 ]
*/
_o2->asize += _o->asize;
/* we were absorbed! */
lws_dll2_remove(&_o->list);
}
// lws_dsh_describe(dsh, "post-alloc");
}
int
lws_dsh_get_head(lws_dsh_t *dsh, int kind, void **obj, size_t *size)
{
lws_dsh_obj_t *_obj;
if (!dsh)
return 1;
_obj = (lws_dsh_obj_t *)lws_dll2_get_head(&dsh->oha[kind + 1].owner);
if (!_obj) {
*obj = 0;
*size = 0;
return 1; /* there is no head */
}
*obj = (void *)(&_obj[1]);
*size = _obj->size;
/* anything coming out of here must be aligned */
assert(!(((unsigned long)(intptr_t)(*obj)) & (sizeof(int *) - 1)));
return 0; /* we returned the head */
}
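
/*
 * Typical drain pattern, sketched: consume whole objects of kind 0 in
 * FIFO order until that kind's list is empty.
 *
 *	void *obj;
 *	size_t size;
 *
 *	while (!lws_dsh_get_head(dsh, 0, &obj, &size)) {
 *		// ... use the size bytes at obj ...
 *		lws_dsh_free(&obj);
 *	}
 */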
#if defined(_DEBUG) && !defined(LWS_WITH_NO_LOGS)
static int
describe_kind(struct lws_dll2 *d, void *user)
{
lws_dsh_obj_t *obj = lws_container_of(d, lws_dsh_obj_t, list);
lwsl_notice(" _obj %p - %p, dsh %p, size %zu, asize %zu\n",
obj, (uint8_t *)obj + obj->asize,
obj->dsh, obj->size, obj->asize);
return 0;
}
void
lws_dsh_describe(lws_dsh_t *dsh, const char *desc)
{
int n = 0;
lwsl_notice("%s: dsh %p, bufsize %zu, kinds %d, lf: %zu, liu: %zu, %s\n",
__func__, dsh, dsh->buffer_size, dsh->count_kinds,
dsh->locally_free, dsh->locally_in_use, desc);
for (n = 0; n < dsh->count_kinds; n++) {
lwsl_notice(" Kind %d:\n", n);
lws_dll2_foreach_safe(&dsh->oha[n].owner, dsh, describe_kind);
}
}
#endif

File diff suppressed because it is too large


@ -0,0 +1,385 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
/*
 * Notice this returns the number of bytes consumed, or -1
*/
int
lws_issue_raw(struct lws *wsi, unsigned char *buf, size_t len)
{
struct lws_context *context = lws_get_context(wsi);
size_t real_len = len;
unsigned int n, m;
/*
* If you're looking to dump data being sent down the tls tunnel, see
* lws_ssl_capable_write() in lib/tls/mbedtls/mbedtls-ssl.c or
* lib/tls/openssl/openssl-ssl.c.
*
* There's also a corresponding lws_ssl_capable_read() in those files
* where you can enable a dump of decrypted data as soon as it was
* read.
*/
/* just ignore sends after we cleared the truncation buffer */
if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE &&
!lws_has_buffered_out(wsi)
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
&& !wsi->http.comp_ctx.may_have_more
#endif
)
return (int)len;
if (buf && lws_has_buffered_out(wsi)) {
lwsl_wsi_info(wsi, "** prot: %s, incr buflist_out by %lu",
wsi->a.protocol->name, (unsigned long)len);
/*
* already buflist ahead of this, add it on the tail of the
* buflist, then ignore it for now and act like we're flushing
* the buflist...
*/
if (lws_buflist_append_segment(&wsi->buflist_out, buf, len))
return -1;
buf = NULL;
len = 0;
}
if (wsi->buflist_out) {
/* we have to drain the earliest buflist_out stuff first */
len = lws_buflist_next_segment_len(&wsi->buflist_out, &buf);
real_len = len;
lwsl_wsi_debug(wsi, "draining %d", (int)len);
}
if (!len || !buf)
return 0;
if (!wsi->mux_substream && !lws_socket_is_valid(wsi->desc.sockfd))
lwsl_wsi_err(wsi, "invalid sock");
/* limit sending */
if (wsi->a.protocol->tx_packet_size)
n = (unsigned int)wsi->a.protocol->tx_packet_size;
else {
n = (unsigned int)wsi->a.protocol->rx_buffer_size;
if (!n)
n = context->pt_serv_buf_size;
}
n += LWS_PRE + 4;
if (n > len)
n = (unsigned int)len;
/* nope, send it on the socket directly */
if (lws_fi(&wsi->fic, "sendfail"))
m = (unsigned int)LWS_SSL_CAPABLE_ERROR;
else
m = (unsigned int)lws_ssl_capable_write(wsi, buf, n);
lwsl_wsi_info(wsi, "ssl_capable_write (%d) says %d", n, m);
/* something got written, it can have been truncated now */
wsi->could_have_pending = 1;
switch ((int)m) {
case LWS_SSL_CAPABLE_ERROR:
/* we're going to close, let close know sends aren't possible */
wsi->socket_is_permanently_unusable = 1;
return -1;
case LWS_SSL_CAPABLE_MORE_SERVICE:
/*
* nothing got sent, not fatal. Retry the whole thing later,
		 * i.e., treat it as a truncated send so it gets retried
*/
m = 0;
break;
}
if ((int)m < 0)
m = 0;
/*
* we were sending this from buflist_out? Then not sending everything
* is a small matter of advancing ourselves only by the amount we did
* send in the buflist.
*/
if (lws_has_buffered_out(wsi)) {
if (m) {
lwsl_wsi_info(wsi, "partial adv %d (vs %ld)",
m, (long)real_len);
lws_buflist_use_segment(&wsi->buflist_out, m);
}
if (!lws_has_buffered_out(wsi)) {
lwsl_wsi_info(wsi, "buflist_out flushed");
m = (unsigned int)real_len;
if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
lwsl_wsi_info(wsi, "*signalling to close now");
return -1; /* retry closing now */
}
if (wsi->close_when_buffered_out_drained) {
wsi->close_when_buffered_out_drained = 0;
return -1;
}
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
#if defined(LWS_WITH_SERVER)
if (wsi->http.deferred_transaction_completed) {
lwsl_wsi_notice(wsi, "partial completed, doing "
"deferred transaction completed");
wsi->http.deferred_transaction_completed = 0;
return lws_http_transaction_completed(wsi) ?
-1 : (int)real_len;
}
#endif
#endif
#if defined(LWS_ROLE_WS)
/* Since buflist_out flushed, we're not inside a frame any more */
if (wsi->ws)
wsi->ws->inside_frame = 0;
#endif
}
/* always callback on writeable */
lws_callback_on_writable(wsi);
return (int)m;
}
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
if (wsi->http.comp_ctx.may_have_more)
lws_callback_on_writable(wsi);
#endif
if (m == real_len)
/* what we just sent went out cleanly */
return (int)m;
/*
* We were not able to send everything... and we were not sending from
* an existing buflist_out. So we are starting a fresh buflist_out, by
* buffering the unsent remainder on it.
* (it will get first priority next time the socket is writable).
*/
lwsl_wsi_debug(wsi, "new partial sent %d from %lu total",
m, (unsigned long)real_len);
if (lws_buflist_append_segment(&wsi->buflist_out, buf + m,
real_len - m) < 0)
return -1;
#if defined(LWS_WITH_UDP)
if (lws_wsi_is_udp(wsi))
/* stash original destination for fulfilling UDP partials */
wsi->udp->sa46_pending = wsi->udp->sa46;
#endif
/* since something buffered, force it to get another chance to send */
lws_callback_on_writable(wsi);
return (int)real_len;
}
int
lws_write(struct lws *wsi, unsigned char *buf, size_t len,
enum lws_write_protocol wp)
{
int m;
if ((int)len < 0) {
lwsl_wsi_err(wsi, "suspicious len int %d, ulong %lu",
(int)len, (unsigned long)len);
return -1;
}
#ifdef LWS_WITH_ACCESS_LOG
wsi->http.access_log.sent += len;
#endif
assert(wsi->role_ops);
if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol))
m = lws_issue_raw(wsi, buf, len);
else
m = lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol).
write_role_protocol(wsi, buf, len, &wp);
#if defined(LWS_WITH_SYS_METRICS)
if (wsi->a.vhost)
lws_metric_event(wsi->a.vhost->mt_traffic_tx, (char)
(m < 0 ? METRES_NOGO : METRES_GO), len);
#endif
return m;
}
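
/*
 * Reminder sketch, illustrative: buffers handed to lws_write() must
 * reserve LWS_PRE bytes of headroom before the payload for protocol
 * framing; only the payload length is passed.
 *
 *	unsigned char buf[LWS_PRE + 128];
 *	int n = lws_snprintf((char *)buf + LWS_PRE, 128, "hello");
 *
 *	if (lws_write(wsi, buf + LWS_PRE, (size_t)n, LWS_WRITE_TEXT) < n)
 *		return -1;	// connection is unusable
 */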
int
lws_ssl_capable_read_no_ssl(struct lws *wsi, unsigned char *buf, size_t len)
{
int n = 0, en;
errno = 0;
#if defined(LWS_WITH_UDP)
if (lws_wsi_is_udp(wsi)) {
socklen_t slt = sizeof(wsi->udp->sa46);
n = (int)recvfrom(wsi->desc.sockfd, (char *)buf,
#if defined(WIN32)
(int)
#endif
len, 0,
sa46_sockaddr(&wsi->udp->sa46), &slt);
} else
#endif
n = (int)recv(wsi->desc.sockfd, (char *)buf,
#if defined(WIN32)
(int)
#endif
len, 0);
en = LWS_ERRNO;
if (n >= 0) {
if (!n && wsi->unix_skt)
goto do_err;
/*
* See https://libwebsockets.org/
* pipermail/libwebsockets/2019-March/007857.html
*/
if (!n && !wsi->unix_skt)
goto do_err;
#if defined(LWS_WITH_SYS_METRICS) && defined(LWS_WITH_SERVER)
if (wsi->a.vhost)
lws_metric_event(wsi->a.vhost->mt_traffic_rx,
METRES_GO /* rx */, (unsigned int)n);
#endif
return n;
}
if (en == LWS_EAGAIN ||
en == LWS_EWOULDBLOCK ||
en == LWS_EINTR)
return LWS_SSL_CAPABLE_MORE_SERVICE;
do_err:
#if defined(LWS_WITH_SYS_METRICS) && defined(LWS_WITH_SERVER)
if (wsi->a.vhost)
lws_metric_event(wsi->a.vhost->mt_traffic_rx, METRES_NOGO, 0u);
#endif
lwsl_wsi_info(wsi, "error on reading from skt : %d, errno %d", n, en);
return LWS_SSL_CAPABLE_ERROR;
}
int
lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, size_t len)
{
int n = 0;
#if defined(LWS_PLAT_OPTEE)
ssize_t send(int sockfd, const void *buf, size_t len, int flags);
#endif
#if defined(LWS_WITH_UDP)
if (lws_wsi_is_udp(wsi)) {
if (lws_fi(&wsi->fic, "udp_tx_loss")) {
/* pretend it was sent */
n = (int)(ssize_t)len;
goto post_send;
}
if (lws_has_buffered_out(wsi))
n = (int)sendto(wsi->desc.sockfd, (const char *)buf,
#if defined(WIN32)
(int)
#endif
len, 0, sa46_sockaddr(&wsi->udp->sa46_pending),
sa46_socklen(&wsi->udp->sa46_pending));
else
n = (int)sendto(wsi->desc.sockfd, (const char *)buf,
#if defined(WIN32)
(int)
#endif
len, 0, sa46_sockaddr(&wsi->udp->sa46),
sa46_socklen(&wsi->udp->sa46));
} else
#endif
if (wsi->role_ops->file_handle)
n = (int)write((int)(lws_intptr_t)wsi->desc.filefd, buf,
#if defined(WIN32)
(int)
#endif
len);
else
n = (int)send(wsi->desc.sockfd, (char *)buf,
#if defined(WIN32)
(int)
#endif
len, MSG_NOSIGNAL);
// lwsl_info("%s: sent len %d result %d", __func__, len, n);
#if defined(LWS_WITH_UDP)
post_send:
#endif
if (n >= 0)
return n;
if (LWS_ERRNO == LWS_EAGAIN ||
LWS_ERRNO == LWS_EWOULDBLOCK ||
LWS_ERRNO == LWS_EINTR) {
if (LWS_ERRNO == LWS_EWOULDBLOCK) {
lws_set_blocking_send(wsi);
}
return LWS_SSL_CAPABLE_MORE_SERVICE;
}
lwsl_wsi_debug(wsi, "ERROR writing len %d to skt fd %d err %d / errno %d",
(int)(ssize_t)len, wsi->desc.sockfd, n, LWS_ERRNO);
return LWS_SSL_CAPABLE_ERROR;
}
int
lws_ssl_pending_no_ssl(struct lws *wsi)
{
(void)wsi;
#if defined(LWS_PLAT_FREERTOS)
return 100;
#else
return 0;
#endif
}


@ -0,0 +1,663 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
int
_lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa)
{
#if !defined(LWS_WITH_EVENT_LIBS)
volatile struct lws_context_per_thread *vpt;
#endif
struct lws_context_per_thread *pt;
struct lws_context *context;
int ret = 0, pa_events;
struct lws_pollfd *pfd;
int sampled_tid, tid;
if (!wsi)
return 0;
assert(wsi->position_in_fds_table == LWS_NO_FDS_POS ||
wsi->position_in_fds_table >= 0);
if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
return 0;
if (((volatile struct lws *)wsi)->handling_pollout &&
!_and && _or == LWS_POLLOUT) {
/*
* Happening alongside service thread handling POLLOUT.
* The danger is when he is finished, he will disable POLLOUT,
* countermanding what we changed here.
*
* Instead of changing the fds, inform the service thread
* what happened, and ask it to leave POLLOUT active on exit
*/
((volatile struct lws *)wsi)->leave_pollout_active = 1;
/*
* by definition service thread is not in poll wait, so no need
* to cancel service
*/
lwsl_wsi_debug(wsi, "using leave_pollout_active");
return 0;
}
context = wsi->a.context;
pt = &context->pt[(int)wsi->tsi];
#if !defined(LWS_WITH_EVENT_LIBS)
/*
* This only applies when we use the default poll() event loop.
*
* BSD can revert pa->events at any time, when the kernel decides to
* exit from poll(). We can't protect against it using locking.
*
* Therefore we must check first if the service thread is in poll()
* wait; if so, we know we must be being called from a foreign thread,
* and we must keep a strictly ordered list of changes we made instead
* of trying to apply them, since when poll() exits, which may happen
* at any time it would revert our changes.
*
* The plat code will apply them when it leaves the poll() wait
* before doing anything else.
*/
vpt = (volatile struct lws_context_per_thread *)pt;
vpt->foreign_spinlock = 1;
lws_memory_barrier();
if (vpt->inside_poll) {
struct lws_foreign_thread_pollfd *ftp, **ftp1;
/*
* We are certainly a foreign thread trying to change events
* while the service thread is in the poll() wait.
*
* Create a list of changes to be applied after poll() exit,
* instead of trying to apply them now.
*/
ftp = lws_malloc(sizeof(*ftp), "ftp");
if (!ftp) {
vpt->foreign_spinlock = 0;
lws_memory_barrier();
ret = -1;
goto bail;
}
ftp->_and = _and;
ftp->_or = _or;
ftp->next = NULL;
lws_pt_lock(pt, __func__);
assert(wsi->position_in_fds_table < (int)pt->fds_count);
ftp->fd_index = wsi->position_in_fds_table;
/* place at END of list to maintain order */
ftp1 = (struct lws_foreign_thread_pollfd **)
&vpt->foreign_pfd_list;
while (*ftp1)
ftp1 = &((*ftp1)->next);
*ftp1 = ftp;
vpt->foreign_spinlock = 0;
lws_memory_barrier();
lws_pt_unlock(pt);
lws_cancel_service_pt(wsi);
return 0;
}
vpt->foreign_spinlock = 0;
lws_memory_barrier();
#endif
#if !defined(__linux__) && !defined(WIN32)
/* OSX couldn't see close on stdin pipe side otherwise; WSAPOLL
* blows up if we give it POLLHUP
*/
_or |= LWS_POLLHUP;
#endif
lws_pt_lock(pt, __func__);
assert(wsi->position_in_fds_table < (int)pt->fds_count);
pfd = &pt->fds[wsi->position_in_fds_table];
pa->prev_events = pfd->events;
pa->events = pfd->events = (short)((pfd->events & ~_and) | _or);
lws_pt_unlock(pt);
pa->fd = wsi->desc.sockfd;
lwsl_wsi_debug(wsi, "fd %d events %d -> %d", pa->fd, pa->prev_events,
pa->events);
if (wsi->mux_substream)
return 0;
#if defined(LWS_WITH_EXTERNAL_POLL)
if (wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi,
LWS_CALLBACK_CHANGE_MODE_POLL_FD,
wsi->user_space, (void *)pa, 0)) {
ret = -1;
goto bail;
}
#endif
if (context->event_loop_ops->io) {
if (_and & LWS_POLLIN)
context->event_loop_ops->io(wsi,
LWS_EV_STOP | LWS_EV_READ);
if (_or & LWS_POLLIN)
context->event_loop_ops->io(wsi,
LWS_EV_START | LWS_EV_READ);
if (_and & LWS_POLLOUT)
context->event_loop_ops->io(wsi,
LWS_EV_STOP | LWS_EV_WRITE);
if (_or & LWS_POLLOUT)
context->event_loop_ops->io(wsi,
LWS_EV_START | LWS_EV_WRITE);
}
/*
* if we changed something in this pollfd...
* ... and we're running in a different thread context
* than the service thread...
* ... and the service thread is waiting ...
* then cancel it to force a restart with our changed events
*/
pa_events = pa->prev_events != pa->events;
pfd->events = (short)pa->events;
if (pa_events) {
if (lws_plat_change_pollfd(context, wsi, pfd)) {
lwsl_wsi_info(wsi, "failed");
ret = -1;
goto bail;
}
sampled_tid = pt->service_tid;
if (sampled_tid && wsi->a.vhost) {
tid = wsi->a.vhost->protocols[0].callback(wsi,
LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
if (tid == -1) {
ret = -1;
goto bail;
}
if (tid != sampled_tid)
lws_cancel_service_pt(wsi);
}
}
bail:
return ret;
}
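
/*
 * Cross-thread sketch, illustrative: rather than calling into lws from a
 * foreign thread, the usual pattern is to wake the event loop with
 * lws_cancel_service() and apply pollfd / writability changes from the
 * service thread itself, eg, in LWS_CALLBACK_EVENT_WAIT_CANCELLED.
 * some_wsi is hypothetical.
 *
 *	// foreign thread
 *	lws_cancel_service(context);
 *
 *	// service thread, inside the protocol callback switch
 *	case LWS_CALLBACK_EVENT_WAIT_CANCELLED:
 *		lws_callback_on_writable(some_wsi);
 *		break;
 */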
#if defined(LWS_WITH_SERVER)
/*
* Enable or disable listen sockets on this pt globally...
* it's modulated according to the pt having space for a new accept.
*/
static void
lws_accept_modulation(struct lws_context *context,
struct lws_context_per_thread *pt, int allow)
{
struct lws_vhost *vh = context->vhost_list;
struct lws_pollargs pa1;
while (vh) {
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&vh->listen_wsi)) {
struct lws *wsi = lws_container_of(d, struct lws,
listen_list);
_lws_change_pollfd(wsi, allow ? 0 : LWS_POLLIN,
allow ? LWS_POLLIN : 0, &pa1);
} lws_end_foreach_dll(d);
vh = vh->vhost_next;
}
}
#endif
#if _LWS_ENABLED_LOGS & LLL_WARN
void
__dump_fds(struct lws_context_per_thread *pt, const char *s)
{
unsigned int n;
lwsl_cx_warn(pt->context, "fds_count %u, %s", pt->fds_count, s);
for (n = 0; n < pt->fds_count; n++) {
struct lws *wsi = wsi_from_fd(pt->context, pt->fds[n].fd);
lwsl_cx_warn(pt->context, " %d: fd %d, wsi %s, pos_in_fds: %d",
n + 1, pt->fds[n].fd, lws_wsi_tag(wsi),
wsi ? wsi->position_in_fds_table : -1);
}
}
#else
#define __dump_fds(x, y)
#endif
int
__insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
{
#if defined(LWS_WITH_EXTERNAL_POLL)
struct lws_pollargs pa = { wsi->desc.sockfd, LWS_POLLIN, 0 };
#endif
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
int ret = 0;
// __dump_fds(pt, "pre insert");
lws_pt_assert_lock_held(pt);
lwsl_wsi_debug(wsi, "tsi=%d, sock=%d, pos-in-fds=%d",
wsi->tsi, wsi->desc.sockfd, pt->fds_count);
if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) {
lwsl_cx_err(context, "Too many fds (%d vs %d)", context->max_fds,
context->fd_limit_per_thread);
return 1;
}
#if !defined(_WIN32)
if (!wsi->a.context->max_fds_unrelated_to_ulimit &&
wsi->desc.sockfd - lws_plat_socket_offset() >= (int)context->max_fds) {
lwsl_cx_err(context, "Socket fd %d is too high (%d) offset %d",
wsi->desc.sockfd, context->max_fds,
lws_plat_socket_offset());
return 1;
}
#endif
assert(wsi);
#if defined(LWS_WITH_NETLINK)
assert(wsi->event_pipe || wsi->a.vhost || wsi == pt->context->netlink);
#else
assert(wsi->event_pipe || wsi->a.vhost);
#endif
assert(lws_socket_is_valid(wsi->desc.sockfd));
#if defined(LWS_WITH_EXTERNAL_POLL)
if (wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
wsi->user_space, (void *) &pa, 1))
return -1;
#endif
if (insert_wsi(context, wsi))
return -1;
pt->count_conns++;
wsi->position_in_fds_table = (int)pt->fds_count;
pt->fds[wsi->position_in_fds_table].fd = wsi->desc.sockfd;
pt->fds[wsi->position_in_fds_table].events = LWS_POLLIN;
#if defined(LWS_WITH_EXTERNAL_POLL)
pa.events = pt->fds[pt->fds_count].events;
#endif
lws_plat_insert_socket_into_fds(context, wsi);
#if defined(LWS_WITH_EXTERNAL_POLL)
/* external POLL support via protocol 0 */
if (wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD,
wsi->user_space, (void *) &pa, 0))
ret = -1;
#endif
#if defined(LWS_WITH_SERVER)
/* if no more room, defeat accepts on this service thread */
if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1)
lws_accept_modulation(context, pt, 0);
#endif
#if defined(LWS_WITH_EXTERNAL_POLL)
if (wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
wsi->user_space, (void *)&pa, 1))
ret = -1;
#endif
// __dump_fds(pt, "post insert");
return ret;
}
/* requires pt lock */
int
__remove_wsi_socket_from_fds(struct lws *wsi)
{
struct lws_context *context = wsi->a.context;
#if defined(LWS_WITH_EXTERNAL_POLL)
struct lws_pollargs pa = { wsi->desc.sockfd, 0, 0 };
#endif
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
struct lws *end_wsi;
int v, m, ret = 0;
lws_pt_assert_lock_held(pt);
// __dump_fds(pt, "pre remove");
#if !defined(_WIN32)
if (!wsi->a.context->max_fds_unrelated_to_ulimit &&
wsi->desc.sockfd - lws_plat_socket_offset() > (int)context->max_fds) {
lwsl_wsi_err(wsi, "fd %d too high (%d)",
wsi->desc.sockfd,
context->max_fds);
return 1;
}
#endif
#if defined(LWS_WITH_EXTERNAL_POLL)
if (wsi->a.vhost && wsi->a.vhost->protocols &&
wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
wsi->user_space, (void *)&pa, 1))
return -1;
#endif
__lws_same_vh_protocol_remove(wsi);
/* the guy who is to be deleted's slot index in pt->fds */
m = wsi->position_in_fds_table;
/* these are the only valid possibilities for position_in_fds_table */
assert(m == LWS_NO_FDS_POS || (m >= 0 && (unsigned int)m < pt->fds_count));
if (context->event_loop_ops->io)
context->event_loop_ops->io(wsi, LWS_EV_STOP | LWS_EV_READ |
LWS_EV_WRITE);
/*
lwsl_notice("%s: wsi=%s, skt=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
__func__, lws_wsi_tag(wsi), wsi->desc.sockfd, wsi->position_in_fds_table,
pt->fds_count, pt->fds[pt->fds_count - 1].fd); */
if (m != LWS_NO_FDS_POS) {
char fixup = 0;
assert(pt->fds_count && (unsigned int)m != pt->fds_count);
/* deletion guy's lws_lookup entry needs nuking */
delete_from_fd(context, wsi->desc.sockfd);
if ((unsigned int)m != pt->fds_count - 1) {
/* have the last guy take up the now vacant slot */
pt->fds[m] = pt->fds[pt->fds_count - 1];
fixup = 1;
}
pt->fds[pt->fds_count - 1].fd = -1;
/* this decrements pt->fds_count */
lws_plat_delete_socket_from_fds(context, wsi, m);
pt->count_conns--;
if (fixup) {
v = (int) pt->fds[m].fd;
/* old end guy's "position in fds table" is now the
* deletion guy's old one */
end_wsi = wsi_from_fd(context, v);
if (!end_wsi) {
lwsl_wsi_err(wsi, "no wsi for fd %d pos %d, "
"pt->fds_count=%d",
(int)pt->fds[m].fd, m,
pt->fds_count);
// assert(0);
} else
end_wsi->position_in_fds_table = m;
}
/* removed wsi has no position any more */
wsi->position_in_fds_table = LWS_NO_FDS_POS;
#if defined(LWS_WITH_EXTERNAL_POLL)
/* remove also from external POLL support via protocol 0 */
if (lws_socket_is_valid(wsi->desc.sockfd) && wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi,
LWS_CALLBACK_DEL_POLL_FD,
wsi->user_space,
(void *) &pa, 0))
ret = -1;
#endif
}
#if defined(LWS_WITH_SERVER)
if (!context->being_destroyed &&
/* if this made some room, accept connects on this thread */
(unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
lws_accept_modulation(context, pt, 1);
#endif
#if defined(LWS_WITH_EXTERNAL_POLL)
if (wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
wsi->user_space, (void *) &pa, 1))
ret = -1;
#endif
// __dump_fds(pt, "post remove");
return ret;
}
int
__lws_change_pollfd(struct lws *wsi, int _and, int _or)
{
struct lws_context *context;
struct lws_pollargs pa;
int ret = 0;
if (!wsi || (!wsi->a.protocol && !wsi->event_pipe) ||
wsi->position_in_fds_table == LWS_NO_FDS_POS)
return 0;
context = lws_get_context(wsi);
if (!context)
return 1;
#if defined(LWS_WITH_EXTERNAL_POLL)
if (wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
wsi->user_space, (void *) &pa, 0))
return -1;
#endif
ret = _lws_change_pollfd(wsi, _and, _or, &pa);
#if defined(LWS_WITH_EXTERNAL_POLL)
if (wsi->a.vhost &&
wsi->a.vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
wsi->user_space, (void *) &pa, 0))
ret = -1;
#endif
return ret;
}
int
lws_change_pollfd(struct lws *wsi, int _and, int _or)
{
struct lws_context_per_thread *pt;
int ret = 0;
pt = &wsi->a.context->pt[(int)wsi->tsi];
lws_pt_lock(pt, __func__);
ret = __lws_change_pollfd(wsi, _and, _or);
lws_pt_unlock(pt);
return ret;
}
int
lws_callback_on_writable(struct lws *wsi)
{
struct lws *w = wsi;
if (lwsi_state(wsi) == LRS_SHUTDOWN)
return 0;
if (wsi->socket_is_permanently_unusable)
return 0;
if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_callback_on_writable)) {
int q = lws_rops_func_fidx(wsi->role_ops,
LWS_ROPS_callback_on_writable).
callback_on_writable(wsi);
if (q)
return 1;
w = lws_get_network_wsi(wsi);
} else
if (w->position_in_fds_table == LWS_NO_FDS_POS) {
lwsl_wsi_debug(wsi, "failed to find socket %d",
wsi->desc.sockfd);
return -1;
}
if (__lws_change_pollfd(w, 0, LWS_POLLOUT))
return -1;
return 1;
}
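
/*
 * Typical pattern, sketched: ask for a writeable callback instead of
 * writing immediately, then do the lws_write() when the matching
 * WRITEABLE reason arrives (a ws server connection is assumed here).
 *
 *	lws_callback_on_writable(wsi);
 *
 *	// later, inside the protocol callback switch:
 *	case LWS_CALLBACK_SERVER_WRITEABLE:
 *		if (lws_write(wsi, &buf[LWS_PRE], len, LWS_WRITE_TEXT) < 0)
 *			return -1;
 *		break;
 */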
/*
* stitch protocol choice into the vh protocol linked list
* We always insert ourselves at the start of the list
*
* X <-> B
* X <-> pAn <-> pB
*
 * Illegal to attach more than once without a detach in between
*/
void
lws_same_vh_protocol_insert(struct lws *wsi, int n)
{
lws_context_lock(wsi->a.context, __func__);
lws_vhost_lock(wsi->a.vhost);
lws_dll2_remove(&wsi->same_vh_protocol);
lws_dll2_add_head(&wsi->same_vh_protocol,
&wsi->a.vhost->same_vh_protocol_owner[n]);
wsi->bound_vhost_index = (uint8_t)n;
lws_vhost_unlock(wsi->a.vhost);
lws_context_unlock(wsi->a.context);
}
void
__lws_same_vh_protocol_remove(struct lws *wsi)
{
if (wsi->a.vhost && wsi->a.vhost->same_vh_protocol_owner)
lws_dll2_remove(&wsi->same_vh_protocol);
}
void
lws_same_vh_protocol_remove(struct lws *wsi)
{
if (!wsi->a.vhost)
return;
lws_context_lock(wsi->a.context, __func__);
lws_vhost_lock(wsi->a.vhost);
__lws_same_vh_protocol_remove(wsi);
lws_vhost_unlock(wsi->a.vhost);
lws_context_unlock(wsi->a.context);
}
int
lws_callback_on_writable_all_protocol_vhost(const struct lws_vhost *vhost,
const struct lws_protocols *protocol)
{
struct lws *wsi;
int n;
if (protocol < vhost->protocols ||
protocol >= (vhost->protocols + vhost->count_protocols)) {
lwsl_vhost_err((struct lws_vhost *)vhost,
"protocol %p is not from vhost %p (%p - %p)",
protocol, vhost->protocols, vhost,
(vhost->protocols + vhost->count_protocols));
return -1;
}
n = (int)(protocol - vhost->protocols);
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
lws_dll2_get_head(&vhost->same_vh_protocol_owner[n])) {
wsi = lws_container_of(d, struct lws, same_vh_protocol);
assert(wsi->a.protocol &&
wsi->a.protocol->callback == protocol->callback &&
!strcmp(protocol->name, wsi->a.protocol->name));
lws_callback_on_writable(wsi);
} lws_end_foreach_dll_safe(d, d1);
return 0;
}
int
lws_callback_on_writable_all_protocol(const struct lws_context *context,
const struct lws_protocols *protocol)
{
struct lws_vhost *vhost;
int n;
if (!context)
return 0;
vhost = context->vhost_list;
while (vhost) {
for (n = 0; n < vhost->count_protocols; n++)
if (protocol->callback ==
vhost->protocols[n].callback &&
!strcmp(protocol->name, vhost->protocols[n].name))
break;
if (n != vhost->count_protocols)
lws_callback_on_writable_all_protocol_vhost(
vhost, &vhost->protocols[n]);
vhost = vhost->vhost_next;
}
return 0;
}

File diff suppressed because it is too large


@ -0,0 +1,406 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* We mainly focus on the routing table / gateways because those are the
* elements that decide if we can get on to the internet or not.
*
* Everything here is _ because the caller needs to hold the pt lock in order
* to access the pt routing table safely
*/
#include <private-lib-core.h>
#if defined(_DEBUG)
void
_lws_routing_entry_dump(struct lws_context *cx, lws_route_t *rou)
{
char sa[48], fin[192], *end = &fin[sizeof(fin)];
char *it = fin;
int n;
fin[0] = '\0';
if (rou->dest.sa4.sin_family) {
lws_sa46_write_numeric_address(&rou->dest, sa, sizeof(sa));
n = lws_snprintf(it, lws_ptr_diff_size_t(end, it),
"dst: %s/%d, ", sa, rou->dest_len);
it = it + n;
}
if (rou->src.sa4.sin_family) {
lws_sa46_write_numeric_address(&rou->src, sa, sizeof(sa));
n = lws_snprintf(it, lws_ptr_diff_size_t(end, it),
"src: %s/%d, ", sa, rou->src_len);
it = it + n;
}
if (rou->gateway.sa4.sin_family) {
lws_sa46_write_numeric_address(&rou->gateway, sa, sizeof(sa));
n = lws_snprintf(it, lws_ptr_diff_size_t(end, it),
"gw: %s, ", sa);
it = it + n;
}
lwsl_cx_info(cx, " %s ifidx: %d, pri: %d, proto: %d\n", fin,
rou->if_idx, rou->priority, rou->proto);
}
void
_lws_routing_table_dump(struct lws_context *cx)
{
lwsl_cx_info(cx, "\n");
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&cx->routing_table)) {
lws_route_t *rou = lws_container_of(d, lws_route_t, list);
_lws_routing_entry_dump(cx, rou);
} lws_end_foreach_dll(d);
}
#endif
/*
* We will provide a "fingerprint ordinal" as the route uidx that is unique in
* the routing table. Wsi that connect mark themselves with the uidx of the
* route they are estimated to be using.
*
* This lets us detect things like gw changes, eg when switching from wlan to
* lte there may still be a valid gateway route, but all existing tcp
* connections previously using the wlan gateway will be broken, since their
* connections are from its gateway to the peer.
*
* So when we take down a route, we take care to look for any wsi that was
* estimated to be using that route, eg, for gateway, and close those wsi.
*
* It's OK if the route uidx wraps, we explicitly confirm nobody else is using
* the uidx before assigning one to a new route.
*
* We won't use uidx 0, so it can be understood to mean the uidx was never set.
*/
lws_route_uidx_t
_lws_route_get_uidx(struct lws_context *cx)
{
lws_route_uidx_t ou;
if (!cx->route_uidx)
cx->route_uidx++;
ou = cx->route_uidx;
do {
uint8_t again = 0;
/* Anybody in the table already uses the pt's next uidx? */
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&cx->routing_table)) {
lws_route_t *rou = lws_container_of(d, lws_route_t, list);
if (rou->uidx == cx->route_uidx) {
/* if so, bump and restart the check */
cx->route_uidx++;
if (!cx->route_uidx)
cx->route_uidx++;
if (cx->route_uidx == ou) {
assert(0); /* we have filled up the 8-bit uidx space? */
return 0;
}
again = 1;
break;
}
} lws_end_foreach_dll(d);
if (!again)
return cx->route_uidx++;
} while (1);
}
lws_route_t *
_lws_route_remove(struct lws_context_per_thread *pt, lws_route_t *robj, int flags)
{
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
lws_dll2_get_head(&pt->context->routing_table)) {
lws_route_t *rou = lws_container_of(d, lws_route_t, list);
if ((!(flags & LRR_MATCH_SRC) || !lws_sa46_compare_ads(&robj->src, &rou->src)) &&
(!(flags & LRR_MATCH_DST) || !lws_sa46_compare_ads(&robj->dest, &rou->dest)) &&
(!robj->gateway.sa4.sin_family ||
!lws_sa46_compare_ads(&robj->gateway, &rou->gateway)) &&
robj->dest_len <= rou->dest_len &&
robj->if_idx == rou->if_idx &&
((flags & LRR_IGNORE_PRI) ||
robj->priority == rou->priority)
) {
lwsl_cx_info(pt->context, "deleting route");
_lws_route_pt_close_route_users(pt, robj->uidx);
lws_dll2_remove(&rou->list);
lws_free(rou);
}
} lws_end_foreach_dll_safe(d, d1);
return NULL;
}
void
_lws_route_table_empty(struct lws_context_per_thread *pt)
{
if (!pt->context)
return;
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
lws_dll2_get_head(&pt->context->routing_table)) {
lws_route_t *rou = lws_container_of(d, lws_route_t, list);
lws_dll2_remove(&rou->list);
lws_free(rou);
} lws_end_foreach_dll_safe(d, d1);
}
void
_lws_route_table_ifdown(struct lws_context_per_thread *pt, int idx)
{
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
lws_dll2_get_head(&pt->context->routing_table)) {
lws_route_t *rou = lws_container_of(d, lws_route_t, list);
if (rou->if_idx == idx) {
lws_dll2_remove(&rou->list);
lws_free(rou);
}
} lws_end_foreach_dll_safe(d, d1);
}
lws_route_t *
_lws_route_est_outgoing(struct lws_context_per_thread *pt,
const lws_sockaddr46 *dest)
{
lws_route_t *best_gw = NULL;
int best_gw_priority = INT_MAX;
if (!dest->sa4.sin_family) {
lwsl_cx_notice(pt->context, "dest has 0 AF");
/* leave it alone */
return NULL;
}
/*
* Given the dest address and the current routing table, select the
* route we think it would go out on... if we find a matching network
* route, just return that, otherwise find the "best" gateway by
* looking at the priority of them.
*/
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&pt->context->routing_table)) {
lws_route_t *rou = lws_container_of(d, lws_route_t, list);
// _lws_routing_entry_dump(rou);
if (rou->dest.sa4.sin_family &&
!lws_sa46_on_net(dest, &rou->dest, rou->dest_len))
/*
* Yes, he has a matching network route, it beats out
* any gateway route. This is like finding a route for
* 192.168.0.0/24 when dest is 192.168.0.1.
*/
return rou;
lwsl_cx_debug(pt->context, "dest af %d, rou gw af %d, pri %d",
dest->sa4.sin_family, rou->gateway.sa4.sin_family,
rou->priority);
if (rou->gateway.sa4.sin_family &&
/*
* dest gw
* 4 4 OK
* 4 6 OK with ::ffff:x:x
* 6 4 not supported directly
* 6 6 OK
*/
(dest->sa4.sin_family == rou->gateway.sa4.sin_family ||
(dest->sa4.sin_family == AF_INET &&
rou->gateway.sa4.sin_family == AF_INET6)) &&
rou->priority < best_gw_priority) {
lwsl_cx_info(pt->context, "gw hit");
best_gw_priority = rou->priority;
best_gw = rou;
}
} lws_end_foreach_dll(d);
/*
* Either best_gw is the best gw route and we set *best_gw_priority to
* the best one's priority, or we're returning NULL as no network or
* gw route for dest.
*/
lwsl_cx_info(pt->context, "returning %p", best_gw);
return best_gw;
}
/*
* Determine if the source still exists
*/
lws_route_t *
_lws_route_find_source(struct lws_context_per_thread *pt,
const lws_sockaddr46 *src)
{
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&pt->context->routing_table)) {
lws_route_t *rou = lws_container_of(d, lws_route_t, list);
// _lws_routing_entry_dump(rou);
if (rou->src.sa4.sin_family &&
!lws_sa46_compare_ads(src, &rou->src))
/*
* Source route still exists
*/
return rou;
} lws_end_foreach_dll(d);
return NULL;
}
int
_lws_route_check_wsi(struct lws *wsi)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
char buf[72];
if (!wsi->sa46_peer.sa4.sin_family ||
#if defined(LWS_WITH_UNIX_SOCK)
wsi->unix_skt ||
wsi->sa46_peer.sa4.sin_family == AF_UNIX ||
#endif
wsi->desc.sockfd == LWS_SOCK_INVALID)
/* not a socket, cannot judge by route, or not connected,
* leave it alone */
return 0; /* OK */
/* the route to the peer is still workable? */
if (!_lws_route_est_outgoing(pt, &wsi->sa46_peer)) {
/* no way to talk to the peer */
lwsl_wsi_notice(wsi, "dest route gone");
return 1;
}
/* the source address is still workable? */
lws_sa46_write_numeric_address(&wsi->sa46_local,
buf, sizeof(buf));
//lwsl_notice("%s: %s sa46_local %s fam %d\n", __func__, wsi->lc.gutag,
// buf, wsi->sa46_local.sa4.sin_family);
if (wsi->sa46_local.sa4.sin_family &&
!_lws_route_find_source(pt, &wsi->sa46_local)) {
lws_sa46_write_numeric_address(&wsi->sa46_local,
buf, sizeof(buf));
lwsl_wsi_notice(wsi, "source %s gone", buf);
return 1;
}
lwsl_wsi_debug(wsi, "source + dest OK");
return 0;
}
int
_lws_route_pt_close_unroutable(struct lws_context_per_thread *pt)
{
struct lws *wsi;
unsigned int n;
if (!pt->context->nl_initial_done
#if defined(LWS_WITH_SYS_STATE)
||
pt->context->mgr_system.state < LWS_SYSTATE_IFACE_COLDPLUG
#endif
)
return 0;
lwsl_cx_debug(pt->context, "in");
#if defined(_DEBUG)
_lws_routing_table_dump(pt->context);
#endif
for (n = 0; n < pt->fds_count; n++) {
wsi = wsi_from_fd(pt->context, pt->fds[n].fd);
if (!wsi)
continue;
if (_lws_route_check_wsi(wsi)) {
lwsl_wsi_info(wsi, "culling wsi");
lws_wsi_close(wsi, LWS_TO_KILL_ASYNC);
}
}
return 0;
}
int
_lws_route_pt_close_route_users(struct lws_context_per_thread *pt,
lws_route_uidx_t uidx)
{
struct lws *wsi;
unsigned int n;
if (!uidx)
return 0;
lwsl_cx_info(pt->context, "closing users of route %d", uidx);
for (n = 0; n < pt->fds_count; n++) {
wsi = wsi_from_fd(pt->context, pt->fds[n].fd);
if (!wsi)
continue;
if (wsi->desc.sockfd != LWS_SOCK_INVALID &&
#if defined(LWS_WITH_UNIX_SOCK)
!wsi->unix_skt &&
wsi->sa46_peer.sa4.sin_family != AF_UNIX &&
#endif
wsi->sa46_peer.sa4.sin_family &&
wsi->peer_route_uidx == uidx) {
lwsl_wsi_notice(wsi, "culling wsi");
lws_wsi_close(wsi, LWS_TO_KILL_ASYNC);
}
}
return 0;
}

View File

@ -0,0 +1,879 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
#if defined(_DEBUG)
void
lws_service_assert_loop_thread(struct lws_context *cx, int tsi)
{
if (!cx->event_loop_ops->foreign_thread)
/* we can't judge it */
return;
if (!cx->event_loop_ops->foreign_thread(cx, tsi))
/* OK */
return;
/*
* Lws apis are NOT THREADSAFE with the sole exception of
* lws_cancel_service(). If you look at the assert backtrace, you
* should see you're illegally calling an lws api from another thread.
*/
assert(0);
}
#endif
int
lws_callback_as_writeable(struct lws *wsi)
{
int n, m;
n = wsi->role_ops->writeable_cb[lwsi_role_server(wsi)];
m = user_callback_handle_rxflow(wsi->a.protocol->callback,
wsi, (enum lws_callback_reasons) n,
wsi->user_space, NULL, 0);
return m;
}
int
lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd)
{
volatile struct lws *vwsi = (volatile struct lws *)wsi;
int n;
if (wsi->socket_is_permanently_unusable)
return 0;
vwsi->leave_pollout_active = 0;
vwsi->handling_pollout = 1;
/*
* if another thread wants POLLOUT on us, from here on while
* handling_pollout is set, he will only set leave_pollout_active.
* If we are going to disable POLLOUT, we will check that first.
*/
wsi->could_have_pending = 0; /* clear back-to-back write detection */
/*
* the user callback is actually the lowest priority to get these
* notifications, since other pending things cannot be reordered
*
* Priority 1: pending truncated sends are incomplete ws fragments
* If anything else sent first the protocol would be
* corrupted.
*
* These are post- any compression transform
*/
if (lws_has_buffered_out(wsi)) {
if (lws_issue_raw(wsi, NULL, 0) < 0) {
lwsl_wsi_info(wsi, "signalling to close");
goto bail_die;
}
/* leave POLLOUT active either way */
goto bail_ok;
} else
if (lwsi_state(wsi) == LRS_FLUSHING_BEFORE_CLOSE) {
wsi->socket_is_permanently_unusable = 1;
goto bail_die; /* retry closing now */
}
/* Priority 2: pre- compression transform */
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
if (wsi->http.comp_ctx.buflist_comp ||
wsi->http.comp_ctx.may_have_more) {
enum lws_write_protocol wp = LWS_WRITE_HTTP;
lwsl_wsi_info(wsi, "compl comp partial (buflist_comp %p, may %d)",
wsi->http.comp_ctx.buflist_comp,
wsi->http.comp_ctx.may_have_more);
if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol) &&
lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_write_role_protocol).
write_role_protocol(wsi, NULL, 0, &wp) < 0) {
lwsl_wsi_info(wsi, "signalling to close");
goto bail_die;
}
lws_callback_on_writable(wsi);
goto bail_ok;
}
#endif
#ifdef LWS_WITH_CGI
/*
* A cgi connection's wire protocol remains h1 or h2. He is just
* getting his data from his child cgis.
*/
if (wsi->http.cgi) {
/* also one shot */
if (pollfd)
if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
lwsl_wsi_info(wsi, "failed at set pollfd");
return 1;
}
goto user_service_go_again;
}
#endif
/* if we got here, we should have wire protocol ops set on the wsi */
assert(wsi->role_ops);
if (!lws_rops_fidx(wsi->role_ops, LWS_ROPS_handle_POLLOUT))
goto bail_ok;
n = lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_handle_POLLOUT).
handle_POLLOUT(wsi);
switch (n) {
case LWS_HP_RET_BAIL_OK:
goto bail_ok;
case LWS_HP_RET_BAIL_DIE:
goto bail_die;
case LWS_HP_RET_DROP_POLLOUT:
case LWS_HP_RET_USER_SERVICE:
break;
default:
assert(0);
}
/* one shot */
if (pollfd) {
int eff = vwsi->leave_pollout_active;
if (!eff) {
if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) {
lwsl_wsi_info(wsi, "failed at set pollfd");
goto bail_die;
}
}
vwsi->handling_pollout = 0;
/* cannot get leave_pollout_active set after the above */
if (!eff && wsi->leave_pollout_active) {
/*
* got set in between sampling eff and clearing
* handling_pollout, force POLLOUT on
*/
lwsl_wsi_debug(wsi, "leave_pollout_active");
if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) {
lwsl_wsi_info(wsi, "failed at set pollfd");
goto bail_die;
}
}
vwsi->leave_pollout_active = 0;
}
if (lwsi_role_client(wsi) && !wsi->hdr_parsing_completed &&
lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS &&
lwsi_state(wsi) != LRS_ISSUE_HTTP_BODY)
goto bail_ok;
if (n == LWS_HP_RET_DROP_POLLOUT)
goto bail_ok;
#ifdef LWS_WITH_CGI
user_service_go_again:
#endif
if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_perform_user_POLLOUT)) {
if (lws_rops_func_fidx(wsi->role_ops,
LWS_ROPS_perform_user_POLLOUT).
perform_user_POLLOUT(wsi) == -1)
goto bail_die;
else
goto bail_ok;
}
lwsl_wsi_debug(wsi, "non mux: wsistate 0x%lx, ops %s",
(unsigned long)wsi->wsistate, wsi->role_ops->name);
vwsi = (volatile struct lws *)wsi;
vwsi->leave_pollout_active = 0;
n = lws_callback_as_writeable(wsi);
vwsi->handling_pollout = 0;
if (vwsi->leave_pollout_active)
if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
goto bail_die;
return n;
/*
* since these don't disable the POLLOUT, they are always doing the
* right thing for leave_pollout_active whether it was set or not.
*/
bail_ok:
vwsi->handling_pollout = 0;
vwsi->leave_pollout_active = 0;
return 0;
bail_die:
vwsi->handling_pollout = 0;
vwsi->leave_pollout_active = 0;
return -1;
}
int
lws_rxflow_cache(struct lws *wsi, unsigned char *buf, size_t n, size_t len)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
uint8_t *buffered;
size_t blen;
int ret = LWSRXFC_CACHED, m;
/* his RX is flowcontrolled, don't send remaining now */
blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered);
if (blen) {
if (buf >= buffered && buf + len <= buffered + blen &&
blen != (size_t)len) {
/*
* rxflow while we were spilling prev rxflow
*
* len indicates how much was unused, then... so trim
* the head buflist to match that situation
*/
lws_buflist_use_segment(&wsi->buflist, blen - len);
lwsl_wsi_debug(wsi, "trim existing rxflow %d -> %d",
(int)blen, (int)len);
return LWSRXFC_TRIMMED;
}
ret = LWSRXFC_ADDITIONAL;
}
/* a new rxflow, buffer it and warn caller */
lwsl_wsi_debug(wsi, "rxflow append %d", (int)(len - n));
m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n);
if (m < 0)
return LWSRXFC_ERROR;
if (m) {
lwsl_wsi_debug(wsi, "added to rxflow list");
if (lws_dll2_is_detached(&wsi->dll_buflist))
lws_dll2_add_head(&wsi->dll_buflist, &pt->dll_buflist_owner);
}
return ret;
}
/* this is used by the platform service code to stop us waiting for network
* activity in poll() when we have something that already needs service
*/
int
lws_service_adjust_timeout(struct lws_context *context, int timeout_ms, int tsi)
{
struct lws_context_per_thread *pt;
if (!context)
return 1;
if (!context->protocol_init_done)
if (lws_protocol_init(context))
return 1;
#if defined(LWS_WITH_SYS_SMD)
if (!tsi && lws_smd_message_pending(context)) {
lws_smd_msg_distribute(context);
if (lws_smd_message_pending(context))
return 0;
}
#endif
pt = &context->pt[tsi];
if (pt->evlib_pt) {
lws_usec_t u;
lws_pt_lock(pt, __func__); /* -------------- pt { */
u = __lws_sul_service_ripe(pt->pt_sul_owner,
LWS_COUNT_PT_SUL_OWNERS, lws_now_usecs());
/*
* We will come back with 0 if nothing to do at the moment, or
* the number of us until something to do
*/
if (u && u < (lws_usec_t)timeout_ms * (lws_usec_t)1000)
timeout_ms = (int)(u / 1000);
lws_pt_unlock(pt);
}
/*
* Figure out if we really want to wait in poll()... we only need to
* wait if really nothing already to do and we have to wait for
* something from network
*/
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
/* 1) if we know we are draining rx ext, do not wait in poll */
if (pt->ws.rx_draining_ext_list)
return 0;
#endif
#if defined(LWS_WITH_TLS)
/* 2) if we know we have non-network pending data,
* do not wait in poll */
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered &&
pt->context->tls_ops->fake_POLLIN_for_buffered(pt))
return 0;
#endif
/*
* 3) If there is any wsi with rxflow buffered and in a state to process
* it, we should not wait in poll
*/
lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);
if (!lws_is_flowcontrolled(wsi) &&
lwsi_state(wsi) != LRS_DEFERRING_ACTION)
return 0;
/*
* 4) If any guys with http compression to spill, we shouldn't wait in
* poll but hurry along and service them
*/
} lws_end_foreach_dll(d);
return timeout_ms;
}
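/*
 * Usage sketch (illustrative, not part of lws): how an external poll() loop
 * is expected to consume the adjusted timeout.  It assumes the app mirrors
 * lws' fds into fds / nfds via the LWS_CALLBACK_ADD_POLL_FD /
 * LWS_CALLBACK_DEL_POLL_FD / LWS_CALLBACK_CHANGE_MODE_POLL_FD callbacks,
 * on a platform where lws_pollfd is struct pollfd.
 */
#if 0
static volatile int interrupted; /* set from, eg, a signal handler */

static int
external_poll_loop(struct lws_context *context, struct pollfd *fds,
		   nfds_t nfds)
{
	while (!interrupted) {
		/* 0 means "don't sleep in poll()", lws already has work */
		int i, n, ms = lws_service_adjust_timeout(context, 1000, 0);

		n = poll(fds, nfds, ms);
		if (n < 0)
			return -1;

		for (i = 0; n > 0 && i < (int)nfds; i++)
			if (fds[i].revents) {
				if (lws_service_fd(context, &fds[i]) < 0)
					return -1;
				n--;
			}
	}

	return 0;
}
#endif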
/*
* POLLIN said there is something... we must read it, and either use it; or
* if other material is already in the buflist, append it and return the
* buflist head material.
*/
int
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
struct lws_tokens *ebuf, char fr, const char *hint)
{
int n, e, bns;
uint8_t *ep, *b;
// lwsl_debug("%s: %s: %s: prior %d\n", __func__, lws_wsi_tag(wsi), hint, prior);
// lws_buflist_describe(&wsi->buflist, wsi, __func__);
(void)hint;
if (!ebuf->token)
ebuf->token = pt->serv_buf + LWS_PRE;
if (!ebuf->len ||
(unsigned int)ebuf->len > wsi->a.context->pt_serv_buf_size - LWS_PRE)
ebuf->len = (int)(wsi->a.context->pt_serv_buf_size - LWS_PRE);
e = ebuf->len;
ep = ebuf->token;
/* h2 or muxed stream... must force the read due to HOL blocking */
if (wsi->mux_substream)
fr = 1;
/* there's something on the buflist? */
bns = (int)lws_buflist_next_segment_len(&wsi->buflist, &ebuf->token);
b = ebuf->token;
if (!fr && bns)
goto buflist_material;
/* we're going to read something */
ebuf->token = ep;
ebuf->len = n = lws_ssl_capable_read(wsi, ep, (size_t)e);
lwsl_wsi_debug(wsi, "%s: ssl_capable_read %d", hint, ebuf->len);
if (!bns && /* only acknowledge the error if no buflist content waits */
n == LWS_SSL_CAPABLE_ERROR) {
lwsl_debug("%s: SSL_CAPABLE_ERROR\n", __func__);
return -1;
}
if (n <= 0 && bns)
/*
* There wasn't anything to read yet, but there's something
* on the buflist to give him
*/
goto buflist_material;
/* we read something */
if (fr && bns) {
/*
* Stash what we read, since there's earlier buflist material
*/
n = lws_buflist_append_segment(&wsi->buflist, ebuf->token, (size_t)ebuf->len);
if (n < 0)
return -1;
if (n && lws_dll2_is_detached(&wsi->dll_buflist))
lws_dll2_add_head(&wsi->dll_buflist,
&pt->dll_buflist_owner);
goto buflist_material;
}
/*
* directly return what we read
*/
return 0;
buflist_material:
ebuf->token = b;
if (e < bns)
/* restrict to e, if more than e available */
ebuf->len = e;
else
ebuf->len = bns;
return 1; /* from buflist */
}
int
lws_buflist_aware_finished_consuming(struct lws *wsi, struct lws_tokens *ebuf,
int used, int buffered, const char *hint)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
int m;
/* it's in the buflist; we didn't use any */
if (!used && buffered)
return 0;
if (used && buffered) {
if (wsi->buflist) {
m = (int)lws_buflist_use_segment(&wsi->buflist,
(size_t)used);
if (m)
return 0;
}
lwsl_wsi_info(wsi, "removed from dll_buflist");
lws_dll2_remove(&wsi->dll_buflist);
return 0;
}
/* any remainder goes on the buflist */
if (used < ebuf->len && ebuf->len >= 0 && used >= 0) {
m = lws_buflist_append_segment(&wsi->buflist,
ebuf->token + used,
(unsigned int)(ebuf->len - used));
if (m < 0)
return 1; /* OOM */
if (m) {
lwsl_wsi_debug(wsi, "added to rxflow list");
if (lws_dll2_is_detached(&wsi->dll_buflist))
lws_dll2_add_head(&wsi->dll_buflist,
&pt->dll_buflist_owner);
}
}
return 0;
}
void
lws_service_do_ripe_rxflow(struct lws_context_per_thread *pt)
{
struct lws_pollfd pfd;
if (!pt->dll_buflist_owner.head)
return;
/*
* service all guys with pending rxflow that reached a state they can
* accept the pending data
*/
lws_pt_lock(pt, __func__);
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
pt->dll_buflist_owner.head) {
struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);
pfd.events = LWS_POLLIN;
pfd.revents = LWS_POLLIN;
pfd.fd = -1;
lwsl_wsi_debug(wsi, "rxflow processing: fc=%d, 0x%lx",
lws_is_flowcontrolled(wsi),
(unsigned long)wsi->wsistate);
if (!lws_is_flowcontrolled(wsi) &&
lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
pt->inside_lws_service = 1;
if (lws_rops_func_fidx(wsi->role_ops,
LWS_ROPS_handle_POLLIN).
handle_POLLIN(pt, wsi, &pfd) ==
LWS_HPI_RET_PLEASE_CLOSE_ME)
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"close_and_handled");
pt->inside_lws_service = 0;
}
} lws_end_foreach_dll_safe(d, d1);
lws_pt_unlock(pt);
}
/*
* guys that need POLLIN service again without waiting for network action
* can force POLLIN here if not flowcontrolled, so they will get service.
*
* Return nonzero if anybody got their POLLIN faked
*/
int
lws_service_flag_pending(struct lws_context *context, int tsi)
{
struct lws_context_per_thread *pt;
int forced = 0;
if (!context)
return 1;
pt = &context->pt[tsi];
lws_pt_lock(pt, __func__);
/*
* 1) If there is any wsi with a buflist and in a state to process
* it, we should not wait in poll
*/
lws_start_foreach_dll(struct lws_dll2 *, d, pt->dll_buflist_owner.head) {
struct lws *wsi = lws_container_of(d, struct lws, dll_buflist);
if (!lws_is_flowcontrolled(wsi) &&
lwsi_state(wsi) != LRS_DEFERRING_ACTION) {
forced = 1;
break;
}
} lws_end_foreach_dll(d);
#if defined(LWS_ROLE_WS)
forced |= lws_rops_func_fidx(&role_ops_ws,
LWS_ROPS_service_flag_pending).
service_flag_pending(context, tsi);
#endif
#if defined(LWS_WITH_TLS)
/*
* 2) For all guys with buffered SSL read data already saved up, if they
* are not flowcontrolled, fake their POLLIN status so they'll get
* service to use up the buffered incoming data, even though their
* network socket may have nothing
*/
lws_start_foreach_dll_safe(struct lws_dll2 *, p, p1,
lws_dll2_get_head(&pt->tls.dll_pending_tls_owner)) {
struct lws *wsi = lws_container_of(p, struct lws,
tls.dll_pending_tls);
if (wsi->position_in_fds_table >= 0) {
pt->fds[wsi->position_in_fds_table].revents = (short)(
pt->fds[wsi->position_in_fds_table].revents |
(pt->fds[wsi->position_in_fds_table].events &
LWS_POLLIN));
if (pt->fds[wsi->position_in_fds_table].revents &
LWS_POLLIN)
/*
* We're not going to remove the wsi from the
* pending tls list. The processing will have
* to do it if he exhausts the pending tls.
*/
forced = 1;
}
} lws_end_foreach_dll_safe(p, p1);
#endif
lws_pt_unlock(pt);
return forced;
}
int
lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
int tsi)
{
struct lws_context_per_thread *pt;
struct lws *wsi;
char cow = 0;
if (!context || context->service_no_longer_possible)
return -1;
pt = &context->pt[tsi];
if (pt->event_loop_pt_unused)
return -1;
if (!pollfd) {
/*
* calling with NULL pollfd for periodic background processing
* is no longer needed and is now illegal.
*/
assert(pollfd);
return -1;
}
assert(lws_socket_is_valid(pollfd->fd));
/* no, here to service a socket descriptor */
wsi = wsi_from_fd(context, pollfd->fd);
if (!wsi)
/* not lws connection ... leave revents alone and return */
return 0;
#if LWS_MAX_SMP > 1
if (wsi->undergoing_init_from_other_pt)
/*
* Temporary situation that other service thread is initializing
* this wsi right now for use on our service thread.
*/
return 0;
#endif
/*
* so that caller can tell we handled, past here we need to
* zero down pollfd->revents after handling
*/
/*
* Whatever the situation with buffered rx packets, or rx explicitly read
* and buffered to be handled before we acknowledge the socket is gone,
* any sign of HUP always immediately means no more tx is possible.
*/
if ((pollfd->revents & LWS_POLLHUP) == LWS_POLLHUP) {
wsi->socket_is_permanently_unusable = 1;
if (!(pollfd->revents & pollfd->events & LWS_POLLIN)) {
/* ... there are no pending rx packets waiting... */
if (!lws_buflist_total_len(&wsi->buflist)) {
/*
* ... nothing stashed in the buflist either,
* so acknowledge the wsi is done
*/
lwsl_wsi_debug(wsi, "Session Socket %d dead",
pollfd->fd);
goto close_and_handled;
}
/*
* ... in fact we have some unread rx buffered in the
* input buflist. Hold off the closing a bit...
*/
lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK, 3);
}
}
#ifdef _WIN32
if (pollfd->revents & LWS_POLLOUT)
wsi->sock_send_blocking = FALSE;
#endif
#if defined(LWS_WITH_TLS)
if (lwsi_state(wsi) == LRS_SHUTDOWN &&
lws_is_ssl(wsi) && wsi->tls.ssl) {
switch (__lws_tls_shutdown(wsi)) {
case LWS_SSL_CAPABLE_DONE:
case LWS_SSL_CAPABLE_ERROR:
goto close_and_handled;
case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
case LWS_SSL_CAPABLE_MORE_SERVICE:
goto handled;
}
}
#endif
if ((pollfd->revents & LWS_POLLOUT) == LWS_POLLOUT &&
wsi->tls_read_wanted_write) {
/*
* If this wsi has a pending WANT_WRITE from SSL_read(), it has
* asked for a callback on writeable so it can retry the read.
*
* Let's consume the POLLOUT by turning it into a POLLIN, and
* setting a flag to request a new writeable
*/
wsi->tls_read_wanted_write = 0;
pollfd->revents &= ~(LWS_POLLOUT);
pollfd->revents |= LWS_POLLIN;
cow = 1;
}
wsi->could_have_pending = 0; /* clear back-to-back write detection */
pt->inside_lws_service = 1;
/* okay, what we came here to do... */
/* if we got here, we should have wire protocol ops set on the wsi */
assert(wsi->role_ops);
// lwsl_notice("%s: %s: wsistate 0x%x\n", __func__, wsi->role_ops->name,
// wsi->wsistate);
switch (lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_handle_POLLIN).
handle_POLLIN(pt, wsi, pollfd)) {
case LWS_HPI_RET_WSI_ALREADY_DIED:
pt->inside_lws_service = 0;
#if defined (_WIN32)
break;
#else
return 1;
#endif
case LWS_HPI_RET_HANDLED:
break;
case LWS_HPI_RET_PLEASE_CLOSE_ME:
//lwsl_notice("%s: %s pollin says please close me\n", __func__,
// wsi->role_ops->name);
close_and_handled:
lwsl_wsi_debug(wsi, "Close and handled");
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"close_and_handled");
#if defined(_DEBUG) && defined(LWS_WITH_LIBUV)
/*
* confirm close has no problem being called again while
* it waits for libuv service to complete the first async
* close
*/
if (!strcmp(context->event_loop_ops->name, "libuv"))
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"close_and_handled uv repeat test");
#endif
/*
* pollfd may point to something else after the close
* due to pollfd swapping scheme on delete on some platforms
* we can't clear revents now because it'd be the wrong guy's
* revents
*/
pt->inside_lws_service = 0;
return 1;
default:
assert(0);
}
#if defined(LWS_WITH_TLS)
handled:
#endif
pollfd->revents = 0;
if (cow)
lws_callback_on_writable(wsi);
pt->inside_lws_service = 0;
return 0;
}
int
lws_service_fd(struct lws_context *context, struct lws_pollfd *pollfd)
{
return lws_service_fd_tsi(context, pollfd, 0);
}
int
lws_service(struct lws_context *context, int timeout_ms)
{
struct lws_context_per_thread *pt;
int n;
if (!context)
return 1;
pt = &context->pt[0];
pt->inside_service = 1;
if (context->event_loop_ops->run_pt) {
/* we are configured for an event loop */
context->event_loop_ops->run_pt(context, 0);
pt->inside_service = 0;
return 1;
}
n = lws_plat_service(context, timeout_ms);
if (n != -1)
pt->inside_service = 0;
return n;
}
int
lws_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
struct lws_context_per_thread *pt;
int n;
if (!context)
return 1;
pt = &context->pt[tsi];
pt->inside_service = 1;
#if LWS_MAX_SMP > 1
pt->self = pthread_self();
#endif
if (context->event_loop_ops->run_pt) {
/* we are configured for an event loop */
context->event_loop_ops->run_pt(context, tsi);
pt->inside_service = 0;
return 1;
}
n = _lws_plat_service_tsi(context, timeout_ms, tsi);
pt->inside_service = 0;
return n;
}
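/*
 * Usage sketch (illustrative, not part of lws): with the default internal
 * event loop, the canonical consumption of lws_service() is just
 */
#if 0
static volatile int interrupted; /* set from, eg, a SIGINT handler */

static int
run_event_loop(struct lws_context *context)
{
	int n = 0;

	/* lws_service() returns < 0 when service is no longer possible */
	while (n >= 0 && !interrupted)
		n = lws_service(context, 0);

	return n;
}
#endif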

View File

@ -0,0 +1,379 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Socks5 Client -related helpers
*/
#include "private-lib-core.h"
int
lws_set_socks(struct lws_vhost *vhost, const char *socks)
{
char *p_at, *p_colon;
char user[96];
char password[96];
if (!socks)
return -1;
vhost->socks_user[0] = '\0';
vhost->socks_password[0] = '\0';
p_at = strrchr(socks, '@');
if (p_at) { /* auth is around */
if (lws_ptr_diff_size_t(p_at, socks) > (sizeof(user) +
sizeof(password) - 2)) {
lwsl_vhost_err(vhost, "auth too long");
goto bail;
}
p_colon = strchr(socks, ':');
if (p_colon) {
if (lws_ptr_diff_size_t(p_colon, socks) >
sizeof(user) - 1) {
lwsl_vhost_err(vhost, "user too long");
goto bail;
}
if (lws_ptr_diff_size_t(p_at, p_colon) >
sizeof(password) - 1) {
lwsl_vhost_err(vhost, "pw too long");
goto bail;
}
lws_strncpy(vhost->socks_user, socks,
lws_ptr_diff_size_t(p_colon, socks) + 1);
lws_strncpy(vhost->socks_password, p_colon + 1,
lws_ptr_diff_size_t(p_at, (p_colon + 1)) + 1);
}
lwsl_vhost_info(vhost, " Socks auth, user: %s, password: %s",
vhost->socks_user,
vhost->socks_password);
socks = p_at + 1;
}
lws_strncpy(vhost->socks_proxy_address, socks,
sizeof(vhost->socks_proxy_address));
p_colon = strchr(vhost->socks_proxy_address, ':');
if (!p_colon && !vhost->socks_proxy_port) {
lwsl_vhost_err(vhost, "socks_proxy needs to be address:port");
return -1;
}
if (p_colon) {
*p_colon = '\0';
vhost->socks_proxy_port = (unsigned int)atoi(p_colon + 1);
}
lwsl_vhost_debug(vhost, "Connections via Socks5 %s:%u",
vhost->socks_proxy_address,
vhost->socks_proxy_port);
return 0;
bail:
return -1;
}
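/*
 * Illustrative examples (not part of lws) of proxy strings the parser above
 * accepts; the hostname and credentials are invented:
 *
 *   "onward.example.com:1080"               no auth, explicit port
 *   "user:secret@onward.example.com:1080"   username / password auth
 */
#if 0
	if (lws_set_socks(vhost, "user:secret@onward.example.com:1080"))
		lwsl_vhost_err(vhost, "rejected socks proxy string");
#endif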
int
lws_socks5c_generate_msg(struct lws *wsi, enum socks_msg_type type,
ssize_t *msg_len)
{
struct lws_context *context = wsi->a.context;
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
uint8_t *p = pt->serv_buf, *end = &p[context->pt_serv_buf_size];
ssize_t n, passwd_len;
short net_num;
char *cp;
switch (type) {
case SOCKS_MSG_GREETING:
if (lws_ptr_diff(end, p) < 4)
return 1;
/* socks version, version 5 only */
*p++ = SOCKS_VERSION_5;
/* number of methods */
*p++ = 2;
/* username password method */
*p++ = SOCKS_AUTH_USERNAME_PASSWORD;
/* no authentication method */
*p++ = SOCKS_AUTH_NO_AUTH;
break;
case SOCKS_MSG_USERNAME_PASSWORD:
n = (ssize_t)strlen(wsi->a.vhost->socks_user);
passwd_len = (ssize_t)strlen(wsi->a.vhost->socks_password);
if (n > 254 || passwd_len > 254)
return 1;
if (lws_ptr_diff(end, p) < 3 + n + passwd_len)
return 1;
/* the subnegotiation version */
*p++ = SOCKS_SUBNEGOTIATION_VERSION_1;
/* length of the user name */
*p++ = (uint8_t)n;
/* user name */
memcpy(p, wsi->a.vhost->socks_user, (size_t)n);
p += (uint8_t)n;
/* length of the password */
*p++ = (uint8_t)passwd_len;
/* password */
memcpy(p, wsi->a.vhost->socks_password, (size_t)passwd_len);
p += passwd_len;
break;
case SOCKS_MSG_CONNECT:
n = (ssize_t)strlen(wsi->stash->cis[CIS_ADDRESS]);
if (n > 254 || lws_ptr_diff(end, p) < 5 + n + 2)
return 1;
cp = (char *)&net_num;
/* socks version */
*p++ = SOCKS_VERSION_5;
/* socks command */
*p++ = SOCKS_COMMAND_CONNECT;
/* reserved */
*p++ = 0;
/* address type */
*p++ = SOCKS_ATYP_DOMAINNAME;
/* length of ---> */
*p++ = (uint8_t)n;
/* the address we tell SOCKS proxy to connect to */
memcpy(p, wsi->stash->cis[CIS_ADDRESS], (size_t)n);
p += n;
net_num = (short)htons(wsi->c_port);
/* the port we tell SOCKS proxy to connect to */
*p++ = (uint8_t)cp[0];
*p++ = (uint8_t)cp[1];
break;
default:
return 1;
}
*msg_len = lws_ptr_diff(p, pt->serv_buf);
return 0;
}
int
lws_socks5c_ads_server(struct lws_vhost *vh,
const struct lws_context_creation_info *info)
{
/* socks proxy */
if (info->socks_proxy_address) {
/* override for backwards compatibility */
if (info->socks_proxy_port)
vh->socks_proxy_port = info->socks_proxy_port;
lws_set_socks(vh, info->socks_proxy_address);
return 0;
}
#ifdef LWS_HAVE_GETENV
{
char *p = getenv("socks_proxy");
if (p && strlen(p) > 0 && strlen(p) < 95)
lws_set_socks(vh, p);
}
#endif
return 0;
}
/*
* Returns 0 = nothing for caller to do, 1 = return wsi, -1 = goto failed
*/
int
lws_socks5c_greet(struct lws *wsi, const char **pcce)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
ssize_t plen;
int n;
/* socks proxy */
if (!wsi->a.vhost->socks_proxy_port)
return 0;
if (lws_socks5c_generate_msg(wsi, SOCKS_MSG_GREETING, &plen)) {
*pcce = "socks msg too large";
return -1;
}
// lwsl_hexdump_notice(pt->serv_buf, plen);
n = (int)send(wsi->desc.sockfd, (char *)pt->serv_buf, (size_t)plen,
MSG_NOSIGNAL);
if (n < 0) {
lwsl_wsi_debug(wsi, "ERROR writing socks greeting");
*pcce = "socks write failed";
return -1;
}
lws_set_timeout(wsi, PENDING_TIMEOUT_AWAITING_SOCKS_GREETING_REPLY,
(int)wsi->a.context->timeout_secs);
lwsi_set_state(wsi, LRS_WAITING_SOCKS_GREETING_REPLY);
return 1;
}
int
lws_socks5c_handle_state(struct lws *wsi, struct lws_pollfd *pollfd,
const char **pcce)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
int conn_mode = 0, pending_timeout = 0;
ssize_t len;
int n;
/* handle proxy hung up on us */
if (pollfd->revents & LWS_POLLHUP) {
lwsl_wsi_warn(wsi, "SOCKS fd=%d dead", pollfd->fd);
*pcce = "socks conn dead";
return LW5CHS_RET_BAIL3;
}
n = (int)recv(wsi->desc.sockfd, (void *)pt->serv_buf,
wsi->a.context->pt_serv_buf_size, 0);
if (n < 0) {
if (LWS_ERRNO == LWS_EAGAIN) {
lwsl_wsi_debug(wsi, "SOCKS read EAGAIN, retrying");
return LW5CHS_RET_RET0;
}
lwsl_wsi_err(wsi, "ERROR reading from SOCKS socket");
*pcce = "socks recv fail";
return LW5CHS_RET_BAIL3;
}
// lwsl_hexdump_warn(pt->serv_buf, n);
switch (lwsi_state(wsi)) {
case LRS_WAITING_SOCKS_GREETING_REPLY:
if (pt->serv_buf[0] != SOCKS_VERSION_5)
goto socks_reply_fail;
if (pt->serv_buf[1] == SOCKS_AUTH_NO_AUTH) {
lwsl_wsi_client(wsi, "SOCKS GR: No Auth Method");
if (lws_socks5c_generate_msg(wsi, SOCKS_MSG_CONNECT,
&len)) {
lwsl_wsi_err(wsi, "generate connect msg fail");
goto socks_send_msg_fail;
}
conn_mode = LRS_WAITING_SOCKS_CONNECT_REPLY;
pending_timeout =
PENDING_TIMEOUT_AWAITING_SOCKS_CONNECT_REPLY;
goto socks_send;
}
if (pt->serv_buf[1] == SOCKS_AUTH_USERNAME_PASSWORD) {
lwsl_wsi_client(wsi, "SOCKS GR: User/Pw Method");
if (lws_socks5c_generate_msg(wsi,
SOCKS_MSG_USERNAME_PASSWORD,
&len))
goto socks_send_msg_fail;
conn_mode = LRS_WAITING_SOCKS_AUTH_REPLY;
pending_timeout =
PENDING_TIMEOUT_AWAITING_SOCKS_AUTH_REPLY;
goto socks_send;
}
goto socks_reply_fail;
case LRS_WAITING_SOCKS_AUTH_REPLY:
if (pt->serv_buf[0] != SOCKS_SUBNEGOTIATION_VERSION_1 ||
pt->serv_buf[1] !=
SOCKS_SUBNEGOTIATION_STATUS_SUCCESS)
goto socks_reply_fail;
lwsl_wsi_client(wsi, "SOCKS password OK, sending connect");
if (lws_socks5c_generate_msg(wsi, SOCKS_MSG_CONNECT, &len)) {
socks_send_msg_fail:
*pcce = "socks gen msg fail";
return LW5CHS_RET_BAIL3;
}
conn_mode = LRS_WAITING_SOCKS_CONNECT_REPLY;
pending_timeout =
PENDING_TIMEOUT_AWAITING_SOCKS_CONNECT_REPLY;
socks_send:
// lwsl_hexdump_notice(pt->serv_buf, len);
n = (int)send(wsi->desc.sockfd, (char *)pt->serv_buf,
(size_t)len, MSG_NOSIGNAL);
if (n < 0) {
lwsl_wsi_debug(wsi, "ERROR writing to socks proxy");
*pcce = "socks write fail";
return LW5CHS_RET_BAIL3;
}
lws_set_timeout(wsi, (enum pending_timeout)pending_timeout,
(int)wsi->a.context->timeout_secs);
lwsi_set_state(wsi, (lws_wsi_state_t)conn_mode);
break;
socks_reply_fail:
lwsl_wsi_err(wsi, "socks reply: v%d, err %d",
pt->serv_buf[0], pt->serv_buf[1]);
*pcce = "socks reply fail";
return LW5CHS_RET_BAIL3;
case LRS_WAITING_SOCKS_CONNECT_REPLY:
if (pt->serv_buf[0] != SOCKS_VERSION_5 ||
pt->serv_buf[1] != SOCKS_REQUEST_REPLY_SUCCESS)
goto socks_reply_fail;
lwsl_wsi_client(wsi, "socks connect OK");
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
if (lwsi_role_http(wsi) &&
lws_hdr_simple_create(wsi, _WSI_TOKEN_CLIENT_PEER_ADDRESS,
wsi->a.vhost->socks_proxy_address)) {
*pcce = "socks connect fail";
return LW5CHS_RET_BAIL3;
}
#endif
wsi->c_port = (uint16_t)wsi->a.vhost->socks_proxy_port;
/* clear his proxy connection timeout */
lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);
return LW5CHS_RET_STARTHS;
default:
break;
}
return LW5CHS_RET_NOTHING;
}

View File

@ -0,0 +1,393 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
static int
sul_compare(const lws_dll2_t *d, const lws_dll2_t *i)
{
lws_usec_t a = ((lws_sorted_usec_list_t *)d)->us;
lws_usec_t b = ((lws_sorted_usec_list_t *)i)->us;
/*
* Simply returning (a - b) in an int
* may lead to an integer overflow bug
*/
if (a > b)
return 1;
if (a < b)
return -1;
return 0;
}
/*
* notice owner was chosen already, and sul->us was already computed
*/
int
__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul)
{
lws_dll2_remove(&sul->list);
assert(sul->cb);
/*
* we sort the pt's list of sequencers with pending timeouts, so it's
* cheap to check it every poll wait
*/
lws_dll2_add_sorted(&sul->list, own, sul_compare);
return 0;
}
void
lws_sul_cancel(lws_sorted_usec_list_t *sul)
{
lws_dll2_remove(&sul->list);
/* we are clearing the timeout and leaving ourselves detached */
sul->us = 0;
}
void
lws_sul2_schedule(struct lws_context *context, int tsi, int flags,
lws_sorted_usec_list_t *sul)
{
struct lws_context_per_thread *pt = &context->pt[tsi];
lws_pt_assert_lock_held(pt);
assert(sul->cb);
__lws_sul_insert(
&pt->pt_sul_owner[!!(flags & LWSSULLI_WAKE_IF_SUSPENDED)], sul);
}
/*
* own points to the first in an array of length own_len
*
* While any sul list owner has a "ripe" (ie, ready to handle) sul, we handle
* them strictly in order of sul time. When nobody has a ripe sul, we return
* 0 if nobody has any sul at all, otherwise the interval between usnow and
* the next earliest scheduled event on any list.
*/
lws_usec_t
__lws_sul_service_ripe(lws_dll2_owner_t *own, int own_len, lws_usec_t usnow)
{
struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
lws_container_of(own, struct lws_context_per_thread,
pt_sul_owner);
if (pt->attach_owner.count)
lws_system_do_attach(pt);
lws_pt_assert_lock_held(pt);
/* must be at least 1 */
assert(own_len > 0);
/*
* Of the own_len sul owning lists, the earliest next sul could be on
* any of them. We have to find it and handle each in turn until no
* ripe sul left on any owning list, and we can exit.
*
* This ensures the ripe sul are handled strictly in the right order no
* matter which owning list they are on.
*/
do {
lws_sorted_usec_list_t *hit = NULL;
lws_usec_t lowest = 0;
int n = 0;
for (n = 0; n < own_len; n++) {
lws_sorted_usec_list_t *sul;
if (!own[n].count)
continue;
sul = (lws_sorted_usec_list_t *)
lws_dll2_get_head(&own[n]);
if (!hit || sul->us <= lowest) {
hit = sul;
lowest = sul->us;
}
}
if (!hit)
return 0;
if (lowest > usnow)
return lowest - usnow;
/* his moment has come... remove him from his owning list */
if (!hit->cb) {
lwsl_err("%s: sul with NULL callback (did not cancel on destory?)\n", __func__);
return 0;
}
lws_dll2_remove(&hit->list);
hit->us = 0;
// lwsl_notice("%s: sul: %p\n", __func__, hit->cb);
pt->inside_lws_service = 1;
hit->cb(hit);
pt->inside_lws_service = 0;
} while (1);
/* unreachable */
return 0;
}
/*
* Normally we use the OS monotonic time, which does not step when the
* gettimeofday() time is adjusted by, eg, ntpclient. But on some OSes,
* high resolution monotonic time doesn't exist; sul time is computed from and
* compared against gettimeofday() time and breaks when that steps.
*
* For those cases, this allows us to retrospectively adjust existing suls on
* all owning lists by the step amount, at the same time we adjust the
* nonmonotonic clock. Then nothing breaks so long as we do this when the
* gettimeofday() clock is stepped.
*
* Linux and so on offer Posix MONOTONIC, which lws uses. FreeRTOS doesn't
* have a high-resolution monotonic clock and has to use gettimeofday(), which
* requires this adjustment when it is stepped.
*/
lws_usec_t
lws_sul_nonmonotonic_adjust(struct lws_context *ctx, int64_t step_us)
{
struct lws_context_per_thread *pt = &ctx->pt[0];
int n, m;
/*
* for each pt
*/
for (m = 0; m < ctx->count_threads; m++) {
/*
* For each owning list...
*/
lws_pt_lock(pt, __func__);
for (n = 0; n < LWS_COUNT_PT_SUL_OWNERS; n++) {
if (!pt->pt_sul_owner[n].count)
continue;
/* ... and for every existing sul on a list... */
lws_start_foreach_dll(struct lws_dll2 *, p,
lws_dll2_get_head(
&pt->pt_sul_owner[n])) {
lws_sorted_usec_list_t *sul = lws_container_of(
p, lws_sorted_usec_list_t, list);
/*
* ... retrospectively step its ripe time by the
* step we will adjust the gettimeofday() clock
* with
*/
sul->us += step_us;
} lws_end_foreach_dll(p);
}
lws_pt_unlock(pt);
pt++;
}
return 0;
}
/*
* Earliest wakeable event on any pt
*/
int
lws_sul_earliest_wakeable_event(struct lws_context *ctx, lws_usec_t *pearliest)
{
struct lws_context_per_thread *pt;
int n = 0, hit = -1;
lws_usec_t lowest = 0;
for (n = 0; n < ctx->count_threads; n++) {
pt = &ctx->pt[n];
lws_pt_lock(pt, __func__);
if (pt->pt_sul_owner[LWSSULLI_WAKE_IF_SUSPENDED].count) {
lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)
lws_dll2_get_head(&pt->pt_sul_owner[
LWSSULLI_WAKE_IF_SUSPENDED]);
if (hit == -1 || sul->us < lowest) {
hit = n;
lowest = sul->us;
}
}
lws_pt_unlock(pt);
}
if (hit == -1)
/* there is no pending event */
return 1;
*pearliest = lowest;
return 0;
}
void
lws_sul_schedule(struct lws_context *ctx, int tsi, lws_sorted_usec_list_t *sul,
sul_cb_t _cb, lws_usec_t _us)
{
struct lws_context_per_thread *_pt = &ctx->pt[tsi];
assert(_cb);
lws_pt_lock(_pt, __func__);
if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
lws_sul_cancel(sul);
else {
sul->cb = _cb;
sul->us = lws_now_usecs() + _us;
lws_sul2_schedule(ctx, tsi, LWSSULLI_MISS_IF_SUSPENDED, sul);
}
lws_pt_unlock(_pt);
}
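/*
 * Usage sketch (illustrative, not part of lws): a self-rearming one-second
 * timer.  "periodic_sul", "periodic_cb" and "my_context" are invented names;
 * LWS_US_PER_SEC is the public microseconds-per-second constant.  Any sul
 * still scheduled must be lws_sul_cancel()'d before its memory goes away.
 */
#if 0
static struct lws_context *my_context; /* assumed set at init time */
static lws_sorted_usec_list_t periodic_sul;

static void
periodic_cb(lws_sorted_usec_list_t *sul)
{
	/* ... the periodic work goes here ... */

	/* rearm to fire again one second from now */
	lws_sul_schedule(my_context, 0, sul, periodic_cb, LWS_US_PER_SEC);
}

static void
start_periodic(void)
{
	lws_sul_schedule(my_context, 0, &periodic_sul, periodic_cb,
			 LWS_US_PER_SEC);
}
#endif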
void
lws_sul_schedule_wakesuspend(struct lws_context *ctx, int tsi,
lws_sorted_usec_list_t *sul, sul_cb_t _cb,
lws_usec_t _us)
{
struct lws_context_per_thread *_pt = &ctx->pt[tsi];
assert(_cb);
lws_pt_lock(_pt, __func__);
if (_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL)
lws_sul_cancel(sul);
else {
sul->cb = _cb;
sul->us = lws_now_usecs() + _us;
lws_sul2_schedule(ctx, tsi, LWSSULLI_WAKE_IF_SUSPENDED, sul);
}
lws_pt_unlock(_pt);
}
#if defined(LWS_WITH_SUL_DEBUGGING)
/*
* Sanity checker for any sul left scheduled when its containing object is
* freed... code scheduling suls must take care to cancel them when destroying
* their object. This optional debugging helper checks that when an object is
* being destroyed, there is no live sul scheduled from inside the object.
*/
void
lws_sul_debug_zombies(struct lws_context *ctx, void *po, size_t len,
const char *destroy_description)
{
struct lws_context_per_thread *pt;
int n, m;
for (n = 0; n < ctx->count_threads; n++) {
pt = &ctx->pt[n];
lws_pt_lock(pt, __func__);
for (m = 0; m < LWS_COUNT_PT_SUL_OWNERS; m++) {
lws_start_foreach_dll(struct lws_dll2 *, p,
lws_dll2_get_head(&pt->pt_sul_owner[m])) {
lws_sorted_usec_list_t *sul =
lws_container_of(p,
lws_sorted_usec_list_t, list);
if (!po) {
lwsl_cx_err(ctx, "%s",
destroy_description);
/* just sanity check the list */
assert(sul->cb);
}
/*
* Is the sul resident inside the object that is
* indicated as being deleted?
*/
if (po &&
(void *)sul >= po &&
(size_t)lws_ptr_diff(sul, po) < len) {
lwsl_cx_err(ctx, "ERROR: Zombie Sul "
"(on list %d) %s, cb %p\n", m,
destroy_description, sul->cb);
/*
* This assert fires if you have left
* a sul scheduled to fire later, but
* are about to destroy the object the
* sul lives in. You must take care to
* do lws_sul_cancel(&sul) on any suls
* that may be scheduled before
* destroying the object the sul lives
* inside.
*
* You can look up the cb pointer in
* your mapfile to find out which
* callback function the sul was using
* which usually tells you which sul
* it is.
*/
assert(0);
}
} lws_end_foreach_dll(p);
}
lws_pt_unlock(pt);
}
}
#endif
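/*
 * Sketch of the rule the zombie checker above enforces (illustrative, not
 * part of lws): an object that owns a sul must cancel it on its destroy
 * path, before the memory is freed.  "struct my_obj" is an invented example.
 */
#if 0
struct my_obj {
	lws_sorted_usec_list_t sul;
	/* ... other members ... */
};

static void
my_obj_destroy(struct my_obj *o)
{
	/* detach the sul first, so it cannot fire into freed memory */
	lws_sul_cancel(&o->sul);
	lws_free(o);
}
#endif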

View File

@ -0,0 +1,153 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
void
lws_state_reg_notifier(lws_state_manager_t *mgr,
lws_state_notify_link_t *notify_link)
{
lws_dll2_add_head(&notify_link->list, &mgr->notify_list);
}
void
lws_state_reg_deregister(lws_state_notify_link_t *nl)
{
lws_dll2_remove(&nl->list);
}
void
lws_state_reg_notifier_list(lws_state_manager_t *mgr,
lws_state_notify_link_t * const *notify_link_array)
{
if (notify_link_array)
while (*notify_link_array)
lws_state_reg_notifier(mgr, *notify_link_array++);
}
#if (_LWS_ENABLED_LOGS & (LLL_INFO | LLL_DEBUG))
static const char *
_systnm(lws_state_manager_t *mgr, int state, char *temp8)
{
if (!mgr->state_names) {
lws_snprintf(temp8, 8, "%d", state);
return temp8;
}
return mgr->state_names[state];
}
#endif
static int
_report(lws_state_manager_t *mgr, int a, int b)
{
#if (_LWS_ENABLED_LOGS & LLL_INFO)
char temp8[8];
#endif
lws_start_foreach_dll(struct lws_dll2 *, d, mgr->notify_list.head) {
lws_state_notify_link_t *l =
lws_container_of(d, lws_state_notify_link_t, list);
if (l->notify_cb(mgr, l, a, b)) {
/* a dependency took responsibility for retry */
#if (_LWS_ENABLED_LOGS & LLL_INFO)
lwsl_cx_info(mgr->context, "%s: %s: rejected '%s' -> '%s'",
mgr->name, l->name,
_systnm(mgr, a, temp8),
_systnm(mgr, b, temp8));
#endif
return 1;
}
} lws_end_foreach_dll(d);
return 0;
}
static int
_lws_state_transition(lws_state_manager_t *mgr, int target)
{
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
char temp8[8];
#endif
if (_report(mgr, mgr->state, target))
return 1;
#if (_LWS_ENABLED_LOGS & LLL_DEBUG)
if (mgr->context)
lwsl_cx_debug(mgr->context, "%s: changed %d '%s' -> %d '%s'", mgr->name,
mgr->state, _systnm(mgr, mgr->state, temp8), target,
_systnm(mgr, target, temp8));
#endif
mgr->state = target;
/* Indicate success by calling the notifiers again with both args the same */
_report(mgr, target, target);
#if defined(LWS_WITH_SYS_SMD)
if (mgr->smd_class && mgr->context)
(void)lws_smd_msg_printf(mgr->context,
mgr->smd_class, "{\"state\":\"%s\"}",
mgr->state_names[target]);
#endif
return 0;
}
int
lws_state_transition_steps(lws_state_manager_t *mgr, int target)
{
int n = 0;
#if (_LWS_ENABLED_LOGS & LLL_INFO)
int i = mgr->state;
char temp8[8];
#endif
if (mgr->state > target)
return 0;
while (!n && mgr->state != target)
n = _lws_state_transition(mgr, mgr->state + 1);
#if (_LWS_ENABLED_LOGS & LLL_INFO)
lwsl_cx_info(mgr->context, "%s -> %s", _systnm(mgr, i, temp8),
_systnm(mgr, mgr->state, temp8));
#endif
return 0;
}
int
lws_state_transition(lws_state_manager_t *mgr, int target)
{
if (mgr->state != target)
_lws_state_transition(mgr, target);
return 0;
}
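/*
 * Usage sketch (illustrative, not part of lws): hooking a notifier into a
 * state manager.  The app-side names are invented; for the system state
 * manager this registration is normally done via register_notifier_list in
 * the context creation info instead.
 */
#if 0
static int
app_notify_cb(lws_state_manager_t *mgr, lws_state_notify_link_t *link,
	      int current, int target)
{
	/* returning nonzero vetoes / defers the transition to target */
	return 0;
}

static lws_state_notify_link_t app_nl = {
	.name		= "app",
	.notify_cb	= app_notify_cb,
};

static void
app_register(lws_state_manager_t *mgr)
{
	lws_state_reg_notifier(mgr, &app_nl);
}
#endif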

View File

@ -0,0 +1,339 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*
* Transport mux / demux
*/
#include <private-lib-core.h>
#if defined(STANDALONE)
struct lws_context_standalone;
#define lws_context lws_context_standalone
#endif
void
lws_transport_mux_client_request_tx(lws_transport_mux_t *tm)
{
assert_is_tm(tm);
tm->info.txp_cpath.ops_onw->req_write(tm->info.txp_cpath.priv_onw);
}
void
lws_transport_mux_destroy(lws_transport_mux_t **tm);
#if defined(_DEBUG)
void
lws_transport_path_client_dump(lws_txp_path_client_t *path, const char *ctx)
{
char buf[200], *p = buf, *end = buf + sizeof(buf) - 1;
uint32_t magic;
int n;
n = snprintf(p, lws_ptr_diff_size_t(end, p),
"MUX: %p, IN: ops=%s, priv=%p",
path->mux, path->ops_in ? path->ops_in->name : "null",
path->priv_in);
p = (p + n > end) ? end : p + n;
if (path->priv_in) {
magic = *(uint32_t *)path->priv_in;
if (magic & 0xff000000) {
n = snprintf(p, lws_ptr_diff_size_t(end, p), " (%c%c%c%c)",
(int)(magic >> 24), (int)((magic >> 16) & 0xff),
(int)((magic >> 8) & 0xff), (int)(magic & 0xff));
p = (p + n > end) ? end : p + n;
}
}
n = snprintf(p, lws_ptr_diff_size_t(end, p), ", ONW: ops=%s, priv=%p",
path->ops_onw ? path->ops_onw->name : "null", path->priv_onw);
p = (p + n > end) ? end : p + n;
if (path->priv_onw) {
magic = *(uint32_t *)path->priv_onw;
if (magic & 0xff000000) {
n = snprintf(p, lws_ptr_diff_size_t(end, p), " (%c%c%c%c)",
(int)(magic >> 24), (int)((magic >> 16) & 0xff),
(int)((magic >> 8) & 0xff), (int)(magic & 0xff));
p = (p + n > end) ? end : p + n;
}
}
*end = '\0';
lwsl_notice("%s: %s: %s\n", __func__, ctx, buf);
}
#endif
/*
* These are transport ops that let the mux transport encapsulate another
* transport transparently.
*/
static int
lws_transport_mux_retry_connect(lws_txp_path_client_t *path,
struct lws_sspc_handle *h)
{
lws_transport_mux_ch_t *tmc;
lwsl_user("%s\n", __func__);
lws_transport_path_client_dump(path, __func__);
if (path->mux->link_state != LWSTM_OPERATIONAL) {
lwsl_user("%s: transport not operational\n", __func__);
goto fail;
}
tmc = lws_transport_mux_add_channel(path->mux, (lws_transport_priv_t)h);
if (!tmc)
goto fail;
lwsl_notice("%s: added channel\n", __func__);
path->priv_onw = (lws_transport_priv_t)tmc;
tmc->state = LWSTMC_PENDING_CREATE_CHANNEL;
lws_dll2_add_tail(&tmc->list_pending_tx, &path->mux->pending_tx);
lws_transport_mux_client_request_tx(path->mux);
return 0;
fail:
h->txp_path.ops_in->event_connect_disposition(h, 1);
return 1;
}
static void
lws_transport_mux_ch_req_write(lws_transport_priv_t priv)
{
lws_transport_mux_ch_t *tmc = (lws_transport_mux_ch_t *)priv;
lws_transport_mux_t *tm;
assert_is_tmch(tmc);
if (!tmc->list.owner) {
lwsl_err("%s: unlisted tmc %p\n", __func__, tmc);
return;
}
tm = lws_container_of(tmc->list.owner, lws_transport_mux_t, owner);
assert_is_tm(tm);
lws_transport_mux_client_request_tx(tm);
/* we want to write inside the channel, so register ch as pending */
if (lws_dll2_is_detached(&tmc->list_pending_tx))
lws_dll2_add_tail(&tmc->list_pending_tx, &tm->pending_tx);
}
#if 0
static void
lws_transport_mux_req_write(lws_transport_priv_t priv)
{
lws_transport_mux_t *tm = (lws_transport_mux_t *)priv;
assert_is_tm(tm);
lws_transport_mux_client_request_tx(tm);
}
#endif
static int
lws_transport_mux_write(lws_transport_priv_t priv, uint8_t *buf, size_t len)
{
lws_transport_mux_ch_t *tmc = (lws_transport_mux_ch_t *)priv;
lws_transport_mux_t *tm = lws_container_of(tmc->list.owner,
lws_transport_mux_t, owner);
assert_is_tmch(tmc);
lwsl_user("%s: %d\n", __func__, (int)len);
assert(len < 0xffff);
buf[-4] = LWSSSS_LLM_MUX;
buf[-3] = tmc->ch_idx;
buf[-2] = (len >> 8) & 0xff;
buf[-1] = len & 0xff;
tm->info.txp_cpath.ops_onw->_write(tm->info.txp_cpath.priv_onw,
buf - 4, len + 4);
return 0;
}
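/*
 * Worked example of the 4-byte mux framing prepended above (illustrative):
 * a 5-byte payload "hello" on channel 3 appears on the wire as
 *
 *   [ LWSSSS_LLM_MUX ][ 0x03 ][ 0x00 ][ 0x05 ][ 'h' 'e' 'l' 'l' 'o' ]
 *      mux opcode      ch_idx  len MSB  len LSB        payload
 *
 * ...which is why callers must reserve at least 4 bytes of headroom
 * before buf.
 */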
static void
lws_transport_mux_close(lws_transport_priv_t priv)
{
}
static void
lws_transport_mux_stream_up(lws_transport_priv_t priv)
{
}
/* incoming parsed channel cbs */
static int
ltm_ch_payload(lws_transport_mux_ch_t *tmc, const uint8_t *buf, size_t len)
{
lws_ss_state_return_t r;
// lwsl_notice("%s: len %d\n", __func__, (int)len);
assert_is_tmch(tmc);
// lwsl_hexdump_notice(buf, len);
r = lws_txp_inside_sspc.event_read(tmc->priv, buf, len);
if (r) {
/*
* Basically the sspc parser rejected it as malformed... we
* lost something somewhere
*
*/
lwsl_notice("%s: r %d\n", __func__, r);
return 1;
}
// return tm->info.txp_cpath.ops_in->event_read(tm->info.txp_cpath.priv_in,
// buf, len);
return 0;
}
static int
ltm_ch_opens(lws_transport_mux_ch_t *tmc, int determination)
{
struct lws_sspc_handle *h = (struct lws_sspc_handle *)tmc->priv;
// lws_transport_path_client_dump(&tm->info.txp_cpath, __func__);
lwsl_sspc_err(h, "%d", determination);
if (lws_txp_inside_sspc.event_connect_disposition(h, determination))
return -1;
return 0;
}
static int
ltm_ch_closes(lws_transport_mux_ch_t *tmc)
{
lwsl_notice("%s\n", __func__);
return 0;
}
static void
ltm_txp_req_write(lws_transport_mux_t *tm)
{
lws_transport_mux_client_request_tx(tm);
}
static int
ltm_txp_can_write(lws_transport_mux_ch_t *tmc)
{
assert_is_tmch(tmc);
return lws_txp_inside_sspc.event_can_write(
(struct lws_sspc_handle *)tmc->priv, 2048);
}
static const lws_txp_mux_parse_cbs_t cbs = {
.payload = ltm_ch_payload,
.ch_opens = ltm_ch_opens,
.ch_closes = ltm_ch_closes,
.txp_req_write = ltm_txp_req_write,
.txp_can_write = ltm_txp_can_write,
};
lws_ss_state_return_t
lws_transport_mux_event_read(lws_transport_priv_t priv,
const uint8_t *buf, size_t len)
{
lws_transport_mux_t *tm = (lws_transport_mux_t *)priv;
lws_ss_state_return_t r;
assert_is_tm(tm);
r = lws_transport_mux_rx_parse(tm, buf, len, &cbs);
return r;
}
lws_ss_state_return_t
lws_transport_mux_event_can_write(struct lws_sspc_handle *h,
size_t metadata_limit)
{
lwsl_notice("%s\n", __func__);
return lws_txp_inside_sspc.event_can_write(h, metadata_limit);
}
void
lws_transport_mux_lost_coherence(lws_transport_priv_t priv)
{
lws_transport_mux_t *tm = (lws_transport_mux_t *)priv;
if (!tm)
return;
assert_is_tm(tm);
lwsl_warn("%s: entering link LOST_SYNC\n", __func__);
lws_transport_set_link(tm, LWSTM_TRANSPORT_DOWN);
}
lws_ss_state_return_t
lws_transport_mux_event_closed(lws_transport_priv_t priv)
{
lws_transport_mux_ch_t *tmc = (lws_transport_mux_ch_t *)priv;
#if defined(_DEBUG)
lws_transport_mux_t *tm;
#endif
if (!tmc)
return 0;
assert_is_tmch(tmc);
#if defined(_DEBUG)
tm = lws_container_of(tmc->list.owner, lws_transport_mux_t, owner);
assert_is_tm(tm);
#endif
if (tmc->priv) {
lwsl_notice("%s: calling sspc event closed\n", __func__);
lws_txp_inside_sspc.event_closed(tmc->priv);
}
return 0;
}
const lws_transport_client_ops_t lws_transport_mux_client_ops = {
.name = "txpmuxc",
.event_retry_connect = lws_transport_mux_retry_connect,
.req_write = lws_transport_mux_ch_req_write,
._write = lws_transport_mux_write,
._close = lws_transport_mux_close,
.event_stream_up = lws_transport_mux_stream_up,
.event_read = lws_transport_mux_event_read,
.lost_coherence = lws_transport_mux_lost_coherence,
.event_can_write = lws_transport_mux_event_can_write,
.event_closed = lws_transport_mux_event_closed,
.flags = LWS_DSHFLAG_ENABLE_COALESCE |
LWS_DSHFLAG_ENABLE_SPLIT
};
#if defined(STANDALONE)
#undef lws_context
#endif

View File

@ -0,0 +1,813 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*
* Transport mux / demux
*/
#include <private-lib-core.h>
#if defined(STANDALONE)
struct lws_context_standalone;
#define lws_context lws_context_standalone
#if defined(_DEBUG)
void
lws_assert_fourcc(uint32_t fourcc, uint32_t expected)
{
if (fourcc == expected)
return;
lwsl_err("%s: fourcc mismatch, expected %c%c%c%c, saw %c%c%c%c\n",
__func__, (int)(expected >> 24), (int)((expected >> 16) & 0xff),
(int)((expected >> 8) & 0xff), (int)(expected & 0xff),
(int)(fourcc >> 24), (int)((fourcc >> 16) & 0xff),
(int)((fourcc >> 8) & 0xff), (int)(fourcc & 0xff));
assert(0);
}
#endif
#endif
lws_transport_mux_ch_t *
lws_transport_mux_get_channel(lws_transport_mux_t *tm, lws_mux_ch_idx_t i)
{
lws_transport_mux_ch_t *mc;
lws_start_foreach_dll(struct lws_dll2 *, d,
lws_dll2_get_head(&tm->owner)) {
mc = lws_container_of(d, lws_transport_mux_ch_t,
list);
if (mc->ch_idx == i)
return mc;
} lws_end_foreach_dll(d);
return NULL;
}
int
lws_transport_mux_next_free(lws_transport_mux_t *tm, lws_mux_ch_idx_t *result)
{
int n = tm->info.flags & LWSTMINFO_SERVER ? 1 : LWS_MUCH_RANGE - 1;
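/*
* The server side scans upward from 1 and the client side downward
* from the top of the range, so both peers proposing new channels at
* the same time cannot pick the same index
*/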
if (tm->owner.count >= LWS_MUCH_RANGE - 3)
/* too full to be safe against new mux ch selection collision */
return 1;
do {
if (!(tm->_open[n >> 5] & (1u << (n & 31)))) {
/*
* Additionally check if any placeholders for this
* channel, that did not reach open yet
*/
if (lws_transport_mux_get_channel(tm, (lws_mux_ch_idx_t)n))
goto go_on;
/*
* No it seems good to try it
*/
*result = (lws_mux_ch_idx_t)n;
return 0;
}
go_on:
n += tm->info.flags & LWSTMINFO_SERVER ? 1 : -1;
} while (n >= 0 && n < LWS_MUCH_RANGE);
return 1;
}
void
lws_transport_set_link(lws_transport_mux_t *tm, int link_state)
{
if (tm->link_state && !link_state) {
lws_transport_mux_ch_t *mc;
lwsl_user("%s: ******* transport mux link is DOWN\n", __func__);
/* destroy any mux channels that were using the link */
while (tm->owner.head) {
mc = lws_container_of(tm->owner.head,
lws_transport_mux_ch_t, list);
lws_transport_mux_destroy_channel(&mc);
}
memset(tm->_open, 0, sizeof(tm->_open));
tm->issue_ping = 1;
tm->awaiting_pong = 0;
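/* keep probing with a PING every 2s until the peer responds and the
* link can come back up */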
lws_sul_schedule((struct lws_context *)tm->cx, 0, &tm->sul_ping,
sul_ping_cb, 2 * LWS_US_PER_SEC);
} else if (!tm->link_state && link_state) {
lwsl_user("%s: ******* transport mux link is UP\n", __func__);
}
tm->link_state = (uint8_t)link_state;
}
void
sul_ping_cb(lws_sorted_usec_list_t *sul)
{
lws_transport_mux_t *tm = lws_container_of(sul, lws_transport_mux_t,
sul_ping);
/*
* Some interval expired on the transport...
*
* ...because we need to send a ping now?
*/
if (!tm->awaiting_pong) {
/*
* We start the pong timer when we decided we wanted to send
* it, not when we sent it, so we can catch unable to send
*/
lwsl_notice("%s: issuing ping\n", __func__);
tm->issue_ping = 1;
tm->awaiting_pong = 1;
lws_sul_schedule((struct lws_context *)tm->cx, 0, &tm->sul_ping,
sul_ping_cb, tm->info.pong_grace_us);
if (tm->info.txp_ppath.ops_onw)
tm->info.txp_ppath.ops_onw->proxy_req_write(
tm->info.txp_ppath.priv_onw);
else
tm->info.txp_cpath.ops_onw->req_write(
tm->info.txp_cpath.priv_onw);
return;
}
/*
* ... hm it's because our PONG never arrived in the grace period...
* it means we take it that the transport is no longer passing data
*/
lwsl_notice("%s: no PONG came\n", __func__);
tm->issue_ping = 1;
tm->awaiting_pong = 0;
lws_transport_set_link(tm, LWSTM_TRANSPORT_DOWN);
lws_sul_schedule((struct lws_context *)tm->cx, 0, &tm->sul_ping,
sul_ping_cb, 2 * LWS_US_PER_SEC);
}
#if defined(PICO_SDK_PATH) || defined(LWS_PLAT_BAREMETAL)
#if 0
struct stv {
uint32_t tv_sec;
uint32_t tv_usec;
};
static uint64_t
get_us_timeofday(void)
{
struct stv tv;
gettimeofday((struct timeval *)&tv, NULL);
return ((uint64_t)((lws_usec_t)tv.tv_sec * LWS_US_PER_SEC) +
(uint64_t)tv.tv_usec);
}
#else
static uint64_t
get_us_timeofday(void)
{
return (uint64_t)lws_now_usecs();
}
#endif
#else
static uint64_t
get_us_timeofday(void)
{
struct timeval tv;
gettimeofday(&tv, NULL);
return ((uint64_t)((lws_usec_t)tv.tv_sec * LWS_US_PER_SEC) +
(uint64_t)tv.tv_usec);
}
#endif
/*
* If the mux channel wants to do something, pack together as much as will
* fit and return nonzero to announce that the mux layer has commandeered this
* write opportunity
*
* Caution, this is called by both client and proxy mux sides
*/
// !!! response timeouts
int
lws_transport_mux_pending(lws_transport_mux_t *tm, uint8_t *buf, size_t *len,
const lws_txp_mux_parse_cbs_t *cbs)
{
uint8_t *p = buf, *end = buf + (*len) - 1u;
lws_transport_mux_ch_t *mc;
int n;
/* pings and pongs go first */
if (tm->issue_ping) {
if (tm->link_state == LWSTM_TRANSPORT_DOWN) {
lwsl_info("%s: send RESET_TRANSPORT\n", __func__);
*p++ = LWSSSS_LLM_RESET_TRANSPORT;
}
lwsl_info("%s: issuing PING\n", __func__);
*p++ = LWSSSS_LLM_PING;
tm->us_ping_out = (uint64_t)lws_now_usecs();
lws_ser_wu64be(p, tm->us_ping_out);
p += 8;
tm->issue_ping = 0;
cbs->txp_req_write(tm);
}
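/*
* 18 covers the largest control frame we might emit below
* (PONG: 1-byte cmd plus two 64-bit timestamps = 17 bytes)
*/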
if (lws_ptr_diff_size_t(end, p) < 18)
goto issue;
if (tm->issue_pong) {
lwsl_info("%s: issuing PONG\n", __func__);
*p++ = LWSSSS_LLM_PONG;
lws_ser_wu64be(p, tm->us_ping_in);
p += 8;
lws_ser_wu64be(p, (uint64_t)lws_now_usecs());
p += 8;
tm->issue_pong = 0;
cbs->txp_req_write(tm);
}
if (lws_ptr_diff_size_t(end, p) < 18)
goto issue;
if (tm->issue_pongack) {
lwsl_info("%s: issuing PONGACK\n", __func__);
*p++ = LWSSSS_LLM_PONGACK;
lws_ser_wu64be(p, (uint64_t)get_us_timeofday());
p += 8;
tm->issue_pongack = 0;
lws_sul_cancel(&tm->sul_ping);
tm->awaiting_pong = 0;
lws_sul_schedule((struct lws_context *)tm->cx, 0, &tm->sul_ping,
sul_ping_cb, tm->info.ping_interval_us);
lws_transport_set_link(tm, LWSTM_OPERATIONAL);
cbs->txp_req_write(tm);
}
for (n = 0; n < LWS_MUCH_RANGE / 32; n++)
if (tm->fin[n] && lws_ptr_diff_size_t(end, p) > 2) {
int m;
for (m = 0; m < 32 && lws_ptr_diff_size_t(end, p) > 2; m++)
if (tm->fin[n] & (1u << m)) {
lwsl_notice("%s: FIN on closed ch %d\n", __func__, (n << 5) |m);
tm->fin[n] &= (uint32_t)~(1 << m);
*p++ = LWSSSS_LLM_CHANNEL_NACK;
*p++ = (uint8_t)((n << 5) | m);
cbs->txp_req_write(tm);
}
}
if (lws_ptr_diff_size_t(end, p) < 18)
goto issue;
if (tm->link_state == LWSTM_TRANSPORT_DOWN)
/*
* We can't do anything except PING / PONG probes if the
* transport state is down
*/
goto issue;
/* let's do any mux control packets first */
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
tm->pending_tx.head) {
mc = lws_container_of(d, lws_transport_mux_ch_t,
list_pending_tx);
if (lws_ptr_diff_size_t(end, p) < 18)
break;
if (mc->state != LWSTMC_OPERATIONAL)
lws_dll2_remove(&mc->list_pending_tx);
/* he wants to write something... let's see how he is */
switch (mc->state) {
case LWSTMC_PENDING_CREATE_CHANNEL:
*p++ = LWSSSS_LLM_CHANNEL_REQ;
*p++ = mc->ch_idx;
mc->state = LWSTMC_AWAITING_CREATE_CHANNEL_ACK;
break;
case LWSTMC_PENDING_CREATE_CHANNEL_ACK:
*p++ = LWSSSS_LLM_CHANNEL_ACK;
*p++ = mc->ch_idx;
tm->_open[mc->ch_idx >> 5] = (uint32_t)(
tm->_open[mc->ch_idx >> 5] |
(1u << (mc->ch_idx & 31)));
cbs->ch_opens(mc, 0);
mc->state = LWSTMC_OPERATIONAL;
break;
case LWSTMC_PENDING_CREATE_CHANNEL_NACK:
*p++ = LWSSSS_LLM_CHANNEL_NACK;
*p++ = mc->ch_idx;
/*
* We're not on board with creating the proposed
* channel, so let's reply with that and then delete the
* placeholder channel we speculatively created
*/
cbs->ch_closes(mc);
lws_transport_mux_destroy_channel(&mc);
break;
case LWSTMC_PENDING_CLOSE_CHANNEL:
*p++ = LWSSSS_LLM_CHANNEL_CLOSE;
*p++ = mc->ch_idx;
mc->state = LWSTMC_AWAITING_CLOSE_CHANNEL_ACK;
break;
case LWSTMC_PENDING_CLOSE_CHANNEL_ACK:
/*
* We're telling the peer we saw and actioned his
* close request. Then we can remove our side.
*/
*p++ = LWSSSS_LLM_CHANNEL_CLOSE_ACK;
*p++ = mc->ch_idx;
cbs->ch_closes(mc);
lws_transport_mux_destroy_channel(&mc);
break;
}
} lws_end_foreach_dll_safe(d, d1);
/* if none, do the first OPERATIONAL that wants to write */
if (buf == p) {
//lwsl_notice("%s: looking for OPERATIONAL\n", __func__);
lws_start_foreach_dll(struct lws_dll2 *, d, tm->pending_tx.head) {
mc = lws_container_of(d, lws_transport_mux_ch_t,
list_pending_tx);
if (mc->state == LWSTMC_OPERATIONAL) {
lws_dll2_remove(&mc->list_pending_tx);
// lwsl_notice("%s: passing up event_can_write\n",
// __func__);
if (cbs->txp_can_write(mc))
return -1;
break;
}
} lws_end_foreach_dll(d);
}
if (tm->pending_tx.head || buf != p)
cbs->txp_req_write(tm);
issue:
*len = lws_ptr_diff_size_t(p, buf);
return p != buf;
}
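/*
* Wire format summary, as handled by the parser below:
*
* CHANNEL_REQ / ACK / NACK / CLOSE / CLOSE_ACK: cmd, ch_idx
* MUX (channel payload): cmd, ch_idx, 16-bit BE length, payload
* PING: cmd, 64-bit BE sender timestamp
* PONG: cmd, echoed PING timestamp, 64-bit BE sender timestamp
* PONGACK: cmd, 64-bit BE us-resolution unixtime
* RESET_TRANSPORT: cmd alone
*/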
int
lws_transport_mux_rx_parse(lws_transport_mux_t *tm,
const uint8_t *buf, size_t len,
const lws_txp_mux_parse_cbs_t *cbs)
{
const uint8_t *end = buf + len;
lws_transport_mux_ch_t *mc;
size_t av;
//lwsl_hexdump_notice(buf, len);
while (buf < end) {
// lwsl_user("%s: state %d\n", __func__, tm->mp_state);
switch (tm->mp_state) {
case LWSTMCPAR_CMD:
tm->mp_cmd = *buf++;
switch (tm->mp_cmd) {
case LWSSSS_LLM_CHANNEL_REQ:
case LWSSSS_LLM_CHANNEL_ACK:
case LWSSSS_LLM_CHANNEL_NACK:
case LWSSSS_LLM_CHANNEL_CLOSE:
case LWSSSS_LLM_CHANNEL_CLOSE_ACK:
tm->mp_state = LWSTMCPAR_CHIDX_DONE;
break;
case LWSSSS_LLM_MUX:
tm->mp_state = LWSTMCPAR_CHIDX;
break;
case LWSSSS_LLM_PING:
case LWSSSS_LLM_PONG:
case LWSSSS_LLM_PONGACK:
tm->mp_ctr = 8;
tm->mp_state = LWSTMCPAR_T64_1;
break;
case LWSSSS_LLM_RESET_TRANSPORT:
/*
* The other side is telling us he lost
* framing coherence, the transport must be
* reset
*/
lws_transport_set_link(tm, LWSTM_TRANSPORT_DOWN);
break;
default:
/* uhhh... */
lwsl_warn("%s: unknown mux cmd 0x%x\n",
__func__, tm->mp_cmd);
// assert(0); /* temp */
goto fail_transport;
}
break;
case LWSTMCPAR_CHIDX_DONE:
tm->mp_idx = *buf++;
tm->mp_state = LWSTMCPAR_CMD;
switch (tm->mp_cmd) {
case LWSSSS_LLM_CHANNEL_REQ:
/*
* peer wants to open a specific channel, how
* do we feel about that?
*/
mc = lws_transport_mux_create_channel(tm,
tm->mp_idx);
if (mc) {
/* We want to try it... */
mc->state = LWSTMC_PENDING_CREATE_CHANNEL_ACK;
goto ask_to_send;
}
/*
* else already pending or open for that
* channel, just ignore and let timeout
*/
break;
case LWSSSS_LLM_CHANNEL_NACK:
case LWSSSS_LLM_CHANNEL_ACK:
/* peer says we can open this channel, but did
* we ask to open it? */
mc = lws_transport_mux_get_channel(tm, tm->mp_idx);
if (!mc) {
lwsl_warn("%s: (N)ACK for open %u we don't "
"remember asking for\n",
__func__, tm->mp_idx);
break;
}
if (tm->_open[tm->mp_idx >> 5] &
1u << (tm->mp_idx & 31)) {
lwsl_warn("%s: (N)ACK for channel "
"already fully open\n",
__func__);
if (tm->mp_cmd == LWSSSS_LLM_CHANNEL_NACK) {
lwsl_warn("%s: taking as FIN ch %d\n",
__func__, tm->mp_idx);
tm->_open[tm->mp_idx >> 5] &= (uint32_t)~(
1 << (tm->mp_idx & 31));
cbs->ch_closes(mc);
}
break;
}
if (tm->mp_cmd == LWSSSS_LLM_CHANNEL_ACK) {
/* peer said 'yes' to the channel
* we wanted */
tm->_open[tm->mp_idx >> 5] =
(uint32_t)(tm->_open[tm->mp_idx >> 5] |
(1u << (tm->mp_idx & 31)));
lwsl_notice("%s: ch %d fully open\n",
__func__, tm->mp_idx);
mc->state = LWSTMC_OPERATIONAL;
cbs->ch_opens(mc, 0);
goto ask_to_send;
}
/* peer said 'no' to the channel we wanted */
cbs->ch_opens(mc, 1);
lws_transport_mux_destroy_channel(&mc);
break;
case LWSSSS_LLM_CHANNEL_CLOSE:
mc = lws_transport_mux_get_channel(tm, tm->mp_idx);
if (!mc) {
lwsl_warn("%s: CLOSE for unknown ch\n",
__func__);
break;
}
if (!(tm->_open[tm->mp_idx >> 5] &
1u << (tm->mp_idx & 31))) {
lwsl_warn("%s: CLOSE for channel "
"not fully open\n",
__func__);
break;
}
mc->state = LWSTMC_PENDING_CLOSE_CHANNEL_ACK;
goto ask_to_send;
case LWSSSS_LLM_CHANNEL_CLOSE_ACK:
/* ok... so we did ask to close that channel? */
mc = lws_transport_mux_get_channel(tm, tm->mp_idx);
if (!mc) {
lwsl_warn("%s: CLOSE_ACK for unknown ch\n",
__func__);
break;
}
if (mc->state != LWSTMC_AWAITING_CLOSE_CHANNEL_ACK) {
lwsl_warn("%s: CLOSE_ACK on ch not waiting for it\n", __func__);
break;
}
/* nothing more should come on this channel */
lws_transport_mux_destroy_channel(&mc);
break;
}
break;
/* mux payload encapsulation */
case LWSTMCPAR_CHIDX:
tm->mp_idx = *buf++;
tm->mp_state++;
break;
case LWSTMCPAR_PLENH:
tm->mp_pay = (uint32_t)((*buf++) << 8);
tm->mp_state++;
break;
case LWSTMCPAR_PLENL:
tm->mp_pay |= *buf++;
mc = lws_transport_mux_get_channel(tm, tm->mp_idx);
if (!mc) {
lwsl_warn("%s: DATA for unknown ch\n",
__func__);
/* assertively NAK the channel */
tm->fin[tm->mp_idx >> 5] |= 1u << (tm->mp_idx & 31);
av = lws_ptr_diff_size_t(end, buf);
if (av > tm->mp_pay)
av = tm->mp_pay;
buf += av;
tm->mp_pay = (uint32_t)(tm->mp_pay - av);
if (!tm->mp_pay)
tm->mp_state = LWSTMCPAR_CMD;
else
tm->mp_state = LWSTMCPAR_PAY;
goto ask_to_send;
}
// lwsl_notice("%s: mux data frame len %d\n", __func__, (int)tm->mp_pay);
assert(tm->_open[tm->mp_idx >> 5] & (1u << (tm->mp_idx & 31)));
if (!tm->mp_pay)
tm->mp_state = LWSTMCPAR_CMD;
else
tm->mp_state = LWSTMCPAR_PAY;
break;
case LWSTMCPAR_PAY:
av = lws_ptr_diff_size_t(end, buf);
if (av > tm->mp_pay)
av = tm->mp_pay;
mc = lws_transport_mux_get_channel(tm, tm->mp_idx);
if (mc) {
if (cbs->payload(mc, buf, av)) {
/*
* indication of broken framing...
* other outcomes handled at SSPC layer
*/
goto fail_transport;
}
}
buf += av;
// lwsl_notice("%s: mp_pay %d -> %d\n", __func__,
// (int)tm->mp_pay, (int)(tm->mp_pay - av));
tm->mp_pay -= (uint32_t)av;
if (!tm->mp_pay)
tm->mp_state = LWSTMCPAR_CMD;
break;
case LWSTMCPAR_T64_1:
tm->mp_time = (tm->mp_time << 8) | *buf++;
if (!--tm->mp_ctr) {
tm->mp_ctr = 8;
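/*
* PING and PONGACK carry a single 64-bit field and complete
* here; PONG carries a second timestamp, parsed in T64_2
*/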
if (tm->mp_cmd == LWSSSS_LLM_PING) {
lwsl_user("%s: got PING\n", __func__);
tm->mp_state = LWSTMCPAR_CMD;
tm->us_ping_in = tm->mp_time;
tm->issue_pong = 1;
cbs->txp_req_write(tm);
break;
}
if (tm->mp_cmd == LWSSSS_LLM_PONGACK) {
lwsl_user("%s: got PONGACK: ustime %llu\n",
__func__,
(unsigned long long)tm->mp_time);
tm->us_unixtime_peer = tm->mp_time;
tm->us_unixtime_peer_loc = (uint64_t)lws_now_usecs();
tm->mp_state = LWSTMCPAR_CMD;
lws_transport_set_link(tm, LWSTM_OPERATIONAL);
lws_sul_cancel(&tm->sul_ping);
tm->awaiting_pong = 0;
lws_sul_schedule((struct lws_context *)tm->cx, 0, &tm->sul_ping,
sul_ping_cb, tm->info.ping_interval_us);
break;
}
tm->mp_state++;
}
break;
case LWSTMCPAR_T64_2:
tm->mp_time1 = (tm->mp_time1 << 8) | *buf++;
if (--tm->mp_ctr)
break;
tm->mp_state = LWSTMCPAR_CMD;
if (tm->mp_time != tm->us_ping_out) {
lwsl_warn("%s: PONG payload mismatch 0x%llx 0x%llx\n",
__func__, (unsigned long long)tm->mp_time,
(unsigned long long)tm->us_ping_out);
break;
}
lwsl_user("%s: got PONG\n", __func__);
tm->awaiting_pong = 0;
lws_sul_cancel(&tm->sul_ping);
lws_sul_schedule((struct lws_context *)tm->cx, 0, &tm->sul_ping,
sul_ping_cb, tm->info.ping_interval_us);
tm->issue_pongack = 1;
cbs->txp_req_write(tm);
break;
}
continue;
ask_to_send:
if (mc && lws_dll2_is_detached(&mc->list_pending_tx))
lws_dll2_add_tail(&mc->list_pending_tx, &tm->pending_tx);
cbs->txp_req_write(tm);
}
return 0;
fail_transport:
lws_transport_set_link(tm, LWSTM_TRANSPORT_DOWN);
return -1;
}
lws_transport_mux_ch_t *
lws_transport_mux_create_channel(lws_transport_mux_t *tm, lws_mux_ch_idx_t i)
{
lws_transport_mux_ch_t *mc;
if (tm->_open[i >> 5] & (1u << (i & 31)))
return NULL;
if (lws_transport_mux_get_channel(tm, i))
return NULL;
mc = malloc(sizeof(*mc));
if (!mc)
return NULL;
memset(mc, 0, sizeof(*mc));
#if defined(_DEBUG)
mc->magic = LWS_TRANSPORT_MUXCH_MAGIC;
#endif
mc->ch_idx = i;
lws_dll2_add_tail(&mc->list, &tm->owner);
return mc;
}
lws_transport_mux_ch_t *
lws_transport_mux_add_channel(lws_transport_mux_t *tm, lws_transport_priv_t priv)
{
lws_transport_mux_ch_t *mc;
lws_mux_ch_idx_t i;
if (lws_transport_mux_next_free(tm, &i)) {
lwsl_err("%s: unable to add new mux channel\n", __func__);
return NULL;
}
mc = lws_transport_mux_create_channel(tm, i);
if (mc)
mc->priv = priv;
return mc;
}
void
lws_transport_mux_destroy_channel(lws_transport_mux_ch_t **_mc)
{
lws_transport_mux_ch_t *mc = *_mc;
lws_transport_mux_t *tm = lws_container_of(mc->list.owner,
lws_transport_mux_t, owner);
lwsl_notice("%s: mux ch %u\n", __func__, mc->ch_idx);
if (mc->state >= LWSTMC_PENDING_CREATE_CHANNEL_ACK)
/* he only sets the open bit on receipt of the ACK */
tm->_open[mc->ch_idx >> 5] &= (uint32_t)
~(1 << (mc->ch_idx & 31));
/*
* We must report channel closure... client side
*/
if (tm->info.txp_cpath.ops_in &&
tm->info.txp_cpath.ops_in->event_closed) {
lwsl_notice("%s: calling %s event closed\n", __func__,
tm->info.txp_cpath.ops_in->name);
tm->info.txp_cpath.ops_in->event_closed((lws_transport_priv_t)mc);
}
/*
* We must report channel closure... proxy side
*/
if (tm->info.txp_ppath.ops_in &&
tm->info.txp_ppath.ops_in->event_close_conn) {
lwsl_notice("%s: calling %s event_close_conn\n", __func__,
tm->info.txp_ppath.ops_in->name);
tm->info.txp_ppath.ops_in->event_close_conn(
(lws_transport_priv_t)mc->priv);
}
lws_sul_cancel(&mc->sul);
lws_dll2_remove(&mc->list_pending_tx);
lws_dll2_remove(&mc->list);
free(mc);
*_mc = NULL;
}
lws_transport_mux_t *
lws_transport_mux_create(struct lws_context *cx, lws_transport_info_t *info,
void *txp_handle)
{
lws_transport_mux_t *tm = malloc(sizeof(*tm));
if (tm) {
memset(tm, 0, sizeof(*tm));
#if defined(_DEBUG)
tm->magic = LWS_TRANSPORT_MUX_MAGIC;
#endif
tm->cx = cx;
tm->info = *info;
tm->txp_handle = txp_handle;
tm->link_state = LWSTM_TRANSPORT_DOWN;
assert_is_tm(tm);
/* let's try a ping straight off */
if (tm->cx)
lws_sul_schedule((struct lws_context *)tm->cx, 0,
&tm->sul_ping, sul_ping_cb, 1);
}
return tm;
}
void
lws_transport_mux_destroy(lws_transport_mux_t **tm)
{
lws_transport_mux_ch_t *mc;
while ((*tm)->owner.head) {
mc = lws_container_of((*tm)->owner.head,
lws_transport_mux_ch_t, list);
lws_transport_mux_destroy_channel(&mc);
}
free(*tm);
*tm = NULL;
}

View File

@ -0,0 +1,404 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2019 - 2021 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*
* Transport mux / demux
*/
#include <private-lib-core.h>
#if defined(_DEBUG)
void
lws_transport_path_proxy_dump(lws_txp_path_proxy_t *path, const char *ctx)
{
char buf[128], *p = buf, *end = buf + sizeof(buf) - 1;
uint32_t magic;
p += lws_snprintf(p, lws_ptr_diff_size_t(end, p),
"MUX: %p, IN: ops %s, priv %p",
path->mux, path->ops_in ? path->ops_in->name : "null",
path->priv_in);
if (path->priv_in) {
magic = *(uint32_t *)path->priv_in;
p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), " (%c%c%c%c)",
(int)(magic >> 24), (int)((magic >> 16) & 0xff),
(int)((magic >> 8) & 0xff), (int)(magic & 0xff));
}
p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), ", ONW: ops %s, priv %p",
path->ops_in ? path->ops_in->name : "null", path->priv_in);
if (path->priv_in) {
magic = *(uint32_t *)path->priv_in;
p += lws_snprintf(p, lws_ptr_diff_size_t(end, p), " (%c%c%c%c)",
(int)(magic >> 24), (int)((magic >> 16) & 0xff),
(int)((magic >> 8) & 0xff), (int)(magic & 0xff));
}
*end = '\0';
lwsl_notice("%s: %s: %s\n", __func__, ctx, buf);
}
#endif
void
lws_transport_mux_proxy_request_tx(lws_transport_mux_t *tm)
{
tm->info.txp_ppath.ops_onw->proxy_req_write(tm->info.txp_ppath.priv_onw);
}
/*
* We're the outer, mux server creation, we should instantiate the mux and
* onward transport
*
* Our transport_priv is the mux object itself.
*/
static int
lws_transport_mux_init_proxy_server(struct lws_context *cx,
const struct lws_transport_proxy_ops *txp_ops_inward,
lws_transport_priv_t txp_priv_inward,
lws_txp_path_proxy_t *txp_ppath,
const void *txp_info,
const char *bind, int port)
{
lws_transport_info_t *info = (lws_transport_info_t *)txp_info;
lws_txp_path_proxy_t txp_ppath_temp;
lws_transport_mux_t *tm;
lwsl_user("%s: priv_inward %p\n", __func__, txp_priv_inward);
assert(info);
assert(info->txp_ppath.ops_onw);
/* let's create the mux... */
tm = malloc(sizeof(*tm));
if (!tm)
return 1;
memset(tm, 0, sizeof(*tm));
txp_ppath->mux = tm;
#if defined(_DEBUG)
tm->magic = LWS_TRANSPORT_MUX_MAGIC;
#endif
tm->cx = cx;
tm->info = *info;
tm->info.txp_ppath.ops_in = txp_ops_inward;
tm->info.txp_ppath.priv_in = txp_priv_inward;
tm->info.txp_ppath.mux = tm;
/* Let's see about creating the onward transport instance after...
* This is creating the transport-serial instance or whatever.
*
* For channels, priv is a conn. For the proxy itself, it's NULL here.
*/
if (info->txp_ppath.ops_onw->init_proxy_server(cx,
&lws_transport_mux_proxy_ops,
(lws_transport_priv_t)tm,
&txp_ppath_temp,
info->onward_txp_info,
bind, port)) {
lwsl_err("%s: onward %s server int fail\n", __func__,
info->txp_ppath.ops_onw->name);
return 1;
}
tm->info.txp_ppath.ops_onw = info->txp_ppath.ops_onw;
tm->info.txp_ppath.priv_onw = txp_ppath_temp.priv_onw;
/* ...let's schedule a ping straight off at the mux layer */
lws_sul_schedule((struct lws_context *)tm->cx, 0, &tm->sul_ping,
sul_ping_cb, 1);
lwsl_user("%s: OK\n", __func__);
return 0;
}
static int
lws_transport_mux_destroy_proxy_server(struct lws_context *cx)
{
if (!cx->txp_ppath.mux)
return 0;
lws_transport_mux_destroy(&cx->txp_ppath.mux);
return 0;
}
lws_ss_state_return_t
lws_transport_mux_proxy_new_conn(struct lws_context *cx,
const struct lws_transport_proxy_ops *txp_ops_inward,
lws_transport_priv_t txp_priv_inward,
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
const lws_fi_ctx_t *fic,
#endif
struct lws_sss_proxy_conn **conn,
lws_transport_priv_t txp_priv)
{
return 0;
}
lws_ss_state_return_t
lws_transport_mux_proxy_close_conn(struct lws_sss_proxy_conn *conn)
{
return 0;
}
/* incoming parsed channel cbs */
static int
ltm_ch_payload(lws_transport_mux_ch_t *tmc, const uint8_t *buf, size_t len)
{
#if defined(_DEBUG)
lws_transport_mux_t *tm;
#endif
assert_is_tmch(tmc);
#if defined(_DEBUG)
tm = lws_container_of(tmc->list.owner, lws_transport_mux_t, owner);
assert_is_tm(tm);
#endif
lwsl_notice("%s\n", __func__);
// lwsl_hexdump_err(buf, len);
#if defined(_DEBUG)
lws_transport_path_proxy_dump(&tm->info.txp_ppath, __func__);
#endif
lws_txp_inside_proxy.proxy_read(tmc->priv, buf, len);
return 0;
}
static int
ltm_ch_opens(lws_transport_mux_ch_t *tmc, int determination)
{
lws_transport_mux_t *tm;
struct lws_sss_proxy_conn *conn;
lwsl_notice("%s\n", __func__);
assert_is_tmch(tmc);
tm = lws_container_of(tmc->list.owner, lws_transport_mux_t, owner);
assert_is_tm(tm);
if (lws_txp_inside_proxy.event_new_conn(
tm->cx, &lws_txp_inside_proxy,
(lws_transport_priv_t)NULL,
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
NULL,
#endif
&conn,
(lws_transport_priv_t)tmc)) {
lwsl_err("%s: hangup from new_conn\n", __func__);
return -1;
}
tmc->priv = (lws_transport_priv_t)conn;
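/* payload arriving on this mux channel is now routed into this
* proxy conn via ltm_ch_payload() -> proxy_read() */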
return 0;
}
static int
ltm_ch_closes(lws_transport_mux_ch_t *tmc)
{
lwsl_notice("%s\n", __func__);
return 0;
}
static void
ltm_txp_req_write(lws_transport_mux_t *tm)
{
// lws_transport_mux_proxy_request_tx(tm);
if (tm->info.txp_ppath.priv_onw)
tm->info.txp_ppath.ops_onw->proxy_req_write(tm->info.txp_ppath.priv_onw);
}
static int
ltm_txp_can_write(lws_transport_mux_ch_t *tmc)
{
assert_is_tmch(tmc);
return lws_txp_inside_proxy.event_proxy_can_write(tmc->priv
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
, NULL
#endif
);
}
static const lws_txp_mux_parse_cbs_t cbs = {
.payload = ltm_ch_payload,
.ch_opens = ltm_ch_opens,
.ch_closes = ltm_ch_closes,
.txp_req_write = ltm_txp_req_write,
.txp_can_write = ltm_txp_can_write,
};
lws_ss_state_return_t
lws_transport_mux_proxy_event_proxy_can_write(
lws_transport_priv_t priv
//struct lws_sss_proxy_conn *conn
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
, const lws_fi_ctx_t *fic
#endif
)
{
lws_transport_mux_t *tm = (lws_transport_mux_t *)priv;
struct lws_sss_proxy_conn *conn;
uint8_t buf[2048];
size_t r = sizeof(buf), r1;
assert_is_tm(tm);
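/* mux-level control traffic (pings, channel management) takes
* priority over channel payload on this write opportunity */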
if (lws_transport_mux_pending(tm, buf, &r, &cbs)) {
r1 = r;
tm->info.txp_ppath.ops_onw->proxy_write(tm->info.txp_ppath.priv_onw, buf, &r);
if (r != r1)
assert(0);
return 0;
}
conn = (struct lws_sss_proxy_conn *)tm->info.txp_ppath.priv_in;
if (conn) {
assert_is_conn(conn);
tm->info.txp_ppath.ops_in->event_proxy_can_write(conn
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
, fic
#endif
);
}
return 0;
}
static void
lws_transport_mux_onward_bind(lws_transport_priv_t priv, struct lws_ss_handle *h)
{
}
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
static const lws_fi_ctx_t *
lws_transport_mux_fault_context(lws_transport_priv_t priv)
{
return NULL;
}
#endif
static void
lws_transport_mux_client_up(lws_transport_priv_t priv)
{
}
static void
lws_transport_mux_proxy_req_write(lws_transport_priv_t priv)
{
lws_transport_mux_ch_t *tmc = (lws_transport_mux_ch_t *)priv;
lws_transport_mux_t *tm;
assert_is_tmch(tmc);
tm = lws_container_of(tmc->list.owner, lws_transport_mux_t, owner);
assert_is_tm(tm);
if (!tm->info.txp_ppath.priv_onw)
return;
if (lws_dll2_is_detached(&tmc->list_pending_tx))
lws_dll2_add_tail(&tmc->list_pending_tx, &tm->pending_tx);
tm->info.txp_ppath.ops_onw->proxy_req_write(tm->info.txp_ppath.priv_onw);
}
/**< Ask the proxy to write out on the onward (back toward the client) transport for this channel */
int
lws_transport_mux_proxy_write(lws_transport_priv_t priv, uint8_t *buf, size_t *len)
{
lws_transport_mux_ch_t *tmc = (lws_transport_mux_ch_t *)priv;
lws_transport_mux_t *tm;
size_t olen;
//lwsl_notice("%s\n", __func__);
assert_is_tmch(tmc);
tm = lws_container_of(tmc->list.owner, lws_transport_mux_t, owner);
assert_is_tm(tm);
assert(*len < 0xffff);
/* use the LWS_PRE area to encapsulate the SSS inside the mux protocol */
buf[-4] = LWSSSS_LLM_MUX;
buf[-3] = tmc->ch_idx;
buf[-2] = (*len >> 8) & 0xff;
buf[-1] = *len & 0xff;
olen = (*len) + 4;
tm->info.txp_ppath.ops_onw->proxy_write(tm->info.txp_ppath.priv_onw,
buf - 4, &olen);
assert(olen == (*len) + 4);
return 0;
}
lws_ss_state_return_t
lws_transport_mux_proxy_read(lws_transport_priv_t priv,
const uint8_t *buf, size_t len)
{
lws_transport_mux_t *tm = (lws_transport_mux_t *)priv;
lws_ss_state_return_t r;
assert_is_tm(tm);
r = lws_transport_mux_rx_parse(tm, buf, len, &cbs);
return r;
}
const lws_transport_proxy_ops_t lws_transport_mux_proxy_ops = {
.name = "txpmuxp",
.init_proxy_server = lws_transport_mux_init_proxy_server,
.destroy_proxy_server = lws_transport_mux_destroy_proxy_server,
.proxy_read = lws_transport_mux_proxy_read,
.proxy_req_write = lws_transport_mux_proxy_req_write,
.proxy_write = lws_transport_mux_proxy_write,
.event_onward_bind = lws_transport_mux_onward_bind,
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
.fault_context = lws_transport_mux_fault_context,
#endif
.event_close_conn = lws_transport_mux_proxy_close_conn,
.event_proxy_can_write = lws_transport_mux_proxy_event_proxy_can_write,
.event_new_conn = lws_transport_mux_proxy_new_conn,
.event_client_up = lws_transport_mux_client_up,
.flags = LWS_DSHFLAG_ENABLE_COALESCE |
LWS_DSHFLAG_ENABLE_SPLIT
};

File diff suppressed because it is too large

View File

@ -0,0 +1,86 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2023 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
int
lws_wol(struct lws_context *ctx, const char *ip_or_NULL, uint8_t *mac_6_bytes)
{
int n, m, ofs = 0, fd, optval = 1, ret = 1;
uint8_t pkt[17 * ETHER_ADDR_LEN];
struct sockaddr_in addr;
fd = (int)(intptr_t)socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
if (fd < 0) {
lwsl_cx_err(ctx, "failed to open UDP, errno %d\n", errno);
goto bail;
}
if (setsockopt(fd, SOL_SOCKET, SO_BROADCAST,
(char *)&optval, sizeof(optval)) < 0) {
lwsl_cx_err(ctx, "failed to set broadcast, errno %d\n", errno);
goto bail;
}
/*
* Lay out the magic packet: 6 bytes of 0xff, then the target MAC
* repeated 16 times
*/
for (n = 0; n < ETHER_ADDR_LEN; n++)
pkt[ofs++] = 0xff;
for (m = 0; m < 16; m++)
for (n = 0; n < ETHER_ADDR_LEN; n++)
pkt[ofs++] = mac_6_bytes[n];
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_port = htons(9);
if (!inet_pton(AF_INET, ip_or_NULL ? ip_or_NULL : "255.255.255.255",
&addr.sin_addr)) {
lwsl_cx_err(ctx, "failed to convert to ipv4 broadcast ads, errno %d\n",
errno);
goto bail;
}
lwsl_cx_notice(ctx, "Sending WOL to %02X:%02X:%02X:%02X:%02X:%02X %s\n",
mac_6_bytes[0], mac_6_bytes[1], mac_6_bytes[2], mac_6_bytes[3],
mac_6_bytes[4], mac_6_bytes[5], ip_or_NULL ? ip_or_NULL : "");
/* arg2 is normally const void *, on mingw it's const char * */
if (sendto(fd, (const char *)pkt, sizeof(pkt), 0, (struct sockaddr *)&addr,
sizeof(addr)) < 0) {
lwsl_cx_err(ctx, "failed to sendto broadcast ads, errno %d\n",
errno);
goto bail;
}
ret = 0;
bail:
if (fd >= 0) /* coverity */
close(fd);
return ret;
}

View File

@ -0,0 +1,289 @@
/*
* libwebsockets - small server side websockets and web server implementation
*
* Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "private-lib-core.h"
void
__lws_wsi_remove_from_sul(struct lws *wsi)
{
lws_sul_cancel(&wsi->sul_timeout);
lws_sul_cancel(&wsi->sul_hrtimer);
lws_sul_cancel(&wsi->sul_validity);
#if defined(LWS_WITH_SYS_FAULT_INJECTION)
lws_sul_cancel(&wsi->sul_fault_timedclose);
#endif
}
/*
* hrtimer
*/
static void
lws_sul_hrtimer_cb(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws, sul_hrtimer);
if (wsi->a.protocol &&
wsi->a.protocol->callback(wsi, LWS_CALLBACK_TIMER,
wsi->user_space, NULL, 0))
__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"hrtimer cb errored");
}
void
__lws_set_timer_usecs(struct lws *wsi, lws_usec_t us)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
wsi->sul_hrtimer.cb = lws_sul_hrtimer_cb;
__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
&wsi->sul_hrtimer, us);
}
void
lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
{
__lws_set_timer_usecs(wsi, usecs);
}
/*
* wsi timeout
*/
static void
lws_sul_wsitimeout_cb(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws, sul_timeout);
struct lws_context *cx = wsi->a.context;
struct lws_context_per_thread *pt = &cx->pt[(int)wsi->tsi];
/* no need to log normal idle keepalive timeout */
// if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
lwsl_wsi_info(wsi, "TIMEDOUT WAITING %d, dhdr %d, ah %p, wl %d",
wsi->pending_timeout,
wsi->hdr_parsing_completed, wsi->http.ah,
pt->http.ah_wait_list_length);
#if defined(LWS_WITH_CGI)
if (wsi->http.cgi)
lwsl_wsi_notice(wsi, "CGI timeout: %s", wsi->http.cgi->summary);
#endif
#else
if (wsi->pending_timeout != PENDING_TIMEOUT_USER_OK)
lwsl_wsi_info(wsi, "TIMEDOUT WAITING on %d ",
wsi->pending_timeout);
#endif
if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
/*
* Since he failed a timeout, he already had a chance to
* do something and was unable to... that includes
* situations like half closed connections. So process
* this "failed timeout" close as a violent death and
* don't try to do protocol cleanup like flush partials.
*/
wsi->socket_is_permanently_unusable = 1;
#if defined(LWS_WITH_CLIENT)
if (lwsi_state(wsi) == LRS_WAITING_SSL)
lws_inform_client_conn_fail(wsi,
(void *)"Timed out waiting SSL", 21);
if (lwsi_state(wsi) == LRS_WAITING_SERVER_REPLY)
lws_inform_client_conn_fail(wsi,
(void *)"Timed out waiting server reply", 30);
#endif
lws_context_lock(cx, __func__);
lws_pt_lock(pt, __func__);
__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");
lws_pt_unlock(pt);
lws_context_unlock(cx);
}
void
__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
wsi->sul_timeout.cb = lws_sul_wsitimeout_cb;
__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
&wsi->sul_timeout,
((lws_usec_t)secs) * LWS_US_PER_SEC);
lwsl_wsi_debug(wsi, "%d secs, reason %d\n", secs, reason);
wsi->pending_timeout = (char)reason;
}
void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
lws_context_lock(pt->context, __func__);
lws_pt_lock(pt, __func__);
lws_dll2_remove(&wsi->sul_timeout.list);
lws_pt_unlock(pt);
if (!secs)
goto bail;
if (secs == LWS_TO_KILL_SYNC) {
lwsl_wsi_debug(wsi, "TO_KILL_SYNC");
lws_context_unlock(pt->context);
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"to sync kill");
return;
}
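/* LWS_TO_KILL_ASYNC: arm a zero-interval timeout so the close
* happens on the next event loop pass instead of synchronously */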
if (secs == LWS_TO_KILL_ASYNC)
secs = 0;
// assert(!secs || !wsi->mux_stream_immortal);
if (secs && wsi->mux_stream_immortal)
lwsl_wsi_err(wsi, "on immortal stream %d %d", reason, secs);
lws_pt_lock(pt, __func__);
__lws_set_timeout(wsi, reason, secs);
lws_pt_unlock(pt);
bail:
lws_context_unlock(pt->context);
}
void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
lws_pt_lock(pt, __func__);
lws_dll2_remove(&wsi->sul_timeout.list);
lws_pt_unlock(pt);
if (!us)
return;
lws_pt_lock(pt, __func__);
__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
&wsi->sul_timeout, us);
lwsl_wsi_notice(wsi, "%llu us, reason %d",
(unsigned long long)us, reason);
wsi->pending_timeout = (char)reason;
lws_pt_unlock(pt);
}
static void
lws_validity_cb(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws, sul_validity);
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
const lws_retry_bo_t *rbo = wsi->retry_policy;
/* one of either the ping or hangup validity threshold was crossed */
if (wsi->validity_hup) {
struct lws_context *cx = wsi->a.context;
lwsl_wsi_info(wsi, "validity too old");
lws_context_lock(cx, __func__);
lws_pt_lock(pt, __func__);
__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"validity timeout");
lws_pt_unlock(pt);
lws_context_unlock(cx);
return;
}
/* schedule a protocol-dependent ping */
lwsl_wsi_info(wsi, "scheduling validity check");
if (lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive))
lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive).
issue_keepalive(wsi, 0);
/*
* We arrange to come back here after the additional ping to hangup time
* and do the hangup, unless we get validated (by, eg, a PONG) and
* reset the timer
*/
assert(rbo->secs_since_valid_hangup > rbo->secs_since_valid_ping);
wsi->validity_hup = 1;
__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
&wsi->sul_validity,
((uint64_t)rbo->secs_since_valid_hangup -
rbo->secs_since_valid_ping) * LWS_US_PER_SEC);
}
/*
* The role calls this back to actually confirm validity on a particular wsi
* (which may not be the original wsi)
*/
void
_lws_validity_confirmed_role(struct lws *wsi)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
const lws_retry_bo_t *rbo = wsi->retry_policy;
if (!rbo || !rbo->secs_since_valid_hangup)
return;
wsi->validity_hup = 0;
wsi->sul_validity.cb = lws_validity_cb;
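/*
* If the ping threshold is not sooner than the hangup threshold, skip
* the keepalive ping stage and arm the hangup timeout directly
*/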
wsi->validity_hup = rbo->secs_since_valid_ping >=
rbo->secs_since_valid_hangup;
lwsl_wsi_info(wsi, "setting validity timer %ds (hup %d)",
wsi->validity_hup ? rbo->secs_since_valid_hangup :
rbo->secs_since_valid_ping,
wsi->validity_hup);
__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
&wsi->sul_validity,
((uint64_t)(wsi->validity_hup ?
rbo->secs_since_valid_hangup :
rbo->secs_since_valid_ping)) * LWS_US_PER_SEC);
}
void
lws_validity_confirmed(struct lws *wsi)
{
/*
* This may be a stream inside a muxed network connection... leave it
* to the role to figure out who actually needs to understand their
* validity was confirmed.
*/
if (!wsi->h2_stream_carries_ws && /* only if not encapsulated */
wsi->role_ops &&
lws_rops_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive))
lws_rops_func_fidx(wsi->role_ops, LWS_ROPS_issue_keepalive).
issue_keepalive(wsi, 1);
}

File diff suppressed because it is too large