#include "ctx.hpp"
#include <utility> // std::pair
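// config callbacks: each *_config() parses the key's value and allocates the
// per-key context (stored via ctx), each *_unconfig() releases it again.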
static bool parent_config(void*& ctx, char* value) {
ctx = (void*)tcalloc(sockaddr_t);
if (!str2addr(value, *(sockaddr_t*)ctx)) {
safe_free(ctx);
return false;
}
return true;
}
static void parent_unconfig(void*& ctx) {
safe_free(ctx);
}
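// enqueue an asynchronous read of the HTTP error document; ctx keeps the
// AioFileIn::Result instance alive until *_unconfig() drops it.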
static bool http_err_doc_config(void*& ctx, char* value) {
ctx = (void*)AioFileIn::enqueue(value);
return (ctx != NULL);
}
static void http_err_doc_unconfig(void*& ctx) {
if (ctx) {
((AioFileIn::Result*)ctx)->delInst();
ctx = NULL;
}
}
CONF_DEF(config) {
ConfigKey parent;
ConfigKey dump_dir;
ConfigKey http_err_doc; // TODO: aio really needed? can mimic using config? blocking and not reconfigurable?
ConfigKey error_cache;
};
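// error cache: remembers per (src, dst) address pair when the last bump error
// occurred, so subsequent connections can skip bumping (see error_cache_get/set).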
typedef HashCache<std::pair<sockaddr_t, sockaddr_t>, time_t> ErrorCache;
CONF_INIT(config) {
CONF_KEY_INIT(parent, true, false, &parent_config, &parent_unconfig);
CONF_KEY_INIT(dump_dir, true, "bump");
CONF_KEY_INIT(http_err_doc, true, false, http_err_doc_config, http_err_doc_unconfig);
CONF_KEY_INST(error_cache, true, ErrorCache, size_t);
}
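// drop all cached bump errors whenever the CACHE_CLEAN hook fires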
HOOK(CACHE_CLEAN, HOOK_PRIO_MID) {
if (config) {
ErrorCache* cache = config->error_cache.ctx_as<ErrorCache>();
if (cache) {
cache->invalidate();
}
}
}
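// a cached bump error is honored for 300 seconds per (src, dst) pair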
static INLINE bool error_cache_get(const sockaddr_t& src, const sockaddr_t& dst) {
const ErrorCache* error_cache = config? config->error_cache.ctx_as<ErrorCache>(): NULL;
return error_cache && error_cache->at(std::pair<sockaddr_t, sockaddr_t>(src, dst), 0) > NOW - 300;
}
static INLINE void error_cache_set(const sockaddr_t& src, const sockaddr_t& dst) {
ErrorCache* error_cache = config? config->error_cache.ctx_as<ErrorCache>(): NULL;
if (error_cache) *error_cache->get(std::pair<sockaddr_t, sockaddr_t>(src, dst)) = NOW;
}
#if (SPLICE)
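// builds a dump file name "<dump_dir>/<ctx_no>_<time>_<c|s>_<addr>";
// returns a pointer into a static buffer (valid only until the next call) or NULL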
static const char* bump_fn(unsigned id, char c, const sockaddr_t& addr) {
static char buf[512];
unless (config) return NULL;
int l = snprintf(buf, sizeof(buf), "%s/%u_%lld_%c_", config->dump_dir.val.str.str, id, (long long)NOW, c);
if (l <= 0 || l >= (int)sizeof(buf)) return NULL;
return (addr2str(addr, buf+l, false) != buf+l)? buf: NULL;
}
#endif
//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//
unsigned Ctx::last_ctx_no = 0;
unsigned Ctx::ctx_num = 0;
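// per-state poll behaviour; column order as used by handler()/update_events()
// (assumed from usage): { internal, short_timeout, client_ign_ev, server_ign_ev }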
const Ctx::state_info_t Ctx::state_info[] = {
{ false, true, EVENT_OUT, EVENT_NONE }, // STATE_INIT (server blocked/ignored, listen only for EVENT_IN|EVENT_CLOSE or EVENT_TOUT for PROXY or SNI)
{ false, true, EVENT_OUT, EVENT_NONE }, // STATE_PROXY (server blocked/ignored, listen only for EVENT_IN|EVENT_CLOSE or EVENT_TOUT for SNI)
{ false, true, EVENT_OUT|EVENT_IN, EVENT_NONE }, // STATE_DNS (server blocked/ignored, listen only for EVENT_CLOSE or EVENT_TOUT while waiting for result)
{ true, true, 0, 0 }, // STATE_SNI
{ true, true, 0, 0 }, // STATE_BUMP_OP
{ false, true, EVENT_OUT|EVENT_IN, EVENT_NONE }, // STATE_AC (server blocked/ignored, listen only for EVENT_CLOSE or EVENT_TOUT)
{ false, true, EVENT_OUT|EVENT_IN, EVENT_NONE }, // STATE_BUMP (server blocked/ignored, listen only for EVENT_CLOSE or EVENT_TOUT)
{ false, true, EVENT_OUT, EVENT_NONE }, // STATE_HTTP_REQ (server blocked/ignored, listen only for EVENT_IN|EVENT_CLOSE or EVENT_TOUT)
{ true, true, 0, 0 }, // STATE_DENY
{ true, true, 0, 0 }, // STATE_CONNECT
{ false, true, EVENT_NONE, EVENT_TOUT }, // STATE_CERT_CN (client blocked/ignored, listen only for EVENT_IN|EVENT_OUT|EVENT_CLOSE or EVENT_TOUT)
{ false, true, EVENT_TOUT, EVENT_TOUT }, // STATE_CN_AC (client and server blocked/ignored, listen only for EVENT_CLOSE or EVENT_TOUT)
{ false, false, EVENT_NONE, EVENT_NONE } // STATE_IO
};
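// a Ctx ties together one accepted client fd, the (later connected) server fd
// and the buffer chains between them; ctx_no is a monotonically increasing id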
Ctx::Ctx(int fd, sockaddr_t _src, sockaddr_t _dst):
state(STATE_INIT), max_state(STATE_INIT), src(_src), dst(_dst), client(fd),
client_in(&client, &upstream), server_in(&server, &downstream),
client_out(&client, &downstream), server_out(&server, &upstream),
host(NULL), host_peek_buf(NULL), resolver(NULL), ac(NULL), bump_op(BUMP_NONE), bump(NULL), is_http(TRI_NONE),
last_io(NOW), last_pollid(0), ctx_no(++last_ctx_no), start_msec(NOW_MSEC) {
log(debug, "creating ctx #%u for fd %d", ctx_no, (int)client);
++ctx_num;
stats_inc(num);
client_in.rx_stats = stat_get(tx);
server_in.rx_stats = stat_get(rx);
assert(client.is_valid());
log_ctx.no = &ctx_no;
log_ctx.cfd = client;
log_ctx.sfd = server;
}
Ctx::~Ctx() {
LogCtx l(&log_ctx);
log(debug, "closing ctx #%u", ctx_no);
--ctx_num;
// flush out to the sockets/disk, then check whether there is leftover data in the chains that hasn't been flushed yet because the bufs were not fully used.
server_out.do_io(); // will most likely fail as the main fds should be closed now (as this is the main reason for destruction)
#if (SPLICE)
server_splice.do_io();
if (upstream_splice.add_data(&upstream)) {
server_splice.do_io();
}
#endif
client_out.do_io();
#if (SPLICE)
client_splice.do_io();
if (downstream_splice.add_data(&downstream)) {
client_splice.do_io();
}
#endif
access_log();
if (ac) delete ac;
if (bump) delete bump;
if (resolver) {
resolver->delInst();
resolver = NULL;
}
free(host);
if (host_peek_buf) buf_pool.push(host_peek_buf);
if (client_in.err == EPROTO || client_out.err == EPROTO || server_in.err == EPROTO || server_out.err == EPROTO) {
#ifdef DEBUG
log(debug, "not adding to error cache");
#else
error_cache_set(src, dst);
#endif
}
}
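// one summary line per connection: peers, SNI/host, byte counters, timings,
// bump op and the final state/error codes of all four i/o wrappers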
void Ctx::access_log() {
char c[ADDRSTRLEN];
char s[ADDRSTRLEN];
addr2str(src, c, false);
addr2str(dst, s, true);
msec_t stime = (client_out.stime ?: NOW_MSEC)-start_msec;
stats_add(stimes, stime);
log_access(
"%s -> %s, sni %s, rx/tx %zu/%zu (%zu/%zu), msec %" PRIu64 "/%" PRIu64 ", op %d, state %d/%d, err %d/%d -> %d/%d",
c, s, host?:"-", server_in.get_total(), server_out.get_total(), client_out.get_total(), client_in.get_total(), stime, NOW_MSEC-start_msec, (int)bump_op, (int)state, (int)max_state, client_in.err, client_out.err, server_in.err, server_out.err
);
}
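// poll callback for both fds: marks a closed side and runs the state machine
// of the side the event belongs to; returns false only for skipped duplicates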
bool Ctx::handler(int fd, event_t ev, unsigned pollid) {
LogCtx l(&log_ctx);
assert(fd != -1);
#if 1
// skip repeated processing if it is a simple i/o event
if (pollid == last_pollid && event_isonly(ev, EVENT_IN|EVENT_OUT)) {
log(debug, "skipping event loop %u", pollid);
return false;
}
last_pollid = pollid;
#endif
assert(!state_info[state].internal);
if (fd == client) {
if (event_isset(ev, EVENT_CLOSE)) client.closed(true);
c_handler(ev);
} else if (fd == server) {
if (event_isset(ev, EVENT_CLOSE)) server.closed(true);
s_handler(ev);
} else {
assert(false);
}
assert(!state_info[state].internal);
if (state != STATE_IO && state > max_state) {
max_state = state;
}
return true;
}
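// pump data client->server and server->client; directions that made progress
// are retried once immediately and refresh the idle timestamp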
void Ctx::do_io() {
struct local { static bool INLINE do_io(IoSource& src, IoSink& dst) {
bool rv = false;
rv |= src.do_io();
rv |= dst.do_io();
return rv;
}};
assert(state == STATE_IO);
assert(state_info[state].client_ign_ev == EVENT_NONE && state_info[state].server_ign_ev == EVENT_NONE);
bool upstream_busy = local::do_io(client_in, server_out);
#if (SPLICE)
server_splice.do_io();
#endif
bool downstream_busy = local::do_io(server_in, client_out);
#if (SPLICE)
client_splice.do_io();
#endif
// speculatively retry the directions that made progress (were not a no-op)
if (upstream_busy) {
last_io = NOW;
local::do_io(client_in, server_out);
#if (SPLICE)
server_splice.do_io();
#endif
}
if (downstream_busy) {
last_io = NOW;
local::do_io(server_in, client_out);
#if (SPLICE)
client_splice.do_io();
#endif
}
}
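// re-arm the poll events of both fds according to the current state's ignore
// masks (a side whose peer carries an ignore mask is restricted to EVENT_CLOSE);
// also resets both sides after MAX_IDLE of inactivity; returns false once both
// fds are down, i.e. the ctx can be deleted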
bool Ctx::update_events(poll_handler_t handler) {
struct local {
static INLINE void update_events(Ctx* inst, poll_handler_t handler, const state_info_t& state, IoWrapper& io, const IoWrapper* peer, bool is_client, const IoSource& src, const IoSink& sink) {
assert(io.is_valid());
const event_t ignored_events = is_client? state.client_ign_ev: state.server_ign_ev;
const event_t other_ignored_events = is_client? state.server_ign_ev: state.client_ign_ev;
unless (ignored_events == EVENT_NONE && other_ignored_events == EVENT_NONE) {
peer = NULL; // we're currently filtering one and blocking the other side, so don't check for chain events (other side might be invalid, too)
}
event_t ev = src.events(peer)|sink.events(peer);
unless (ignored_events == EVENT_NONE) {
event_unset(ev, ignored_events); // keep all events but the given ones, or NONE: as the other side is blocked in this state, die in case we cannot make progress.
}
unless (other_ignored_events == EVENT_NONE) {
event_setonly(ev, EVENT_CLOSE); // only CLOSE, but keep NONE: as we are blocked in this state, die in case we did not try to make progress. if there will be a close, the next time we'll get NONE (cannot write due to close, cannot read due to no peer & closed)
}
if (ev == EVENT_NONE) { // makes no sense wrt. peer or we're stuck without a peer
io.reset();
} else if (event_isset(ev, EVENT_TOUT)) { // src indicates that we must not poll due to existing close event
io.poll(handler, EVENT_NONE, state.short_timeout, inst); // will only remove it from the pollset. TODO: also keep a timeout for this side
} else {
io.poll(handler, ev, state.short_timeout, inst);
}
}
};
LogCtx l(&log_ctx);
if (last_io <= NOW-MAX_IDLE) {
log(info, "timeout");
client.reset();
server.reset();
}
if (client.is_valid()) {
local::update_events(this, handler, state_info[state], client, &server, true, client_in, client_out);
}
if (server.is_valid()) {
local::update_events(this, handler, state_info[state], server, &client, false, server_in, server_out);
if (!server.is_valid() && client.is_valid()) { // check other side again, as peer->is_valid conditions might have changed due to this
local::update_events(this, handler, state_info[state], client, &server, true, client_in, client_out);
}
}
return client.is_valid() || server.is_valid(); // false -> both sides down, caller may delete us
}
void Ctx::c_handler(event_t ev) {
// we have to check this after each io/op on the client below, but check it also here just in case (we won't recover from it, so can skip AC etc). server side is not checked here, though.
unless (client.is_valid()) {
if (state != STATE_IO) {
log(debug, "client already down in state %d", state);
state = STATE_IO;
}
}
// first (read) event
// -> STATE_AC upon (cached) error (so no bumping)
// -> STATE_PROXY otherwise
if (state == STATE_INIT) {
if (dst.family && error_cache_get(src, dst)) {
log(info, "cached bump error");
state = STATE_AC;
} else { // no cached bump error (or direct connection): proceed normally
state = STATE_PROXY;
}
}
// for direct connections, try to perform an immediate proxy handshake for getting the original destination
// #dst, #host
// -> STATE_SNI if not needed (transparent conn) or upon next read event after proxy handshake
// -> STATE_AC if not needed (transparent conn) and timeout (neither SNI nor proxy will be tried)
// -> STATE_IO upon no direct proxy request or error
if (state == STATE_PROXY) {
if (event_isset(ev, EVENT_TOUT)) { // no error_cache_set() in this case
log(info, "client init timeouted");
if (!dst.family) { // cannot do much for direct connections
client.reset();
state = STATE_IO;
} else { // most probably not SSL then, so we don't try STATE_CERT
state = STATE_AC;
}
} else if (dst.family) { // transparent connection for the first time or after listening again after proxy request
state = STATE_SNI;
} else if (!handle_proxy_request(client, dst, host)) { // for direct connections, we need dst from some higher supported proxying protocol
log(error, "cannot determine original destination");
client.reset(); // might have already written out some error message (or tried to)
state = STATE_IO;
} else {
log(info, "PROXY request to '%s' ('%s') detected", addr2str(dst, true), host?:"-");
assert(dst.family || host); // either one must have been found above
if (dst.family && host) { // great success, can go on immediately
state = STATE_SNI;
} else if (!dst.family) { // have to resolve now
state = STATE_DNS;
} else { // stay in this state until next read event for SNI
assert(dst.family && !host);
}
}
event_unset(ev, EVENT_TOUT);
}
// dispatch DNS query and wait in this state for completion or timeout
// #resolver, #dst
// -> STATE_IO with invalid client upon error
// -> STATE_SNI upon success (will be noop though, as host must be available anyhow)
if (state == STATE_DNS) {
assert(!dst.family && host);
if (!resolver) {
resolver = DNSResult::getInst(host, src.family, client);
assert(resolver);
}
switch (resolver->get(dst)) {
case TRI_TRUE:
log(debug, "got DNS result");
state = STATE_SNI;
break;
case TRI_NONE:
if (event_isset(ev, EVENT_TOUT)) {
log(notice, "DNS timeout");
resolver->delInst();
resolver = NULL;
client.reset();
state = STATE_IO;
} else {
log(debug, "will wait for DNS result");
}
break;
case TRI_FALSE:
log(notice, "DNS fail");
client.reset();
state = STATE_IO;
break;
}
event_unset(ev, EVENT_TOUT);
}
// peek at SNI for hostname (or from cache) if needed
// #host, #host_peek_buf
// -> STATE_AC upon handshake error (so no bumping)
// -> STATE_BUMP_OP otherwise
if (state == STATE_SNI) {
assert(dst.family);
state = STATE_BUMP_OP;
if (!host) { // might come from proxy request
if (SNI_PEEK_PORT && ntohs(dst.addr4.sin_port) != SNI_PEEK_PORT) { // strange transparent traffic redirection or got dst from proxy
log(debug, "not trying SNI peeking");
} else if (!sni(client, src, dst, host_peek_buf, host)) { // false if bumping should be deferred, even with SNI available
log(info, "will defer bumping");
state = STATE_AC;
}
}
log(info, "SNI '%s' (peeked %zd)", host ?: "", host_peek_buf? host_peek_buf->get_data(): 0);
}
// let config ruleset decide whether to bump or connect to parent
// #bump_op
// -> STATE_AC
if (state == STATE_BUMP_OP) {
if (host) { // != BUMP_NONE won't make much sense w/o a CN
bump_op = bump_ruleset_lookup(host, src, dst);
if (bump_op == BUMP_PARENT && (!config || !config->parent.ctx_as<sockaddr_t>())) { // this check might be too early but provides a better shortcut
log(notice, "cannot bump to parent: '%s'", host);
bump_op = BUMP_NONE;
}
}
log(debug, "bump op %d", bump_op);
state = STATE_AC;
}
// let config decide whether to block this connection later on in STATE_DENY (possibly after bumping for the errorpage)
// #ac
// -> STATE_BUMP
if (state == STATE_AC) {
if (!ac) {
ac = AccessControl::getInst(client, host, host? strlen(host): 0, src, dst);
assert(ac);
}
switch (ac->get()) {
case TRI_NONE:
if (!event_isset(ev, EVENT_TOUT)) break; // keep waiting
log(notice, "ac timeout");
safe_delete(ac); // keep ac object otherwise
state = STATE_BUMP;
break;
case TRI_TRUE:
log(info, "ac says ok");
state = STATE_BUMP;
break;
case TRI_FALSE:
log(info, "ac says no");
state = STATE_BUMP;
break;
}
event_unset(ev, EVENT_TOUT);
}
// get bump certificate if needed and upgrade client i/o to ssl upon success or fall back to no bumping upon error
// #bump, #client.ssl, #bump_op, #host_peek_buf
// -> STATE_HTTP_REQ
if (state == STATE_BUMP) {
if (!bump && bump_op != BUMP_NONE) {
assert(host); // no bump_op otherwise
bump = Bump::getInst(client, host, strlen(host));
}
if (!bump) {
bump_op = BUMP_NONE;
state = STATE_HTTP_REQ;
} else {
switch (bump->ready()) {
case TRI_NONE: // keep waiting until (short) timeout or bump_done, events will be disabled
if (!event_isset(ev, EVENT_TOUT)) break;
log(notice, "bump timeout");
// no break
case TRI_FALSE: // failed or deferred
log(info, "bump failed");
safe_delete(bump);
bump_op = BUMP_NONE; // can still safely fall back to proxying due to SNI peek
state = STATE_HTTP_REQ;
break;
case TRI_TRUE: // ok, have now cert and ctx for talking to client
log(debug, "bump cert ready");
ssl_ctx_t* sslctx = bump->get();
assert(sslctx);
ssl_t* ssl = ssl_get(sslctx, client, host_peek_buf);
if (ssl) {
host_peek_buf = NULL;
client.set(ssl); // keep fd valid in case of error
} else {
bump_op = BUMP_NONE;
}
ssl_ctx_del(sslctx);
safe_delete(bump); // not needed anymore. TODO: pooling
state = STATE_HTTP_REQ;
break;
}
event_unset(ev, EVENT_TOUT); // handled in any case
}
if (state != STATE_BUMP && host_peek_buf) { // every path leaving STATE_BUMP ends up here, and no client i/o has been performed before this point
log(debug, "recycling peeked client handshake data");
assert(bump_op == BUMP_NONE);
assert(!client.is_ssl());
upstream.prepend(host_peek_buf);
host_peek_buf = NULL;
}
}
// if the client traffic is decryptable, perform some client i/o to check for an HTTP request (needed for the parent proxy and the errorpage)
// #is_http, #client_in.do_io
// -> STATE_DENY
if (state == STATE_HTTP_REQ) {
assert(is_http == TRI_NONE);
if (!client.is_valid() || !client.is_ssl()) { // client already down or not bumped/decryptable
assert(bump_op == BUMP_NONE);
state = STATE_DENY;
} else if (bump_op == BUMP_PARENT || (ac && ac->get() == TRI_FALSE)) {
assert(!host_peek_buf);
(void)client_in.do_io(); // don't check for rv
unless (client.is_valid()) { // but check if client becomes closed due to error (or detected now)
state = STATE_DENY;
} else {
switch ((is_http = upstream.is_http_req())) {
case TRI_NONE: // go on, not sure yet (until timeout)
if (!event_isset(ev, EVENT_TOUT)) break;
log(info, "HTTP detect timeout");
error_cache_set(src, dst);
// no break
case TRI_FALSE: // already bumped but cannot send to parent
log(info, "NONHTTP");
state = STATE_DENY;
break;
case TRI_TRUE: // in case of ssl, peek at bumped data for HTTP request
log(info, "HTTP");
state = STATE_DENY;
break;
}
}
event_unset(ev, EVENT_TOUT); // handled in any case
} else { // no detection needed
assert(!host_peek_buf);
state = STATE_DENY;
}
}
// deny client connection if ac said so
// -> STATE_IO if denied
// -> STATE_CONNECT otherwise
if (state == STATE_DENY) {
if (!client.is_valid()) {
state = STATE_IO;
} else if (ac && ac->get() == TRI_FALSE) {
log(info, "denying connection");
if (!client.is_ssl()) {
// not bumped: seems OK to assume it's SSL, and as it's still a 'raw' socket, no handshake has been performed yet.
(void)client.send_alert();
client.reset();
} else {
// is ssl, already bumped and nothing sent in plain (but might have (partially) performed handshake and/or read plain or raw)
const AioFileIn::Result* errordoc = config? config->http_err_doc.ctx_as<const AioFileIn::Result>(): NULL;
if (is_http == TRI_TRUE && errordoc && errordoc->len) { // bumped http, can send errorpage
downstream.reset();
downstream.add_data(errordoc->buf, errordoc->len);
assert(!server.is_valid());
} else { // TODO: finish handshake first if needed and then switch to raw buffer?
(void)client.send_alert();
client.reset();
}
}
state = STATE_IO;
} else {
state = STATE_CONNECT;
}
}
// connect server to upstream or parent proxy, either raw or with ssl, acc. to bump_op
// #server, #bump_op
// -> STATE_CERT_CN
if (state == STATE_CONNECT) {
assert(client.is_valid() && !server.is_valid());
assert(client.is_ssl() == (bump_op != BUMP_NONE));
if (bump_op == BUMP_PARENT) { // connect to parent instead of original dst, falling back to BUMP
if (!config || !config->parent.ctx_as<sockaddr_t>()) {
log(info, "no parent to connect to");
bump_op = BUMP_BUMP; // cannot fallback to raw because client is already bumped
} else if (is_http != TRI_TRUE) {
log(info, "not forwarding nonhttp to parent");
bump_op = BUMP_BUMP;
} else if (!server_out.set_http_req(src, dst, host, host? strlen(host): 0)) {
log(info, "cannot set forwarding headers for parent");
bump_op = BUMP_BUMP;
} else {
log(debug, "connecting to parent");
server.set(socket_connect(NULL, config->parent.ctx_as<sockaddr_t>()));
}
} else if (bump_op == BUMP_NONE) { // connect to original dst
log(debug, "connecting to upstream");
server.set(socket_connect(&src, &dst));
}
#if (SPLICE)
if (bump_op == BUMP_BUMP || bump_op == BUMP_SPLICE) {
#else
if (bump_op == BUMP_BUMP) {
#endif
// connect using own ssl ctx for encryption
log(debug, "connecting to upstream w/ ssl");
server.set(socket_connect(&src, &dst));
if (server.is_valid()) {
ssl_t* sssl = ssl_get(host, server);
if (sssl) {
server.set(sssl);
} else {
server.reset(); // no fallback
}
}
}
#if (SPLICE)
if (server.is_valid() && bump_op == BUMP_SPLICE) {
if (server_out.splice_to(&upstream_splice) &&
client_out.splice_to(&downstream_splice) &&
server_splice.set(bump_fn(ctx_no, 'c', src), &upstream_splice) &&
client_splice.set(bump_fn(ctx_no, 's', dst), &downstream_splice)) {
log(debug, "will splice");
} else {
server_out.splice_to(NULL);
client_out.splice_to(NULL);
server_splice.set(NULL, NULL);
client_splice.set(NULL, NULL);
log(debug, "cannot splice");
}
}
#endif
unless (server.is_valid()) {
error_cache_set(src, dst); // pretty hopeless -> no fallback
}
state = STATE_CERT_CN;
}
// if there still is no hostname, peek at the cert CN as we're connected now to upstream while the client side is blocked
// #host
// -> STATE_IO if not sensible/possible/needed
// -> STATE_CN_AC upon cn detection
// -> STATE_IO otherwise (AC w/o hostname already done)
if (state == STATE_CERT_CN) {
if (host) { // nothing to do, and ac already had its chance
state = STATE_IO;
} else if (bump_op != BUMP_NONE) { // no raw cert peeking possible?
log(info, "skipping cert cn detection");
state = STATE_IO;
} else if (SNI_PEEK_PORT && ntohs(dst.addr4.sin_port) != SNI_PEEK_PORT) {
log(info, "not trying cert cn detection");
state = STATE_IO;
} else {
(void)client_in.do_io();
(void)server_out.do_io();
(void)server_in.do_io();
unless (server.is_valid()) {
state = STATE_IO; // makes no sense continuing or trying i/o
} else {
const char* b;
size_t l;
if (downstream.get_data(b, l)) {
switch (cn_cert(b, l, src, dst, host)) {
case TRI_NONE:
assert(!host);
break; // wait until ev or timeout
case TRI_FALSE:
assert(!host);
log(info, "could not detect cert cn");
state = STATE_IO;
break;
case TRI_TRUE:
assert(host);
log(info, "got cert cn '%s'", host);
safe_delete(ac); // had its chance, would have been denied by now if false
state = STATE_CN_AC;
break;
}
}
}
if (event_isset(ev, EVENT_TOUT)) {
if (state == STATE_CERT_CN) {
log(info, "cert detect timeout");
state = STATE_IO;
}
event_unset(ev, EVENT_TOUT);
}
}
}
// perform AC on the freshly obtained hostname from the server's certificate - close connections upon negative result (not bumped)
// #ac
// -> STATE_IO
if (state == STATE_CN_AC) {
if (!ac) {
assert(host);
ac = AccessControl::getInst(client, host, strlen(host), src, dst);
assert(ac);
}
switch (ac->get()) { // must be the CN-based ac: the original one either denied the connection or was deleted after CN detection
case TRI_NONE:
if (!event_isset(ev, EVENT_TOUT)) break; // keep waiting
log(notice, "cert ac timeout");
safe_delete(ac); // keep ac object otherwise
state = STATE_IO;
break;
case TRI_TRUE:
log(info, "cert ac says ok");
state = STATE_IO;
break;
case TRI_FALSE:
log(info, "cert ac says no");
(void)client.send_alert();
client.reset();
server.reset();
state = STATE_IO;
break;
}
event_unset(ev, EVENT_TOUT);
}
// final state
if (state == STATE_IO) {
if (event_isset(ev, EVENT_TOUT)) {
assert(ev == EVENT_TOUT); // should be the only event
log(debug, "ignoring client timeout"); // only for wakeup, but this will trigger our timeout detection in update_events()
}
do_io();
}
}
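// server side events: plain i/o once in STATE_IO, otherwise (cert CN states)
// forwarded to the client handler with everything but EVENT_TOUT masked out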
void Ctx::s_handler(event_t ev) {
if (event_isset(ev, EVENT_TOUT)) {
assert(ev == EVENT_TOUT); // should be the only event
log(debug, "ignoring upstream timeout"); // only for wakeup, but this will trigger our timeout detection in update_events()
}
if (state == STATE_IO) {
do_io();
} else { // state requires logic from main (client) handler
assert(state_info[state].server_ign_ev != EVENT_NONE); // some intermediary state where the client is disabled (STATE_CERT_CN or STATE_CN_AC atm)
c_handler(event_mask(ev, EVENT_TOUT)); // don't pass unrelated i/o or close events
}
}
//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//\\//
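// per-connection poll trampoline: deletes the ctx on hard shutdown or as soon
// as update_events() reports that both sides are finished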
void CtxAcceptor::handler(int fd, event_t ev, unsigned pollid, void* s) {
Ctx* sess = (Ctx*)s;
assert(sess);
if (unlikely(SHUTDOWN > SHUTDOWN_GRACEFUL)) {
delete sess;
} else {
sess->handler(fd, ev, pollid);
if (!sess->update_events(&handler)) {
delete sess;
}
}
}
void CtxAcceptor::accept_handler(int fd, event_t ev, unsigned, void*) { ///< creates new instance for every accepted connection
if (event_isset(ev, EVENT_IN)) {
sockaddr_t src, dst;
int newfd;
int num = MAX_ACCEPTS;
while (num-- && ((newfd = socket_accept(fd, src, dst)) != -1)) {
if (unlikely(Ctx::ctx_num >= MAX_CLIENTS)) {
log(error, "max clients limit %d reached!", MAX_CLIENTS);
EINTR_RETRY(close(newfd));
continue;
}
Ctx* ctx = new Ctx(newfd, src, dst);
if (!ctx->update_events(&handler)) {
delete ctx;
}
}
} else if (event_isset(ev, EVENT_CLOSE)) {
log(error, "main fd fail");
if (!SHUTDOWN) {
SHUTDOWN = SHUTDOWN_GRACEFUL;
}
}
}