User-Space thread balancing + refactoring

Domingo Dirutigliano
2025-02-16 16:33:34 +01:00
parent 63e7f73139
commit 7f7e3353ec
22 changed files with 805 additions and 729 deletions


@@ -19,7 +19,7 @@ Dockerfile
 /frontend/build/
 /frontend/build/**
 /frontend/node_modules/
-/backend/modules/cppqueue
+/backend/modules/cppregex
 /backend/modules/proxy
 docker-compose.yml

.gitignore

@@ -21,8 +21,10 @@
 /frontend/build/**
 /frontend/dist/
 /frontend/dist/**
-/backend/modules/cppqueue
-/backend/binsrc/cppqueue
+/backend/modules/cppregex
+/backend/binsrc/cppregex
+/backend/modules/cpproxy
+/backend/binsrc/cpproxy
 /backend/modules/proxy
 /docker-compose.yml
 /firegex-compose.yml


@@ -15,9 +15,9 @@ RUN bun run build
 #Building main conteiner
 FROM --platform=$TARGETARCH registry.fedoraproject.org/fedora:latest
-RUN dnf -y update && dnf install -y python3-pip @development-tools gcc-c++ \
-    libnetfilter_queue-devel libnfnetlink-devel libmnl-devel libcap-ng-utils \
-    nftables vectorscan-devel libtins-devel python3-nftables libpcap-devel boost-devel
+RUN dnf -y update && dnf install -y python3-devel python3-pip @development-tools gcc-c++ \
+    libnetfilter_queue-devel libnfnetlink-devel libmnl-devel libcap-ng-utils nftables \
+    vectorscan-devel libtins-devel python3-nftables libpcap-devel boost-devel
 RUN mkdir -p /execute/modules
 WORKDIR /execute
@@ -26,8 +26,8 @@ ADD ./backend/requirements.txt /execute/requirements.txt
 RUN pip3 install --no-cache-dir --break-system-packages -r /execute/requirements.txt --no-warn-script-location
 COPY ./backend/binsrc /execute/binsrc
-RUN g++ binsrc/nfqueue.cpp -o modules/cppqueue -std=c++23 -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libhs libmnl)
-#RUN g++ binsrc/nfproxy-tun.cpp -o modules/cpproxy -std=c++23 -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libmnl)
+RUN g++ binsrc/nfregex.cpp -o modules/cppregex -std=c++23 -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libhs libmnl)
+RUN g++ binsrc/nfproxy-tun.cpp -o modules/cpproxy -std=c++23 -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libmnl python3)
 COPY ./backend/ /execute/
 COPY --from=frontend /app/dist/ ./frontend/


@@ -42,14 +42,6 @@ app = FastAPI(
title="Firegex API", title="Firegex API",
version=API_VERSION, version=API_VERSION,
) )
utils.socketio = socketio.AsyncServer(
async_mode="asgi",
cors_allowed_origins=[],
transports=["websocket"]
)
sio_app = socketio.ASGIApp(utils.socketio, socketio_path="/sock/socket.io", other_asgi_app=app)
app.mount("/sock", sio_app)
if DEBUG: if DEBUG:
app.add_middleware( app.add_middleware(
@@ -61,6 +53,15 @@ if DEBUG:
     )
+utils.socketio = socketio.AsyncServer(
+    async_mode="asgi",
+    cors_allowed_origins=[],
+    transports=["websocket"]
+)
+sio_app = socketio.ASGIApp(utils.socketio, socketio_path="/sock/socket.io", other_asgi_app=app)
+app.mount("/sock", sio_app)
 def APP_STATUS(): return "init" if db.get("password") is None else "run"
 def JWT_SECRET(): return db.get("secret")
@@ -197,10 +198,11 @@ if __name__ == '__main__':
     os.chdir(os.path.dirname(os.path.realpath(__file__)))
     uvicorn.run(
         "app:app",
-        host=None, #"::" if DEBUG else None,
+        host="::" if DEBUG else None,
         port=FIREGEX_PORT,
-        reload=False,#DEBUG,
+        reload=DEBUG,
         access_log=True,
-        workers=1, # Multiple workers will cause a crash due to the creation
-        # of multiple processes with separated memory
+        workers=1, # Firewall module can't be replicated in multiple workers
+        # Later the firewall module will be moved to a separate process
+        # The webserver will communicate using redis (redis is also needed for websockets)
     )


@@ -1,244 +1,101 @@
#include <linux/netfilter/nfnetlink_queue.h> #include <vector>
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/types.h>
#include <thread> #include <thread>
#include <iostream> #include <type_traits>
#include <functional> #include "../utils.cpp"
#include <netinet/in.h> #include "nfqueue.cpp"
using namespace std;
#ifndef NETFILTER_CLASS_CPP #ifndef NETFILTER_CLASS_CPP
#define NETFILTER_CLASS_CPP #define NETFILTER_CLASS_CPP
typedef int QueueCallbackFunction(const nlmsghdr *, const mnl_socket*, void *); namespace Firegex {
namespace NfQueue {
struct nfqueue_execution_data_tmp{ template <typename Derived>
mnl_socket* nl = nullptr; class ThreadNfQueue {
function<QueueCallbackFunction> queue_cb = nullptr; public:
void *data = nullptr; ThreadNfQueue() = default;
virtual ~ThreadNfQueue() = default;
std::thread thr;
BlockingQueue<PktRequest<Derived>*> queue;
virtual void before_loop() {}
virtual void handle_next_packet(PktRequest<Derived>* pkt){}
void loop() {
static_cast<Derived*>(this)->before_loop();
for(;;) {
PktRequest<Derived>* pkt;
queue.take(pkt);
static_cast<Derived*>(this)->handle_next_packet(pkt);
delete pkt;
}
}
void run_thread_loop() {
thr = std::thread([this]() { this->loop(); });
}
}; };
class NfQueueExecutor { template <typename Worker, typename = is_base_of<ThreadNfQueue<Worker>, Worker>>
private: void __real_handler(PktRequest<std::vector<Worker>>* pkt) {
size_t BUF_SIZE = 0xffff + (MNL_SOCKET_BUFFER_SIZE/2); const size_t idx = hash_stream_id(pkt->sid) % pkt->ctx->size();
char *queue_msg_buffer = nullptr;
QueueCallbackFunction * _queue_callback_hook = nullptr;
public:
unsigned int portid; auto* converted_pkt = reinterpret_cast<PktRequest<Worker>*>(pkt);
u_int16_t queue_num; converted_pkt->ctx = &((*pkt->ctx)[idx]);
mnl_socket* nl = nullptr;
NfQueueExecutor(u_int16_t queue_num, QueueCallbackFunction* queue_cb): queue_num(queue_num), _queue_callback_hook(queue_cb){ converted_pkt->ctx->queue.put(converted_pkt);
nl = mnl_socket_open(NETLINK_NETFILTER); }
if (nl == nullptr) { throw runtime_error( "mnl_socket_open" );}
if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
mnl_socket_close(nl);
throw runtime_error( "mnl_socket_bind" );
}
portid = mnl_socket_get_portid(nl);
queue_msg_buffer = (char*) malloc(BUF_SIZE);
if (!queue_msg_buffer) {
mnl_socket_close(nl);
throw runtime_error( "allocate receive buffer" );
}
if (_send_config_cmd(NFQNL_CFG_CMD_BIND) < 0) {
_clear();
throw runtime_error( "mnl_socket_send" );
}
//TEST if BIND was successful
if (_send_config_cmd(NFQNL_CFG_CMD_NONE) < 0) { // SEND A NONE cmmand to generate an error meessage
_clear();
throw runtime_error( "mnl_socket_send" );
}
if (_recv_packet() == -1) { //RECV the error message
_clear();
throw runtime_error( "mnl_socket_recvfrom" );
}
struct nlmsghdr *nlh = (struct nlmsghdr *) queue_msg_buffer;
if (nlh->nlmsg_type != NLMSG_ERROR) {
_clear();
throw runtime_error( "unexpected packet from kernel (expected NLMSG_ERROR packet)" );
}
//nfqnl_msg_config_cmd
nlmsgerr* error_msg = (nlmsgerr *)mnl_nlmsg_get_payload(nlh);
// error code taken from the linux kernel:
// https://elixir.bootlin.com/linux/v5.18.12/source/include/linux/errno.h#L27
#define ENOTSUPP 524 /* Operation is not supported */
if (error_msg->error != -ENOTSUPP) {
_clear();
throw invalid_argument( "queueid is already busy" );
}
//END TESTING BIND
nlh = nfq_nlmsg_put(queue_msg_buffer, NFQNL_MSG_CONFIG, queue_num);
nfq_nlmsg_cfg_put_params(nlh, NFQNL_COPY_PACKET, 0xffff);
mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(NFQA_CFG_F_GSO));
mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(NFQA_CFG_F_GSO));
if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
_clear();
throw runtime_error( "mnl_socket_send" );
}
}
NfQueueExecutor(u_int16_t queue_num): NfQueueExecutor(queue_num, nullptr) {}
// --- Functions to be implemented by the user
virtual void before_loop() {
// Do nothing by default
}
virtual void * callback_data_fetch(){
return nullptr;
}
// --- End of functions to be implemented by the user
void run(){
/*
* ENOBUFS is signalled to userspace when packets were lost
* on kernel side. In most cases, userspace isn't interested
* in this information, so turn it off.
*/
int ret = 1;
mnl_socket_setsockopt(nl, NETLINK_NO_ENOBUFS, &ret, sizeof(int));
before_loop();
for (;;) {
ret = _recv_packet();
if (ret == -1) {
throw runtime_error( "mnl_socket_recvfrom" );
}
nfqueue_execution_data_tmp data = {
nl: nl,
queue_cb: _queue_callback_hook,
data: callback_data_fetch()
};
ret = mnl_cb_run(queue_msg_buffer, ret, 0, portid, _real_queue_cb, &data);
if (ret < 0){
throw runtime_error( "mnl_cb_run" );
}
}
}
~NfQueueExecutor() { template <typename Worker, typename = is_base_of<ThreadNfQueue<Worker>, Worker>>
_send_config_cmd(NFQNL_CFG_CMD_UNBIND); class MultiThreadQueue {
_clear(); static_assert(std::is_base_of_v<ThreadNfQueue<Worker>, Worker>,
} "Worker must inherit from ThreadNfQueue<Worker>");
private: private:
std::vector<Worker> workers;
static int _real_queue_cb(const nlmsghdr *nlh, void *data_ptr) { NfQueue<std::vector<Worker>, __real_handler<Worker>> * nfq;
nfqueue_execution_data_tmp* info = (nfqueue_execution_data_tmp*) data_ptr; uint16_t queue_num_;
if (info->queue_cb == nullptr) return MNL_CB_OK;
return info->queue_cb(nlh, info->nl, info->data);
}
inline void _clear(){
if (queue_msg_buffer != nullptr) {
free(queue_msg_buffer);
queue_msg_buffer = nullptr;
}
mnl_socket_close(nl);
nl = nullptr;
}
inline ssize_t _send_config_cmd(nfqnl_msg_config_cmds cmd){
struct nlmsghdr *nlh = nfq_nlmsg_put(queue_msg_buffer, NFQNL_MSG_CONFIG, queue_num);
nfq_nlmsg_cfg_put_cmd(nlh, AF_INET, cmd);
return mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
}
inline ssize_t _recv_packet(){
return mnl_socket_recvfrom(nl, queue_msg_buffer, BUF_SIZE);
}
};
template <typename Executor, typename = enable_if_t<is_base_of_v<NfQueueExecutor, Executor>>> public:
class NFQueueSequence{ const size_t n_threads;
static constexpr int QUEUE_BASE_NUM = 1000;
private: explicit MultiThreadQueue(size_t n_threads)
vector<Executor *> nfq; : n_threads(n_threads), workers(n_threads)
uint16_t _init; {
uint16_t _end; if(n_threads == 0) throw std::invalid_argument("At least 1 thread required");
vector<thread> threads;
public:
static const int QUEUE_BASE_NUM = 1000;
NFQueueSequence(uint16_t seq_len){ for(uint16_t qnum = QUEUE_BASE_NUM; ; qnum++) {
if (seq_len <= 0) throw invalid_argument("seq_len <= 0"); try {
nfq = vector<Executor*>(seq_len); nfq = new NfQueue<std::vector<Worker>, __real_handler<Worker>>(qnum);
_init = QUEUE_BASE_NUM; queue_num_ = qnum;
while(nfq[0] == nullptr){
if (_init+seq_len-1 >= 65536){
throw runtime_error("NFQueueSequence: too many queues!");
}
for (int i=0;i<seq_len;i++){
try{
nfq[i] = new Executor(_init+i);
}catch(const invalid_argument e){
for(int j = 0; j < i; j++) {
delete nfq[j];
nfq[j] = nullptr;
}
_init += seq_len - i;
break; break;
} }
catch(const std::invalid_argument&) {
if(qnum == std::numeric_limits<uint16_t>::max())
throw std::runtime_error("No available queue numbers");
} }
} }
_end = _init + seq_len - 1;
}
void start(){
if (threads.size() != 0) throw runtime_error("NFQueueSequence: already started!");
for (int i=0;i<nfq.size();i++){
threads.push_back(thread([executor = nfq[i]](){
executor->run();
}));
}
} }
void join(){ ~MultiThreadQueue() {
for (int i=0;i<nfq.size();i++){ delete nfq;
threads[i].join();
}
threads.clear();
} }
uint16_t init(){ void start() {
return _init; for(auto& worker : workers) {
worker.run_thread_loop();
}
for (;;){
nfq->handle_next_packet(&workers);
} }
uint16_t end(){
return _end;
} }
~NFQueueSequence(){ uint16_t queue_num() const { return queue_num_; }
for (int i=0;i<nfq.size();i++){
delete nfq[i];
}
}
}; };
}} // namespace Firegex::NfQueue
#endif // NETFILTER_CLASS_CPP #endif // NETFILTER_CLASS_CPP
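
For orientation, a minimal sketch of how the refactored classes appear intended to be used after this change: a worker derives from ThreadNfQueue<Derived> and consumes packets from its own BlockingQueue, while MultiThreadQueue owns the single nfqueue and dispatches each packet to workers[hash_stream_id(sid) % n_threads]. The worker name and its filter rule below are hypothetical; only the Firegex::NfQueue types come from this commit, and the include path is assumed.

```cpp
#include "classes/netfilter.cpp"   // pulls in nfqueue.cpp: PktRequest, ThreadNfQueue, MultiThreadQueue (path assumed)
#include <string>

using namespace Firegex::NfQueue;

// Hypothetical worker: each worker runs in its own thread, and every packet of a
// given stream hashes to the same worker, so per-stream state needs no locking.
class ExampleWorker : public ThreadNfQueue<ExampleWorker> {
public:
    void handle_next_packet(PktRequest<ExampleWorker>* pkt) override {
        // Exactly one verdict per packet; the dispatch loop deletes pkt afterwards.
        if (pkt->data.find("malicious") != std::string::npos) pkt->drop();
        else pkt->accept();
    }
};

int main() {
    MultiThreadQueue<ExampleWorker> queue(4); // binds one nfqueue (number >= 1000) and builds 4 workers
    // start() spawns the worker threads, then loops forever reading the nfqueue and
    // enqueueing each packet to workers[hash_stream_id(pkt->sid) % 4].
    queue.start();
}
```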


@@ -0,0 +1,370 @@
#ifndef NFQUEUE_CLASS_CPP
#define NFQUEUE_CLASS_CPP
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <tins/tcp_ip/stream_identifier.h>
#include <libmnl/libmnl.h>
#include <tins/tins.h>
using namespace std;
namespace Firegex{
namespace NfQueue{
enum class FilterAction{ DROP, ACCEPT, MANGLE, NOACTION };
enum class L4Proto { TCP, UDP, RAW };
typedef Tins::TCPIP::StreamIdentifier stream_id;
template<typename T>
class PktRequest {
private:
FilterAction action = FilterAction::NOACTION;
mnl_socket* nl = nullptr;
nfgenmsg * nfg = nullptr;
nfqnl_msg_packet_hdr *ph;
shared_ptr<char[]> packet_buffer; // Will be deallocated here
size_t data_size = 0;
public:
const bool is_ipv6;
Tins::IP* ipv4 = nullptr;
Tins::IPv6* ipv6 = nullptr;
Tins::TCP* tcp = nullptr;
Tins::UDP* udp = nullptr;
const L4Proto l4_proto;
const bool is_input;
const string packet;
const string data;
const stream_id sid;
T* ctx;
private:
inline void fetch_data_size(Tins::PDU* pdu){
auto inner = pdu->inner_pdu();
if (inner == nullptr){
data_size = 0;
}else{
data_size = inner->size();
}
}
L4Proto fill_l4_info(){
if (is_ipv6){
tcp = ipv6->find_pdu<Tins::TCP>();
if (tcp == nullptr){
udp = ipv6->find_pdu<Tins::UDP>();
if (udp == nullptr){
fetch_data_size(ipv6);
return L4Proto::RAW;
}else{
fetch_data_size(udp);
return L4Proto::UDP;
}
}else{
fetch_data_size(tcp);
return L4Proto::TCP;
}
}else{
tcp = ipv4->find_pdu<Tins::TCP>();
if (tcp == nullptr){
udp = ipv4->find_pdu<Tins::UDP>();
if (udp == nullptr){
fetch_data_size(ipv4);
return L4Proto::RAW;
}else{
fetch_data_size(udp);
return L4Proto::UDP;
}
}else{
fetch_data_size(tcp);
return L4Proto::TCP;
}
}
}
public:
PktRequest(shared_ptr<char[]> buf, Tins::IP* ipv4, const char* payload, size_t plen, stream_id sid, T* ctx, mnl_socket* nl, nfgenmsg *nfg, nfqnl_msg_packet_hdr *ph, bool is_input):
is_ipv6(false), ipv4(ipv4), packet(string(payload, plen)), sid(sid), ctx(ctx), nl(nl), nfg(nfg), ph(ph),
is_input(is_input), packet_buffer(buf), l4_proto(fill_l4_info()), data(string(payload+(plen-data_size), data_size)) {}
PktRequest(shared_ptr<char[]> buf, Tins::IPv6* ipv6, const char* payload, size_t plen, stream_id sid, T* ctx, mnl_socket* nl, nfgenmsg *nfg, nfqnl_msg_packet_hdr *ph, bool is_input):
is_ipv6(true), ipv6(ipv6), packet(string(payload, plen)), sid(sid), ctx(ctx), nl(nl), nfg(nfg), ph(ph),
is_input(is_input), packet_buffer(buf), l4_proto(fill_l4_info()), data(string(payload+(plen-data_size), data_size)) {}
void drop(){
if (action == FilterAction::NOACTION){
action = FilterAction::DROP;
perfrom_action();
}else{
throw invalid_argument("Cannot drop a packet that has already been dropped or accepted");
}
}
void accept(){
if (action == FilterAction::NOACTION){
action = FilterAction::ACCEPT;
perfrom_action();
}else{
throw invalid_argument("Cannot accept a packet that has already been dropped or accepted");
}
}
void mangle(){
if (action == FilterAction::NOACTION){
action = FilterAction::MANGLE;
perfrom_action();
}else{
throw invalid_argument("Cannot mangle a packet that has already been accepted or dropped");
}
}
FilterAction get_action(){
return action;
}
~PktRequest(){
if (ipv4 != nullptr){
delete ipv4;
}
if (ipv6 != nullptr){
delete ipv6;
}
}
private:
void perfrom_action(){
char buf[MNL_SOCKET_BUFFER_SIZE];
struct nlmsghdr *nlh_verdict;
nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));
switch (action)
{
case FilterAction::ACCEPT:
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
break;
case FilterAction::DROP:
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_DROP );
break;
case FilterAction::MANGLE:{
if (is_ipv6){
nfq_nlmsg_verdict_put_pkt(nlh_verdict, ipv6->serialize().data(), ipv6->size());
}else{
nfq_nlmsg_verdict_put_pkt(nlh_verdict, ipv4->serialize().data(), ipv4->size());
}
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
break;
}
default:
throw invalid_argument("Invalid action");
}
if (mnl_socket_sendto(nl, nlh_verdict, nlh_verdict->nlmsg_len) < 0) {
throw runtime_error( "mnl_socket_send" );
}
}
};
struct internal_nfqueue_execution_data_tmp{
mnl_socket* nl = nullptr;
void *data = nullptr;
shared_ptr<char[]> packet_buffer;
};
const size_t NFQUEUE_BUFFER_SIZE = 0xffff + (MNL_SOCKET_BUFFER_SIZE/2);
/* NfQueue wrapper class to handle nfqueue packets
this class is made to be possible enqueue multiple packets to multiple threads
--> handle function is responsable to delete the PktRequest object */
template <typename D, void handle_func(PktRequest<D>*)>
class NfQueue {
private:
mnl_socket* nl = nullptr;
unsigned int portid;
public:
const uint16_t queue_num;
NfQueue(u_int16_t queue_num): queue_num(queue_num) {
nl = mnl_socket_open(NETLINK_NETFILTER);
if (nl == nullptr) { throw runtime_error( "mnl_socket_open" );}
if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
mnl_socket_close(nl);
throw runtime_error( "mnl_socket_bind" );
}
portid = mnl_socket_get_portid(nl);
char queue_msg_buffer[NFQUEUE_BUFFER_SIZE];
if (_send_config_cmd(NFQNL_CFG_CMD_BIND, queue_msg_buffer) < 0) {
_clear();
throw runtime_error( "mnl_socket_send" );
}
//TEST if BIND was successful
if (_send_config_cmd(NFQNL_CFG_CMD_NONE, queue_msg_buffer) < 0) { // SEND A NONE command to generate an error meessage
_clear();
throw runtime_error( "mnl_socket_send" );
}
if (_recv_packet(queue_msg_buffer) == -1) { //RECV the error message
_clear();
throw runtime_error( "mnl_socket_recvfrom" );
}
struct nlmsghdr *nlh = (struct nlmsghdr *) queue_msg_buffer;
if (nlh->nlmsg_type != NLMSG_ERROR) {
_clear();
throw runtime_error( "unexpected packet from kernel (expected NLMSG_ERROR packet)" );
}
//nfqnl_msg_config_cmd
nlmsgerr* error_msg = (nlmsgerr *)mnl_nlmsg_get_payload(nlh);
// error code taken from the linux kernel:
// https://elixir.bootlin.com/linux/v5.18.12/source/include/linux/errno.h#L27
#define ENOTSUPP 524 /* Operation is not supported */
if (error_msg->error != -ENOTSUPP) {
_clear();
throw invalid_argument( "queueid is already busy" );
}
//END TESTING BIND
nlh = nfq_nlmsg_put(queue_msg_buffer, NFQNL_MSG_CONFIG, queue_num);
nfq_nlmsg_cfg_put_params(nlh, NFQNL_COPY_PACKET, 0xffff);
mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(NFQA_CFG_F_GSO));
mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(NFQA_CFG_F_GSO));
if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
_clear();
throw runtime_error( "mnl_socket_send" );
}
/*
* ENOBUFS is signalled to userspace when packets were lost
* on kernel side. In most cases, userspace isn't interested
* in this information, so turn it off.
*/
int tmp = 1;
mnl_socket_setsockopt(nl, NETLINK_NO_ENOBUFS, &tmp, sizeof(int));
}
void handle_next_packet(D* data){
auto queue_msg_buffer = make_shared<char[]>(NFQUEUE_BUFFER_SIZE);
int ret = _recv_packet(queue_msg_buffer.get(), NFQUEUE_BUFFER_SIZE);
if (ret == -1) {
throw runtime_error( "mnl_socket_recvfrom" );
}
internal_nfqueue_execution_data_tmp raw_ptr = {
nl: nl,
data: data,
packet_buffer: queue_msg_buffer
};
ret = mnl_cb_run(queue_msg_buffer.get(), ret, 0, portid, _real_queue_cb, &raw_ptr);
if (ret <= 0){
cerr << "[error] [NfQueue.handle_next_packet] mnl_cb_run error with: " << ret << endl;
throw runtime_error( "mnl_cb_run error!" );
}
}
~NfQueue() {
char queue_msg_buffer[NFQUEUE_BUFFER_SIZE];
_send_config_cmd(NFQNL_CFG_CMD_UNBIND, queue_msg_buffer);
_clear();
}
private:
template<typename T, typename = enable_if_t<is_base_of_v<Tins::PDU, T>>>
static void inline _send_verdict(shared_ptr<char[]> raw_buf, T* packet, char *payload, uint16_t plen, nfgenmsg *nfg, nfqnl_msg_packet_hdr *ph, internal_nfqueue_execution_data_tmp* ctx, bool is_input){
handle_func(new PktRequest<D>(
raw_buf, packet, payload, plen,
stream_id::make_identifier(*packet),
(D*)ctx->data, ctx->nl, nfg, ph, is_input
));
}
static int _real_queue_cb(const nlmsghdr *nlh, void *data_ptr) {
internal_nfqueue_execution_data_tmp* info = (internal_nfqueue_execution_data_tmp*) data_ptr;
//Extract attributes from the nlmsghdr
nlattr *attr[NFQA_MAX+1] = {};
if (nfq_nlmsg_parse(nlh, attr) < 0) {
cerr << "[error] [NfQueue._real_queue_cb] problems parsing" << endl;
return MNL_CB_ERROR;
}
if (attr[NFQA_PACKET_HDR] == nullptr) {
cerr << "[error] [NfQueue._real_queue_cb] packet header not set" << endl;
return MNL_CB_ERROR;
}
if (attr[NFQA_MARK] == nullptr) {
cerr << "[error] [NfQueue._real_queue_cb] mark not set" << endl;
return MNL_CB_ERROR;
}
//Get Payload
uint16_t plen = mnl_attr_get_payload_len(attr[NFQA_PAYLOAD]);
char *payload = (char *)mnl_attr_get_payload(attr[NFQA_PAYLOAD]);
//Return result to the kernel
struct nfqnl_msg_packet_hdr *ph = (nfqnl_msg_packet_hdr*) mnl_attr_get_payload(attr[NFQA_PACKET_HDR]);
struct nfgenmsg *nfg = (nfgenmsg *)mnl_nlmsg_get_payload(nlh);
bool is_input = ntohl(mnl_attr_get_u32(attr[NFQA_MARK])) & 0x1; // == 0x1337 that is odd
// Check IP protocol version
if ( (payload[0] & 0xf0) == 0x40 ){
_send_verdict(info->packet_buffer, new Tins::IP((uint8_t*)payload, plen), payload, plen, nfg, ph, info, is_input);
}else{
_send_verdict(info->packet_buffer, new Tins::IPv6((uint8_t*)payload, plen), payload, plen, nfg, ph, info, is_input);
}
return MNL_CB_OK;
}
inline void _clear(){
if (nl != nullptr) {
mnl_socket_close(nl);
nl = nullptr;
}
}
inline ssize_t _send_config_cmd(nfqnl_msg_config_cmds cmd, char* buf){
struct nlmsghdr *nlh = nfq_nlmsg_put(buf, NFQNL_MSG_CONFIG, queue_num);
nfq_nlmsg_cfg_put_cmd(nlh, AF_INET, cmd);
return mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
}
inline ssize_t _recv_packet(char* buf, size_t buf_size = NFQUEUE_BUFFER_SIZE){
return mnl_socket_recvfrom(nl, buf, buf_size);
}
};
uint32_t hash_stream_id(const stream_id &sid) {
uint32_t addr_hash = 0;
const uint32_t* min_addr = reinterpret_cast<const uint32_t*>(sid.min_address.data());
const uint32_t* max_addr = reinterpret_cast<const uint32_t*>(sid.max_address.data());
addr_hash ^= min_addr[0] ^ min_addr[1] ^ min_addr[2] ^ min_addr[3];
addr_hash ^= max_addr[0] ^ max_addr[1] ^ max_addr[2] ^ max_addr[3];
uint32_t ports = (static_cast<uint32_t>(sid.min_address_port) << 16) | sid.max_address_port;
uint32_t hash = addr_hash ^ ports;
hash *= 0x9e3779b9;
return hash;
}
}}
#endif // NFQUEUE_CLASS_CPP
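
As a usage note on the verdict API above: a PktRequest carries the parsed libtins PDUs, and exactly one of accept(), drop() or mangle() must be called per packet; mangle() re-serializes the (possibly edited) ipv4/ipv6 PDU and returns it to the kernel with an NF_ACCEPT verdict. A hedged sketch, mirroring what the proxy and regex filters do on a match; the function and the worker type `W` are hypothetical:

```cpp
// Sketch only: force-close an inbound TCP flow by setting FIN/ACK and
// re-injecting the edited packet through mangle().
template <typename W>
void force_close_if_input(Firegex::NfQueue::PktRequest<W>* pkt) {
    using Firegex::NfQueue::L4Proto;
    if (pkt->l4_proto == L4Proto::TCP && pkt->is_input) {
        pkt->tcp->set_flag(Tins::TCP::FIN, 1);
        pkt->tcp->set_flag(Tins::TCP::ACK, 1);
        pkt->tcp->set_flag(Tins::TCP::SYN, 0);
        pkt->mangle();   // serializes the edited IP PDU and sends NF_ACCEPT
    } else {
        pkt->accept();   // a second verdict on the same packet would throw
    }
}
```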


@@ -1,26 +1,40 @@
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
 #include "proxytun/settings.cpp"
 #include "proxytun/proxytun.cpp"
 #include "classes/netfilter.cpp"
 #include <syncstream>
 #include <iostream>
+#include <stdexcept>
+#include <cstdlib>
 using namespace std;
+using namespace Firegex::PyProxy;
+using Firegex::NfQueue::MultiThreadQueue;
+ssize_t read_check(int __fd, void *__buf, size_t __nbytes){
+    ssize_t bytes = read(__fd, __buf, __nbytes);
+    if (bytes == 0){
+        cerr << "[fatal] [updater] read() returned EOF" << endl;
+        throw invalid_argument("read() returned EOF");
+    }
+    if (bytes < 0){
+        cerr << "[fatal] [updater] read() returned an error" << bytes << endl;
+        throw invalid_argument("read() returned an error");
+    }
+    return bytes;
+}
 void config_updater (){
     while (true){
-        //TODO read config getline(cin, line);
-        if (cin.eof()){
-            cerr << "[fatal] [updater] cin.eof()" << endl;
-            exit(EXIT_FAILURE);
-        }
-        if (cin.bad()){
-            cerr << "[fatal] [updater] cin.bad()" << endl;
-            exit(EXIT_FAILURE);
-        }
+        uint32_t code_size;
+        read_check(STDIN_FILENO, &code_size, 4);
+        vector<uint8_t> code(code_size);
+        read_check(STDIN_FILENO, code.data(), code_size);
         cerr << "[info] [updater] Updating configuration" << endl;
         try{
-            //TODO add data config.reset(new PyCodeConfig(""));
+            config.reset(new PyCodeConfig(code));
             cerr << "[info] [updater] Config update done" << endl;
             osyncstream(cout) << "ACK OK" << endl;
         }catch(const std::exception& e){
@@ -31,18 +45,28 @@ void config_updater (){
 }
 int main(int argc, char *argv[]){
+    Py_Initialize();
+    atexit(Py_Finalize);
+    if (freopen(nullptr, "rb", stdin) == nullptr){ // We need to read from stdin binary data
+        cerr << "[fatal] [main] Failed to reopen stdin in binary mode" << endl;
+        return 1;
+    }
     int n_of_threads = 1;
     char * n_threads_str = getenv("NTHREADS");
     if (n_threads_str != nullptr) n_of_threads = ::atoi(n_threads_str);
     if(n_of_threads <= 0) n_of_threads = 1;
-    config.reset(new PyCodeConfig(""));
+    config.reset(new PyCodeConfig());
-    NFQueueSequence<PyProxyQueue> queues(n_of_threads);
-    queues.start();
-    osyncstream(cout) << "QUEUES " << queues.init() << " " << queues.end() << endl;
-    cerr << "[info] [main] Queues: " << queues.init() << ":" << queues.end() << " threads assigned: " << n_of_threads << endl;
+    MultiThreadQueue<PyProxyQueue> queue(n_of_threads);
+    osyncstream(cout) << "QUEUE " << queue.queue_num() << endl;
+    cerr << "[info] [main] Queue: " << queue.queue_num() << " threads assigned: " << n_of_threads << endl;
+    thread qthr([&](){
+        queue.start();
+    });
     config_updater();
+    qthr.join();
 }
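
The updater above replaces the old line-based stdin protocol with a binary one: a 4-byte length in the process's native byte order, followed by that many bytes of Python filter source, acknowledged with "ACK OK" on stdout once PyCodeConfig has been swapped. A sketch of the sending side under those assumptions; the helper name and file descriptor are hypothetical, and error handling is omitted:

```cpp
#include <unistd.h>
#include <cstdint>
#include <string>

// Hypothetical helper: push new Python filter code to the proxy's stdin.
// Mirrors read_check(STDIN_FILENO, &code_size, 4) followed by
// read_check(STDIN_FILENO, code.data(), code_size) on the receiving side.
void send_filter_code(int proxy_stdin_fd, const std::string& py_source) {
    uint32_t code_size = static_cast<uint32_t>(py_source.size());
    write(proxy_stdin_fd, &code_size, sizeof(code_size));       // length prefix, native byte order
    write(proxy_stdin_fd, py_source.data(), py_source.size());  // Python source bytes
}
```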


@@ -5,6 +5,8 @@
 #include <iostream>
 using namespace std;
+using namespace Firegex::Regex;
+using Firegex::NfQueue::MultiThreadQueue;
 void config_updater (){
     string line;
@@ -55,11 +57,16 @@ int main(int argc, char *argv[]){
     regex_config.reset(new RegexRules(stream_mode));
-    NFQueueSequence<RegexQueue> queues(n_of_threads);
-    queues.start();
-    osyncstream(cout) << "QUEUES " << queues.init() << " " << queues.end() << endl;
-    cerr << "[info] [main] Queues: " << queues.init() << ":" << queues.end() << " threads assigned: " << n_of_threads << " stream mode: " << stream_mode << endl;
+    MultiThreadQueue<RegexNfQueue> queue_manager(n_of_threads);
+    osyncstream(cout) << "QUEUE " << queue_manager.queue_num() << endl;
+    cerr << "[info] [main] Queue: " << queue_manager.queue_num() << " threads assigned: " << n_of_threads << " stream mode: " << stream_mode << endl;
+    thread qthr([&](){
+        queue_manager.start();
+    });
     config_updater();
+    qthr.join();
 }


@@ -13,7 +13,6 @@
#include <linux/types.h> #include <linux/types.h>
#include <stdexcept> #include <stdexcept>
#include <thread> #include <thread>
#include <hs.h>
#include <syncstream> #include <syncstream>
#include <iostream> #include <iostream>
#include "../classes/netfilter.cpp" #include "../classes/netfilter.cpp"
@@ -24,24 +23,32 @@ using Tins::TCPIP::Stream;
using Tins::TCPIP::StreamFollower; using Tins::TCPIP::StreamFollower;
using namespace std; using namespace std;
class PyProxyQueue: public NfQueueExecutor { namespace Firegex {
namespace PyProxy {
class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
public: public:
stream_ctx sctx; stream_ctx sctx;
StreamFollower follower;
struct {
bool matching_has_been_called = false;
bool already_closed = false;
bool result;
NfQueue::PktRequest<PyProxyQueue>* pkt;
} match_ctx;
void before_loop() override { void before_loop() override {
sctx.follower.new_stream_callback(bind(on_new_stream, placeholders::_1, &sctx)); follower.new_stream_callback(bind(on_new_stream, placeholders::_1, this));
sctx.follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, &sctx)); follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, this));
} }
void * callback_data_fetch() override{ bool filter_action(NfQueue::PktRequest<PyProxyQueue>* pkt){
return &sctx;
}
static bool filter_action(packet_info& info){
shared_ptr<PyCodeConfig> conf = config; shared_ptr<PyCodeConfig> conf = config;
auto stream_search = info.sctx->streams_ctx.find(info.sid);
pyfilter_ctx stream_match; auto stream_search = sctx.streams_ctx.find(pkt->sid);
if (stream_search == info.sctx->streams_ctx.end()){ pyfilter_ctx* stream_match;
if (stream_search == sctx.streams_ctx.end()){
// TODO: New pyfilter_ctx // TODO: New pyfilter_ctx
}else{ }else{
stream_match = stream_search->second; stream_match = stream_search->second;
@@ -59,86 +66,76 @@ class PyProxyQueue: public NfQueueExecutor {
} }
//If the stream has already been matched, drop all data, and try to close the connection //If the stream has already been matched, drop all data, and try to close the connection
static void keep_fin_packet(stream_ctx* sctx){ static void keep_fin_packet(PyProxyQueue* pkt){
sctx->match_info.matching_has_been_called = true; pkt->match_ctx.matching_has_been_called = true;
sctx->match_info.already_closed = true; pkt->match_ctx.already_closed = true;
} }
static void on_data_recv(Stream& stream, stream_ctx* sctx, string data) { static void on_data_recv(Stream& stream, PyProxyQueue* pkt, string data) {
sctx->match_info.matching_has_been_called = true; pkt->match_ctx.matching_has_been_called = true;
sctx->match_info.already_closed = false; pkt->match_ctx.already_closed = false;
bool result = filter_action(*sctx->match_info.pkt_info); bool result = pkt->filter_action(pkt->match_ctx.pkt);
if (!result){ if (!result){
sctx->clean_stream_by_id(sctx->match_info.pkt_info->sid); pkt->sctx.clean_stream_by_id(pkt->match_ctx.pkt->sid);
stream.client_data_callback(bind(keep_fin_packet, sctx)); stream.client_data_callback(bind(keep_fin_packet, pkt));
stream.server_data_callback(bind(keep_fin_packet, sctx)); stream.server_data_callback(bind(keep_fin_packet, pkt));
} }
sctx->match_info.result = result; pkt->match_ctx.result = result;
} }
//Input data filtering //Input data filtering
static void on_client_data(Stream& stream, stream_ctx* sctx) { static void on_client_data(Stream& stream, PyProxyQueue* pkt) {
sctx->match_info.pkt_info->is_input = true; on_data_recv(stream, pkt, string(stream.client_payload().begin(), stream.client_payload().end()));
on_data_recv(stream, sctx, string(stream.client_payload().begin(), stream.client_payload().end()));
} }
//Server data filtering //Server data filtering
static void on_server_data(Stream& stream, stream_ctx* sctx) { static void on_server_data(Stream& stream, PyProxyQueue* pkt) {
sctx->match_info.pkt_info->is_input = false; on_data_recv(stream, pkt, string(stream.server_payload().begin(), stream.server_payload().end()));
on_data_recv(stream, sctx, string(stream.server_payload().begin(), stream.server_payload().end()));
} }
// A stream was terminated. The second argument is the reason why it was terminated // A stream was terminated. The second argument is the reason why it was terminated
static void on_stream_close(Stream& stream, stream_ctx* sctx) { static void on_stream_close(Stream& stream, PyProxyQueue* pkt) {
stream_id stream_id = stream_id::make_identifier(stream); stream_id stream_id = stream_id::make_identifier(stream);
sctx->clean_stream_by_id(stream_id); pkt->sctx.clean_stream_by_id(stream_id);
} }
static void on_new_stream(Stream& stream, stream_ctx* sctx) { static void on_new_stream(Stream& stream, PyProxyQueue* pkt) {
stream.auto_cleanup_payloads(true); stream.auto_cleanup_payloads(true);
if (stream.is_partial_stream()) { if (stream.is_partial_stream()) {
//TODO take a decision about this... //TODO take a decision about this...
stream.enable_recovery_mode(10 * 1024); stream.enable_recovery_mode(10 * 1024);
} }
stream.client_data_callback(bind(on_client_data, placeholders::_1, sctx)); stream.client_data_callback(bind(on_client_data, placeholders::_1, pkt));
stream.server_data_callback(bind(on_server_data, placeholders::_1, sctx)); stream.server_data_callback(bind(on_server_data, placeholders::_1, pkt));
stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, sctx)); stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, pkt));
} }
template<typename T>
static void build_verdict(T packet, uint8_t *payload, uint16_t plen, nlmsghdr *nlh_verdict, nfqnl_msg_packet_hdr *ph, stream_ctx* sctx, bool is_ipv6){ void handle_next_packet(NfQueue::PktRequest<PyProxyQueue>* pkt) override{
Tins::TCP* tcp = packet.template find_pdu<Tins::TCP>(); if (pkt->l4_proto != NfQueue::L4Proto::TCP){
if (!tcp){
throw invalid_argument("Only TCP and UDP are supported"); throw invalid_argument("Only TCP and UDP are supported");
} }
Tins::PDU* application_layer = tcp->inner_pdu(); Tins::PDU* application_layer = pkt->tcp->inner_pdu();
u_int16_t payload_size = 0; u_int16_t payload_size = 0;
if (application_layer != nullptr){ if (application_layer != nullptr){
payload_size = application_layer->size(); payload_size = application_layer->size();
} }
packet_info pktinfo{ match_ctx.matching_has_been_called = false;
payload: string(payload+plen - payload_size, payload+plen), match_ctx.pkt = pkt;
sid: stream_id::make_identifier(packet), if (pkt->is_ipv6){
is_ipv6: is_ipv6, follower.process_packet(*pkt->ipv6);
sctx: sctx, }else{
packet_pdu: &packet, follower.process_packet(*pkt->ipv4);
tcp: tcp, }
};
sctx->match_info.matching_has_been_called = false;
sctx->match_info.pkt_info = &pktinfo;
sctx->follower.process_packet(packet);
// Do an action only is an ordered packet has been received // Do an action only is an ordered packet has been received
if (sctx->match_info.matching_has_been_called){ if (match_ctx.matching_has_been_called){
bool empty_payload = pktinfo.payload.empty(); bool empty_payload = payload_size == 0;
//In this 2 cases we have to remove all data about the stream //In this 2 cases we have to remove all data about the stream
if (!sctx->match_info.result || sctx->match_info.already_closed){ if (!match_ctx.result || match_ctx.already_closed){
#ifdef DEBUG sctx.clean_stream_by_id(pkt->sid);
cerr << "[DEBUG] [NetfilterQueue.build_verdict] Stream matched, removing all data about it" << endl;
#endif
sctx->clean_stream_by_id(pktinfo.sid);
//If the packet has data, we have to remove it //If the packet has data, we have to remove it
if (!empty_payload){ if (!empty_payload){
Tins::PDU* data_layer = tcp->release_inner_pdu(); Tins::PDU* data_layer = pkt->tcp->release_inner_pdu();
if (data_layer != nullptr){ if (data_layer != nullptr){
delete data_layer; delete data_layer;
} }
@@ -146,64 +143,23 @@ class PyProxyQueue: public NfQueueExecutor {
//For the first matched data or only for data packets, we set FIN bit //For the first matched data or only for data packets, we set FIN bit
//This only for client packets, because this will trigger server to close the connection //This only for client packets, because this will trigger server to close the connection
//Packets will be filtered anyway also if client don't send packets //Packets will be filtered anyway also if client don't send packets
if ((!sctx->match_info.result || !empty_payload) && is_input){ if ((!match_ctx.result || !empty_payload) && pkt->is_input){
tcp->set_flag(Tins::TCP::FIN,1); pkt->tcp->set_flag(Tins::TCP::FIN,1);
tcp->set_flag(Tins::TCP::ACK,1); pkt->tcp->set_flag(Tins::TCP::ACK,1);
tcp->set_flag(Tins::TCP::SYN,0); pkt->tcp->set_flag(Tins::TCP::SYN,0);
} }
//Send the edited packet to the kernel //Send the edited packet to the kernel
nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.serialize().data(), packet.size()); return pkt->mangle();
} }
} }
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT ); return pkt->accept();
} }
static int queue_cb(const nlmsghdr *nlh, const mnl_socket* nl, void *data_ptr) {
stream_ctx* sctx = (stream_ctx*)data_ptr;
//Extract attributes from the nlmsghdr
nlattr *attr[NFQA_MAX+1] = {};
if (nfq_nlmsg_parse(nlh, attr) < 0) {
perror("problems parsing");
return MNL_CB_ERROR;
}
if (attr[NFQA_PACKET_HDR] == nullptr) {
fputs("metaheader not set\n", stderr);
return MNL_CB_ERROR;
}
//Get Payload
uint16_t plen = mnl_attr_get_payload_len(attr[NFQA_PAYLOAD]);
uint8_t *payload = (uint8_t *)mnl_attr_get_payload(attr[NFQA_PAYLOAD]);
//Return result to the kernel
struct nfqnl_msg_packet_hdr *ph = (nfqnl_msg_packet_hdr*) mnl_attr_get_payload(attr[NFQA_PACKET_HDR]);
struct nfgenmsg *nfg = (nfgenmsg *)mnl_nlmsg_get_payload(nlh);
char buf[MNL_SOCKET_BUFFER_SIZE];
struct nlmsghdr *nlh_verdict;
nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));
// Check IP protocol version
if ( (payload[0] & 0xf0) == 0x40 ){
build_verdict(Tins::IP(payload, plen), payload, plen, nlh_verdict, ph, sctx, false);
}else{
build_verdict(Tins::IPv6(payload, plen), payload, plen, nlh_verdict, ph, sctx, true);
}
if (mnl_socket_sendto(nl, nlh_verdict, nlh_verdict->nlmsg_len) < 0) {
throw runtime_error( "mnl_socket_send" );
}
return MNL_CB_OK;
}
PyProxyQueue(int queue) : NfQueueExecutor(queue, &queue_cb) {}
~PyProxyQueue() { ~PyProxyQueue() {
sctx.clean(); sctx.clean();
} }
}; };
}}
#endif // PROXY_TUNNEL_CLASS_CPP #endif // PROXY_TUNNEL_CLASS_CPP


@@ -1,21 +1,17 @@
 #ifndef PROXY_TUNNEL_SETTINGS_CPP
 #define PROXY_TUNNEL_SETTINGS_CPP
-#include <iostream>
-#include <cstring>
-#include <sstream>
-#include "../utils.hpp"
 #include <vector>
-#include <hs.h>
 #include <memory>
 using namespace std;
 class PyCodeConfig{
     public:
-    const string code;
+    const vector<uint8_t> code;
     public:
-    PyCodeConfig(string pycode): code(pycode){}
+    PyCodeConfig(vector<uint8_t> pycode): code(pycode){}
+    PyCodeConfig(): code(vector<uint8_t>()){}
     ~PyCodeConfig(){}
 };


@@ -3,11 +3,8 @@
 #define STREAM_CTX_CPP
 #include <iostream>
-#include <tins/tcp_ip/stream_follower.h>
 #include <tins/tcp_ip/stream_identifier.h>
-using Tins::TCPIP::Stream;
-using Tins::TCPIP::StreamFollower;
 using namespace std;
 typedef Tins::TCPIP::StreamIdentifier stream_id;
@@ -17,44 +14,25 @@ struct pyfilter_ctx {
     string pycode;
 };
-typedef map<stream_id, pyfilter_ctx> matching_map;
+typedef map<stream_id, pyfilter_ctx*> matching_map;
-struct packet_info;
-struct tcp_stream_tmp {
-    bool matching_has_been_called = false;
-    bool already_closed = false;
-    bool result;
-    packet_info *pkt_info;
-};
 struct stream_ctx {
     matching_map streams_ctx;
-    StreamFollower follower;
-    tcp_stream_tmp match_info;
     void clean_stream_by_id(stream_id sid){
         auto stream_search = streams_ctx.find(sid);
         if (stream_search != streams_ctx.end()){
             auto stream_match = stream_search->second;
             //DEALLOC PY GLOB TODO
+            delete stream_match;
         }
     }
     void clean(){
         for (auto ele: streams_ctx){
             //TODO dealloc ele.second.pyglob
+            delete ele.second;
         }
     }
 };
-struct packet_info {
-    string payload;
-    stream_id sid;
-    bool is_input;
-    bool is_ipv6;
-    stream_ctx* sctx;
-    Tins::PDU* packet_pdu;
-    Tins::TCP* tcp;
-};
 #endif // STREAM_CTX_CPP


@@ -4,13 +4,16 @@
 #include <iostream>
 #include <cstring>
 #include <sstream>
-#include "../utils.hpp"
+#include "../utils.cpp"
 #include <vector>
 #include <hs.h>
 #include <memory>
 using namespace std;
+namespace Firegex {
+namespace Regex {
 enum FilterDirection{ CTOS, STOC };
 struct decoded_regex {
@@ -181,10 +184,6 @@ void inline scratch_setup(regex_ruleset &conf, hs_scratch_t* & scratch){
     }
 }
-struct matched_data{
-    unsigned int matched = 0;
-    bool has_matched = false;
-};
+}}
 #endif // REGEX_FILTER_CPP


@@ -15,53 +15,59 @@
#include <thread> #include <thread>
#include <hs.h> #include <hs.h>
#include <syncstream> #include <syncstream>
#include <functional>
#include <iostream> #include <iostream>
#include "../classes/netfilter.cpp" #include "../classes/netfilter.cpp"
#include "stream_ctx.cpp" #include "stream_ctx.cpp"
#include "regex_rules.cpp" #include "regex_rules.cpp"
using Tins::TCPIP::Stream;
using Tins::TCPIP::StreamFollower;
using namespace std; using namespace std;
class RegexQueue: public NfQueueExecutor {
public: namespace Firegex {
namespace Regex {
using Tins::TCPIP::Stream;
using Tins::TCPIP::StreamFollower;
class RegexNfQueue : public NfQueue::ThreadNfQueue<RegexNfQueue> {
public:
stream_ctx sctx; stream_ctx sctx;
u_int16_t latest_config_ver = 0;
StreamFollower follower;
struct {
bool matching_has_been_called = false;
bool already_closed = false;
bool result;
NfQueue::PktRequest<RegexNfQueue>* pkt;
} match_ctx;
void before_loop() override {
sctx.follower.new_stream_callback(bind(on_new_stream, placeholders::_1, &sctx));
sctx.follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, &sctx));
}
void * callback_data_fetch() override{ bool filter_action(NfQueue::PktRequest<RegexNfQueue>* pkt){
return &sctx;
}
static bool filter_action(packet_info& info){
shared_ptr<RegexRules> conf = regex_config; shared_ptr<RegexRules> conf = regex_config;
auto current_version = conf->ver();
if (current_version != info.sctx->latest_config_ver){
#ifdef DEBUG
cerr << "[DEBUG] [filter_callback] Configuration has changed (" << current_version << "!=" << info.sctx->latest_config_ver << "), cleaning scratch spaces" << endl;
#endif
info.sctx->clean();
info.sctx->latest_config_ver = current_version;
}
scratch_setup(conf->input_ruleset, info.sctx->in_scratch);
scratch_setup(conf->output_ruleset, info.sctx->out_scratch);
hs_database_t* regex_matcher = info.is_input ? conf->input_ruleset.hs_db : conf->output_ruleset.hs_db; auto current_version = conf->ver();
if (current_version != latest_config_ver){
sctx.clean();
latest_config_ver = current_version;
}
scratch_setup(conf->input_ruleset, sctx.in_scratch);
scratch_setup(conf->output_ruleset, sctx.out_scratch);
hs_database_t* regex_matcher = pkt->is_input ? conf->input_ruleset.hs_db : conf->output_ruleset.hs_db;
if (regex_matcher == nullptr){ if (regex_matcher == nullptr){
return true; return true;
} }
#ifdef DEBUG struct matched_data{
cerr << "[DEBUG] [filter_callback] Matching packet with " << (info.is_input ? "input" : "output") << " ruleset" << endl; unsigned int matched = 0;
#endif bool has_matched = false;
} match_res;
matched_data match_res;
hs_error_t err; hs_error_t err;
hs_scratch_t* scratch_space = info.is_input ? info.sctx->in_scratch: info.sctx->out_scratch; hs_scratch_t* scratch_space = pkt->is_input ? sctx.in_scratch: sctx.out_scratch;
auto match_func = [](unsigned int id, auto from, auto to, auto flags, auto ctx){ auto match_func = [](unsigned int id, auto from, auto to, auto flags, auto ctx){
auto res = (matched_data*)ctx; auto res = (matched_data*)ctx;
res->has_matched = true; res->has_matched = true;
@@ -70,49 +76,32 @@ class RegexQueue: public NfQueueExecutor {
}; };
hs_stream_t* stream_match; hs_stream_t* stream_match;
if (conf->stream_mode()){ if (conf->stream_mode()){
matching_map* match_map = info.is_input ? &info.sctx->in_hs_streams : &info.sctx->out_hs_streams; matching_map* match_map = pkt->is_input ? &sctx.in_hs_streams : &sctx.out_hs_streams;
#ifdef DEBUG auto stream_search = match_map->find(pkt->sid);
cerr << "[DEBUG] [filter_callback] Dumping match_map " << match_map << endl;
for (auto ele: *match_map){
cerr << "[DEBUG] [filter_callback] " << ele.first << " -> " << ele.second << endl;
}
cerr << "[DEBUG] [filter_callback] End of match_map" << endl;
#endif
auto stream_search = match_map->find(info.sid);
if (stream_search == match_map->end()){ if (stream_search == match_map->end()){
#ifdef DEBUG
cerr << "[DEBUG] [filter_callback] Creating new stream matcher for " << info.sid << endl;
#endif
if (hs_open_stream(regex_matcher, 0, &stream_match) != HS_SUCCESS) { if (hs_open_stream(regex_matcher, 0, &stream_match) != HS_SUCCESS) {
cerr << "[error] [filter_callback] Error opening the stream matcher (hs)" << endl; cerr << "[error] [filter_callback] Error opening the stream matcher (hs)" << endl;
throw invalid_argument("Cannot open stream match on hyperscan"); throw invalid_argument("Cannot open stream match on hyperscan");
} }
if (info.is_tcp){ if (pkt->l4_proto == NfQueue::L4Proto::TCP){
match_map->insert_or_assign(info.sid, stream_match); match_map->insert_or_assign(pkt->sid, stream_match);
} }
}else{ }else{
stream_match = stream_search->second; stream_match = stream_search->second;
} }
#ifdef DEBUG
cerr << "[DEBUG] [filter_callback] Matching as a stream" << endl;
#endif
err = hs_scan_stream( err = hs_scan_stream(
stream_match,info.payload.c_str(), info.payload.length(), stream_match,pkt->data.c_str(), pkt->data.size(),
0, scratch_space, match_func, &match_res 0, scratch_space, match_func, &match_res
); );
}else{ }else{
#ifdef DEBUG
cerr << "[DEBUG] [filter_callback] Matching as a block" << endl;
#endif
err = hs_scan( err = hs_scan(
regex_matcher,info.payload.c_str(), info.payload.length(), regex_matcher,pkt->data.c_str(), pkt->data.size(),
0, scratch_space, match_func, &match_res 0, scratch_space, match_func, &match_res
); );
} }
if ( if (
!info.is_tcp && conf->stream_mode() && pkt->l4_proto != NfQueue::L4Proto::TCP && conf->stream_mode() &&
hs_close_stream(stream_match, scratch_space, nullptr, nullptr) != HS_SUCCESS hs_close_stream(stream_match, scratch_space, nullptr, nullptr) != HS_SUCCESS
){ ){
cerr << "[error] [filter_callback] Error closing the stream matcher (hs)" << endl; cerr << "[error] [filter_callback] Error closing the stream matcher (hs)" << endl;
@@ -123,117 +112,34 @@ class RegexQueue: public NfQueueExecutor {
throw invalid_argument("Error while matching the stream with hyperscan"); throw invalid_argument("Error while matching the stream with hyperscan");
} }
if (match_res.has_matched){ if (match_res.has_matched){
auto rules_vector = info.is_input ? conf->input_ruleset.regexes : conf->output_ruleset.regexes; auto& rules_vector = pkt->is_input ? conf->input_ruleset.regexes : conf->output_ruleset.regexes;
osyncstream(cout) << "BLOCKED " << rules_vector[match_res.matched] << endl; osyncstream(cout) << "BLOCKED " << rules_vector[match_res.matched] << endl;
return false; return false;
} }
return true; return true;
} }
//If the stream has already been matched, drop all data, and try to close the connection void handle_next_packet(NfQueue::PktRequest<RegexNfQueue>* pkt) override{
static void keep_fin_packet(stream_ctx* sctx){ bool empty_payload = pkt->data.size() == 0;
sctx->match_info.matching_has_been_called = true; if (pkt->tcp){
sctx->match_info.already_closed = true; match_ctx.matching_has_been_called = false;
} match_ctx.pkt = pkt;
static void on_data_recv(Stream& stream, stream_ctx* sctx, string data) { if (pkt->ipv4){
sctx->match_info.matching_has_been_called = true; follower.process_packet(*pkt->ipv4);
sctx->match_info.already_closed = false;
bool result = filter_action(*sctx->match_info.pkt_info);
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.on_data_recv] result: " << result << endl;
#endif
if (!result){
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.on_data_recv] Stream matched, removing all data about it" << endl;
#endif
sctx->clean_stream_by_id(sctx->match_info.pkt_info->sid);
stream.client_data_callback(bind(keep_fin_packet, sctx));
stream.server_data_callback(bind(keep_fin_packet, sctx));
}
sctx->match_info.result = result;
}
//Input data filtering
static void on_client_data(Stream& stream, stream_ctx* sctx) {
on_data_recv(stream, sctx, string(stream.client_payload().begin(), stream.client_payload().end()));
}
//Server data filtering
static void on_server_data(Stream& stream, stream_ctx* sctx) {
on_data_recv(stream, sctx, string(stream.server_payload().begin(), stream.server_payload().end()));
}
// A stream was terminated. The second argument is the reason why it was terminated
static void on_stream_close(Stream& stream, stream_ctx* sctx) {
stream_id stream_id = stream_id::make_identifier(stream);
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.on_stream_close] Stream terminated, deleting all data" << endl;
#endif
sctx->clean_stream_by_id(stream_id);
}
static void on_new_stream(Stream& stream, stream_ctx* sctx) {
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.on_new_stream] New stream detected" << endl;
#endif
stream.auto_cleanup_payloads(true);
if (stream.is_partial_stream()) {
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.on_new_stream] Partial stream detected" << endl;
#endif
stream.enable_recovery_mode(10 * 1024);
}
stream.client_data_callback(bind(on_client_data, placeholders::_1, sctx));
stream.server_data_callback(bind(on_server_data, placeholders::_1, sctx));
stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, sctx));
}
template<typename T>
static void build_verdict(T packet, uint8_t *payload, uint16_t plen, nlmsghdr *nlh_verdict, nfqnl_msg_packet_hdr *ph, stream_ctx* sctx, bool is_input, bool is_ipv6){
Tins::TCP* tcp = packet.template find_pdu<Tins::TCP>();
if (tcp){
Tins::PDU* application_layer = tcp->inner_pdu();
u_int16_t payload_size = 0;
if (application_layer != nullptr){
payload_size = application_layer->size();
}
packet_info pktinfo{
payload: string(payload+plen - payload_size, payload+plen),
sid: stream_id::make_identifier(packet),
is_input: is_input,
is_tcp: true,
is_ipv6: is_ipv6,
sctx: sctx,
packet_pdu: &packet,
layer4_pdu: tcp,
};
sctx->match_info.matching_has_been_called = false;
sctx->match_info.pkt_info = &pktinfo;
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.build_verdict] TCP Packet received " << packet.src_addr() << ":" << tcp->sport() << " -> " << packet.dst_addr() << ":" << tcp->dport() << " thr: " << this_thread::get_id() << ", sending to libtins StreamFollower" << endl;
#endif
sctx->follower.process_packet(packet);
#ifdef DEBUG
if (sctx->tcp_match_util.matching_has_been_called){
cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has called matching functions" << endl;
}else{ }else{
cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has NOT called matching functions" << endl; follower.process_packet(*pkt->ipv6);
} }
#endif
// Do an action only is an ordered packet has been received // Do an action only is an ordered packet has been received
if (sctx->match_info.matching_has_been_called){ if (match_ctx.matching_has_been_called){
bool empty_payload = pktinfo.payload.empty();
//In this 2 cases we have to remove all data about the stream //In this 2 cases we have to remove all data about the stream
if (!sctx->match_info.result || sctx->match_info.already_closed){ if (!match_ctx.result || match_ctx.already_closed){
#ifdef DEBUG sctx.clean_stream_by_id(pkt->sid);
cerr << "[DEBUG] [NetfilterQueue.build_verdict] Stream matched, removing all data about it" << endl;
#endif
sctx->clean_stream_by_id(pktinfo.sid);
//If the packet has data, we have to remove it //If the packet has data, we have to remove it
if (!empty_payload){ if (!empty_payload){
Tins::PDU* data_layer = tcp->release_inner_pdu(); Tins::PDU* data_layer = pkt->tcp->release_inner_pdu();
if (data_layer != nullptr){ if (data_layer != nullptr){
delete data_layer; delete data_layer;
} }
@@ -241,106 +147,83 @@ class RegexQueue: public NfQueueExecutor {
//For the first matched data or only for data packets, we set FIN bit //For the first matched data or only for data packets, we set FIN bit
//This only for client packets, because this will trigger server to close the connection //This only for client packets, because this will trigger server to close the connection
//Packets will be filtered anyway also if client don't send packets //Packets will be filtered anyway also if client don't send packets
if ((!sctx->match_info.result || !empty_payload) && is_input){ if ((!match_ctx.result || !empty_payload) && pkt->is_input){
tcp->set_flag(Tins::TCP::FIN,1); pkt->tcp->set_flag(Tins::TCP::FIN,1);
tcp->set_flag(Tins::TCP::ACK,1); pkt->tcp->set_flag(Tins::TCP::ACK,1);
tcp->set_flag(Tins::TCP::SYN,0); pkt->tcp->set_flag(Tins::TCP::SYN,0);
} }
//Send the edited packet to the kernel //Send the edited packet to the kernel
nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.serialize().data(), packet.size()); return pkt->mangle();
} }
} }
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT ); return pkt->accept();
}else{ }else{
Tins::UDP* udp = packet.template find_pdu<Tins::UDP>(); if (!pkt->udp){
if (!udp){
throw invalid_argument("Only TCP and UDP are supported"); throw invalid_argument("Only TCP and UDP are supported");
} }
Tins::PDU* application_layer = udp->inner_pdu(); if(empty_payload){
u_int16_t payload_size = 0; return pkt->accept();
if (application_layer != nullptr){ }else if (filter_action(pkt)){
payload_size = application_layer->size(); return pkt->accept();
}
if((udp->inner_pdu() == nullptr)){
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
}
packet_info pktinfo{
payload: string(payload+plen - payload_size, payload+plen),
sid: stream_id::make_identifier(packet),
is_input: is_input,
is_tcp: false,
is_ipv6: is_ipv6,
sctx: sctx,
packet_pdu: &packet,
layer4_pdu: udp,
};
if (filter_action(pktinfo)){
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
}else{ }else{
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_DROP ); return pkt->drop();
} }
} }
} }
//If the stream has already been matched, drop all data, and try to close the connection
static int queue_cb(const nlmsghdr *nlh, const mnl_socket* nl, void *data_ptr) { static void keep_fin_packet(RegexNfQueue* nfq){
nfq->match_ctx.matching_has_been_called = true;
stream_ctx* sctx = (stream_ctx*)data_ptr; nfq->match_ctx.already_closed = true;
//Extract attributes from the nlmsghdr
nlattr *attr[NFQA_MAX+1] = {};
if (nfq_nlmsg_parse(nlh, attr) < 0) {
perror("problems parsing");
return MNL_CB_ERROR;
}
if (attr[NFQA_PACKET_HDR] == nullptr) {
fputs("metaheader not set\n", stderr);
return MNL_CB_ERROR;
}
if (attr[NFQA_MARK] == nullptr) {
fputs("mark not set\n", stderr);
return MNL_CB_ERROR;
}
//Get Payload
uint16_t plen = mnl_attr_get_payload_len(attr[NFQA_PAYLOAD]);
uint8_t *payload = (uint8_t *)mnl_attr_get_payload(attr[NFQA_PAYLOAD]);
//Return result to the kernel
struct nfqnl_msg_packet_hdr *ph = (nfqnl_msg_packet_hdr*) mnl_attr_get_payload(attr[NFQA_PACKET_HDR]);
struct nfgenmsg *nfg = (nfgenmsg *)mnl_nlmsg_get_payload(nlh);
char buf[MNL_SOCKET_BUFFER_SIZE];
struct nlmsghdr *nlh_verdict;
nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));
bool is_input = ntohl(mnl_attr_get_u32(attr[NFQA_MARK])) & 0x1; // == 0x1337 that is odd
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet received" << endl;
cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet ID: " << ntohl(ph->packet_id) << endl;
cerr << "[DEBUG] [NetfilterQueue.queue_cb] Payload size: " << plen << endl;
cerr << "[DEBUG] [NetfilterQueue.queue_cb] Is input: " << is_input << endl;
#endif
// Check IP protocol version
if ( (payload[0] & 0xf0) == 0x40 ){
build_verdict(Tins::IP(payload, plen), payload, plen, nlh_verdict, ph, sctx, is_input, false);
}else{
build_verdict(Tins::IPv6(payload, plen), payload, plen, nlh_verdict, ph, sctx, is_input, true);
} }
if (mnl_socket_sendto(nl, nlh_verdict, nlh_verdict->nlmsg_len) < 0) { static void on_data_recv(Stream& stream, RegexNfQueue* nfq, string data) {
throw runtime_error( "mnl_socket_send" ); nfq->match_ctx.matching_has_been_called = true;
nfq->match_ctx.already_closed = false;
bool result = nfq->filter_action(nfq->match_ctx.pkt);
if (!result){
nfq->sctx.clean_stream_by_id(nfq->match_ctx.pkt->sid);
stream.client_data_callback(bind(keep_fin_packet, nfq));
stream.server_data_callback(bind(keep_fin_packet, nfq));
}
nfq->match_ctx.result = result;
} }
return MNL_CB_OK; //Input data filtering
static void on_client_data(Stream& stream, RegexNfQueue* nfq) {
on_data_recv(stream, nfq, string(stream.client_payload().begin(), stream.client_payload().end()));
} }
RegexQueue(int queue) : NfQueueExecutor(queue, &queue_cb) {} //Server data filtering
static void on_server_data(Stream& stream, RegexNfQueue* nfq) {
on_data_recv(stream, nfq, string(stream.server_payload().begin(), stream.server_payload().end()));
}
~RegexQueue() { // A stream was terminated. The second argument is the reason why it was terminated
static void on_stream_close(Stream& stream, RegexNfQueue* nfq) {
stream_id stream_id = stream_id::make_identifier(stream);
nfq->sctx.clean_stream_by_id(stream_id);
}
static void on_new_stream(Stream& stream, RegexNfQueue* nfq) {
stream.auto_cleanup_payloads(true);
if (stream.is_partial_stream()) {
stream.enable_recovery_mode(10 * 1024);
}
stream.client_data_callback(bind(on_client_data, placeholders::_1, nfq));
stream.server_data_callback(bind(on_server_data, placeholders::_1, nfq));
stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, nfq));
}
void before_loop() override{
follower.new_stream_callback(bind(on_new_stream, placeholders::_1, this));
follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, this));
}
~RegexNfQueue(){
sctx.clean(); sctx.clean();
} }
}; };
}}
#endif // REGEX_FILTER_CLASS_CPP #endif // REGEX_FILTER_CLASS_CPP


@@ -4,30 +4,19 @@
#include <iostream> #include <iostream>
#include <hs.h> #include <hs.h>
#include <tins/tcp_ip/stream_follower.h>
#include <tins/tcp_ip/stream_identifier.h> #include <tins/tcp_ip/stream_identifier.h>
#include <functional>
#include <map>
#include "regexfilter.cpp"
using Tins::TCPIP::Stream;
using Tins::TCPIP::StreamFollower;
using namespace std; using namespace std;
namespace Firegex {
namespace Regex {
typedef Tins::TCPIP::StreamIdentifier stream_id; typedef Tins::TCPIP::StreamIdentifier stream_id;
typedef map<stream_id, hs_stream_t*> matching_map; typedef map<stream_id, hs_stream_t*> matching_map;
/* Considered using an unordered_map keyed by this hash of stream_id
namespace std {
template<>
struct hash<stream_id> {
size_t operator()(const stream_id& sid) const
{
return std::hash<std::uint32_t>()(sid.max_address[0] + sid.max_address[1] + sid.max_address[2] + sid.max_address[3] + sid.max_address_port + sid.min_address[0] + sid.min_address[1] + sid.min_address[2] + sid.min_address[3] + sid.min_address_port);
}
};
}
*/
#ifdef DEBUG #ifdef DEBUG
ostream& operator<<(ostream& os, const Tins::TCPIP::StreamIdentifier::address_type &sid){ ostream& operator<<(ostream& os, const Tins::TCPIP::StreamIdentifier::address_type &sid){
bool first_print = false; bool first_print = false;
@@ -46,24 +35,11 @@ ostream& operator<<(ostream& os, const stream_id &sid){
} }
#endif #endif
struct packet_info;
struct tcp_stream_tmp {
bool matching_has_been_called = false;
bool already_closed = false;
bool result;
packet_info *pkt_info;
};
struct stream_ctx { struct stream_ctx {
matching_map in_hs_streams; matching_map in_hs_streams;
matching_map out_hs_streams; matching_map out_hs_streams;
hs_scratch_t* in_scratch = nullptr; hs_scratch_t* in_scratch = nullptr;
hs_scratch_t* out_scratch = nullptr; hs_scratch_t* out_scratch = nullptr;
u_int16_t latest_config_ver = 0;
StreamFollower follower;
tcp_stream_tmp match_info;
void clean_scratches(){ void clean_scratches(){
if (out_scratch != nullptr){ if (out_scratch != nullptr){
@@ -77,9 +53,6 @@ struct stream_ctx {
} }
void clean_stream_by_id(stream_id sid){ void clean_stream_by_id(stream_id sid){
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.clean_stream_by_id] Cleaning stream context of " << sid << endl;
#endif
auto stream_search = in_hs_streams.find(sid); auto stream_search = in_hs_streams.find(sid);
hs_stream_t* stream_match; hs_stream_t* stream_match;
if (stream_search != in_hs_streams.end()){ if (stream_search != in_hs_streams.end()){
@@ -103,11 +76,6 @@ struct stream_ctx {
} }
void clean(){ void clean(){
#ifdef DEBUG
cerr << "[DEBUG] [NetfilterQueue.clean] Cleaning stream context" << endl;
#endif
if (in_scratch){ if (in_scratch){
for(auto ele: in_hs_streams){ for(auto ele: in_hs_streams){
if (hs_close_stream(ele.second, in_scratch, nullptr, nullptr) != HS_SUCCESS) { if (hs_close_stream(ele.second, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
@@ -131,16 +99,5 @@ struct stream_ctx {
} }
}; };
struct packet_info { }}
string payload;
stream_id sid;
bool is_input;
bool is_tcp;
bool is_ipv6;
stream_ctx* sctx;
Tins::PDU* packet_pdu;
Tins::PDU* layer4_pdu;
};
#endif // STREAM_CTX_CPP #endif // STREAM_CTX_CPP

64 backend/binsrc/utils.cpp Normal file

@@ -0,0 +1,64 @@
#include <string>
#include <unistd.h>
#include <queue>
#include <mutex>
#include <condition_variable>
#ifndef UTILS_CPP
#define UTILS_CPP
bool unhexlify(std::string const &hex, std::string &newString) {
try{
int len = hex.length();
for(int i=0; i< len; i+=2)
{
std::string byte = hex.substr(i,2);
char chr = (char) (int)strtol(byte.c_str(), nullptr, 16);
newString.push_back(chr);
}
return true;
}
catch (...){
return false;
}
}
template<typename T, int MAX = 1024> // same as the kernel nfqueue maximum
class BlockingQueue
{
private:
std::mutex mut;
std::queue<T> private_std_queue;
std::condition_variable condNotEmpty;
std::condition_variable condNotFull;
size_t count = 0; // guarded by mut
public:
void put(T new_value)
{
std::unique_lock<std::mutex> lk(mut);
//Block while the queue is full (count == MAX)
condNotFull.wait(lk,[this]{
if (count == MAX) {
return false;
}else{
return true;
}
});
private_std_queue.push(new_value);
count++;
condNotEmpty.notify_one();
}
void take(T& value)
{
std::unique_lock<std::mutex> lk(mut);
//Block while the queue is empty
condNotEmpty.wait(lk,[this]{return !private_std_queue.empty();});
value=private_std_queue.front();
private_std_queue.pop();
count--;
condNotFull.notify_one();
}
};
#endif // UTILS_CPP
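
For readers skimming the new helper file, a minimal usage sketch follows. It is not part of the commit: the hex string, the item count and all local names are invented for illustration, and including the .cpp directly simply mirrors how other translation units in binsrc are composed. It exercises unhexlify() plus the bounded producer/consumer behaviour of BlockingQueue (put() blocks once MAX items are queued, take() blocks while the queue is empty).

#include <iostream>
#include <thread>
#include "utils.cpp"

int main() {
    std::string decoded;
    if (unhexlify("68656c6c6f", decoded)) {        // decodes to "hello"
        std::cout << "decoded: " << decoded << std::endl;
    }

    BlockingQueue<int> queue;                      // default MAX = 1024
    std::thread producer([&]{
        for (int i = 0; i < 5; i++) queue.put(i);  // would block if the queue were full
    });
    for (int i = 0; i < 5; i++) {
        int value;
        queue.take(value);                         // blocks until an item is available
        std::cout << "got " << value << std::endl;
    }
    producer.join();
    return 0;
}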


@@ -1,23 +0,0 @@
#include <string>
#include <unistd.h>
#ifndef UTILS_HPP
#define UTILS_HPP
bool unhexlify(std::string const &hex, std::string &newString) {
try{
int len = hex.length();
for(int i=0; i< len; i+=2)
{
std::string byte = hex.substr(i,2);
char chr = (char) (int)strtol(byte.c_str(), nullptr, 16);
newString.push_back(chr);
}
return true;
}
catch (...){
return false;
}
}
#endif


@@ -57,9 +57,9 @@ class FiregexInterceptor:
self.process.kill() self.process.kill()
raise Exception("Invalid binary output") raise Exception("Invalid binary output")
line = line_fut.decode() line = line_fut.decode()
if line.startswith("QUEUES "): if line.startswith("QUEUE "):
params = line.split() params = line.split()
return (int(params[1]), int(params[2])) return (int(params[1]), int(params[1]))
else: else:
self.process.kill() self.process.kill()
raise Exception("Invalid binary output") raise Exception("Invalid binary output")
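
The handshake format changes here: the old binary announced a queue range as "QUEUES <init> <end>", while the refactored one announces a single id that the Python side now reuses for both ends of the tuple (the same change appears again in the second interceptor below). A minimal sketch of the C++ side of that handshake, assuming only what this parser expects; the real binary may well print more:

// Hypothetical announcement matching the "QUEUE <num>" line parsed above.
#include <iostream>

void announce_queue(unsigned queue_num) {
    // std::endl flushes, so the readline() on the Python side does not block.
    std::cout << "QUEUE " << queue_num << std::endl;
}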


@@ -62,6 +62,7 @@ class FiregexTables(NFTableManager):
"expr": [ "expr": [
{'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'saddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}}, {'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'saddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}},
{'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "sport"}}, "op": "==", "right": int(srv.port)}}, {'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "sport"}}, "op": "==", "right": int(srv.port)}},
{"mangle": {"key": {"meta": {"key": "mark"}},"value": 0x1338}},
{"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}} {"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}}
] ]
}}}, }}},
@@ -72,6 +73,7 @@ class FiregexTables(NFTableManager):
"expr": [ "expr": [
{'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'daddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}}, {'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'daddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}},
{'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "dport"}}, "op": "==", "right": int(srv.port)}}, {'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "dport"}}, "op": "==", "right": int(srv.port)}},
{"mangle": {"key": {"meta": {"key": "mark"}},"value": 0x1337}},
{"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}} {"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}}
] ]
}}} }}}
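
These two rules now also set a packet mark before queueing: 0x1337 on traffic addressed to the protected service and 0x1338 on traffic leaving it. Since 0x1337 is odd and 0x1338 is even, the queue side only needs the low bit of the NFQA_MARK attribute to recover the direction, as in this sketch (function name invented; the check mirrors the one in the C++ callback above):

#include <cstdint>

// 0x1337 (towards the service) -> true, 0x1338 (from the service) -> false
inline bool is_input_packet(uint32_t nfmark) {
    return (nfmark & 0x1) != 0;
}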


@@ -84,7 +84,7 @@ class FiregexInterceptor:
return self return self
async def _start_binary(self): async def _start_binary(self):
proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cppqueue") proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cppregex")
self.process = await asyncio.create_subprocess_exec( self.process = await asyncio.create_subprocess_exec(
proxy_binary_path, proxy_binary_path,
stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
@@ -97,9 +97,9 @@ class FiregexInterceptor:
self.process.kill() self.process.kill()
raise Exception("Invalid binary output") raise Exception("Invalid binary output")
line = line_fut.decode() line = line_fut.decode()
if line.startswith("QUEUES "): if line.startswith("QUEUE "):
params = line.split() params = line.split()
return (int(params[1]), int(params[2])) return (int(params[1]), int(params[1]))
else: else:
self.process.kill() self.process.kill()
raise Exception("Invalid binary output") raise Exception("Invalid binary output")


@@ -48,10 +48,12 @@ class FiregexTables(NFTableManager):
def add(self, srv:Service, queue_range): def add(self, srv:Service, queue_range):
for ele in self.get(): for ele in self.get():
if ele.__eq__(srv): return if ele.__eq__(srv):
return
init, end = queue_range init, end = queue_range
if init > end: init, end = end, init if init > end:
init, end = end, init
self.cmd( self.cmd(
{ "insert":{ "rule": { { "insert":{ "rule": {
"family": "inet", "family": "inet",


@@ -15,7 +15,7 @@ puts(f"{args.address}", color=colors.yellow)
firegex = FiregexAPI(args.address) firegex = FiregexAPI(args.address)
#Connect to Firegex #Connect to Firegex
if firegex.status()["status"] =="init": if firegex.status()["status"] == "init":
if (firegex.set_password(args.password)): if (firegex.set_password(args.password)):
puts(f"Sucessfully set password to {args.password}", color=colors.green) puts(f"Sucessfully set password to {args.password}", color=colors.green)
else: else: