push: code changes x2

This commit is contained in:
Domingo Dirutigliano
2025-02-28 21:14:09 +01:00
parent 6a11dd0d16
commit 8ae533e8f7
31 changed files with 544 additions and 397 deletions

View File

@@ -219,7 +219,7 @@ if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.realpath(__file__)))
uvicorn.run(
"app:app",
host="::" if DEBUG else None,
host="0.0.0.0" if DEBUG else None,
port=FIREGEX_PORT,
reload=DEBUG and not NORELOAD,
access_log=True,

View File

@@ -7,6 +7,7 @@
#include <tins/tcp_ip/stream_identifier.h>
#include <libmnl/libmnl.h>
#include <tins/tins.h>
#include <map>
using namespace std;
@@ -17,7 +18,17 @@ enum class FilterAction{ DROP, ACCEPT, MANGLE, NOACTION };
enum class L4Proto { TCP, UDP, RAW };
typedef Tins::TCPIP::StreamIdentifier stream_id;
//TODO QUESTION: are the packets forwarded to Python already fixed (seq/ack adjusted)?
// Per-stream TCP sequence/ack offset context.
// Tracks the cumulative byte delta introduced by mangled packets in each
// direction so later packets can have their seq/ack numbers corrected
// (input packets: seq += in, ack -= out; output packets: ack -= in, seq += out).
// Signed (int64_t) because the offsets can be negative even though the
// underlying seq/ack fields are unsigned 32-bit values.
struct tcp_ack_seq_ctx{
	int64_t in = 0;   // cumulative offset for the input (client->server) direction
	int64_t out = 0;  // cumulative offset for the output (server->client) direction
	tcp_ack_seq_ctx(){}
	// Zero both offsets; used when a stream restarts and the context is reused.
	void reset(){
		in = 0;
		out = 0;
	}
};
typedef map<stream_id, tcp_ack_seq_ctx*> tcp_ack_map;
template<typename T>
class PktRequest {
@@ -28,6 +39,7 @@ class PktRequest {
uint32_t packet_id;
size_t _original_size;
size_t _data_original_size;
size_t _header_size;
bool need_tcp_fixing = false;
public:
bool is_ipv6;
@@ -39,18 +51,15 @@ class PktRequest {
bool is_input;
string packet;
char* data;
size_t data_size;
stream_id sid;
int64_t* tcp_in_offset = nullptr;
int64_t* tcp_out_offset = nullptr;
tcp_ack_seq_ctx* ack_seq_offset = nullptr;
T* ctx;
T* ctx = nullptr;
private:
static size_t inner_data_size(Tins::PDU* pdu){
static inline size_t inner_data_size(Tins::PDU* pdu){
if (pdu == nullptr){
return 0;
}
@@ -61,9 +70,9 @@ class PktRequest {
return inner->size();
}
inline void fetch_data_size(Tins::PDU* pdu){
data_size = inner_data_size(pdu);
_data_original_size = data_size;
inline void __internal_fetch_data_size(Tins::PDU* pdu){
_data_original_size = inner_data_size(pdu);
_header_size = _original_size - _data_original_size;
}
L4Proto fill_l4_info(){
@@ -72,14 +81,14 @@ class PktRequest {
if (tcp == nullptr){
udp = ipv6->find_pdu<Tins::UDP>();
if (udp == nullptr){
fetch_data_size(ipv6);
__internal_fetch_data_size(ipv6);
return L4Proto::RAW;
}else{
fetch_data_size(udp);
__internal_fetch_data_size(udp);
return L4Proto::UDP;
}
}else{
fetch_data_size(tcp);
__internal_fetch_data_size(tcp);
return L4Proto::TCP;
}
}else{
@@ -87,73 +96,23 @@ class PktRequest {
if (tcp == nullptr){
udp = ipv4->find_pdu<Tins::UDP>();
if (udp == nullptr){
fetch_data_size(ipv4);
__internal_fetch_data_size(ipv4);
return L4Proto::RAW;
}else{
fetch_data_size(udp);
__internal_fetch_data_size(udp);
return L4Proto::UDP;
}
}else{
fetch_data_size(tcp);
__internal_fetch_data_size(tcp);
return L4Proto::TCP;
}
}
}
bool need_tcp_fix(){
return (tcp_in_offset != nullptr && *tcp_in_offset != 0) || (tcp_out_offset != nullptr && *tcp_out_offset != 0);
return tcp && ack_seq_offset != nullptr && (ack_seq_offset->in != 0 || ack_seq_offset->out != 0);
}
Tins::PDU::serialization_type reserialize_raw_data(const uint8_t* data, const size_t& data_size){
if (is_ipv6){
Tins::IPv6 ipv6_new = Tins::IPv6(data, data_size);
if (tcp){
Tins::TCP* tcp_new = ipv6_new.find_pdu<Tins::TCP>();
}
return ipv6_new.serialize();
}else{
Tins::IP ipv4_new = Tins::IP(data, data_size);
if (tcp){
Tins::TCP* tcp_new = ipv4_new.find_pdu<Tins::TCP>();
}
return ipv4_new.serialize();
}
}
void _fix_ack_seq_tcp(Tins::TCP* this_tcp){
need_tcp_fixing = need_tcp_fix();
#ifdef DEBUG
if (need_tcp_fixing){
cerr << "[DEBUG] Fixing ack_seq with offsets " << *tcp_in_offset << " " << *tcp_out_offset << endl;
}
#endif
if(this_tcp == nullptr){
return;
}
if (is_input){
if (tcp_in_offset != nullptr){
this_tcp->seq(this_tcp->seq() + *tcp_in_offset);
}
if (tcp_out_offset != nullptr){
this_tcp->ack_seq(this_tcp->ack_seq() - *tcp_out_offset);
}
}else{
if (tcp_in_offset != nullptr){
this_tcp->ack_seq(this_tcp->ack_seq() - *tcp_in_offset);
}
if (tcp_out_offset != nullptr){
this_tcp->seq(this_tcp->seq() + *tcp_out_offset);
}
}
#ifdef DEBUG
if (need_tcp_fixing){
size_t new_size = inner_data_size(this_tcp);
cerr << "[DEBUG] FIXED PKT " << (is_input?"-> IN ":"<- OUT") << " [SEQ: " << this_tcp->seq() << "] \t[ACK: " << this_tcp->ack_seq() << "] \t[SIZE: " << new_size << "]" << endl;
}
#endif
}
public:
PktRequest(const char* payload, size_t plen, T* ctx, mnl_socket* nl, nfgenmsg *nfg, nfqnl_msg_packet_hdr *ph, bool is_input):
@@ -168,22 +127,129 @@ class PktRequest {
sid = stream_id::make_identifier(*ipv6);
_original_size = ipv6->size();
}else{
ipv4 = new Tins::IP((uint8_t*)packet.data(), plen);
ipv4 = new Tins::IP((uint8_t*)packet.c_str(), plen);
sid = stream_id::make_identifier(*ipv4);
_original_size = ipv4->size();
}
l4_proto = fill_l4_info();
data = packet.data()+(plen-data_size);
#ifdef DEBUG
if (tcp){
cerr << "[DEBUG] NEW_PACKET " << (is_input?"-> IN ":"<- OUT") << " [SEQ: " << tcp->seq() << "] \t[ACK: " << tcp->ack_seq() << "] \t[SIZE: " << data_size << "]" << endl;
cerr << "[DEBUG] NEW_PACKET " << (is_input?"-> IN ":"<- OUT") << " [SEQ: " << tcp->seq() << "] \t[ACK: " << tcp->ack_seq() << "] \t[SIZE: " << data_size() << "]" << endl;
}
#endif
}
void fix_tcp_ack(){
inline size_t header_size(){
return _header_size;
}
char* data(){
return packet.data()+_header_size;
}
size_t data_size(){
return packet.size()-_header_size;
}
size_t data_original_size(){
return _data_original_size;
}
void reserialize(){
auto data = serialize();
packet.resize(data.size());
memcpy(packet.data(), data.data(), data.size());
}
void set_data(const char* data, const size_t& data_size){
auto bef_raw = before_raw_pdu_ptr();
if (bef_raw){
delete before_raw_pdu_ptr()->release_inner_pdu();
if (data_size > 0){
before_raw_pdu_ptr() /= move(Tins::RawPDU((uint8_t*)data, data_size));
}
}
}
Tins::PDU* before_raw_pdu_ptr(){
if (tcp){
_fix_ack_seq_tcp(tcp);
return tcp;
}else if (udp){
return udp;
}else if (ipv4){
return ipv4;
}else if (ipv6){
return ipv6;
}
return nullptr;
}
void set_packet(const char* data, size_t data_size){
// Parsing only the header with libtins
Tins::PDU *data_pdu = nullptr;
size_t total_size;
if (is_ipv6){
delete ipv6;
ipv6 = new Tins::IPv6((uint8_t*)data, data_size);
if (tcp){
tcp = ipv6->find_pdu<Tins::TCP>();
data_pdu = tcp;
}else if (udp){
udp = ipv6->find_pdu<Tins::UDP>();
data_pdu = udp;
}else{
data_pdu = ipv6;
}
total_size = ipv6->size();
}else{
delete ipv4;
ipv4 = new Tins::IP((uint8_t*)data, data_size);
if (tcp){
tcp = ipv4->find_pdu<Tins::TCP>();
data_pdu = tcp;
}else if(udp){
udp = ipv4->find_pdu<Tins::UDP>();
data_pdu = udp;
}else{
data_pdu = ipv4;
}
total_size = ipv4->size();
}
_header_size = total_size - inner_data_size(data_pdu);
// Libtins can skip data if the length is changed to a bigger value (due to the IP header total length), so we need to specify the data section manually
set_data(data+_header_size, data_size-_header_size);
}
void fix_tcp_ack(){
need_tcp_fixing = need_tcp_fix();
if(!need_tcp_fixing){
return;
}
#ifdef DEBUG
cerr << "[DEBUG] Fixing ack_seq with offsets " << ((int32_t)ack_seq_offset->in) << " " << ((int32_t)ack_seq_offset->out) << endl;
#endif
if (is_input){
tcp->seq(tcp->seq() + ack_seq_offset->in);
tcp->ack_seq(tcp->ack_seq() - ack_seq_offset->out);
}else{
tcp->ack_seq(tcp->ack_seq() - ack_seq_offset->in);
tcp->seq(tcp->seq() + ack_seq_offset->out);
}
#ifdef DEBUG
size_t new_size = inner_data_size(tcp);
cerr << "[DEBUG] FIXED PKT " << (is_input?"-> IN ":"<- OUT") << " [SEQ: " << tcp->seq() << "] \t[ACK: " << tcp->ack_seq() << "] \t[SIZE: " << new_size << "]" << endl;
#endif
}
void fix_data_payload(){
//The stream follower moves the payload data, so we need to reinitialize the RawPDU
auto bef_raw = before_raw_pdu_ptr();
if (bef_raw){
delete bef_raw->release_inner_pdu();
auto new_data_size = packet.size()-_header_size;
if (new_data_size > 0){
bef_raw /= move(Tins::RawPDU((uint8_t*)packet.data()+_header_size, new_data_size));
}
}
}
@@ -196,10 +262,6 @@ class PktRequest {
}
}
size_t data_original_size(){
return _data_original_size;
}
size_t original_size(){
return _original_size;
}
@@ -225,11 +287,11 @@ class PktRequest {
void reject(){
if (tcp){
//If the packet has data, we have to remove it
delete tcp->release_inner_pdu();
set_data(nullptr, 0);
//For the first matched data or only for data packets, we set FIN bit
//This only for client packets, because this will trigger server to close the connection
//Packets will be filtered anyway also if client don't send packets
if (_data_original_size != 0 && is_input){
if (_data_original_size != 0){
tcp->set_flag(Tins::TCP::FIN,1);
tcp->set_flag(Tins::TCP::ACK,1);
tcp->set_flag(Tins::TCP::SYN,0);
@@ -241,10 +303,16 @@ class PktRequest {
}
}
void mangle_custom_pkt(uint8_t* pkt, const size_t& pkt_size){
void mangle_custom_pkt(const char* raw_pkt, size_t raw_pkt_size){
if (action == FilterAction::NOACTION){
action = FilterAction::MANGLE;
perfrom_action(pkt, pkt_size);
try{
set_packet(raw_pkt, raw_pkt_size);
reserialize();
action = FilterAction::MANGLE;
}catch(...){
action = FilterAction::DROP;
}
perfrom_action(false);
}else{
throw invalid_argument("Cannot mangle a packet that has already been accepted or dropped");
}
@@ -259,7 +327,7 @@ class PktRequest {
delete ipv6;
}
inline Tins::PDU::serialization_type serialize(){
Tins::PDU::serialization_type serialize(){
if (is_ipv6){
return ipv6->serialize();
}else{
@@ -268,15 +336,17 @@ class PktRequest {
}
private:
void perfrom_action(uint8_t* custom_data = nullptr, size_t custom_data_size = 0){
void perfrom_action(bool do_serialize = true){
char buf[MNL_SOCKET_BUFFER_SIZE];
struct nlmsghdr *nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(res_id));
switch (action)
{
case FilterAction::ACCEPT:
if (need_tcp_fixing){
Tins::PDU::serialization_type data = serialize();
nfq_nlmsg_verdict_put_pkt(nlh_verdict, data.data(), data.size());
if (do_serialize){
reserialize();
}
nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.data(), packet.size());
}
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(packet_id), NF_ACCEPT );
break;
@@ -285,32 +355,20 @@ class PktRequest {
break;
case FilterAction::MANGLE:{
//If not custom data, use the data in the packets
Tins::PDU::serialization_type data;
if (custom_data == nullptr){
data = serialize();
}else{
try{
data = reserialize_raw_data(custom_data, custom_data_size);
}catch(...){
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(packet_id), NF_DROP );
action = FilterAction::DROP;
break;
}
if(do_serialize){
reserialize();
}
nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.data(), packet.size());
#ifdef DEBUG
size_t new_size = _data_original_size+((int64_t)custom_data_size) - ((int64_t)_original_size);
cerr << "[DEBUG] MANGLEDPKT " << (is_input?"-> IN ":"<- OUT") << " [SIZE: " << new_size << "]" << endl;
cerr << "[DEBUG] MANGLEDPKT " << (is_input?"-> IN ":"<- OUT") << " [SIZE: " << packet.size()-header_size() << "]" << endl;
#endif
if (tcp && custom_data_size != _original_size){
int64_t delta = ((int64_t)custom_data_size) - ((int64_t)_original_size);
if (is_input && tcp_in_offset != nullptr){
*tcp_in_offset += delta;
}else if (!is_input && tcp_out_offset != nullptr){
*tcp_out_offset += delta;
if (tcp && ack_seq_offset && packet.size() != _original_size){
if (is_input){
ack_seq_offset->in += packet.size() - _original_size;
}else{
ack_seq_offset->out += packet.size() - _original_size;
}
}
nfq_nlmsg_verdict_put_pkt(nlh_verdict, data.data(), data.size());
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(packet_id), NF_ACCEPT );
break;
}

View File

@@ -16,6 +16,7 @@
#include <syncstream>
#include <iostream>
#include "../classes/netfilter.cpp"
#include "../classes/nfqueue.cpp"
#include "stream_ctx.cpp"
#include "settings.cpp"
#include <Python.h>
@@ -46,7 +47,7 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
};
PyThreadState *tstate = NULL;
NfQueue::PktRequest<PyProxyQueue>* pkt;
tcp_ack_seq_ctx* current_tcp_ack = nullptr;
NfQueue::tcp_ack_seq_ctx* current_tcp_ack = nullptr;
void before_loop() override {
PyStatus pystatus;
@@ -89,7 +90,7 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
pyq->pkt->drop();// This is needed because the callback has to take the updated pkt pointer!
}
void filter_action(NfQueue::PktRequest<PyProxyQueue>* pkt, Stream& stream){
void filter_action(NfQueue::PktRequest<PyProxyQueue>* pkt, Stream& stream, const string& data){
auto stream_search = sctx.streams_ctx.find(pkt->sid);
pyfilter_ctx* stream_match;
if (stream_search == sctx.streams_ctx.end()){
@@ -108,7 +109,7 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
stream_match = stream_search->second;
}
auto result = stream_match->handle_packet(pkt);
auto result = stream_match->handle_packet(pkt, data);
switch(result.action){
case PyFilterResponse::ACCEPT:
return pkt->accept();
@@ -125,7 +126,7 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
stream.server_data_callback(bind(keep_fin_packet, this));
return pkt->reject();
case PyFilterResponse::MANGLE:
pkt->mangle_custom_pkt((uint8_t*)result.mangled_packet->data(), result.mangled_packet->size());
pkt->mangle_custom_pkt(result.mangled_packet->c_str(), result.mangled_packet->size());
if (pkt->get_action() == NfQueue::FilterAction::DROP){
cerr << "[error] [filter_action] Failed to mangle: the packet sent is not serializzable... the packet was dropped" << endl;
print_blocked_reason(*result.filter_match_by);
@@ -146,20 +147,21 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
}
static void on_data_recv(Stream& stream, PyProxyQueue* proxy_info, string data) {
proxy_info->pkt->data = data.data();
proxy_info->pkt->data_size = data.size();
proxy_info->filter_action(proxy_info->pkt, stream);
static void on_data_recv(Stream& stream, PyProxyQueue* pyq, const string& data) {
pyq->pkt->fix_data_payload();
pyq->filter_action(pyq->pkt, stream, data); //Only here the rebuilt_tcp_data is set
}
//Input data filtering
static void on_client_data(Stream& stream, PyProxyQueue* proxy_info) {
on_data_recv(stream, proxy_info, string(stream.client_payload().begin(), stream.client_payload().end()));
static void on_client_data(Stream& stream, PyProxyQueue* pyq) {
auto data = stream.client_payload();
on_data_recv(stream, pyq, string((char*)data.data(), data.size()));
}
//Server data filtering
static void on_server_data(Stream& stream, PyProxyQueue* proxy_info) {
on_data_recv(stream, proxy_info, string(stream.server_payload().begin(), stream.server_payload().end()));
static void on_server_data(Stream& stream, PyProxyQueue* pyq) {
auto data = stream.server_payload();
on_data_recv(stream, pyq, string((char*)data.data(), data.size()));
}
// A stream was terminated. The second argument is the reason why it was terminated
@@ -178,10 +180,9 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
if (pyq->current_tcp_ack != nullptr){
pyq->current_tcp_ack->reset();
}else{
pyq->current_tcp_ack = new tcp_ack_seq_ctx();
pyq->current_tcp_ack = new NfQueue::tcp_ack_seq_ctx();
pyq->sctx.tcp_ack_ctx.insert_or_assign(pyq->pkt->sid, pyq->current_tcp_ack);
pyq->pkt->tcp_in_offset = &pyq->current_tcp_ack->in_tcp_offset;
pyq->pkt->tcp_out_offset = &pyq->current_tcp_ack->out_tcp_offset;
pyq->pkt->ack_seq_offset = pyq->current_tcp_ack; // Set ack context
}
//Should not happen, but with this we can be sure about this
@@ -205,18 +206,17 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue<PyProxyQueue> {
auto tcp_ack_search = sctx.tcp_ack_ctx.find(pkt->sid);
if (tcp_ack_search != sctx.tcp_ack_ctx.end()){
current_tcp_ack = tcp_ack_search->second;
pkt->tcp_in_offset = &current_tcp_ack->in_tcp_offset;
pkt->tcp_out_offset = &current_tcp_ack->out_tcp_offset;
pkt->ack_seq_offset = current_tcp_ack;
}else{
current_tcp_ack = nullptr;
//If necessary, it will be created by the libtins new_stream callback
}
pkt->fix_tcp_ack();
if (pkt->is_ipv6){
pkt->fix_tcp_ack();
follower.process_packet(*pkt->ipv6);
}else{
pkt->fix_tcp_ack();
follower.process_packet(*pkt->ipv4);
}

View File

@@ -7,7 +7,9 @@
#include <map>
#include <Python.h>
#include "../classes/netfilter.cpp"
#include "../classes/nfqueue.cpp"
#include "settings.cpp"
#include "../utils.cpp"
using namespace std;
@@ -50,17 +52,6 @@ struct py_filter_response {
typedef Tins::TCPIP::StreamIdentifier stream_id;
struct tcp_ack_seq_ctx{
//Can be negative, so we use int64_t (for a uint64_t value)
int64_t in_tcp_offset = 0;
int64_t out_tcp_offset = 0;
tcp_ack_seq_ctx(){}
void reset(){
in_tcp_offset = 0;
out_tcp_offset = 0;
}
};
struct pyfilter_ctx {
PyObject * glob = nullptr;
@@ -105,12 +96,14 @@ struct pyfilter_ctx {
}
py_filter_response handle_packet(
NfQueue::PktRequest<PyProxyQueue>* pkt
NfQueue::PktRequest<PyProxyQueue>* pkt,
const string& data
){
PyObject * packet_info = PyDict_New();
set_item_to_dict(packet_info, "data", PyBytes_FromStringAndSize(pkt->data, pkt->data_size));
set_item_to_dict(packet_info, "l4_size", PyLong_FromLong(pkt->data_original_size()));
pkt->reserialize();
set_item_to_dict(packet_info, "data", PyBytes_FromStringAndSize(data.c_str(), data.size()));
set_item_to_dict(packet_info, "l4_size", PyLong_FromLong(pkt->data_size()));
set_item_to_dict(packet_info, "raw_packet", PyBytes_FromStringAndSize(pkt->packet.c_str(), pkt->packet.size()));
set_item_to_dict(packet_info, "is_input", PyBool_FromLong(pkt->is_input));
set_item_to_dict(packet_info, "is_ipv6", PyBool_FromLong(pkt->is_ipv6));
@@ -129,8 +122,7 @@ struct pyfilter_ctx {
#endif
return py_filter_response(PyFilterResponse::EXCEPTION);
}
Py_DECREF(result);
result = get_item_from_glob("__firegex_pyfilter_result");
@@ -235,11 +227,12 @@ struct pyfilter_ctx {
};
typedef map<stream_id, pyfilter_ctx*> matching_map;
typedef map<stream_id, tcp_ack_seq_ctx*> tcp_ack_map;
struct stream_ctx {
matching_map streams_ctx;
tcp_ack_map tcp_ack_ctx;
NfQueue::tcp_ack_map tcp_ack_ctx;
void clean_stream_by_id(stream_id sid){
auto stream_search = streams_ctx.find(sid);

View File

@@ -20,6 +20,7 @@
#include "../classes/netfilter.cpp"
#include "stream_ctx.cpp"
#include "regex_rules.cpp"
#include "../utils.cpp"
using namespace std;
@@ -30,8 +31,6 @@ namespace Regex {
using Tins::TCPIP::Stream;
using Tins::TCPIP::StreamFollower;
class RegexNfQueue : public NfQueue::ThreadNfQueue<RegexNfQueue> {
public:
stream_ctx sctx;
@@ -39,7 +38,7 @@ public:
StreamFollower follower;
NfQueue::PktRequest<RegexNfQueue>* pkt;
bool filter_action(NfQueue::PktRequest<RegexNfQueue>* pkt){
bool filter_action(NfQueue::PktRequest<RegexNfQueue>* pkt, const string& data){
shared_ptr<RegexRules> conf = regex_config;
auto current_version = conf->ver();
@@ -85,12 +84,12 @@ public:
stream_match = stream_search->second;
}
err = hs_scan_stream(
stream_match,pkt->data, pkt->data_size,
stream_match, data.c_str(), data.size(),
0, scratch_space, match_func, &match_res
);
}else{
err = hs_scan(
regex_matcher,pkt->data, pkt->data_size,
regex_matcher, data.c_str(), data.size(),
0, scratch_space, match_func, &match_res
);
}
@@ -102,7 +101,7 @@ public:
throw invalid_argument("Cannot close stream match on hyperscan");
}
if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) {
cerr << "[error] [filter_callback] Error while matching the stream (hs)" << endl;
cerr << "[error] [filter_callback] Error while matching the stream (hs) " << err << endl;
throw invalid_argument("Error while matching the stream with hyperscan");
}
if (match_res.has_matched){
@@ -113,41 +112,13 @@ public:
return true;
}
void handle_next_packet(NfQueue::PktRequest<RegexNfQueue>* _pkt) override{
pkt = _pkt; // Setting packet context
if (pkt->tcp){
if (pkt->ipv4){
follower.process_packet(*pkt->ipv4);
}else{
follower.process_packet(*pkt->ipv6);
}
//Fallback to the default action
if (pkt->get_action() == NfQueue::FilterAction::NOACTION){
return pkt->accept();
}
}else{
if (!pkt->udp){
throw invalid_argument("Only TCP and UDP are supported");
}
if(pkt->data_size == 0){
return pkt->accept();
}else if (filter_action(pkt)){
return pkt->accept();
}else{
return pkt->drop();
}
}
}
//If the stream has already been matched, drop all data, and try to close the connection
static void keep_fin_packet(RegexNfQueue* nfq){
nfq->pkt->reject();// This is needed because the callback has to take the updated pkt pointer!
nfq->pkt->reject(); // This is needed because the callback has to take the updated pkt pointer!
}
static void on_data_recv(Stream& stream, RegexNfQueue* nfq, string data) {
nfq->pkt->data = data.data();
nfq->pkt->data_size = data.size();
if (!nfq->filter_action(nfq->pkt)){
static void on_data_recv(Stream& stream, RegexNfQueue* nfq, const string& data) {
if (!nfq->filter_action(nfq->pkt, data)){
nfq->sctx.clean_stream_by_id(nfq->pkt->sid);
stream.client_data_callback(bind(keep_fin_packet, nfq));
stream.server_data_callback(bind(keep_fin_packet, nfq));
@@ -157,12 +128,14 @@ public:
//Input data filtering
static void on_client_data(Stream& stream, RegexNfQueue* nfq) {
on_data_recv(stream, nfq, string(stream.client_payload().begin(), stream.client_payload().end()));
auto data = stream.client_payload();
on_data_recv(stream, nfq, string((char*)data.data(), data.size()));
}
//Server data filtering
static void on_server_data(Stream& stream, RegexNfQueue* nfq) {
on_data_recv(stream, nfq, string(stream.server_payload().begin(), stream.server_payload().end()));
auto data = stream.server_payload();
on_data_recv(stream, nfq, string((char*)data.data(), data.size()));
}
// A stream was terminated. The second argument is the reason why it was terminated
@@ -181,6 +154,32 @@ public:
stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, nfq));
}
void handle_next_packet(NfQueue::PktRequest<RegexNfQueue>* _pkt) override{
pkt = _pkt; // Setting packet context
if (pkt->tcp){
if (pkt->ipv4){
follower.process_packet(*pkt->ipv4);
}else{
follower.process_packet(*pkt->ipv6);
}
//Fallback to the default action
if (pkt->get_action() == NfQueue::FilterAction::NOACTION){
return pkt->accept();
}
}else{
if (!pkt->udp){
throw invalid_argument("Only TCP and UDP are supported");
}
if(pkt->data_size() == 0){
return pkt->accept();
}else if (filter_action(pkt, string(pkt->data(), pkt->data_size()))){
return pkt->accept();
}else{
return pkt->drop();
}
}
}
void before_loop() override{
follower.new_stream_callback(bind(on_new_stream, placeholders::_1, this));
follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, this));

View File

@@ -5,9 +5,12 @@ import asyncio
import traceback
from fastapi import HTTPException
import time
from utils import run_func
nft = FiregexTables()
OUTSTREAM_BUFFER_SIZE = 1024*10
class FiregexInterceptor:
def __init__(self):
@@ -28,14 +31,20 @@ class FiregexInterceptor:
self.sock_writer:asyncio.StreamWriter = None
self.sock_conn_lock:asyncio.Lock
self.last_time_exception = 0
self.outstrem_function = None
self.expection_function = None
self.outstrem_task: asyncio.Task
self.outstrem_buffer = ""
@classmethod
async def start(cls, srv: Service):
async def start(cls, srv: Service, outstream_func=None, exception_func=None):
self = cls()
self.srv = srv
self.filter_map_lock = asyncio.Lock()
self.update_config_lock = asyncio.Lock()
self.sock_conn_lock = asyncio.Lock()
self.outstrem_function = outstream_func
self.expection_function = exception_func
if not self.sock_conn_lock.locked():
await self.sock_conn_lock.acquire()
self.sock_path = f"/tmp/firegex_nfproxy_{srv.id}.sock"
@@ -50,16 +59,37 @@ class FiregexInterceptor:
await self.ack_lock.acquire()
return self
async def _stream_handler(self):
while True:
try:
line = (await self.process.stdout.readuntil()).decode(errors="ignore")
print(line, end="")
except Exception as e:
self.ack_arrived = False
self.ack_status = False
self.ack_fail_what = "Can't read from nfq client"
self.ack_lock.release()
await self.stop()
raise HTTPException(status_code=500, detail="Can't read from nfq client") from e
self.outstrem_buffer+=line
if len(self.outstrem_buffer) > OUTSTREAM_BUFFER_SIZE:
self.outstrem_buffer = self.outstrem_buffer[-OUTSTREAM_BUFFER_SIZE:]+"\n"
if self.outstrem_function:
await run_func(self.outstrem_function, self.srv.id, line)
async def _start_binary(self):
proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cpproxy")
self.process = await asyncio.create_subprocess_exec(
proxy_binary_path, stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
env={
"NTHREADS": os.getenv("NTHREADS","1"),
"FIREGEX_NFQUEUE_FAIL_OPEN": "1" if self.srv.fail_open else "0",
"FIREGEX_NFPROXY_SOCK": self.sock_path
},
)
self.outstrem_task = asyncio.create_task(self._stream_handler())
try:
async with asyncio.timeout(3):
await self.sock_conn_lock.acquire()
@@ -101,9 +131,7 @@ class FiregexInterceptor:
filter_name = line.split()[1]
print("BLOCKED", filter_name)
async with self.filter_map_lock:
print("LOCKED MAP LOCK")
if filter_name in self.filter_map:
print("ADDING BLOCKED PACKET")
self.filter_map[filter_name].blocked_packets+=1
await self.filter_map[filter_name].update()
if line.startswith("MANGLED "):
@@ -113,8 +141,9 @@ class FiregexInterceptor:
self.filter_map[filter_name].edited_packets+=1
await self.filter_map[filter_name].update()
if line.startswith("EXCEPTION"):
self.last_time_exception = time.time()
print("TODO EXCEPTION HANDLING") # TODO
self.last_time_exception = int(time.time()*1000) #ms timestamp
if self.expection_function:
await run_func(self.expection_function, self.srv.id, self.last_time_exception)
if line.startswith("ACK "):
self.ack_arrived = True
self.ack_status = line.split()[1].upper() == "OK"
@@ -132,6 +161,7 @@ class FiregexInterceptor:
self.server_task.cancel()
self.update_task.cancel()
self.unix_sock.close()
self.outstrem_task.cancel()
if os.path.exists(self.sock_path):
os.remove(self.sock_path)
if self.process and self.process.returncode is None:

View File

@@ -3,6 +3,7 @@ from modules.nfproxy.firegex import FiregexInterceptor
from modules.nfproxy.nftables import FiregexTables, FiregexFilter
from modules.nfproxy.models import Service, PyFilter
from utils.sqlite import SQLite
from utils import run_func
class STATUS:
STOP = "stop"
@@ -11,13 +12,20 @@ class STATUS:
nft = FiregexTables()
class ServiceManager:
def __init__(self, srv: Service, db):
def __init__(self, srv: Service, db, outstream_func=None, exception_func=None):
self.srv = srv
self.db = db
self.status = STATUS.STOP
self.filters: dict[str, FiregexFilter] = {}
self.lock = asyncio.Lock()
self.interceptor = None
self.outstream_function = outstream_func
self.last_exception_time = 0
async def excep_internal_handler(srv, exc_time):
self.last_exception_time = exc_time
if exception_func:
await run_func(exception_func, srv, exc_time)
self.exception_function = excep_internal_handler
async def _update_filters_from_db(self):
pyfilters = [
@@ -52,10 +60,16 @@ class ServiceManager:
self.status = status
self.__update_status_db(status)
def read_outstrem_buffer(self):
if self.interceptor:
return self.interceptor.outstrem_buffer
else:
return ""
async def start(self):
if not self.interceptor:
nft.delete(self.srv)
self.interceptor = await FiregexInterceptor.start(self.srv)
self.interceptor = await FiregexInterceptor.start(self.srv, outstream_func=self.outstream_function, exception_func=self.exception_function)
await self._update_filters_from_db()
self._set_status(STATUS.ACTIVE)
@@ -75,10 +89,12 @@ class ServiceManager:
await self._update_filters_from_db()
class FirewallManager:
def __init__(self, db:SQLite):
def __init__(self, db:SQLite, outstream_func=None, exception_func=None):
self.db = db
self.service_table: dict[str, ServiceManager] = {}
self.lock = asyncio.Lock()
self.outstream_function = outstream_func
self.exception_function = exception_func
async def close(self):
for key in list(self.service_table.keys()):
@@ -100,7 +116,7 @@ class FirewallManager:
srv = Service.from_dict(srv)
if srv.id in self.service_table:
continue
self.service_table[srv.id] = ServiceManager(srv, self.db)
self.service_table[srv.id] = ServiceManager(srv, self.db, outstream_func=self.outstream_function, exception_func=self.exception_function)
await self.service_table[srv.id].next(srv.status)
def get(self,srv_id) -> ServiceManager:

View File

@@ -28,22 +28,22 @@ class FiregexTables(NFTableManager):
def __init__(self):
super().__init__([
{"add":{"chain":{
{"add":{"chain":{ #Input chain attached before conntrack see it
"family":"inet",
"table":self.table_name,
"name":self.input_chain,
"type":"filter",
"hook":"prerouting",
"prio":-150,
"prio":-301,
"policy":"accept"
}}},
{"add":{"chain":{
{"add":{"chain":{ #Output chain attached after conntrack saw it
"family":"inet",
"table":self.table_name,
"name":self.output_chain,
"type":"filter",
"hook":"postrouting",
"prio":-150,
"prio":-290,
"policy":"accept"
}}}
],[

View File

@@ -26,7 +26,7 @@ class FiregexTables(NFTableManager):
"name":self.input_chain,
"type":"filter",
"hook":"prerouting",
"prio":-150,
"prio":-301,
"policy":"accept"
}}},
{"add":{"chain":{
@@ -35,7 +35,7 @@ class FiregexTables(NFTableManager):
"name":self.output_chain,
"type":"filter",
"hook":"postrouting",
"prio":-150,
"prio":-301,
"policy":"accept"
}}}
],[

View File

@@ -28,7 +28,7 @@ class FiregexTables(NFTableManager):
"name":self.prerouting_porthijack,
"type":"filter",
"hook":"prerouting",
"prio":-300,
"prio":-310,
"policy":"accept"
}}},
{"add":{"chain":{
@@ -37,7 +37,7 @@ class FiregexTables(NFTableManager):
"name":self.postrouting_porthijack,
"type":"filter",
"hook":"postrouting",
"prio":-300,
"prio":-310,
"policy":"accept"
}}}
],[

View File

@@ -14,6 +14,7 @@ from modules.nfproxy.nftables import convert_protocol_to_l4
import asyncio
import traceback
from utils import DEBUG
import utils
class ServiceModel(BaseModel):
service_id: str
@@ -107,6 +108,10 @@ async def startup():
await firewall.init()
except Exception as e:
print("WARNING cannot start firewall:", e)
utils.socketio.on("nfproxy-outstream-join", join_outstream)
utils.socketio.on("nfproxy-outstream-leave", leave_outstream)
utils.socketio.on("nfproxy-exception-join", join_exception)
utils.socketio.on("nfproxy-exception-leave", leave_exception)
async def shutdown():
db.backup()
@@ -121,7 +126,13 @@ def gen_service_id():
break
return res
firewall = FirewallManager(db)
async def outstream_func(service_id, data):
await utils.socketio.emit(f"nfproxy-outstream-{service_id}", data, room=f"nfproxy-outstream-{service_id}")
async def exception_func(service_id, timestamp):
await utils.socketio.emit(f"nfproxy-exception-{service_id}", timestamp, room=f"nfproxy-exception-{service_id}")
firewall = FirewallManager(db, outstream_func=outstream_func, exception_func=exception_func)
@app.get('/services', response_model=list[ServiceModel])
async def get_service_list():
@@ -355,3 +366,33 @@ async def get_pyfilters(service_id: str):
return f.read()
except FileNotFoundError:
return ""
#Socket io events
async def join_outstream(sid, data):
"""Client joins a room."""
srv = data.get("service")
if srv:
room = f"nfproxy-outstream-{srv}"
await utils.socketio.enter_room(sid, room)
await utils.socketio.emit(room, firewall.get(srv).read_outstrem_buffer(), room=sid)
async def leave_outstream(sid, data):
"""Client leaves a room."""
srv = data.get("service")
if srv:
await utils.socketio.leave_room(sid, f"nfproxy-outstream-{srv}")
async def join_exception(sid, data):
"""Client joins a room."""
srv = data.get("service")
if srv:
room = f"nfproxy-exception-{srv}"
await utils.socketio.enter_room(sid, room)
await utils.socketio.emit(room, firewall.get(srv).last_exception_time, room=sid)
async def leave_exception(sid, data):
"""Client leaves a room."""
srv = data.get("service")
if srv:
await utils.socketio.leave_room(sid, f"nfproxy-exception-{srv}")

View File

@@ -8,6 +8,7 @@ from fastapi.responses import FileResponse
from utils import DEBUG, ON_DOCKER, ROUTERS_DIR, list_files, run_func
from utils.models import ResetRequest
import asyncio
import traceback
REACT_BUILD_DIR: str = "../frontend/build/" if not ON_DOCKER else "frontend/"
REACT_HTML_PATH: str = os.path.join(REACT_BUILD_DIR,"index.html")
@@ -70,6 +71,7 @@ def get_router_modules():
name=route
))
except Exception as e:
traceback.print_exc()
print(f"Router {route} failed to load: {e}")
return res