improves on the nfregex binary x3
@@ -16,7 +16,7 @@ RUN bun run build
 #Building main conteiner
 FROM --platform=$TARGETARCH debian:stable-slim AS base
 RUN apt-get update -qq && apt-get upgrade -qq && \
-    apt-get install -qq python3-pip \
+    apt-get install -qq python3-pip build-essentials \
     libnetfilter-queue-dev libnfnetlink-dev libmnl-dev libcap2-bin\
     nftables libvectorscan-dev libtins-dev python3-nftables

@@ -17,12 +17,42 @@ using Tins::TCPIP::Stream;
 using Tins::TCPIP::StreamFollower;
 using namespace std;


 #ifndef NETFILTER_CLASSES_HPP
 #define NETFILTER_CLASSES_HPP
 typedef Tins::TCPIP::StreamIdentifier stream_id;
 typedef map<stream_id, hs_stream_t*> matching_map;

+/* Considering to use unorder_map using this hash of stream_id
+
+namespace std {
+    template<>
+    struct hash<stream_id> {
+        size_t operator()(const stream_id& sid) const
+        {
+            return std::hash<std::uint32_t>()(sid.max_address[0] + sid.max_address[1] + sid.max_address[2] + sid.max_address[3] + sid.max_address_port + sid.min_address[0] + sid.min_address[1] + sid.min_address[2] + sid.min_address[3] + sid.min_address_port);
+        }
+    };
+}
+
+*/
+
+#ifdef DEBUG
+ostream& operator<<(ostream& os, const Tins::TCPIP::StreamIdentifier::address_type &sid){
+    bool first_print = false;
+    for (auto ele: sid){
+        if (first_print || ele){
+            first_print = true;
+            os << (int)ele << ".";
+        }
+    }
+    return os;
+}
+
+ostream& operator<<(ostream& os, const stream_id &sid){
+    os << sid.max_address << ":" << sid.max_address_port << " -> " << sid.min_address << ":" << sid.min_address_port;
+    return os;
+}
+#endif
+
 struct packet_info;

@@ -52,6 +82,60 @@ struct stream_ctx {
             in_scratch = nullptr;
         }
     }

+    void clean_stream_by_id(stream_id sid){
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.clean_stream_by_id] Cleaning stream context of " << sid << endl;
+        #endif
+        auto stream_search = in_hs_streams.find(sid);
+        hs_stream_t* stream_match;
+        if (stream_search != in_hs_streams.end()){
+            stream_match = stream_search->second;
+            if (hs_close_stream(stream_match, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                throw invalid_argument("Cannot close stream match on hyperscan");
+            }
+            in_hs_streams.erase(stream_search);
+        }
+
+        stream_search = out_hs_streams.find(sid);
+        if (stream_search != out_hs_streams.end()){
+            stream_match = stream_search->second;
+            if (hs_close_stream(stream_match, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                throw invalid_argument("Cannot close stream match on hyperscan");
+            }
+            out_hs_streams.erase(stream_search);
+        }
+    }
+
+    void clean(){
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.clean] Cleaning stream context" << endl;
+        #endif
+        if (in_scratch){
+            for(auto ele: in_hs_streams){
+                if (hs_close_stream(ele.second, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                    cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                    throw invalid_argument("Cannot close stream match on hyperscan");
+                }
+            }
+            in_hs_streams.clear();
+        }
+        if (out_scratch){
+            for(auto ele: out_hs_streams){
+                if (hs_close_stream(ele.second, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                    cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                    throw invalid_argument("Cannot close stream match on hyperscan");
+                }
+            }
+            out_hs_streams.clear();
+        }
+        clean_scratches();
+    }
 };

 struct packet_info {
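For context, the per-connection state that stream_ctx manages above follows Hyperscan's streaming lifecycle: a stream is opened against a database compiled in HS_MODE_STREAM, fed data in chunks with hs_scan_stream, and every opened stream must eventually be closed with hs_close_stream using a valid scratch space. A minimal self-contained sketch of that lifecycle follows; the pattern string, header path, and on_match handler are illustrative only and not taken from this codebase.

// Minimal sketch of the Hyperscan/Vectorscan streaming lifecycle handled by stream_ctx.
// Pattern, include path and error handling are illustrative assumptions.
#include <hs/hs.h>
#include <cstdio>

// Match handler: returning non-zero tells Hyperscan to stop scanning this stream.
static int on_match(unsigned int id, unsigned long long from,
                    unsigned long long to, unsigned int flags, void *ctx) {
    *static_cast<bool*>(ctx) = true;
    return 1;
}

int main() {
    hs_database_t *db = nullptr;
    hs_compile_error_t *err = nullptr;
    // Streaming-mode database: required for hs_open_stream / hs_scan_stream.
    if (hs_compile("flag\\{[a-zA-Z0-9]+\\}", HS_FLAG_DOTALL, HS_MODE_STREAM,
                   nullptr, &db, &err) != HS_SUCCESS) {
        hs_free_compile_error(err);
        return 1;
    }
    hs_scratch_t *scratch = nullptr;
    hs_alloc_scratch(db, &scratch);

    hs_stream_t *stream = nullptr;
    bool matched = false;
    hs_open_stream(db, 0, &stream);
    // Data can arrive in arbitrary chunks; matches may span chunk boundaries.
    hs_scan_stream(stream, "fla", 3, 0, scratch, on_match, &matched);
    hs_scan_stream(stream, "g{abc}", 6, 0, scratch, on_match, &matched);
    // Every opened stream must be closed, which is what clean()/clean_stream_by_id do above.
    hs_close_stream(stream, scratch, on_match, &matched);

    std::printf("matched: %d\n", matched ? 1 : 0);
    hs_free_scratch(scratch);
    hs_free_database(db);
    return 0;
}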
@@ -139,76 +223,59 @@ class NetfilterQueue {

     }

-    //Input data filtering
-    void on_client_data(Stream& stream) {
-        string data(stream.client_payload().begin(), stream.client_payload().end());
-        sctx.tcp_match_util.pkt_info->is_input = true;
-        sctx.tcp_match_util.matching_has_been_called = true;
-        bool result = callback_func(*sctx.tcp_match_util.pkt_info);
-        if (result){
-            clean_stream_by_id(sctx.tcp_match_util.pkt_info->sid);
+    static void on_data_recv(Stream& stream, stream_ctx* sctx, string data, bool is_input) {
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_data_recv] data: " << data << endl;
+        #endif
+        sctx->tcp_match_util.pkt_info->is_input = is_input;
+        sctx->tcp_match_util.matching_has_been_called = true;
+        bool result = callback_func(*sctx->tcp_match_util.pkt_info);
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_data_recv] result: " << result << endl;
+        #endif
+        if (!result){
+            #ifdef DEBUG
+            cerr << "[DEBUG] [NetfilterQueue.on_data_recv] Stream matched, removing all data about it" << endl;
+            #endif
+            sctx->clean_stream_by_id(sctx->tcp_match_util.pkt_info->sid);
             stream.ignore_client_data();
             stream.ignore_server_data();
         }
-        sctx.tcp_match_util.result = result;
+        sctx->tcp_match_util.result = result;
+    }
+
+    //Input data filtering
+    static void on_client_data(Stream& stream, stream_ctx* sctx) {
+        on_data_recv(stream, sctx, string(stream.client_payload().begin(), stream.client_payload().end()), true);
     }

     //Server data filtering
-    void on_server_data(Stream& stream) {
-        string data(stream.server_payload().begin(), stream.server_payload().end());
-        sctx.tcp_match_util.pkt_info->is_input = false;
-        sctx.tcp_match_util.matching_has_been_called = true;
-        bool result = callback_func(*sctx.tcp_match_util.pkt_info);
-        if (result){
-            clean_stream_by_id(sctx.tcp_match_util.pkt_info->sid);
-            stream.ignore_client_data();
-            stream.ignore_server_data();
-        }
-        this->sctx.tcp_match_util.result = result;
+    static void on_server_data(Stream& stream, stream_ctx* sctx) {
+        on_data_recv(stream, sctx, string(stream.server_payload().begin(), stream.server_payload().end()), false);
     }

-    void on_new_stream(Stream& stream) {
+    static void on_new_stream(Stream& stream, stream_ctx* sctx) {
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_new_stream] New stream detected" << endl;
+        #endif
         if (stream.is_partial_stream()) {
+            #ifdef DEBUG
+            cerr << "[DEBUG] [NetfilterQueue.on_new_stream] Partial stream detected, skipping" << endl;
+            #endif
             return;
         }
-        cerr << "[+] New connection!" << endl;
         stream.auto_cleanup_payloads(true);
-        stream.client_data_callback(
-            [&](auto a){this->on_client_data(a);}
-        );
-        stream.server_data_callback(
-            [&](auto a){this->on_server_data(a);}
-        );
-    }
-
-    void clean_stream_by_id(stream_id stream_id){
-        auto stream_search = this->sctx.in_hs_streams.find(stream_id);
-        hs_stream_t* stream_match;
-        if (stream_search != this->sctx.in_hs_streams.end()){
-            stream_match = stream_search->second;
-            if (hs_close_stream(stream_match, sctx.in_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-            this->sctx.in_hs_streams.erase(stream_search);
-        }
-
-        stream_search = this->sctx.out_hs_streams.find(stream_id);
-        if (stream_search != this->sctx.out_hs_streams.end()){
-            stream_match = stream_search->second;
-            if (hs_close_stream(stream_match, sctx.out_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-            this->sctx.out_hs_streams.erase(stream_search);
-        }
+        stream.client_data_callback(bind(on_client_data, placeholders::_1, sctx));
+        stream.server_data_callback(bind(on_server_data, placeholders::_1, sctx));
     }

     // A stream was terminated. The second argument is the reason why it was terminated
-    void on_stream_terminated(Stream& stream, StreamFollower::TerminationReason reason) {
+    static void on_stream_terminated(Stream& stream, StreamFollower::TerminationReason reason, stream_ctx* sctx) {
         stream_id stream_id = stream_id::make_identifier(stream);
-        cerr << "[+] Connection closed: " << &stream_id << endl;
-        this->clean_stream_by_id(stream_id);
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_stream_terminated] Stream terminated, deleting all data" << endl;
+        #endif
+        sctx->clean_stream_by_id(stream_id);
     }


@@ -220,12 +287,10 @@ class NetfilterQueue {
         */
         int ret = 1;
         mnl_socket_setsockopt(sctx.nl, NETLINK_NO_ENOBUFS, &ret, sizeof(int));
-        sctx.follower.new_stream_callback(
-            [&](auto a){this->on_new_stream(a);}
-        );
-        sctx.follower.stream_termination_callback(
-            [&](auto a, auto b){this->on_stream_terminated(a, b);}
-        );
+        sctx.follower.new_stream_callback(bind(on_new_stream, placeholders::_1, &sctx));
+        sctx.follower.stream_termination_callback(bind(on_stream_terminated, placeholders::_1, placeholders::_2, &sctx));
         for (;;) {
             ret = recv_packet();
             if (ret == -1) {
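The registration above works because the libtins callbacks accept any callable with the right signature, so a static handler plus std::bind (or an equivalent capturing lambda) can carry the stream_ctx pointer alongside the stream argument. A small self-contained sketch of that pattern, with stand-in types rather than the real Stream/stream_ctx:

// Sketch of passing extra state to a callback that only receives the stream argument,
// the same pattern used with StreamFollower above. Types are illustrative stand-ins.
#include <functional>
#include <iostream>

struct FakeStream { int id = 42; };          // stand-in for Tins::TCPIP::Stream
struct FakeCtx { int packets_seen = 0; };    // stand-in for stream_ctx

// Static handler: all shared state arrives through the explicit ctx pointer.
static void on_stream_data(FakeStream& s, FakeCtx* ctx) {
    ctx->packets_seen++;
    std::cout << "stream " << s.id << " seen " << ctx->packets_seen << " times\n";
}

int main() {
    FakeCtx ctx;
    // The consumer only knows how to call back with the stream argument.
    std::function<void(FakeStream&)> callback =
        std::bind(on_stream_data, std::placeholders::_1, &ctx);
    // An equivalent form is a capturing lambda:
    // std::function<void(FakeStream&)> callback = [&ctx](FakeStream& s){ on_stream_data(s, &ctx); };

    FakeStream s;
    callback(s);
    callback(s);
    return 0;
}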
@@ -241,6 +306,9 @@ class NetfilterQueue {


     ~NetfilterQueue() {
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.~NetfilterQueue] Destructor called" << endl;
+        #endif
         send_config_cmd(NFQNL_CFG_CMD_UNBIND);
         _clear();
     }
@@ -263,23 +331,9 @@ class NetfilterQueue {
         }
         mnl_socket_close(sctx.nl);
         sctx.nl = nullptr;
-        sctx.clean_scratches();
-
-        for(auto ele: sctx.in_hs_streams){
-            if (hs_close_stream(ele.second, sctx.in_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-        }
-        sctx.in_hs_streams.clear();
-        for(auto ele: sctx.out_hs_streams){
-            if (hs_close_stream(ele.second, sctx.out_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-        }
-        sctx.out_hs_streams.clear();
-    }
+        sctx.clean();
+    }

     template<typename T>
     static void build_verdict(T packet, uint8_t *payload, uint16_t plen, nlmsghdr *nlh_verdict, nfqnl_msg_packet_hdr *ph, stream_ctx* sctx){
         Tins::TCP* tcp = packet.template find_pdu<Tins::TCP>();
@@ -300,7 +354,17 @@ class NetfilterQueue {
         };
         sctx->tcp_match_util.matching_has_been_called = false;
         sctx->tcp_match_util.pkt_info = &pktinfo;
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.build_verdict] TCP Packet received " << packet.src_addr() << ":" << tcp->sport() << " -> " << packet.dst_addr() << ":" << tcp->dport() << ", sending to libtins StreamFollower" << endl;
+        #endif
         sctx->follower.process_packet(packet);
+        #ifdef DEBUG
+        if (sctx->tcp_match_util.matching_has_been_called){
+            cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has called matching functions" << endl;
+        }else{
+            cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has NOT called matching functions" << endl;
+        }
+        #endif
         if (sctx->tcp_match_util.matching_has_been_called && !sctx->tcp_match_util.result){
             Tins::PDU* data_layer = tcp->release_inner_pdu();
             if (data_layer != nullptr){
@@ -317,7 +381,7 @@ class NetfilterQueue {
             if (!udp){
                 throw invalid_argument("Only TCP and UDP are supported");
             }
-            Tins::PDU* application_layer = tcp->inner_pdu();
+            Tins::PDU* application_layer = udp->inner_pdu();
             u_int16_t payload_size = 0;
             if (application_layer != nullptr){
                 payload_size = application_layer->size();
@@ -369,6 +433,13 @@ class NetfilterQueue {

         nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));

+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet received" << endl;
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet ID: " << ntohl(ph->packet_id) << endl;
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Payload size: " << plen << endl;
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Payload: " << string(payload, payload+plen) << endl;
+        #endif
+
         // Check IP protocol version
         if ( (payload[0] & 0xf0) == 0x40 ){
             build_verdict(Tins::IP(payload, plen), payload, plen, nlh_verdict, ph, sctx);

@@ -78,9 +78,9 @@ class RegexRules{
         if (n_of_regex == 0){
             return;
         }
-        const char* regex_match_rules[n_of_regex];
-        unsigned int regex_array_ids[n_of_regex];
-        unsigned int regex_flags[n_of_regex];
+        vector<const char*> regex_match_rules(n_of_regex);
+        vector<unsigned int> regex_array_ids(n_of_regex);
+        vector<unsigned int> regex_flags(n_of_regex);
         for(int i = 0; i < n_of_regex; i++){
             regex_match_rules[i] = decoded[i].second.regex.c_str();
             regex_array_ids[i] = i;
@@ -89,14 +89,22 @@ class RegexRules{
                 regex_flags[i] |= HS_FLAG_CASELESS;
             }
         }
-        hs_database_t* rebuilt_db;
-        hs_compile_error_t *compile_err;
+        #ifdef DEBUG
+        cerr << "[DEBUG] [RegexRules.fill_ruleset] compiling " << n_of_regex << " regexes..." << endl;
+        for (int i = 0; i < n_of_regex; i++){
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex[" << i << "]: " << decoded[i].first << " " << decoded[i].second.regex << endl;
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex_match_rules[" << i << "]: " << regex_match_rules[i] << endl;
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex_flags[" << i << "]: " << regex_flags[i] << endl;
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex_array_ids[" << i << "]: " << regex_array_ids[i] << endl;
+        }
+        #endif
+        hs_database_t* rebuilt_db = nullptr;
+        hs_compile_error_t *compile_err = nullptr;
         if (
             hs_compile_multi(
-                regex_match_rules,
-                regex_flags,
-                regex_array_ids,
+                regex_match_rules.data(),
+                regex_flags.data(),
+                regex_array_ids.data(),
                 n_of_regex,
                 is_stream?HS_MODE_STREAM:HS_MODE_BLOCK,
                 nullptr, &rebuilt_db, &compile_err

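The switch from variable-length arrays to std::vector above fits hs_compile_multi naturally, since the API only needs contiguous arrays plus a count. A compact sketch of that call with vector-backed inputs; the pattern strings and IDs are illustrative, not from this ruleset:

// Sketch of compiling several patterns at once with vector-backed argument arrays,
// as fill_ruleset does above. Patterns and IDs here are illustrative assumptions.
#include <hs/hs.h>
#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> patterns = {"USER\\s+\\w+", "flag\\{[0-9a-f]+\\}"};

    std::vector<const char*> exprs;
    std::vector<unsigned int> flags, ids;
    for (unsigned int i = 0; i < patterns.size(); i++) {
        exprs.push_back(patterns[i].c_str());   // backing strings must outlive the compile call
        flags.push_back(HS_FLAG_CASELESS);
        ids.push_back(i);                       // reported back through the match callback
    }

    hs_database_t* db = nullptr;
    hs_compile_error_t* compile_err = nullptr;
    // .data() yields the contiguous arrays hs_compile_multi expects; variable-length
    // arrays are not needed (and are not standard C++ anyway).
    if (hs_compile_multi(exprs.data(), flags.data(), ids.data(),
                         exprs.size(), HS_MODE_BLOCK, nullptr,
                         &db, &compile_err) != HS_SUCCESS) {
        std::cerr << "compile failed: " << compile_err->message << std::endl;
        hs_free_compile_error(compile_err);
        return 1;
    }
    std::cout << "compiled " << exprs.size() << " patterns" << std::endl;
    hs_free_database(db);
    return 0;
}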
@@ -54,10 +54,16 @@ struct matched_data{
     bool has_matched = false;
 };


 bool filter_callback(packet_info& info){
     shared_ptr<RegexRules> conf = regex_config;
-    if (conf->ver() != info.sctx->latest_config_ver){
-        info.sctx->clean_scratches();
+    auto current_version = conf->ver();
+    if (current_version != info.sctx->latest_config_ver){
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Configuration has changed (" << current_version << "!=" << info.sctx->latest_config_ver << "), cleaning scratch spaces" << endl;
+        #endif
+        info.sctx->clean();
+        info.sctx->latest_config_ver = current_version;
     }
     scratch_setup(conf->input_ruleset, info.sctx->in_scratch);
     scratch_setup(conf->output_ruleset, info.sctx->out_scratch);
@@ -66,6 +72,12 @@ bool filter_callback(packet_info & info){
     if (regex_matcher == nullptr){
         return true;
     }

+    #ifdef DEBUG
+    cerr << "[DEBUG] [filter_callback] Matching packet with " << (info.is_input ? "input" : "output") << " ruleset" << endl;
+    cerr << "[DEBUG] [filter_callback] Packet: " << info.payload << endl;
+    #endif
+
     matched_data match_res;
     hs_error_t err;
     hs_scratch_t* scratch_space = info.is_input ? info.sctx->in_scratch: info.sctx->out_scratch;
@@ -77,25 +89,40 @@ bool filter_callback(packet_info & info){
     };
     hs_stream_t* stream_match;
     if (conf->stream_mode()){
-        matching_map match_map = info.is_input ? info.sctx->in_hs_streams : info.sctx->out_hs_streams;
-        auto stream_search = match_map.find(info.sid);
+        matching_map* match_map = info.is_input ? &info.sctx->in_hs_streams : &info.sctx->out_hs_streams;
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Dumping match_map " << match_map << endl;
+        for (auto ele: *match_map){
+            cerr << "[DEBUG] [filter_callback] " << ele.first << " -> " << ele.second << endl;
+        }
+        cerr << "[DEBUG] [filter_callback] End of match_map" << endl;
+        #endif
+        auto stream_search = match_map->find(info.sid);

-        if (stream_search == match_map.end()){
+        if (stream_search == match_map->end()){

+            #ifdef DEBUG
+            cerr << "[DEBUG] [filter_callback] Creating new stream matcher for " << info.sid << endl;
+            #endif
             if (hs_open_stream(regex_matcher, 0, &stream_match) != HS_SUCCESS) {
                 cerr << "[error] [filter_callback] Error opening the stream matcher (hs)" << endl;
                 throw invalid_argument("Cannot open stream match on hyperscan");
             }
-            if (info.is_tcp){
-                match_map[info.sid] = stream_match;
-            }
+            match_map->insert_or_assign(info.sid, stream_match);
         }else{
             stream_match = stream_search->second;
         }
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Matching as a stream" << endl;
+        #endif
         err = hs_scan_stream(
             stream_match,info.payload.c_str(), info.payload.length(),
             0, scratch_space, match_func, &match_res
         );
     }else{
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Matching as a block" << endl;
+        #endif
         err = hs_scan(
             regex_matcher,info.payload.c_str(), info.payload.length(),
             0, scratch_space, match_func, &match_res
@@ -105,13 +132,6 @@ bool filter_callback(packet_info & info){
         cerr << "[error] [filter_callback] Error while matching the stream (hs)" << endl;
         throw invalid_argument("Error while matching the stream with hyperscan");
     }
-    if (
-        !info.is_tcp && conf->stream_mode() &&
-        hs_close_stream(stream_match, scratch_space, nullptr, nullptr) != HS_SUCCESS
-    ){
-        cerr << "[error] [filter_callback] Error closing the stream matcher (hs)" << endl;
-        throw invalid_argument("Cannot close stream match on hyperscan");
-    }
     if (match_res.has_matched){
         auto rules_vector = info.is_input ? conf->input_ruleset.regexes : conf->output_ruleset.regexes;
         stringstream msg;

@@ -80,7 +80,8 @@ class FiregexInterceptor:
         proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cppqueue")
         self.process = await asyncio.create_subprocess_exec(
             proxy_binary_path,
-            stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE
+            stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
+            env={"MATCH_MODE": "stream" if self.srv.proto == "tcp" else "block", "NTHREADS": os.getenv("NTHREADS","1")},
         )
         line_fut = self.process.stdout.readuntil()
         try:

@@ -16,6 +16,7 @@ function ServiceDetailsNFRegex() {
     const [tooltipAddRegexOpened, setTooltipAddRegexOpened] = useState(false)
     const regexesList = nfregexServiceRegexesQuery(srv??"")

+    if (services.isLoading) return <LoadingOverlay visible={true} />
     if (!srv || !serviceInfo || regexesList.isError) return <Navigate to="/" replace />

     return <>

@@ -88,7 +88,7 @@ def checkRegex(regex, should_work=True, upper=False):
         if r["regex"] == regex:
             #Test the regex
             s = base64.b64decode(regex).upper() if upper else base64.b64decode(regex)
-            if not server.sendCheckData(secrets.token_bytes(200) + s + secrets.token_bytes(200)):
+            if not server.sendCheckData(secrets.token_bytes(40) + s + secrets.token_bytes(40)):
                 puts("The malicious request was successfully blocked ✔", color=colors.green)
                 n_blocked += 1
                 time.sleep(1)
@@ -104,7 +104,7 @@ def checkRegex(regex, should_work=True, upper=False):
             puts("Test Failed: The regex wasn't found ✗", color=colors.red)
             exit_test(1)
     else:
-        if server.sendCheckData(secrets.token_bytes(200) + base64.b64decode(regex) + secrets.token_bytes(200)):
+        if server.sendCheckData(secrets.token_bytes(40) + base64.b64decode(regex) + secrets.token_bytes(40)):
             puts("The request wasn't blocked ✔", color=colors.green)
         else:
             puts("Test Failed: The request was blocked when it shouldn't have", color=colors.red)
@@ -186,18 +186,6 @@ checkRegex(regex)

 clear_regexes()

-#Create Server regex and verify that should not matches
-if(firegex.nf_add_regex(service_id,regex,"S",active=True, is_case_sensitive=True)):
-    puts(f"Sucessfully added server to client regex {str(secret)} ✔", color=colors.green)
-else:
-    puts(f"Test Failed: Coulnd't add server to client regex {str(secret)} ✗", color=colors.red)
-    exit_test(1)
-
-checkRegex(regex, should_work=False)
-
-#Delete regex
-clear_regexes()
-
 #Rename service
 if(firegex.nf_rename_service(service_id,f"{args.service_name}2")):
     puts(f"Sucessfully renamed service to {args.service_name}2 ✔", color=colors.green)

@@ -9,23 +9,23 @@ ERROR=0
 pip3 install -r requirements.txt

 echo "Running standard API test"
-#python3 api_test.py -p $PASSWORD || ERROR=1
+python3 api_test.py -p $PASSWORD || ERROR=1
 echo "Running Netfilter Regex TCP ipv4"
 python3 nf_test.py -p $PASSWORD -m tcp || ERROR=1
 echo "Running Netfilter Regex TCP ipv6"
-#python3 nf_test.py -p $PASSWORD -m tcp -6 || ERROR=1
+python3 nf_test.py -p $PASSWORD -m tcp -6 || ERROR=1
 echo "Running Netfilter Regex UDP ipv4"
-#python3 nf_test.py -p $PASSWORD -m udp || ERROR=1
+python3 nf_test.py -p $PASSWORD -m udp || ERROR=1
 echo "Running Netfilter Regex UDP ipv6"
-#python3 nf_test.py -p $PASSWORD -m udp -6 || ERROR=1
+python3 nf_test.py -p $PASSWORD -m udp -6 || ERROR=1
 echo "Running Port Hijack TCP ipv4"
-#python3 ph_test.py -p $PASSWORD -m tcp || ERROR=1
+python3 ph_test.py -p $PASSWORD -m tcp || ERROR=1
 echo "Running Port Hijack TCP ipv6"
-#python3 ph_test.py -p $PASSWORD -m tcp -6 || ERROR=1
+python3 ph_test.py -p $PASSWORD -m tcp -6 || ERROR=1
 echo "Running Port Hijack UDP ipv4"
-#python3 ph_test.py -p $PASSWORD -m udp || ERROR=1
+python3 ph_test.py -p $PASSWORD -m udp || ERROR=1
 echo "Running Port Hijack UDP ipv6"
-#python3 ph_test.py -p $PASSWORD -m udp -6 || ERROR=1
+python3 ph_test.py -p $PASSWORD -m udp -6 || ERROR=1

 exit $ERROR