C++ refactoring, init PyPI projects, GitHub Action added
.github/workflows/docker-image.yml (vendored; 17 changed lines)
@@ -20,12 +20,6 @@ jobs:
       - name: Checkout repository
         uses: actions/checkout@v4

-      - name: Build and run firegex
-        run: python3 start.py start --psw-no-interactive testpassword
-
-      - name: Run tests
-        run: cd tests && ./run_tests.sh
-
       - name: Set up QEMU
         uses: docker/setup-qemu-action@master
         with:
@@ -47,7 +41,14 @@ jobs:
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
-
+      - name: Extract tag name
+        id: tag
+        run: echo TAG_NAME=$(echo $GITHUB_REF | cut -d / -f 3) >> $GITHUB_OUTPUT
+      - name: Update version in setup.py
+        run: >-
+          sed -i "s/{{VERSION_PLACEHOLDER}}/${{ steps.tag.outputs.TAG_NAME }}/g" backend/utils/__init__.py;
+          sed -i "s/{{VERSION_PLACEHOLDER}}/${{ steps.tag.outputs.TAG_NAME }}/g" proxy-client/setup.py;
+          sed -i "s/{{VERSION_PLACEHOLDER}}/${{ steps.tag.outputs.TAG_NAME }}/g" proxy-client/firegex/__init__.py;
       - name: Build and push Docker image
         uses: docker/build-push-action@v5
         with:
@@ -59,5 +60,3 @@ jobs:
         labels: ${{ steps.meta.outputs.labels }}
         cache-from: type=gha
         cache-to: type=gha,mode=max
-
-
.github/workflows/pypi-publish.yml (vendored; new file, 47 lines)
@@ -0,0 +1,47 @@
+# This workflow will upload a Python Package using Twine when a release is created
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
+
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+name: Upload Python Package
+
+on:
+  release:
+    types:
+      - published
+
+permissions:
+  contents: read
+
+jobs:
+  deploy:
+
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.x'
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install build
+      - name: Extract tag name
+        id: tag
+        run: echo TAG_NAME=$(echo $GITHUB_REF | cut -d / -f 3) >> $GITHUB_OUTPUT
+      - name: Update version in setup.py
+        run: >-
+          sed -i "s/{{VERSION_PLACEHOLDER}}/${{ steps.tag.outputs.TAG_NAME }}/g" proxy-client/setup.py;
+          sed -i "s/{{VERSION_PLACEHOLDER}}/${{ steps.tag.outputs.TAG_NAME }}/g" proxy-client/firegex/__init__.py;
+      - name: Build package
+        run: cd client && python -m build && mv ./dist ../
+      - name: Publish package
+        uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+        with:
+          user: __token__
+          password: ${{ secrets.PYPI_API_TOKEN }}
.gitignore (vendored; 13 changed lines)
@@ -11,7 +11,10 @@

 # testing
 /frontend/coverage
+/proxy-client/firegex.egg-info
+/proxy-client/dist
+/proxy-client/fgex-pip/fgex.egg-info
+/proxy-client/fgex-pip/dist
 /backend/db/
 /backend/db/**
 /frontend/build/
@@ -21,10 +24,10 @@
 /backend/modules/cppqueue
 /backend/binsrc/cppqueue
 /backend/modules/proxy
-docker-compose.yml
-firegex-compose.yml
-firegex-compose-tmp-file.yml
-firegex.py
+/docker-compose.yml
+/firegex-compose.yml
+/firegex-compose-tmp-file.yml
+/firegex.py
 /tests/benchmark.csv
 # misc
 **/.DS_Store
Dockerfile
@@ -27,7 +27,8 @@ ADD ./backend/requirements.txt /execute/requirements.txt
 RUN pip3 install --no-cache-dir --break-system-packages -r /execute/requirements.txt --no-warn-script-location

 COPY ./backend/binsrc /execute/binsrc
-RUN g++ binsrc/nfqueue.cpp -o modules/cppqueue -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libhs libmnl)
+RUN g++ binsrc/nfqueue.cpp -o modules/cppqueue -std=c++23 -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libhs libmnl)
+RUN g++ binsrc/nfproxy-tun.cpp -o modules/cppnfproxy -std=c++23 -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libmnl)

 COPY ./backend/ /execute/
 COPY --from=frontend /app/dist/ ./frontend/
backend/binsrc/classes/netfilter.cpp
@@ -17,181 +17,61 @@ using Tins::TCPIP::Stream;
 using Tins::TCPIP::StreamFollower;
 using namespace std;

-#ifndef NETFILTER_CLASSES_HPP
-#define NETFILTER_CLASSES_HPP
+#ifndef NETFILTER_CLASS_CPP
+#define NETFILTER_CLASS_CPP

-typedef Tins::TCPIP::StreamIdentifier stream_id;
-typedef map<stream_id, hs_stream_t*> matching_map;
-
-/* Considering to use unorder_map using this hash of stream_id
-
-namespace std {
-    template<>
-    struct hash<stream_id> {
-        size_t operator()(const stream_id& sid) const
-        {
-            return std::hash<std::uint32_t>()(sid.max_address[0] + sid.max_address[1] + sid.max_address[2] + sid.max_address[3] + sid.max_address_port + sid.min_address[0] + sid.min_address[1] + sid.min_address[2] + sid.min_address[3] + sid.min_address_port);
-        }
-    };
-}
-
-*/
-
-#ifdef DEBUG
-ostream& operator<<(ostream& os, const Tins::TCPIP::StreamIdentifier::address_type &sid){
-    bool first_print = false;
-    for (auto ele: sid){
-        if (first_print || ele){
-            first_print = true;
-            os << (int)ele << ".";
-        }
-    }
-    return os;
-}
-
-ostream& operator<<(ostream& os, const stream_id &sid){
-    os << sid.max_address << ":" << sid.max_address_port << " -> " << sid.min_address << ":" << sid.min_address_port;
-    return os;
-}
-#endif
-
-struct packet_info;
-
-struct tcp_stream_tmp {
-    bool matching_has_been_called = false;
-    bool result;
-    packet_info *pkt_info;
+typedef int QueueCallbackFunction(const nlmsghdr *, const mnl_socket*, void *);
+
+struct nfqueue_execution_data_tmp{
+    mnl_socket* nl = nullptr;
+    function<QueueCallbackFunction> queue_cb = nullptr;
+    void *data = nullptr;
 };

-struct stream_ctx {
-    matching_map in_hs_streams;
-    matching_map out_hs_streams;
-    hs_scratch_t* in_scratch = nullptr;
-    hs_scratch_t* out_scratch = nullptr;
-    u_int16_t latest_config_ver = 0;
-    StreamFollower follower;
-    mnl_socket* nl;
-    tcp_stream_tmp tcp_match_util;
-
-    void clean_scratches(){
-        if (out_scratch != nullptr){
-            hs_free_scratch(out_scratch);
-            out_scratch = nullptr;
-        }
-        if (in_scratch != nullptr){
-            hs_free_scratch(in_scratch);
-            in_scratch = nullptr;
-        }
-    }
-
-    void clean_stream_by_id(stream_id sid){
-        #ifdef DEBUG
-        cerr << "[DEBUG] [NetfilterQueue.clean_stream_by_id] Cleaning stream context of " << sid << endl;
-        #endif
-        auto stream_search = in_hs_streams.find(sid);
-        hs_stream_t* stream_match;
-        if (stream_search != in_hs_streams.end()){
-            stream_match = stream_search->second;
-            if (hs_close_stream(stream_match, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-            in_hs_streams.erase(stream_search);
-        }
-
-        stream_search = out_hs_streams.find(sid);
-        if (stream_search != out_hs_streams.end()){
-            stream_match = stream_search->second;
-            if (hs_close_stream(stream_match, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-            out_hs_streams.erase(stream_search);
-        }
-    }
-
-    void clean(){
-        #ifdef DEBUG
-        cerr << "[DEBUG] [NetfilterQueue.clean] Cleaning stream context" << endl;
-        #endif
-        if (in_scratch){
-            for(auto ele: in_hs_streams){
-                if (hs_close_stream(ele.second, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                    cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                    throw invalid_argument("Cannot close stream match on hyperscan");
-                }
-            }
-            in_hs_streams.clear();
-        }
-
-        if (out_scratch){
-            for(auto ele: out_hs_streams){
-                if (hs_close_stream(ele.second, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                    cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                    throw invalid_argument("Cannot close stream match on hyperscan");
-                }
-            }
-            out_hs_streams.clear();
-        }
-        clean_scratches();
-    }
-};
-
-struct packet_info {
-    string packet;
-    string payload;
-    stream_id sid;
-    bool is_input;
-    bool is_tcp;
-    stream_ctx* sctx;
-};
-
-typedef bool NetFilterQueueCallback(packet_info &);
-
-template <NetFilterQueueCallback callback_func>
-class NetfilterQueue {
+class NfQueueExecutor {
+    private:
+    size_t BUF_SIZE = 0xffff + (MNL_SOCKET_BUFFER_SIZE/2);
+    char *queue_msg_buffer = nullptr;
+    QueueCallbackFunction * _queue_callback_hook = nullptr;

     public:

-    size_t BUF_SIZE = 0xffff + (MNL_SOCKET_BUFFER_SIZE/2);
-    char *buf = nullptr;
     unsigned int portid;
     u_int16_t queue_num;
-    stream_ctx sctx;
+    mnl_socket* nl = nullptr;

-    NetfilterQueue(u_int16_t queue_num): queue_num(queue_num) {
-        sctx.nl = mnl_socket_open(NETLINK_NETFILTER);
+    NfQueueExecutor(u_int16_t queue_num, QueueCallbackFunction* queue_cb): queue_num(queue_num), _queue_callback_hook(queue_cb){
+        nl = mnl_socket_open(NETLINK_NETFILTER);

-        if (sctx.nl == nullptr) { throw runtime_error( "mnl_socket_open" );}
+        if (nl == nullptr) { throw runtime_error( "mnl_socket_open" );}

-        if (mnl_socket_bind(sctx.nl, 0, MNL_SOCKET_AUTOPID) < 0) {
-            mnl_socket_close(sctx.nl);
+        if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
+            mnl_socket_close(nl);
             throw runtime_error( "mnl_socket_bind" );
         }
-        portid = mnl_socket_get_portid(sctx.nl);
+        portid = mnl_socket_get_portid(nl);

-        buf = (char*) malloc(BUF_SIZE);
+        queue_msg_buffer = (char*) malloc(BUF_SIZE);

-        if (!buf) {
-            mnl_socket_close(sctx.nl);
+        if (!queue_msg_buffer) {
+            mnl_socket_close(nl);
             throw runtime_error( "allocate receive buffer" );
         }

-        if (send_config_cmd(NFQNL_CFG_CMD_BIND) < 0) {
+        if (_send_config_cmd(NFQNL_CFG_CMD_BIND) < 0) {
            _clear();
            throw runtime_error( "mnl_socket_send" );
        }
        //TEST if BIND was successful
-        if (send_config_cmd(NFQNL_CFG_CMD_NONE) < 0) { // SEND A NONE cmmand to generate an error meessage
+        if (_send_config_cmd(NFQNL_CFG_CMD_NONE) < 0) { // SEND A NONE cmmand to generate an error meessage
            _clear();
            throw runtime_error( "mnl_socket_send" );
        }
-        if (recv_packet() == -1) { //RECV the error message
+        if (_recv_packet() == -1) { //RECV the error message
            _clear();
            throw runtime_error( "mnl_socket_recvfrom" );
        }

-        struct nlmsghdr *nlh = (struct nlmsghdr *) buf;
+        struct nlmsghdr *nlh = (struct nlmsghdr *) queue_msg_buffer;

        if (nlh->nlmsg_type != NLMSG_ERROR) {
            _clear();
@@ -210,71 +90,32 @@
        }

        //END TESTING BIND
-        nlh = nfq_nlmsg_put(buf, NFQNL_MSG_CONFIG, queue_num);
+        nlh = nfq_nlmsg_put(queue_msg_buffer, NFQNL_MSG_CONFIG, queue_num);
        nfq_nlmsg_cfg_put_params(nlh, NFQNL_COPY_PACKET, 0xffff);

        mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(NFQA_CFG_F_GSO));
        mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(NFQA_CFG_F_GSO));

-        if (mnl_socket_sendto(sctx.nl, nlh, nlh->nlmsg_len) < 0) {
+        if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
            _clear();
            throw runtime_error( "mnl_socket_send" );
        }

    }

-    static void on_data_recv(Stream& stream, stream_ctx* sctx, string data) {
-        sctx->tcp_match_util.matching_has_been_called = true;
-        bool result = callback_func(*sctx->tcp_match_util.pkt_info);
-        #ifdef DEBUG
-        cerr << "[DEBUG] [NetfilterQueue.on_data_recv] result: " << result << endl;
-        #endif
-        if (!result){
-            #ifdef DEBUG
-            cerr << "[DEBUG] [NetfilterQueue.on_data_recv] Stream matched, removing all data about it" << endl;
-            #endif
-            sctx->clean_stream_by_id(sctx->tcp_match_util.pkt_info->sid);
-            stream.ignore_client_data();
-            stream.ignore_server_data();
-        }
-        sctx->tcp_match_util.result = result;
+    NfQueueExecutor(u_int16_t queue_num): NfQueueExecutor(queue_num, nullptr) {}
+
+    // --- Functions to be implemented by the user
+
+    virtual void before_loop() {
+        // Do nothing by default
    }

-    //Input data filtering
-    static void on_client_data(Stream& stream, stream_ctx* sctx) {
-        on_data_recv(stream, sctx, string(stream.client_payload().begin(), stream.client_payload().end()));
-    }
-
-    //Server data filtering
-    static void on_server_data(Stream& stream, stream_ctx* sctx) {
-        on_data_recv(stream, sctx, string(stream.server_payload().begin(), stream.server_payload().end()));
-    }
-
-    static void on_new_stream(Stream& stream, stream_ctx* sctx) {
-        #ifdef DEBUG
-        cerr << "[DEBUG] [NetfilterQueue.on_new_stream] New stream detected" << endl;
-        #endif
-        if (stream.is_partial_stream()) {
-            #ifdef DEBUG
-            cerr << "[DEBUG] [NetfilterQueue.on_new_stream] Partial stream detected, skipping" << endl;
-            #endif
-            return;
-        }
-        stream.auto_cleanup_payloads(true);
-        stream.client_data_callback(bind(on_client_data, placeholders::_1, sctx));
-        stream.server_data_callback(bind(on_server_data, placeholders::_1, sctx));
-        stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, sctx));
-    }
-
-    // A stream was terminated. The second argument is the reason why it was terminated
-    static void on_stream_close(Stream& stream, stream_ctx* sctx) {
-        stream_id stream_id = stream_id::make_identifier(stream);
-        #ifdef DEBUG
-        cerr << "[DEBUG] [NetfilterQueue.on_stream_close] Stream terminated, deleting all data" << endl;
-        #endif
-        sctx->clean_stream_by_id(stream_id);
+    virtual void * callback_data_fetch(){
+        return nullptr;
    }

+    // --- End of functions to be implemented by the user
+
    void run(){
        /*
@@ -283,18 +124,21 @@
        * in this information, so turn it off.
        */
        int ret = 1;
-        mnl_socket_setsockopt(sctx.nl, NETLINK_NO_ENOBUFS, &ret, sizeof(int));
+        mnl_socket_setsockopt(nl, NETLINK_NO_ENOBUFS, &ret, sizeof(int));

-        sctx.follower.new_stream_callback(bind(on_new_stream, placeholders::_1, &sctx));
-        sctx.follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, &sctx));
+        before_loop();

        for (;;) {
-            ret = recv_packet();
+            ret = _recv_packet();
            if (ret == -1) {
                throw runtime_error( "mnl_socket_recvfrom" );
            }
-            ret = mnl_cb_run(buf, ret, 0, portid, queue_cb, &sctx);
+            nfqueue_execution_data_tmp data = {
+                nl: nl,
+                queue_cb: _queue_callback_hook,
+                data: callback_data_fetch()
+            };
+            ret = mnl_cb_run(queue_msg_buffer, ret, 0, portid, _real_queue_cb, &data);
            if (ret < 0){
                throw runtime_error( "mnl_cb_run" );
            }
@@ -302,170 +146,46 @@
        }
    }

-    ~NetfilterQueue() {
-        #ifdef DEBUG
-        cerr << "[DEBUG] [NetfilterQueue.~NetfilterQueue] Destructor called" << endl;
-        #endif
-        send_config_cmd(NFQNL_CFG_CMD_UNBIND);
+    ~NfQueueExecutor() {
+        _send_config_cmd(NFQNL_CFG_CMD_UNBIND);
        _clear();
    }

    private:

-    ssize_t send_config_cmd(nfqnl_msg_config_cmds cmd){
-        struct nlmsghdr *nlh = nfq_nlmsg_put(buf, NFQNL_MSG_CONFIG, queue_num);
+    static int _real_queue_cb(const nlmsghdr *nlh, void *data_ptr) {
+        nfqueue_execution_data_tmp* info = (nfqueue_execution_data_tmp*) data_ptr;
+        if (info->queue_cb == nullptr) return MNL_CB_OK;
+        return info->queue_cb(nlh, info->nl, info->data);
+    }
+
+    inline void _clear(){
+        if (queue_msg_buffer != nullptr) {
+            free(queue_msg_buffer);
+            queue_msg_buffer = nullptr;
+        }
+        mnl_socket_close(nl);
+        nl = nullptr;
+    }
+
+    inline ssize_t _send_config_cmd(nfqnl_msg_config_cmds cmd){
+        struct nlmsghdr *nlh = nfq_nlmsg_put(queue_msg_buffer, NFQNL_MSG_CONFIG, queue_num);
        nfq_nlmsg_cfg_put_cmd(nlh, AF_INET, cmd);
-        return mnl_socket_sendto(sctx.nl, nlh, nlh->nlmsg_len);
+        return mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
    }

-    ssize_t recv_packet(){
-        return mnl_socket_recvfrom(sctx.nl, buf, BUF_SIZE);
-    }
-
-    void _clear(){
-        if (buf != nullptr) {
-            free(buf);
-            buf = nullptr;
-        }
-        mnl_socket_close(sctx.nl);
-        sctx.nl = nullptr;
-        sctx.clean();
-    }
-
-    template<typename T>
-    static void build_verdict(T packet, uint8_t *payload, uint16_t plen, nlmsghdr *nlh_verdict, nfqnl_msg_packet_hdr *ph, stream_ctx* sctx, bool is_input){
-        Tins::TCP* tcp = packet.template find_pdu<Tins::TCP>();
-
-        if (tcp){
-            Tins::PDU* application_layer = tcp->inner_pdu();
-            u_int16_t payload_size = 0;
-            if (application_layer != nullptr){
-                payload_size = application_layer->size();
-            }
-            packet_info pktinfo{
-                packet: string(payload, payload+plen),
-                payload: string(payload+plen - payload_size, payload+plen),
-                sid: stream_id::make_identifier(packet),
-                is_input: is_input,
-                is_tcp: true,
-                sctx: sctx,
-            };
-            sctx->tcp_match_util.matching_has_been_called = false;
-            sctx->tcp_match_util.pkt_info = &pktinfo;
-            #ifdef DEBUG
-            cerr << "[DEBUG] [NetfilterQueue.build_verdict] TCP Packet received " << packet.src_addr() << ":" << tcp->sport() << " -> " << packet.dst_addr() << ":" << tcp->dport() << " thr: " << this_thread::get_id() << ", sending to libtins StreamFollower" << endl;
-            #endif
-            sctx->follower.process_packet(packet);
-            #ifdef DEBUG
-            if (sctx->tcp_match_util.matching_has_been_called){
-                cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has called matching functions" << endl;
-            }else{
-                cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has NOT called matching functions" << endl;
-            }
-            #endif
-            if (sctx->tcp_match_util.matching_has_been_called && !sctx->tcp_match_util.result){
-                Tins::PDU* data_layer = tcp->release_inner_pdu();
-                if (data_layer != nullptr){
-                    delete data_layer;
-                }
-                tcp->set_flag(Tins::TCP::FIN,1);
-                tcp->set_flag(Tins::TCP::ACK,1);
-                tcp->set_flag(Tins::TCP::SYN,0);
-                nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.serialize().data(), packet.size());
-            }
-            nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
-        }else{
-            Tins::UDP* udp = packet.template find_pdu<Tins::UDP>();
-            if (!udp){
-                throw invalid_argument("Only TCP and UDP are supported");
-            }
-            Tins::PDU* application_layer = udp->inner_pdu();
-            u_int16_t payload_size = 0;
-            if (application_layer != nullptr){
-                payload_size = application_layer->size();
-            }
-            if((udp->inner_pdu() == nullptr)){
-                nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
-            }
-            packet_info pktinfo{
-                packet: string(payload, payload+plen),
-                payload: string(payload+plen - payload_size, payload+plen),
-                sid: stream_id::make_identifier(packet),
-                is_input: is_input,
-                is_tcp: false,
-                sctx: sctx,
-            };
-            if (callback_func(pktinfo)){
-                nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
-            }else{
-                nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_DROP );
-            }
-        }
-    }
-
-    static int queue_cb(const nlmsghdr *nlh, void *data_ptr)
-    {
-        stream_ctx* sctx = (stream_ctx*)data_ptr;
-
-        //Extract attributes from the nlmsghdr
-        nlattr *attr[NFQA_MAX+1] = {};
-
-        if (nfq_nlmsg_parse(nlh, attr) < 0) {
-            perror("problems parsing");
-            return MNL_CB_ERROR;
-        }
-        if (attr[NFQA_PACKET_HDR] == nullptr) {
-            fputs("metaheader not set\n", stderr);
-            return MNL_CB_ERROR;
-        }
-        if (attr[NFQA_MARK] == nullptr) {
-            fputs("mark not set\n", stderr);
-            return MNL_CB_ERROR;
-        }
-        //Get Payload
-        uint16_t plen = mnl_attr_get_payload_len(attr[NFQA_PAYLOAD]);
-        uint8_t *payload = (uint8_t *)mnl_attr_get_payload(attr[NFQA_PAYLOAD]);
-
-        //Return result to the kernel
-        struct nfqnl_msg_packet_hdr *ph = (nfqnl_msg_packet_hdr*) mnl_attr_get_payload(attr[NFQA_PACKET_HDR]);
-        struct nfgenmsg *nfg = (nfgenmsg *)mnl_nlmsg_get_payload(nlh);
-        char buf[MNL_SOCKET_BUFFER_SIZE];
-        struct nlmsghdr *nlh_verdict;
-        struct nlattr *nest;
-
-        nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));
-
-        bool is_input = ntohl(mnl_attr_get_u32(attr[NFQA_MARK])) & 0x1; // == 0x1337 that is odd
-        #ifdef DEBUG
-        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet received" << endl;
-        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet ID: " << ntohl(ph->packet_id) << endl;
-        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Payload size: " << plen << endl;
-        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Is input: " << is_input << endl;
-        #endif
-
-        // Check IP protocol version
-        if ( (payload[0] & 0xf0) == 0x40 ){
-            build_verdict(Tins::IP(payload, plen), payload, plen, nlh_verdict, ph, sctx, is_input);
-        }else{
-            build_verdict(Tins::IPv6(payload, plen), payload, plen, nlh_verdict, ph, sctx, is_input);
-        }
-
-        nest = mnl_attr_nest_start(nlh_verdict, NFQA_CT);
-        mnl_attr_put_u32(nlh_verdict, CTA_MARK, htonl(42));
-        mnl_attr_nest_end(nlh_verdict, nest);
-
-        if (mnl_socket_sendto(sctx->nl, nlh_verdict, nlh_verdict->nlmsg_len) < 0) {
-            throw runtime_error( "mnl_socket_send" );
-        }
-
-        return MNL_CB_OK;
+    inline ssize_t _recv_packet(){
+        return mnl_socket_recvfrom(nl, queue_msg_buffer, BUF_SIZE);
    }

 };

-template <NetFilterQueueCallback func>
+template <typename Executor, typename = enable_if_t<is_base_of_v<NfQueueExecutor, Executor>>>
 class NFQueueSequence{

    private:
-    vector<NetfilterQueue<func> *> nfq;
+    vector<Executor *> nfq;
    uint16_t _init;
    uint16_t _end;
    vector<thread> threads;
@@ -474,7 +194,7 @@ class NFQueueSequence{

    NFQueueSequence(uint16_t seq_len){
        if (seq_len <= 0) throw invalid_argument("seq_len <= 0");
-        nfq = vector<NetfilterQueue<func>*>(seq_len);
+        nfq = vector<Executor*>(seq_len);
        _init = QUEUE_BASE_NUM;
        while(nfq[0] == nullptr){
            if (_init+seq_len-1 >= 65536){
@@ -482,7 +202,7 @@ class NFQueueSequence{
            }
            for (int i=0;i<seq_len;i++){
                try{
-                    nfq[i] = new NetfilterQueue<func>(_init+i);
+                    nfq[i] = new Executor(_init+i);
                }catch(const invalid_argument e){
                    for(int j = 0; j < i; j++) {
                        delete nfq[j];
@@ -499,7 +219,9 @@ class NFQueueSequence{
    void start(){
        if (threads.size() != 0) throw runtime_error("NFQueueSequence: already started!");
        for (int i=0;i<nfq.size();i++){
-            threads.push_back(thread(&NetfilterQueue<func>::run, nfq[i]));
+            threads.push_back(thread([executor = nfq[i]](){
+                executor->run();
+            }));
        }
    }

@@ -524,4 +246,4 @@ class NFQueueSequence{
    }
 };

-#endif // NETFILTER_CLASSES_HPP
+#endif // NETFILTER_CLASS_CPP
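The refactor above inverts the old design: instead of a NetfilterQueue class templated on a match callback, NfQueueExecutor now owns only the netlink plumbing and delegates per-packet work to a C-style hook plus two virtual hooks (before_loop, callback_data_fetch). A minimal sketch of a custom executor against that interface; the AcceptAllQueue name and its trivial body are hypothetical, not part of this commit:

    #include "classes/netfilter.cpp"

    // Hypothetical executor: accepts every packet, showing the three hook points.
    class AcceptAllQueue : public NfQueueExecutor {
        public:
        // Runs once inside run(), before the receive loop starts.
        void before_loop() override {}

        // Whatever this returns is handed back as data_ptr on every callback.
        void * callback_data_fetch() override { return nullptr; }

        // Must match QueueCallbackFunction; a real implementation would parse
        // nlh and send a verdict via mnl_socket_sendto, as the RegexQueue and
        // SocketTunnelQueue subclasses below do.
        static int queue_cb(const nlmsghdr *nlh, const mnl_socket* nl, void *data_ptr) {
            return MNL_CB_OK;
        }

        AcceptAllQueue(int queue) : NfQueueExecutor(queue, &queue_cb) {}
    };

    // Usage, one worker thread per queue, as in the binaries below:
    //     NFQueueSequence<AcceptAllQueue> queues(4);
    //     queues.start();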
backend/binsrc/nfproxy-tun.cpp (new file, 20 lines)
@@ -0,0 +1,20 @@
+#include "proxytun/proxytun.cpp"
+#include "utils.hpp"
+#include <iostream>
+#include <syncstream>
+
+using namespace std;
+
+int main(int argc, char *argv[]){
+    int n_of_threads = 1;
+    char * n_threads_str = getenv("NTHREADS");
+    if (n_threads_str != nullptr) n_of_threads = ::atoi(n_threads_str);
+    if(n_of_threads <= 0) n_of_threads = 1;
+
+    NFQueueSequence<SocketTunnelQueue> queues(n_of_threads);
+    queues.start();
+
+    osyncstream(cout) << "QUEUES " << queues.init() << " " << queues.end() << endl;
+    cerr << "[info] [main] Queues: " << queues.init() << ":" << queues.end() << " threads assigned: " << n_of_threads << endl;
+
+}
backend/binsrc/nfqueue.cpp
@@ -1,12 +1,11 @@
-#include "classes/regex_rules.cpp"
+#include "regex/regex_rules.cpp"
+#include "regex/regexfilter.cpp"
 #include "classes/netfilter.cpp"
-#include "utils.hpp"
+#include <syncstream>
 #include <iostream>

 using namespace std;

-shared_ptr<RegexRules> regex_config;
-
 void config_updater (){
     string line;
     while (true){
@@ -33,124 +32,15 @@ void config_updater (){
        try{
            regex_config.reset(new RegexRules(raw_rules, regex_config->stream_mode()));
            cerr << "[info] [updater] Config update done to ver "<< regex_config->ver() << endl;
-            cout << "ACK OK" << endl;
+            osyncstream(cout) << "ACK OK" << endl;
        }catch(const std::exception& e){
            cerr << "[error] [updater] Failed to build new configuration!" << endl;
-            cout << "ACK FAIL " << e.what() << endl;
+            osyncstream(cout) << "ACK FAIL " << e.what() << endl;
        }
    }

 }

-void inline scratch_setup(regex_ruleset &conf, hs_scratch_t* & scratch){
-    if (scratch == nullptr && conf.hs_db != nullptr){
-        if (hs_alloc_scratch(conf.hs_db, &scratch) != HS_SUCCESS) {
-            throw invalid_argument("Cannot alloc scratch");
-        }
-    }
-}
-
-struct matched_data{
-    unsigned int matched = 0;
-    bool has_matched = false;
-};
-
-
-bool filter_callback(packet_info& info){
-    shared_ptr<RegexRules> conf = regex_config;
-    auto current_version = conf->ver();
-    if (current_version != info.sctx->latest_config_ver){
-        #ifdef DEBUG
-        cerr << "[DEBUG] [filter_callback] Configuration has changed (" << current_version << "!=" << info.sctx->latest_config_ver << "), cleaning scratch spaces" << endl;
-        #endif
-        info.sctx->clean();
-        info.sctx->latest_config_ver = current_version;
-    }
-    scratch_setup(conf->input_ruleset, info.sctx->in_scratch);
-    scratch_setup(conf->output_ruleset, info.sctx->out_scratch);
-
-    hs_database_t* regex_matcher = info.is_input ? conf->input_ruleset.hs_db : conf->output_ruleset.hs_db;
-    if (regex_matcher == nullptr){
-        return true;
-    }
-
-    #ifdef DEBUG
-    cerr << "[DEBUG] [filter_callback] Matching packet with " << (info.is_input ? "input" : "output") << " ruleset" << endl;
-    #endif
-
-    matched_data match_res;
-    hs_error_t err;
-    hs_scratch_t* scratch_space = info.is_input ? info.sctx->in_scratch: info.sctx->out_scratch;
-    auto match_func = [](unsigned int id, auto from, auto to, auto flags, auto ctx){
-        auto res = (matched_data*)ctx;
-        res->has_matched = true;
-        res->matched = id;
-        return -1; // Stop matching
-    };
-    hs_stream_t* stream_match;
-    if (conf->stream_mode()){
-        matching_map* match_map = info.is_input ? &info.sctx->in_hs_streams : &info.sctx->out_hs_streams;
-        #ifdef DEBUG
-        cerr << "[DEBUG] [filter_callback] Dumping match_map " << match_map << endl;
-        for (auto ele: *match_map){
-            cerr << "[DEBUG] [filter_callback] " << ele.first << " -> " << ele.second << endl;
-        }
-        cerr << "[DEBUG] [filter_callback] End of match_map" << endl;
-        #endif
-        auto stream_search = match_map->find(info.sid);
-
-        if (stream_search == match_map->end()){
-
-            #ifdef DEBUG
-            cerr << "[DEBUG] [filter_callback] Creating new stream matcher for " << info.sid << endl;
-            #endif
-            if (hs_open_stream(regex_matcher, 0, &stream_match) != HS_SUCCESS) {
-                cerr << "[error] [filter_callback] Error opening the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot open stream match on hyperscan");
-            }
-            if (info.is_tcp){
-                match_map->insert_or_assign(info.sid, stream_match);
-            }
-        }else{
-            stream_match = stream_search->second;
-        }
-        #ifdef DEBUG
-        cerr << "[DEBUG] [filter_callback] Matching as a stream" << endl;
-        #endif
-        err = hs_scan_stream(
-            stream_match,info.payload.c_str(), info.payload.length(),
-            0, scratch_space, match_func, &match_res
-        );
-    }else{
-        #ifdef DEBUG
-        cerr << "[DEBUG] [filter_callback] Matching as a block" << endl;
-        #endif
-        err = hs_scan(
-            regex_matcher,info.payload.c_str(), info.payload.length(),
-            0, scratch_space, match_func, &match_res
-        );
-    }
-    if (
-        !info.is_tcp && conf->stream_mode() &&
-        hs_close_stream(stream_match, scratch_space, nullptr, nullptr) != HS_SUCCESS
-    ){
-        cerr << "[error] [filter_callback] Error closing the stream matcher (hs)" << endl;
-        throw invalid_argument("Cannot close stream match on hyperscan");
-    }
-    if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) {
-        cerr << "[error] [filter_callback] Error while matching the stream (hs)" << endl;
-        throw invalid_argument("Error while matching the stream with hyperscan");
-    }
-    if (match_res.has_matched){
-        auto rules_vector = info.is_input ? conf->input_ruleset.regexes : conf->output_ruleset.regexes;
-        stringstream msg;
-        msg << "BLOCKED " << rules_vector[match_res.matched] << "\n";
-        cout << msg.str() << flush;
-        return false;
-    }
-    return true;
-}
-
 int main(int argc, char *argv[]){
     int n_of_threads = 1;
     char * n_threads_str = getenv("NTHREADS");
@@ -165,10 +55,10 @@ int main(int argc, char *argv[]){

     regex_config.reset(new RegexRules(stream_mode));

-    NFQueueSequence<filter_callback> queues(n_of_threads);
+    NFQueueSequence<RegexQueue> queues(n_of_threads);
     queues.start();

-    cout << "QUEUES " << queues.init() << " " << queues.end() << endl;
+    osyncstream(cout) << "QUEUES " << queues.init() << " " << queues.end() << endl;
     cerr << "[info] [main] Queues: " << queues.init() << ":" << queues.end() << " threads assigned: " << n_of_threads << " stream mode: " << stream_mode << endl;

     config_updater();
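The cout-to-osyncstream changes in this file matter because the filter workers and the config updater now share stdout as a control channel ("ACK OK", "BLOCKED ..."): C++20's osyncstream (from <syncstream>) buffers each message and emits it atomically when the wrapper is destroyed, so lines written from different threads cannot interleave. A standalone sketch of the idiom; the two thread bodies are illustrative only:

    #include <iostream>
    #include <syncstream>
    #include <thread>

    int main(){
        // Each osyncstream temporary owns a private buffer; the whole line is
        // transferred to cout atomically at the end of the statement, so the
        // two messages can never be mixed character-by-character.
        std::thread t1([]{ std::osyncstream(std::cout) << "ACK OK" << std::endl; });
        std::thread t2([]{ std::osyncstream(std::cout) << "BLOCKED rule-0" << std::endl; });
        t1.join();
        t2.join();
    }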
backend/binsrc/proxytun/proxytun.cpp (new file, 142 lines)
@@ -0,0 +1,142 @@
+#ifndef PROXY_TUNNEL_CPP
+#define PROXY_TUNNEL_CPP
+
+#include <linux/netfilter/nfnetlink_queue.h>
+#include <libnetfilter_queue/libnetfilter_queue.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <tins/tins.h>
+#include <tins/tcp_ip/stream_follower.h>
+#include <tins/tcp_ip/stream_identifier.h>
+#include <libmnl/libmnl.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/types.h>
+#include <stdexcept>
+#include <iostream>
+#include "../classes/netfilter.cpp"
+#include <functional>
+
+using Tins::TCPIP::Stream;
+using Tins::TCPIP::StreamFollower;
+using namespace std;
+
+typedef Tins::TCPIP::StreamIdentifier stream_id;
+
+class SocketTunnelQueue: public NfQueueExecutor {
+    public:
+
+    StreamFollower follower;
+
+    void before_loop() override {
+        follower.new_stream_callback(bind(on_new_stream, placeholders::_1));
+        follower.stream_termination_callback(bind(on_stream_close, placeholders::_1));
+    }
+
+    void * callback_data_fetch() override{
+        return nullptr;
+    }
+
+    static bool filter_action(){
+        return true;
+    }
+
+    static void on_data_recv(Stream& stream, string data, bool is_input) {
+        bool result = filter_action();
+        if (!result){
+            stream.ignore_client_data();
+            stream.ignore_server_data();
+        }
+    }
+
+    //Input data filtering
+    static void on_client_data(Stream& stream) {
+        on_data_recv(stream, string(stream.client_payload().begin(), stream.client_payload().end()), true);
+    }
+
+    //Server data filtering
+    static void on_server_data(Stream& stream) {
+        on_data_recv(stream, string(stream.server_payload().begin(), stream.server_payload().end()), false);
+    }
+
+
+    // A stream was terminated. The second argument is the reason why it was terminated
+    static void on_stream_close(Stream& stream) {
+        stream_id stream_id = stream_id::make_identifier(stream);
+    }
+
+    static void on_new_stream(Stream& stream) {
+        stream.auto_cleanup_payloads(true);
+        if (stream.is_partial_stream()) {
+            return;
+        }
+        stream.client_data_callback(bind(on_client_data, placeholders::_1));
+        stream.server_data_callback(bind(on_server_data, placeholders::_1));
+        stream.stream_closed_callback(bind(on_stream_close, placeholders::_1));
+    }
+
+
+    template<typename T>
+    static void build_verdict(T packet, uint8_t *payload, uint16_t plen, nlmsghdr *nlh_verdict, nfqnl_msg_packet_hdr *ph){
+        sctx->tcp_match_util.matching_has_been_called = false;
+        sctx->follower.process_packet(packet);
+        if (sctx->tcp_match_util.matching_has_been_called && !sctx->tcp_match_util.result){
+            Tins::PDU* data_layer = tcp->release_inner_pdu();
+            if (data_layer != nullptr){
+                delete data_layer;
+            }
+            tcp->set_flag(Tins::TCP::FIN,1);
+            tcp->set_flag(Tins::TCP::ACK,1);
+            tcp->set_flag(Tins::TCP::SYN,0);
+            nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.serialize().data(), packet.size());
+        }
+        nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
+    }
+
+    static int queue_cb(const nlmsghdr *nlh, const mnl_socket* nl, void *data_ptr) {
+
+        //Extract attributes from the nlmsghdr
+        nlattr *attr[NFQA_MAX+1] = {};
+
+        if (nfq_nlmsg_parse(nlh, attr) < 0) {
+            perror("problems parsing");
+            return MNL_CB_ERROR;
+        }
+        if (attr[NFQA_PACKET_HDR] == nullptr) {
+            fputs("metaheader not set\n", stderr);
+            return MNL_CB_ERROR;
+        }
+        //Get Payload
+        uint16_t plen = mnl_attr_get_payload_len(attr[NFQA_PAYLOAD]);
+        uint8_t *payload = (uint8_t *)mnl_attr_get_payload(attr[NFQA_PAYLOAD]);
+
+        //Return result to the kernel
+        struct nfqnl_msg_packet_hdr *ph = (nfqnl_msg_packet_hdr*) mnl_attr_get_payload(attr[NFQA_PACKET_HDR]);
+        struct nfgenmsg *nfg = (nfgenmsg *)mnl_nlmsg_get_payload(nlh);
+        char buf[MNL_SOCKET_BUFFER_SIZE];
+        struct nlmsghdr *nlh_verdict;
+
+        nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));
+
+        // Check IP protocol version
+        if ( (payload[0] & 0xf0) == 0x40 ){
+            build_verdict(Tins::IP(payload, plen), payload, plen, nlh_verdict, ph);
+        }else{
+            build_verdict(Tins::IPv6(payload, plen), payload, plen, nlh_verdict, ph);
+        }
+
+        if (mnl_socket_sendto(nl, nlh_verdict, nlh_verdict->nlmsg_len) < 0) {
+            throw runtime_error( "mnl_socket_send" );
+        }
+
+        return MNL_CB_OK;
+    }
+
+    SocketTunnelQueue(int queue) : NfQueueExecutor(queue, &queue_cb) {}
+
+    ~SocketTunnelQueue() {
+        // TODO
+    }
+
+};
+
+#endif // PROXY_TUNNEL_CPP
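SocketTunnelQueue drives the libtins StreamFollower the same way the regex filter does: every packet is handed to process_packet, and the follower fires per-stream callbacks once TCP payload has been reassembled. A self-contained sketch of that flow, reading packets from a pcap file instead of an NFQUEUE socket (the capture.pcap filename is made up for illustration):

    #include <tins/tins.h>
    #include <tins/tcp_ip/stream_follower.h>
    #include <iostream>

    using Tins::TCPIP::Stream;
    using Tins::TCPIP::StreamFollower;

    int main(){
        StreamFollower follower;
        follower.new_stream_callback([](Stream& stream){
            stream.auto_cleanup_payloads(true);
            stream.client_data_callback([](Stream& s){
                std::cout << "client sent " << s.client_payload().size() << " bytes\n";
            });
        });
        // Feed packets from a capture file instead of a netfilter queue.
        Tins::FileSniffer sniffer("capture.pcap");  // hypothetical input file
        sniffer.sniff_loop([&](Tins::PDU& pdu){
            follower.process_packet(pdu);
            return true;
        });
    }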
backend/binsrc/regex/regex_rules.cpp
@@ -1,15 +1,16 @@
+#ifndef REGEX_FILTER_CPP
+#define REGEX_FILTER_CPP
+
 #include <iostream>
 #include <cstring>
 #include <sstream>
 #include "../utils.hpp"
 #include <vector>
 #include <hs.h>
+#include <memory>

 using namespace std;

-#ifndef REGEX_FILTER_HPP
-#define REGEX_FILTER_HPP
-
 enum FilterDirection{ CTOS, STOC };

 struct decoded_regex {
@@ -170,5 +171,20 @@ class RegexRules{
     }
 };

-#endif // REGEX_FILTER_HPP
+shared_ptr<RegexRules> regex_config;
+
+void inline scratch_setup(regex_ruleset &conf, hs_scratch_t* & scratch){
+    if (scratch == nullptr && conf.hs_db != nullptr){
+        if (hs_alloc_scratch(conf.hs_db, &scratch) != HS_SUCCESS) {
+            throw invalid_argument("Cannot alloc scratch");
+        }
+    }
+}
+
+struct matched_data{
+    unsigned int matched = 0;
+    bool has_matched = false;
+};
+
+#endif // REGEX_FILTER_CPP
+
321
backend/binsrc/regex/regexfilter.cpp
Normal file
321
backend/binsrc/regex/regexfilter.cpp
Normal file
@@ -0,0 +1,321 @@
|
|||||||
|
#ifndef REGEX_FILTER_CLASS_CPP
|
||||||
|
#define REGEX_FILTER_CLASS_CPP
|
||||||
|
|
||||||
|
#include <linux/netfilter/nfnetlink_queue.h>
|
||||||
|
#include <libnetfilter_queue/libnetfilter_queue.h>
|
||||||
|
#include <linux/netfilter/nfnetlink_conntrack.h>
|
||||||
|
#include <tins/tins.h>
|
||||||
|
#include <tins/tcp_ip/stream_follower.h>
|
||||||
|
#include <tins/tcp_ip/stream_identifier.h>
|
||||||
|
#include <libmnl/libmnl.h>
|
||||||
|
#include <linux/netfilter.h>
|
||||||
|
#include <linux/netfilter/nfnetlink.h>
|
||||||
|
#include <linux/types.h>
|
||||||
|
#include <stdexcept>
|
||||||
|
#include <thread>
|
||||||
|
#include <hs.h>
|
||||||
|
#include <syncstream>
|
||||||
|
#include <iostream>
|
||||||
|
#include "../classes/netfilter.cpp"
|
||||||
|
#include "stream_ctx.cpp"
|
||||||
|
#include "regex_rules.cpp"
|
||||||
|
|
||||||
|
using Tins::TCPIP::Stream;
|
||||||
|
using Tins::TCPIP::StreamFollower;
|
||||||
|
using namespace std;
|
||||||
|
|
||||||
|
class RegexQueue: public NfQueueExecutor {
|
||||||
|
public:
|
||||||
|
stream_ctx sctx;
|
||||||
|
|
||||||
|
void before_loop() override {
|
||||||
|
sctx.follower.new_stream_callback(bind(on_new_stream, placeholders::_1, &sctx));
|
||||||
|
sctx.follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, &sctx));
|
||||||
|
}
|
||||||
|
|
||||||
|
void * callback_data_fetch() override{
|
||||||
|
return &sctx;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool filter_action(packet_info& info){
|
||||||
|
shared_ptr<RegexRules> conf = regex_config;
|
||||||
|
auto current_version = conf->ver();
|
||||||
|
if (current_version != info.sctx->latest_config_ver){
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [filter_callback] Configuration has changed (" << current_version << "!=" << info.sctx->latest_config_ver << "), cleaning scratch spaces" << endl;
|
||||||
|
#endif
|
||||||
|
info.sctx->clean();
|
||||||
|
info.sctx->latest_config_ver = current_version;
|
||||||
|
}
|
||||||
|
scratch_setup(conf->input_ruleset, info.sctx->in_scratch);
|
||||||
|
scratch_setup(conf->output_ruleset, info.sctx->out_scratch);
|
||||||
|
|
||||||
|
hs_database_t* regex_matcher = info.is_input ? conf->input_ruleset.hs_db : conf->output_ruleset.hs_db;
|
||||||
|
if (regex_matcher == nullptr){
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [filter_callback] Matching packet with " << (info.is_input ? "input" : "output") << " ruleset" << endl;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
matched_data match_res;
|
||||||
|
hs_error_t err;
|
||||||
|
hs_scratch_t* scratch_space = info.is_input ? info.sctx->in_scratch: info.sctx->out_scratch;
|
||||||
|
auto match_func = [](unsigned int id, auto from, auto to, auto flags, auto ctx){
|
||||||
|
auto res = (matched_data*)ctx;
|
||||||
|
res->has_matched = true;
|
||||||
|
res->matched = id;
|
||||||
|
return -1; // Stop matching
|
||||||
|
};
|
||||||
|
hs_stream_t* stream_match;
|
||||||
|
if (conf->stream_mode()){
|
||||||
|
matching_map* match_map = info.is_input ? &info.sctx->in_hs_streams : &info.sctx->out_hs_streams;
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [filter_callback] Dumping match_map " << match_map << endl;
|
||||||
|
for (auto ele: *match_map){
|
||||||
|
cerr << "[DEBUG] [filter_callback] " << ele.first << " -> " << ele.second << endl;
|
||||||
|
}
|
||||||
|
cerr << "[DEBUG] [filter_callback] End of match_map" << endl;
|
||||||
|
#endif
|
||||||
|
auto stream_search = match_map->find(info.sid);
|
||||||
|
|
||||||
|
if (stream_search == match_map->end()){
|
||||||
|
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [filter_callback] Creating new stream matcher for " << info.sid << endl;
|
||||||
|
#endif
|
||||||
|
if (hs_open_stream(regex_matcher, 0, &stream_match) != HS_SUCCESS) {
|
||||||
|
cerr << "[error] [filter_callback] Error opening the stream matcher (hs)" << endl;
|
||||||
|
throw invalid_argument("Cannot open stream match on hyperscan");
|
||||||
|
}
|
||||||
|
if (info.is_tcp){
|
||||||
|
match_map->insert_or_assign(info.sid, stream_match);
|
||||||
|
}
|
||||||
|
}else{
|
||||||
|
stream_match = stream_search->second;
|
||||||
|
}
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [filter_callback] Matching as a stream" << endl;
|
||||||
|
#endif
|
||||||
|
err = hs_scan_stream(
|
||||||
|
stream_match,info.payload.c_str(), info.payload.length(),
|
||||||
|
0, scratch_space, match_func, &match_res
|
||||||
|
);
|
||||||
|
}else{
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [filter_callback] Matching as a block" << endl;
|
||||||
|
#endif
|
||||||
|
err = hs_scan(
|
||||||
|
regex_matcher,info.payload.c_str(), info.payload.length(),
|
||||||
|
0, scratch_space, match_func, &match_res
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if (
|
||||||
|
!info.is_tcp && conf->stream_mode() &&
|
||||||
|
hs_close_stream(stream_match, scratch_space, nullptr, nullptr) != HS_SUCCESS
|
||||||
|
){
|
||||||
|
cerr << "[error] [filter_callback] Error closing the stream matcher (hs)" << endl;
|
||||||
|
throw invalid_argument("Cannot close stream match on hyperscan");
|
||||||
|
}
|
||||||
|
if (err != HS_SUCCESS && err != HS_SCAN_TERMINATED) {
|
||||||
|
cerr << "[error] [filter_callback] Error while matching the stream (hs)" << endl;
|
||||||
|
throw invalid_argument("Error while matching the stream with hyperscan");
|
||||||
|
}
|
||||||
|
if (match_res.has_matched){
|
||||||
|
auto rules_vector = info.is_input ? conf->input_ruleset.regexes : conf->output_ruleset.regexes;
|
||||||
|
stringstream msg;
|
||||||
|
msg << "BLOCKED " << rules_vector[match_res.matched] << "\n";
|
||||||
|
osyncstream(cout) << msg.str() << flush;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void on_data_recv(Stream& stream, stream_ctx* sctx, string data) {
|
||||||
|
sctx->tcp_match_util.matching_has_been_called = true;
|
||||||
|
bool result = filter_action(*sctx->tcp_match_util.pkt_info);
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.on_data_recv] result: " << result << endl;
|
||||||
|
#endif
|
||||||
|
if (!result){
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.on_data_recv] Stream matched, removing all data about it" << endl;
|
||||||
|
#endif
|
||||||
|
sctx->clean_stream_by_id(sctx->tcp_match_util.pkt_info->sid);
|
||||||
|
stream.ignore_client_data();
|
||||||
|
stream.ignore_server_data();
|
||||||
|
}
|
||||||
|
sctx->tcp_match_util.result = result;
|
||||||
|
}
|
||||||
|
|
||||||
|
//Input data filtering
|
||||||
|
static void on_client_data(Stream& stream, stream_ctx* sctx) {
|
||||||
|
on_data_recv(stream, sctx, string(stream.client_payload().begin(), stream.client_payload().end()));
|
||||||
|
}
|
||||||
|
|
||||||
|
//Server data filtering
|
||||||
|
static void on_server_data(Stream& stream, stream_ctx* sctx) {
|
||||||
|
on_data_recv(stream, sctx, string(stream.server_payload().begin(), stream.server_payload().end()));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// A stream was terminated. The second argument is the reason why it was terminated
|
||||||
|
static void on_stream_close(Stream& stream, stream_ctx* sctx) {
|
||||||
|
stream_id stream_id = stream_id::make_identifier(stream);
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.on_stream_close] Stream terminated, deleting all data" << endl;
|
||||||
|
#endif
|
||||||
|
sctx->clean_stream_by_id(stream_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void on_new_stream(Stream& stream, stream_ctx* sctx) {
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.on_new_stream] New stream detected" << endl;
|
||||||
|
#endif
|
||||||
|
stream.auto_cleanup_payloads(true);
|
||||||
|
if (stream.is_partial_stream()) {
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.on_new_stream] Partial stream detected, skipping" << endl;
|
||||||
|
#endif
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
stream.client_data_callback(bind(on_client_data, placeholders::_1, sctx));
|
||||||
|
stream.server_data_callback(bind(on_server_data, placeholders::_1, sctx));
|
||||||
|
stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, sctx));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
template<typename T>
|
||||||
|
static void build_verdict(T packet, uint8_t *payload, uint16_t plen, nlmsghdr *nlh_verdict, nfqnl_msg_packet_hdr *ph, stream_ctx* sctx, bool is_input){
|
||||||
|
Tins::TCP* tcp = packet.template find_pdu<Tins::TCP>();
|
||||||
|
|
||||||
|
if (tcp){
|
||||||
|
Tins::PDU* application_layer = tcp->inner_pdu();
|
||||||
|
u_int16_t payload_size = 0;
|
||||||
|
if (application_layer != nullptr){
|
||||||
|
payload_size = application_layer->size();
|
||||||
|
}
|
||||||
|
packet_info pktinfo{
|
||||||
|
packet: string(payload, payload+plen),
|
||||||
|
payload: string(payload+plen - payload_size, payload+plen),
|
||||||
|
sid: stream_id::make_identifier(packet),
|
||||||
|
is_input: is_input,
|
||||||
|
is_tcp: true,
|
||||||
|
sctx: sctx,
|
||||||
|
};
|
||||||
|
sctx->tcp_match_util.matching_has_been_called = false;
|
||||||
|
sctx->tcp_match_util.pkt_info = &pktinfo;
|
||||||
|
#ifdef DEBUG
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.build_verdict] TCP Packet received " << packet.src_addr() << ":" << tcp->sport() << " -> " << packet.dst_addr() << ":" << tcp->dport() << " thr: " << this_thread::get_id() << ", sending to libtins StreamFollower" << endl;
|
||||||
|
#endif
|
||||||
|
sctx->follower.process_packet(packet);
|
||||||
|
#ifdef DEBUG
|
||||||
|
if (sctx->tcp_match_util.matching_has_been_called){
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has called matching functions" << endl;
|
||||||
|
}else{
|
||||||
|
cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has NOT called matching functions" << endl;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
if (sctx->tcp_match_util.matching_has_been_called && !sctx->tcp_match_util.result){
|
||||||
|
Tins::PDU* data_layer = tcp->release_inner_pdu();
|
||||||
|
if (data_layer != nullptr){
|
||||||
|
delete data_layer;
|
||||||
|
}
|
||||||
|
tcp->set_flag(Tins::TCP::FIN,1);
|
||||||
|
tcp->set_flag(Tins::TCP::ACK,1);
|
||||||
|
tcp->set_flag(Tins::TCP::SYN,0);
|
||||||
|
nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.serialize().data(), packet.size());
|
||||||
|
}
|
||||||
|
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
|
||||||
|
}else{
|
||||||
|
Tins::UDP* udp = packet.template find_pdu<Tins::UDP>();
|
||||||
|
if (!udp){
|
||||||
|
throw invalid_argument("Only TCP and UDP are supported");
|
||||||
|
}
|
||||||
|
Tins::PDU* application_layer = udp->inner_pdu();
|
||||||
|
u_int16_t payload_size = 0;
|
||||||
|
if (application_layer != nullptr){
|
||||||
|
payload_size = application_layer->size();
|
||||||
|
}
|
||||||
|
if((udp->inner_pdu() == nullptr)){
|
||||||
|
nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT );
|
||||||
|
}
            packet_info pktinfo{
                packet: string(payload, payload+plen),
                payload: string(payload+plen - payload_size, payload+plen),
                sid: stream_id::make_identifier(packet),
                is_input: is_input,
                is_tcp: false,
                sctx: sctx,
            };
            if (filter_action(pktinfo)){
                nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_ACCEPT);
            }else{
                nfq_nlmsg_verdict_put(nlh_verdict, ntohl(ph->packet_id), NF_DROP);
            }
        }
    }

    static int queue_cb(const nlmsghdr *nlh, const mnl_socket* nl, void *data_ptr) {
        stream_ctx* sctx = (stream_ctx*)data_ptr;

        // Extract attributes from the nlmsghdr
        nlattr *attr[NFQA_MAX+1] = {};

        if (nfq_nlmsg_parse(nlh, attr) < 0) {
            perror("problems parsing");
            return MNL_CB_ERROR;
        }
        if (attr[NFQA_PACKET_HDR] == nullptr) {
            fputs("metaheader not set\n", stderr);
            return MNL_CB_ERROR;
        }
        if (attr[NFQA_MARK] == nullptr) {
            fputs("mark not set\n", stderr);
            return MNL_CB_ERROR;
        }
        // Get the payload
        uint16_t plen = mnl_attr_get_payload_len(attr[NFQA_PAYLOAD]);
        uint8_t *payload = (uint8_t *)mnl_attr_get_payload(attr[NFQA_PAYLOAD]);

        // Build the verdict message to return to the kernel
        struct nfqnl_msg_packet_hdr *ph = (nfqnl_msg_packet_hdr*) mnl_attr_get_payload(attr[NFQA_PACKET_HDR]);
        struct nfgenmsg *nfg = (nfgenmsg *)mnl_nlmsg_get_payload(nlh);
        char buf[MNL_SOCKET_BUFFER_SIZE];
        struct nlmsghdr *nlh_verdict;

        nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));

        bool is_input = ntohl(mnl_attr_get_u32(attr[NFQA_MARK])) & 0x1; // the input mark (e.g. 0x1337) is odd, so the low bit flags input traffic
        #ifdef DEBUG
        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet received" << endl;
        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet ID: " << ntohl(ph->packet_id) << endl;
        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Payload size: " << plen << endl;
        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Is input: " << is_input << endl;
        #endif

        // Check the IP protocol version (first nibble of the header)
        if ( (payload[0] & 0xf0) == 0x40 ){
            build_verdict(Tins::IP(payload, plen), payload, plen, nlh_verdict, ph, sctx, is_input);
        }else{
            build_verdict(Tins::IPv6(payload, plen), payload, plen, nlh_verdict, ph, sctx, is_input);
        }

        if (mnl_socket_sendto(nl, nlh_verdict, nlh_verdict->nlmsg_len) < 0) {
            throw runtime_error("mnl_socket_send");
        }

        return MNL_CB_OK;
    }

    RegexQueue(int queue) : NfQueueExecutor(queue, &queue_cb) {}

    ~RegexQueue() {
        sctx.clean();
    }

};

#endif // REGEX_FILTER_CLASS_CPP

143
backend/binsrc/regex/stream_ctx.cpp
Normal file
@@ -0,0 +1,143 @@

#ifndef STREAM_CTX_CPP
#define STREAM_CTX_CPP

#include <iostream>
#include <hs.h>
#include <tins/tcp_ip/stream_follower.h>
#include <tins/tcp_ip/stream_identifier.h>

using Tins::TCPIP::Stream;
using Tins::TCPIP::StreamFollower;
using namespace std;

typedef Tins::TCPIP::StreamIdentifier stream_id;
typedef map<stream_id, hs_stream_t*> matching_map;

/* Considering switching to unordered_map with this hash of stream_id:

namespace std {
    template<>
    struct hash<stream_id> {
        size_t operator()(const stream_id& sid) const
        {
            return std::hash<std::uint32_t>()(sid.max_address[0] + sid.max_address[1] + sid.max_address[2] + sid.max_address[3] + sid.max_address_port + sid.min_address[0] + sid.min_address[1] + sid.min_address[2] + sid.min_address[3] + sid.min_address_port);
        }
    };
}
*/

#ifdef DEBUG
ostream& operator<<(ostream& os, const Tins::TCPIP::StreamIdentifier::address_type &sid){
    bool first_print = false;
    for (auto ele: sid){
        if (first_print || ele){
            first_print = true;
            os << (int)ele << ".";
        }
    }
    return os;
}

ostream& operator<<(ostream& os, const stream_id &sid){
    os << sid.max_address << ":" << sid.max_address_port << " -> " << sid.min_address << ":" << sid.min_address_port;
    return os;
}
#endif

struct packet_info;

struct tcp_stream_tmp {
    bool matching_has_been_called = false;
    bool result;
    packet_info *pkt_info;
};

struct stream_ctx {
    matching_map in_hs_streams;
    matching_map out_hs_streams;
    hs_scratch_t* in_scratch = nullptr;
    hs_scratch_t* out_scratch = nullptr;
    u_int16_t latest_config_ver = 0;
    StreamFollower follower;
    tcp_stream_tmp tcp_match_util;

    void clean_scratches(){
        if (out_scratch != nullptr){
            hs_free_scratch(out_scratch);
            out_scratch = nullptr;
        }
        if (in_scratch != nullptr){
            hs_free_scratch(in_scratch);
            in_scratch = nullptr;
        }
    }

    void clean_stream_by_id(stream_id sid){
        #ifdef DEBUG
        cerr << "[DEBUG] [NetfilterQueue.clean_stream_by_id] Cleaning stream context of " << sid << endl;
        #endif
        auto stream_search = in_hs_streams.find(sid);
        hs_stream_t* stream_match;
        if (stream_search != in_hs_streams.end()){
            stream_match = stream_search->second;
            if (hs_close_stream(stream_match, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
                throw invalid_argument("Cannot close stream match on hyperscan");
            }
            in_hs_streams.erase(stream_search);
        }

        stream_search = out_hs_streams.find(sid);
        if (stream_search != out_hs_streams.end()){
            stream_match = stream_search->second;
            if (hs_close_stream(stream_match, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
                throw invalid_argument("Cannot close stream match on hyperscan");
            }
            out_hs_streams.erase(stream_search);
        }
    }

    void clean(){
        #ifdef DEBUG
        cerr << "[DEBUG] [NetfilterQueue.clean] Cleaning stream context" << endl;
        #endif

        if (in_scratch){
            for(auto ele: in_hs_streams){
                if (hs_close_stream(ele.second, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
                    cerr << "[error] [NetfilterQueue.clean] Error closing the stream matcher (hs)" << endl;
                    throw invalid_argument("Cannot close stream match on hyperscan");
                }
            }
            in_hs_streams.clear();
        }

        if (out_scratch){
            for(auto ele: out_hs_streams){
                if (hs_close_stream(ele.second, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
                    cerr << "[error] [NetfilterQueue.clean] Error closing the stream matcher (hs)" << endl;
                    throw invalid_argument("Cannot close stream match on hyperscan");
                }
            }
            out_hs_streams.clear();
        }
        clean_scratches();
    }
};

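// Snapshot of one queued packet as handed to the matching logic:
// `packet` holds the full raw bytes taken from the NFQUEUE payload, while
// `payload` is only the application-layer tail (empty for bare ACKs).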
struct packet_info {
    string packet;
    string payload;
    stream_id sid;
    bool is_input;
    bool is_tcp;
    stream_ctx* sctx;
};

#endif // STREAM_CTX_CPP
@@ -4,5 +4,3 @@ chown nobody -R /execute/
exec capsh --caps="cap_net_admin+eip cap_setpcap,cap_setuid,cap_setgid+ep" \
    --keep=1 --user=nobody --addamb=cap_net_admin -- -c "python3 /execute/app.py DOCKER"

0
backend/modules/nfproxy/__init__.py
Normal file

171
backend/modules/nfproxy/firegex.py
Normal file
@@ -0,0 +1,171 @@
from modules.nfregex.nftables import FiregexTables
from utils import run_func
from modules.nfregex.models import Service, Regex
import re
import os
import asyncio
import traceback
from utils import DEBUG
from fastapi import HTTPException

nft = FiregexTables()

class RegexFilter:
    def __init__(
        self, regex,
        is_case_sensitive=True,
        input_mode=False,
        output_mode=False,
        blocked_packets=0,
        id=None,
        update_func = None
    ):
        self.regex = regex
        self.is_case_sensitive = is_case_sensitive
        if input_mode == output_mode:
            input_mode = output_mode = True # (False, False) == (True, True)
        self.input_mode = input_mode
        self.output_mode = output_mode
        self.blocked = blocked_packets
        self.id = id
        self.update_func = update_func
        self.compiled_regex = self.compile()

    @classmethod
    def from_regex(cls, regex:Regex, update_func = None):
        return cls(
            id=regex.id, regex=regex.regex, is_case_sensitive=regex.is_case_sensitive,
            blocked_packets=regex.blocked_packets,
            input_mode = regex.mode in ["C","B"], output_mode=regex.mode in ["S","B"],
            update_func = update_func
        )
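
    # Mode characters stored in the DB map to directions: "C" enables matching
    # on incoming (client) traffic, "S" on outgoing (server) traffic, and "B"
    # both; the client/server reading is an inference from the flags above.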

    def compile(self):
        if isinstance(self.regex, str):
            self.regex = self.regex.encode()
        if not isinstance(self.regex, bytes):
            raise Exception("Invalid regex parameter")
        re.compile(self.regex) # raises re.error if it's invalid!
        case_sensitive = "1" if self.is_case_sensitive else "0"
        if self.input_mode:
            yield case_sensitive + "C" + self.regex.hex()
        if self.output_mode:
            yield case_sensitive + "S" + self.regex.hex()
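
    # compile() is a generator: each yielded code is "<case><direction><hex-regex>".
    # A minimal sketch of the expected output (values illustrative):
    #   list(RegexFilter(b"flag{", input_mode=True).compile())  ->  ["1C666c61677b"]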

    async def update(self):
        if self.update_func:
            await run_func(self.update_func, self)

class FiregexInterceptor:

    def __init__(self):
        self.srv:Service
        self.filter_map_lock:asyncio.Lock
        self.filter_map: dict[str, RegexFilter]
        self.regex_filters: set[RegexFilter]
        self.update_config_lock:asyncio.Lock
        self.process:asyncio.subprocess.Process
        self.update_task: asyncio.Task
        self.ack_arrived = False
        self.ack_status = None
        self.ack_fail_what = ""
        self.ack_lock = asyncio.Lock()

    @classmethod
    async def start(cls, srv: Service):
        self = cls()
        self.srv = srv
        self.filter_map_lock = asyncio.Lock()
        self.update_config_lock = asyncio.Lock()
        queue_range = await self._start_binary()
        self.update_task = asyncio.create_task(self.update_blocked())
        nft.add(self.srv, queue_range)
        if not self.ack_lock.locked():
            await self.ack_lock.acquire()
        return self

    async def _start_binary(self):
        proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cppqueue")
        self.process = await asyncio.create_subprocess_exec(
            proxy_binary_path,
            stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
            env={"MATCH_MODE": "stream" if self.srv.proto == "tcp" else "block", "NTHREADS": os.getenv("NTHREADS","1")},
        )
        line_fut = self.process.stdout.readuntil()
        try:
            line_fut = await asyncio.wait_for(line_fut, timeout=3)
        except asyncio.TimeoutError:
            self.process.kill()
            raise Exception("Invalid binary output")
        line = line_fut.decode()
        if line.startswith("QUEUES "):
            params = line.split()
            return (int(params[1]), int(params[2]))
        else:
            self.process.kill()
            raise Exception("Invalid binary output")
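
    # Handshake sketch: on startup the cppqueue binary prints the NFQUEUE id
    # range it bound as its first stdout line, e.g. "QUEUES 1000 1001" (numbers
    # illustrative); the parsed tuple is then passed to nft.add() above.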

    async def update_blocked(self):
        try:
            while True:
                line = (await self.process.stdout.readuntil()).decode()
                if DEBUG:
                    print(line)
                if line.startswith("BLOCKED "):
                    regex_id = line.split()[1]
                    async with self.filter_map_lock:
                        if regex_id in self.filter_map:
                            self.filter_map[regex_id].blocked+=1
                            await self.filter_map[regex_id].update()
                if line.startswith("ACK "):
                    self.ack_arrived = True
                    self.ack_status = line.split()[1].upper() == "OK"
                    if not self.ack_status:
                        self.ack_fail_what = " ".join(line.split()[2:])
                    self.ack_lock.release()
        except asyncio.CancelledError:
            pass
        except asyncio.IncompleteReadError:
            pass
        except Exception:
            traceback.print_exc()
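
    # stdout line protocol consumed above (inferred from this handler):
    #   "BLOCKED <filter-code>"     -> increment that filter's blocked counter
    #   "ACK OK" / "ACK FAIL <why>" -> answer to a config push, wakes _update_config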

    async def stop(self):
        self.update_task.cancel()
        if self.process and self.process.returncode is None:
            self.process.kill()

    async def _update_config(self, filters_codes):
        async with self.update_config_lock:
            self.process.stdin.write((" ".join(filters_codes)+"\n").encode())
            await self.process.stdin.drain()
            try:
                async with asyncio.timeout(3):
                    await self.ack_lock.acquire()
            except TimeoutError:
                pass
            if not self.ack_arrived or not self.ack_status:
                raise HTTPException(status_code=500, detail=f"NFQ error: {self.ack_fail_what}")

    async def reload(self, filters:list[RegexFilter]):
        async with self.filter_map_lock:
            self.filter_map = self.compile_filters(filters)
            filters_codes = self.get_filter_codes()
            await self._update_config(filters_codes)

    def get_filter_codes(self):
        filters_codes = list(self.filter_map.keys())
        filters_codes.sort(key=lambda a: self.filter_map[a].blocked, reverse=True)
        return filters_codes
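
    # Most-blocked filters are sent first, assuming the binary evaluates the
    # codes in the order given, so historically "hot" regexes are tried early.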

    def compile_filters(self, filters:list[RegexFilter]):
        res = {}
        for filter_obj in filters:
            try:
                raw_filters = filter_obj.compile()
                for filter in raw_filters:
                    res[filter] = filter_obj
            except Exception:
                pass
        return res

119
backend/modules/nfproxy/firewall.py
Normal file
@@ -0,0 +1,119 @@
import asyncio
from modules.nfregex.firegex import FiregexInterceptor, RegexFilter
from modules.nfregex.nftables import FiregexTables, FiregexFilter
from modules.nfregex.models import Regex, Service
from utils.sqlite import SQLite

class STATUS:
    STOP = "stop"
    ACTIVE = "active"

nft = FiregexTables()


class ServiceManager:
    def __init__(self, srv: Service, db):
        self.srv = srv
        self.db = db
        self.status = STATUS.STOP
        self.filters: dict[int, FiregexFilter] = {}
        self.lock = asyncio.Lock()
        self.interceptor = None

    async def _update_filters_from_db(self):
        regexes = [
            Regex.from_dict(ele) for ele in
            self.db.query("SELECT * FROM regexes WHERE service_id = ? AND active=1;", self.srv.id)
        ]
        # Diff the filter sets
        old_filters = set(self.filters.keys())
        new_filters = set([f.id for f in regexes])
        # Remove old filters
        for f in old_filters:
            if f not in new_filters:
                del self.filters[f]
        # Add new filters
        for f in new_filters:
            if f not in old_filters:
                filter = [ele for ele in regexes if ele.id == f][0]
                self.filters[f] = RegexFilter.from_regex(filter, self._stats_updater)
        if self.interceptor:
            await self.interceptor.reload(self.filters.values())

    def __update_status_db(self, status):
        self.db.query("UPDATE services SET status = ? WHERE service_id = ?;", status, self.srv.id)

    async def next(self,to):
        async with self.lock:
            # ACTIVE -> STOP
            if (self.status, to) == (STATUS.ACTIVE, STATUS.STOP):
                await self.stop()
                self._set_status(to)
            # STOP -> ACTIVE
            elif (self.status, to) == (STATUS.STOP, STATUS.ACTIVE):
                await self.restart()
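
    # The service lifecycle is a two-state machine (STOP <-> ACTIVE); next()
    # runs transitions under the lock so concurrent API calls cannot interleave
    # a stop with a restart.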

    def _stats_updater(self,filter:RegexFilter):
        self.db.query("UPDATE regexes SET blocked_packets = ? WHERE regex_id = ?;", filter.blocked, filter.id)

    def _set_status(self,status):
        self.status = status
        self.__update_status_db(status)

    async def start(self):
        if not self.interceptor:
            nft.delete(self.srv)
            self.interceptor = await FiregexInterceptor.start(self.srv)
            await self._update_filters_from_db()
            self._set_status(STATUS.ACTIVE)

    async def stop(self):
        nft.delete(self.srv)
        if self.interceptor:
            await self.interceptor.stop()
            self.interceptor = None

    async def restart(self):
        await self.stop()
        await self.start()

    async def update_filters(self):
        async with self.lock:
            await self._update_filters_from_db()

class FirewallManager:
    def __init__(self, db:SQLite):
        self.db = db
        self.service_table: dict[str, ServiceManager] = {}
        self.lock = asyncio.Lock()

    async def close(self):
        for key in list(self.service_table.keys()):
            await self.remove(key)

    async def remove(self,srv_id):
        async with self.lock:
            if srv_id in self.service_table:
                await self.service_table[srv_id].next(STATUS.STOP)
                del self.service_table[srv_id]

    async def init(self):
        nft.init()
        await self.reload()

    async def reload(self):
        async with self.lock:
            for srv in self.db.query('SELECT * FROM services;'):
                srv = Service.from_dict(srv)
                if srv.id in self.service_table:
                    continue
                self.service_table[srv.id] = ServiceManager(srv, self.db)
                await self.service_table[srv.id].next(srv.status)

    def get(self,srv_id) -> ServiceManager:
        if srv_id in self.service_table:
            return self.service_table[srv_id]
        else:
            raise ServiceNotFoundException()

class ServiceNotFoundException(Exception):
    pass

30
backend/modules/nfproxy/models.py
Normal file
@@ -0,0 +1,30 @@
import base64

class Service:
    def __init__(self, service_id: str, status: str, port: int, name: str, proto: str, ip_int: str, **other):
        self.id = service_id
        self.status = status
        self.port = port
        self.name = name
        self.proto = proto
        self.ip_int = ip_int

    @classmethod
    def from_dict(cls, var: dict):
        return cls(**var)


class Regex:
    def __init__(self, regex_id: int, regex: bytes, mode: str, service_id: str, blocked_packets: int, is_case_sensitive: bool, active: bool, **other):
        self.regex = regex
        self.mode = mode
        self.service_id = service_id
        self.blocked_packets = blocked_packets
        self.id = regex_id
        self.is_case_sensitive = is_case_sensitive
        self.active = active

    @classmethod
    def from_dict(cls, var: dict):
        var['regex'] = base64.b64decode(var['regex'])
        return cls(**var)
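
    # Regexes are stored base64-encoded; a hypothetical row such as
    #   {"regex_id": 1, "regex": "ZmxhZ3s=", "mode": "C", "service_id": "abc",
    #    "blocked_packets": 0, "is_case_sensitive": True, "active": True}
    # decodes to Regex(regex=b"flag{", ...).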

105
backend/modules/nfproxy/nftables.py
Normal file
@@ -0,0 +1,105 @@
from modules.nfregex.models import Service
from utils import ip_parse, ip_family, NFTableManager, nftables_int_to_json

class FiregexFilter:
    def __init__(self, proto:str, port:int, ip_int:str, target:str, id:int):
        self.id = id
        self.target = target
        self.proto = proto
        self.port = int(port)
        self.ip_int = str(ip_int)

    def __eq__(self, o: object) -> bool:
        if isinstance(o, FiregexFilter) or isinstance(o, Service):
            return self.port == o.port and self.proto == o.proto and ip_parse(self.ip_int) == ip_parse(o.ip_int)
        return False

class FiregexTables(NFTableManager):
    input_chain = "nfproxy_input"
    output_chain = "nfproxy_output"

    def __init__(self):
        super().__init__([
            {"add":{"chain":{
                "family":"inet",
                "table":self.table_name,
                "name":self.input_chain,
                "type":"filter",
                "hook":"prerouting",
                "prio":-150,
                "policy":"accept"
            }}},
            {"add":{"chain":{
                "family":"inet",
                "table":self.table_name,
                "name":self.output_chain,
                "type":"filter",
                "hook":"postrouting",
                "prio":-150,
                "policy":"accept"
            }}}
        ],[
            {"flush":{"chain":{"table":self.table_name,"family":"inet", "name":self.input_chain}}},
            {"delete":{"chain":{"table":self.table_name,"family":"inet", "name":self.input_chain}}},
            {"flush":{"chain":{"table":self.table_name,"family":"inet", "name":self.output_chain}}},
            {"delete":{"chain":{"table":self.table_name,"family":"inet", "name":self.output_chain}}},
        ])

    def add(self, srv:Service, queue_range):
        for ele in self.get():
            if ele == srv:
                return

        init, end = queue_range
        if init > end:
            init, end = end, init
        self.cmd(
            { "insert":{ "rule": {
                "family": "inet",
                "table": self.table_name,
                "chain": self.output_chain,
                "expr": [
                    {'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'saddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}},
                    {'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "sport"}}, "op": "==", "right": int(srv.port)}},
                    {"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}}
                ]
            }}},
            {"insert":{"rule":{
                "family": "inet",
                "table": self.table_name,
                "chain": self.input_chain,
                "expr": [
                    {'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'daddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}},
                    {'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "dport"}}, "op": "==", "right": int(srv.port)}},
                    {"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}}
                ]
            }}}
        )
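
    # Roughly the equivalent nft CLI rule for the input side (illustrative
    # values; exact syntax varies with the nftables version):
    #   nft insert rule inet <table> nfproxy_input \
    #       ip daddr 10.0.0.1 tcp dport 5000 queue flags bypass to 1000-1001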

    def get(self) -> list[FiregexFilter]:
        res = []
        for filter in self.list_rules(tables=[self.table_name], chains=[self.input_chain,self.output_chain]):
            ip_int = None
            if isinstance(filter["expr"][0]["match"]["right"],str):
                ip_int = str(ip_parse(filter["expr"][0]["match"]["right"]))
            else:
                ip_int = f'{filter["expr"][0]["match"]["right"]["prefix"]["addr"]}/{filter["expr"][0]["match"]["right"]["prefix"]["len"]}'
            res.append(FiregexFilter(
                target=filter["chain"],
                id=int(filter["handle"]),
                proto=filter["expr"][1]["match"]["left"]["payload"]["protocol"],
                port=filter["expr"][1]["match"]["right"],
                ip_int=ip_int
            ))
        return res

    def delete(self, srv:Service):
        for filter in self.get():
            if filter == srv:
                self.cmd({ "delete":{ "rule": {
                    "family": "inet",
                    "table": self.table_name,
                    "chain": filter.target,
                    "handle": filter.id
                }}})
@@ -19,7 +19,7 @@ ON_DOCKER = "DOCKER" in sys.argv
DEBUG = "DEBUG" in sys.argv
FIREGEX_PORT = int(os.getenv("PORT","4444"))
JWT_ALGORITHM: str = "HS256"
-API_VERSION = "3.0.0"
+API_VERSION = "{{VERSION_PLACEHOLDER}}" if "{" not in "{{VERSION_PLACEHOLDER}}" else "0.0.0"

PortType = Annotated[int, Path(gt=0, lt=65536)]

1
proxy-client/MANIFEST.in
Normal file
@@ -0,0 +1 @@
include requirements.txt

3
proxy-client/README.md
Normal file
@@ -0,0 +1,3 @@
# Firegex Python Library and CLI

It's a work in progress!

7
proxy-client/fgex
Executable file
@@ -0,0 +1,7 @@
#!/usr/bin/env python3

# TODO implement cli start function
from firegex.cli import run

if __name__ == "__main__":
    run()

5
proxy-client/fgex-pip/README.md
Normal file
@@ -0,0 +1,5 @@
# Firegex python library

Alias of the 'firegex' library

It's a work in progress!

1
proxy-client/fgex-pip/fgex/__init__.py
Normal file
@@ -0,0 +1 @@
from firegex import *

6
proxy-client/fgex-pip/fgex/__main__.py
Normal file
@@ -0,0 +1,6 @@
#!/usr/bin/env python3

from firegex.cli import run

if __name__ == "__main__":
    run()

25
proxy-client/fgex-pip/setup.py
Normal file
@@ -0,0 +1,25 @@
import setuptools

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="fgex",
    version="0.0.0",
    author="Pwnzer0tt1",
    author_email="pwnzer0tt1@poliba.it",
    py_modules=["fgex"],
    install_requires=["firegex"],  # the alias package pulls in the real 'firegex' library
    include_package_data=True,
    description="Firegex client",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pwnzer0tt1/firegex",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.10',
)

7
proxy-client/firegex/__init__.py
Normal file
@@ -0,0 +1,7 @@

__version__ = "{{VERSION_PLACEHOLDER}}" if "{" not in "{{VERSION_PLACEHOLDER}}" else "0.0.0"

# Exported functions
__all__ = [

]

7
proxy-client/firegex/__main__.py
Normal file
@@ -0,0 +1,7 @@
#!/usr/bin/env python3

# TODO implement cli start function
from firegex.cli import run

if __name__ == "__main__":
    run()

14
proxy-client/requirements.txt
Normal file
@@ -0,0 +1,14 @@
typer==0.12.3
requests>=2.32.3
python-dateutil==2.9.0.post0
pydantic >= 2
typing-extensions >= 4.7.1
textual==0.89.1
toml==0.10.2
psutil==6.0.0
dirhash==0.5.0
requests-toolbelt==1.0.0
python-socketio[client]==5.11.4
orjson

# TODO choose dependencies

31
proxy-client/setup.py
Normal file
@@ -0,0 +1,31 @@
import setuptools

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

with open('requirements.txt', 'r', encoding='utf-8') as f:
    required = [ele.strip() for ele in f.read().splitlines() if not ele.strip().startswith("#") and ele.strip() != ""]

VERSION = "{{VERSION_PLACEHOLDER}}"

setuptools.setup(
    name="firegex",
    version= VERSION if "{" not in VERSION else "0.0.0", # uv pip install -U . --no-cache-dir for testing
    author="Pwnzer0tt1",
    author_email="pwnzer0tt1@poliba.it",
    scripts=["fgex"],
    py_modules=["fgex"],
    install_requires=required,
    include_package_data=True,
    description="Firegex client",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pwnzer0tt1/firegex",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.10',
)

40
start.py
@@ -1,6 +1,12 @@
#!/usr/bin/env python3
from __future__ import annotations
-import argparse, sys, platform, os, multiprocessing, subprocess, getpass
+import argparse
+import sys
+import platform
+import os
+import multiprocessing
+import subprocess
+import getpass

pref = "\033["
reset = f"{pref}0m"
@@ -36,7 +42,7 @@ def dict_to_yaml(data, indent_spaces:int=4, base_indent:int=0, additional_spaces
    spaces = ' '*((indent_spaces*base_indent)+additional_spaces)
    if isinstance(data, dict):
        for key, value in data.items():
-           if not add_text_on_dict is None:
+           if add_text_on_dict is not None:
                spaces_len = len(spaces)-len(add_text_on_dict)
                spaces = (' '*max(spaces_len, 0))+add_text_on_dict
                add_text_on_dict = None
@@ -76,7 +82,7 @@ def composecmd(cmd, composefile=None):
        puts("Docker compose not found! please install docker compose!", color=colors.red)

def check_already_running():
-    return "firegex" in cmd_check(f'docker ps --filter "name=^firegex$"', get_output=True)
+    return "firegex" in cmd_check('docker ps --filter "name=^firegex$"', get_output=True)

def gen_args(args_to_parse: list[str]|None = None):
@@ -106,13 +112,13 @@ def gen_args(args_to_parse: list[str]|None = None):
    parser_restart.add_argument('--logs', required=False, action="store_true", help='Show firegex logs', default=False)
    args = parser.parse_args(args=args_to_parse)

-   if not "clear" in args:
+   if "clear" not in args:
        args.clear = False

-   if not "threads" in args or args.threads < 1:
+   if "threads" not in args or args.threads < 1:
        args.threads = multiprocessing.cpu_count()

-   if not "port" in args or args.port < 1:
+   if "port" not in args or args.port < 1:
        args.port = 4444

    if args.command is None:
@@ -126,7 +132,7 @@ def gen_args(args_to_parse: list[str]|None = None):
args = gen_args()

def is_linux():
-    return "linux" in sys.platform and not 'microsoft-standard' in platform.uname().release
+    return "linux" in sys.platform and 'microsoft-standard' not in platform.uname().release

def write_compose(skip_password = True):
    psw_set = get_password() if not skip_password else None
@@ -229,10 +235,13 @@ def get_password():

def volume_exists():
-    return "firegex_firegex_data" in cmd_check(f'docker volume ls --filter "name=^firegex_firegex_data$"', get_output=True)
+    return "firegex_firegex_data" in cmd_check('docker volume ls --filter "name=^firegex_firegex_data$"', get_output=True)

def nfqueue_exists():
-    import socket, fcntl, os, time
+    import socket
+    import fcntl
+    import os
+    import time

    NETLINK_NETFILTER = 12
    SOL_NETLINK = 270
@@ -241,7 +250,7 @@ def nfqueue_exists():
        nfsock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, NETLINK_NETFILTER)
        fcntl.fcntl(nfsock, fcntl.F_SETFL, os.O_RDONLY|os.O_NONBLOCK)
        nfsock.setsockopt(SOL_NETLINK, NETLINK_EXT_ACK, 1)
-    except Exception as e:
+    except Exception:
        return False

    for rev in [3,2,1,0]:
@@ -252,10 +261,13 @@ def nfqueue_exists():
        nfsock.send(payload)
        data = nfsock.recv(1024)
        is_error = data[4] == 2
-       if not is_error: return True # The module exists and we have permission to use it
+       if not is_error:
+           return True # The module exists and we have permission to use it
        error_code = int.from_bytes(data[16:16+4], signed=True, byteorder='little')
-       if error_code == -1: return True # EPERM (the user is not root, but the module exists)
-       if error_code == -2: pass # ENOENT (the module does not exist)
+       if error_code == -1:
+           return True # EPERM (the user is not root, but the module exists)
+       if error_code == -2:
+           pass # ENOENT (the module does not exist)
        else:
            puts("Error while trying to check if the nfqueue module is loaded, this check will be skipped!", color=colors.yellow)
        return True
@@ -294,7 +306,7 @@ def main():
    if check_already_running():
        puts("Firegex is already running! use --help to see options useful to manage firegex execution", color=colors.yellow)
    else:
-       puts(f"Firegex", color=colors.yellow, end="")
+       puts("Firegex", color=colors.yellow, end="")
        puts(" will start on port ", end="")
        puts(f"{args.port}", color=colors.cyan)
        write_compose(skip_password=False)
|
|||||||
Reference in New Issue
Block a user