diff --git a/.gitignore b/.gitignore index cf7f774..88bc9d8 100644 --- a/.gitignore +++ b/.gitignore @@ -11,10 +11,11 @@ # testing /frontend/coverage -/proxy-client/firegex.egg-info -/proxy-client/dist -/proxy-client/fgex-pip/fgex.egg-info -/proxy-client/fgex-pip/dist +/fgex-lib/firegex.egg-info +/fgex-lib/dist +/fgex-lib/build +/fgex-lib/fgex-pip/fgex.egg-info +/fgex-lib/fgex-pip/dist /backend/db/ /backend/db/** /frontend/build/ diff --git a/Dockerfile b/Dockerfile index d8270d2..451aa22 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,22 +12,23 @@ RUN bun i COPY ./frontend/ . RUN bun run build - #Building main conteiner FROM --platform=$TARGETARCH registry.fedoraproject.org/fedora:latest -RUN dnf -y update && dnf install -y python3.13-devel python3-pip @development-tools gcc-c++ \ +RUN dnf -y update && dnf install -y python3.13-devel @development-tools gcc-c++ \ libnetfilter_queue-devel libnfnetlink-devel libmnl-devel libcap-ng-utils nftables \ - vectorscan-devel libtins-devel python3-nftables libpcap-devel boost-devel + vectorscan-devel libtins-devel python3-nftables libpcap-devel boost-devel uv RUN mkdir -p /execute/modules WORKDIR /execute ADD ./backend/requirements.txt /execute/requirements.txt -RUN pip3 install --no-cache-dir --break-system-packages -r /execute/requirements.txt --no-warn-script-location +RUN uv pip install --no-cache --system -r /execute/requirements.txt +COPY ./fgex-lib /execute/fgex-lib +RUN uv pip install --no-cache --system ./fgex-lib COPY ./backend/binsrc /execute/binsrc RUN g++ binsrc/nfregex.cpp -o modules/cppregex -std=c++23 -O3 -lnetfilter_queue -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libhs libmnl) -#RUN g++ binsrc/nfproxy.cpp -o modules/cpproxy -std=c++23 -O3 -lnetfilter_queue -lpython3.13 -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libmnl python3) +RUN g++ binsrc/nfproxy.cpp -o modules/cpproxy -std=c++23 -O3 -lnetfilter_queue -lpython3.13 -pthread -lnfnetlink $(pkg-config --cflags --libs libtins libmnl python3) COPY ./backend/ /execute/ COPY --from=frontend /app/dist/ ./frontend/ diff --git a/backend/app.py b/backend/app.py index b6646f2..1869b97 100644 --- a/backend/app.py +++ b/backend/app.py @@ -9,12 +9,13 @@ from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm from jose import jwt from passlib.context import CryptContext from utils.sqlite import SQLite -from utils import API_VERSION, FIREGEX_PORT, JWT_ALGORITHM, get_interfaces, socketio_emit, DEBUG, SysctlManager +from utils import API_VERSION, FIREGEX_PORT, JWT_ALGORITHM, get_interfaces, socketio_emit, DEBUG, SysctlManager, NORELOAD from utils.loader import frontend_deploy, load_routers from utils.models import ChangePasswordModel, IpInterface, PasswordChangeForm, PasswordForm, ResetRequest, StatusModel, StatusMessageModel from contextlib import asynccontextmanager from fastapi.middleware.cors import CORSMiddleware import socketio +from socketio.exceptions import ConnectionRefusedError # DB init db = SQLite('db/firegex.db') @@ -52,7 +53,6 @@ if DEBUG: allow_headers=["*"], ) - utils.socketio = socketio.AsyncServer( async_mode="asgi", cors_allowed_origins=[], @@ -69,9 +69,6 @@ def set_psw(psw: str): hash_psw = crypto.hash(psw) db.put("password",hash_psw) -@utils.socketio.on("update") -async def updater(): pass - def create_access_token(data: dict): to_encode = data.copy() encoded_jwt = jwt.encode(to_encode, JWT_SECRET(), algorithm=JWT_ALGORITHM) @@ -90,6 +87,28 @@ async def check_login(token: str = Depends(oauth2_scheme)): return False return 
logged_in +@utils.socketio.on("connect") +async def sio_connect(sid, environ, auth): + if not auth or not await check_login(auth.get("token")): + raise ConnectionRefusedError("Unauthorized") + utils.sid_list.add(sid) + +@utils.socketio.on("disconnect") +async def sio_disconnect(sid): + try: + utils.sid_list.remove(sid) + except KeyError: + pass + +async def disconnect_all(): + while True: + if len(utils.sid_list) == 0: + break + await utils.socketio.disconnect(utils.sid_list.pop()) + +@utils.socketio.on("update") +async def updater(): pass + async def is_loggined(auth: bool = Depends(check_login)): if not auth: raise HTTPException( @@ -122,6 +141,7 @@ async def login_api(form: OAuth2PasswordRequestForm = Depends()): return {"access_token": create_access_token({"logged_in": True}), "token_type": "bearer"} raise HTTPException(406,"Wrong password!") + @app.post('/api/set-password', response_model=ChangePasswordModel) async def set_password(form: PasswordForm): """Set the password of firegex""" @@ -143,6 +163,7 @@ async def change_password(form: PasswordChangeForm): return {"status":"Cannot insert an empty password!"} if form.expire: db.put("secret", secrets.token_hex(32)) + await disconnect_all() set_psw(form.password) await refresh_frontend() @@ -198,9 +219,9 @@ if __name__ == '__main__': os.chdir(os.path.dirname(os.path.realpath(__file__))) uvicorn.run( "app:app", - host="::" if DEBUG else None, + host="0.0.0.0" if DEBUG else None, port=FIREGEX_PORT, - reload=DEBUG, + reload=DEBUG and not NORELOAD, access_log=True, workers=1, # Firewall module can't be replicated in multiple workers # Later the firewall module will be moved to a separate process diff --git a/backend/binsrc/classes/nfqueue.cpp b/backend/binsrc/classes/nfqueue.cpp index 513db4a..4f3d557 100644 --- a/backend/binsrc/classes/nfqueue.cpp +++ b/backend/binsrc/classes/nfqueue.cpp @@ -7,6 +7,7 @@ #include #include #include +#include using namespace std; @@ -17,6 +18,17 @@ enum class FilterAction{ DROP, ACCEPT, MANGLE, NOACTION }; enum class L4Proto { TCP, UDP, RAW }; typedef Tins::TCPIP::StreamIdentifier stream_id; +struct tcp_ack_seq_ctx{ + int64_t in = 0; + int64_t out = 0; + tcp_ack_seq_ctx(){} + void reset(){ + in = 0; + out = 0; + } +}; + +typedef map tcp_ack_map; template class PktRequest { @@ -25,6 +37,10 @@ class PktRequest { mnl_socket* nl = nullptr; uint16_t res_id; uint32_t packet_id; + size_t _original_size; + size_t _data_original_size; + size_t _header_size; + bool need_tcp_fixing = false; public: bool is_ipv6; Tins::IP* ipv4 = nullptr; @@ -35,21 +51,28 @@ class PktRequest { bool is_input; string packet; - char* data; - size_t data_size; stream_id sid; - T* ctx; + tcp_ack_seq_ctx* ack_seq_offset = nullptr; + + T* ctx = nullptr; private: - inline void fetch_data_size(Tins::PDU* pdu){ + static inline size_t inner_data_size(Tins::PDU* pdu){ + if (pdu == nullptr){ + return 0; + } auto inner = pdu->inner_pdu(); if (inner == nullptr){ - data_size = 0; - }else{ - data_size = inner->size(); + return 0; } + return inner->size(); + } + + inline void __internal_fetch_data_size(Tins::PDU* pdu){ + _data_original_size = inner_data_size(pdu); + _header_size = _original_size - _data_original_size; } L4Proto fill_l4_info(){ @@ -58,14 +81,14 @@ class PktRequest { if (tcp == nullptr){ udp = ipv6->find_pdu(); if (udp == nullptr){ - fetch_data_size(ipv6); + __internal_fetch_data_size(ipv6); return L4Proto::RAW; }else{ - fetch_data_size(udp); + __internal_fetch_data_size(udp); return L4Proto::UDP; } }else{ - fetch_data_size(tcp); + 
__internal_fetch_data_size(tcp); return L4Proto::TCP; } }else{ @@ -73,36 +96,164 @@ class PktRequest { if (tcp == nullptr){ udp = ipv4->find_pdu(); if (udp == nullptr){ - fetch_data_size(ipv4); + __internal_fetch_data_size(ipv4); return L4Proto::RAW; }else{ - fetch_data_size(udp); + __internal_fetch_data_size(udp); return L4Proto::UDP; } }else{ - fetch_data_size(tcp); + __internal_fetch_data_size(tcp); return L4Proto::TCP; } } } + bool need_tcp_fix(){ + return tcp && ack_seq_offset != nullptr && (ack_seq_offset->in != 0 || ack_seq_offset->out != 0); + } + public: PktRequest(const char* payload, size_t plen, T* ctx, mnl_socket* nl, nfgenmsg *nfg, nfqnl_msg_packet_hdr *ph, bool is_input): ctx(ctx), nl(nl), res_id(nfg->res_id), packet_id(ph->packet_id), is_input(is_input), packet(string(payload, plen)), - is_ipv6((payload[0] & 0xf0) == 0x60){ - if (is_ipv6){ - ipv6 = new Tins::IPv6((uint8_t*)packet.c_str(), plen); - sid = stream_id::make_identifier(*ipv6); - }else{ - ipv4 = new Tins::IP((uint8_t*)packet.c_str(), plen); - sid = stream_id::make_identifier(*ipv4); - } - l4_proto = fill_l4_info(); - data = packet.data()+(plen-data_size); + action(FilterAction::NOACTION), + is_ipv6((payload[0] & 0xf0) == 0x60) + { + if (is_ipv6){ + ipv6 = new Tins::IPv6((uint8_t*)packet.c_str(), plen); + sid = stream_id::make_identifier(*ipv6); + _original_size = ipv6->size(); + }else{ + ipv4 = new Tins::IP((uint8_t*)packet.c_str(), plen); + sid = stream_id::make_identifier(*ipv4); + _original_size = ipv4->size(); } + l4_proto = fill_l4_info(); + #ifdef DEBUG + if (tcp){ + cerr << "[DEBUG] NEW_PACKET " << (is_input?"-> IN ":"<- OUT") << " [SIZE: " << data_size() << "] FLAGS: " << (tcp->get_flag(Tins::TCP::FIN)?"FIN ":"") << (tcp->get_flag(Tins::TCP::SYN)?"SYN ":"") << (tcp->get_flag(Tins::TCP::RST)?"RST ":"") << (tcp->get_flag(Tins::TCP::ACK)?"ACK ":"") << (tcp->get_flag(Tins::TCP::PSH)?"PSH ":"") << endl; + cerr << "[SEQ: " << tcp->seq() << "] [ACK: " << tcp->ack_seq() << "]" << " [WIN: " << tcp->window() << "] [FLAGS: " << tcp->flags() << "]\n" << endl; + } + #endif + } + + inline size_t header_size(){ + return _header_size; + } + + char* data(){ + return packet.data()+_header_size; + } + + size_t data_size(){ + return packet.size()-_header_size; + } + + size_t data_original_size(){ + return _data_original_size; + } + + void reserialize(){ + auto data = serialize(); + packet.resize(data.size()); + memcpy(packet.data(), data.data(), data.size()); + } + + void set_data(const char* data, const size_t& data_size){ + auto bef_raw = before_raw_pdu_ptr(); + if (bef_raw){ + delete before_raw_pdu_ptr()->release_inner_pdu(); + if (data_size > 0){ + before_raw_pdu_ptr() /= move(Tins::RawPDU((uint8_t*)data, data_size)); + } + } + } + + Tins::PDU* before_raw_pdu_ptr(){ + if (tcp){ + return tcp; + }else if (udp){ + return udp; + }else if (ipv4){ + return ipv4; + }else if (ipv6){ + return ipv6; + } + return nullptr; + } + + void set_packet(const char* data, size_t data_size){ + // Parsing only the header with libtins + Tins::PDU *data_pdu = nullptr; + size_t total_size; + if (is_ipv6){ + delete ipv6; + ipv6 = new Tins::IPv6((uint8_t*)data, data_size); + if (tcp){ + tcp = ipv6->find_pdu(); + data_pdu = tcp; + }else if (udp){ + udp = ipv6->find_pdu(); + data_pdu = udp; + }else{ + data_pdu = ipv6; + } + total_size = ipv6->size(); + }else{ + delete ipv4; + ipv4 = new Tins::IP((uint8_t*)data, data_size); + if (tcp){ + tcp = ipv4->find_pdu(); + data_pdu = tcp; + }else if(udp){ + udp = ipv4->find_pdu(); + data_pdu = udp; + }else{ + 
data_pdu = ipv4; + } + total_size = ipv4->size(); + } + _header_size = total_size - inner_data_size(data_pdu); + // Libtins can skip data if the length is changed to a bigger one (due to the IP header total length), so we need to set the data section manually + set_data(data+_header_size, data_size-_header_size); + } + + void fix_tcp_ack(){ + need_tcp_fixing = need_tcp_fix(); + if(!need_tcp_fixing){ + return; + } + #ifdef DEBUG + cerr << "[DEBUG] Fixing ack_seq with offsets " << ((int32_t)ack_seq_offset->in) << " " << ((int32_t)ack_seq_offset->out) << endl; + #endif + if (is_input){ + tcp->seq(tcp->seq() + ack_seq_offset->in); + tcp->ack_seq(tcp->ack_seq() - ack_seq_offset->out); + }else{ + tcp->ack_seq(tcp->ack_seq() - ack_seq_offset->in); + tcp->seq(tcp->seq() + ack_seq_offset->out); + } + #ifdef DEBUG + size_t new_size = inner_data_size(tcp); + cerr << "[DEBUG] FIXED PKT " << (is_input?"-> IN ":"<- OUT") << " [SIZE: " << data_size() << "] FLAGS: " << (tcp->get_flag(Tins::TCP::FIN)?"FIN ":"") << (tcp->get_flag(Tins::TCP::SYN)?"SYN ":"") << (tcp->get_flag(Tins::TCP::RST)?"RST ":"") << (tcp->get_flag(Tins::TCP::ACK)?"ACK ":"") << (tcp->get_flag(Tins::TCP::PSH)?"PSH ":"") << endl; + cerr << "[SEQ: " << tcp->seq() << "] [ACK: " << tcp->ack_seq() << "]" << " [WIN: " << tcp->window() << "] [FLAGS: " << tcp->flags() << "]\n" << endl; + #endif + } + + void fix_data_payload(){ + //The stream follower moves the payload data, so we need to reinitialize the RawPDU + auto bef_raw = before_raw_pdu_ptr(); + if (bef_raw){ + delete bef_raw->release_inner_pdu(); + auto new_data_size = packet.size()-_header_size; + if (new_data_size > 0){ + bef_raw /= move(Tins::RawPDU((uint8_t*)packet.data()+_header_size, new_data_size)); + } + } + } void drop(){ if (action == FilterAction::NOACTION){ @@ -113,6 +264,10 @@ class PktRequest { } } + size_t original_size(){ + return _original_size; + } + void accept(){ if (action == FilterAction::NOACTION){ action = FilterAction::ACCEPT; @@ -131,6 +286,40 @@ class PktRequest { } } + void reject(){ + if (tcp){ + //If the packet has data, we have to remove it + set_data(nullptr, 0); + //For the first matched data, or only for data packets, we set the FIN bit + //Only for client packets, because this will trigger the server to close the connection + //Packets will be filtered anyway even if the client doesn't send packets + if (_data_original_size != 0){ + tcp->set_flag(Tins::TCP::FIN,1); + tcp->set_flag(Tins::TCP::ACK,1); + tcp->set_flag(Tins::TCP::SYN,0); + } + //Send the edited packet to the kernel + mangle(); + }else{ + drop(); + } + } + + void mangle_custom_pkt(const char* raw_pkt, size_t raw_pkt_size){ + if (action == FilterAction::NOACTION){ + try{ + set_packet(raw_pkt, raw_pkt_size); + reserialize(); + action = FilterAction::MANGLE; + }catch(...){ + action = FilterAction::DROP; + } + perfrom_action(false); + }else{ + throw invalid_argument("Cannot mangle a packet that has already been accepted or dropped"); + } + } + FilterAction get_action(){ return action; } @@ -140,23 +329,51 @@ class PktRequest { delete ipv6; } + Tins::PDU::serialization_type serialize(){ + if (is_ipv6){ + return ipv6->serialize(); + }else{ + return ipv4->serialize(); + } + }
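+ // Note on the ack/seq bookkeeping: ack_seq_offset accumulates the payload-size delta
+ // introduced by mangled packets (see perfrom_action below). Worked example: an outgoing
+ // 10-byte payload mangled to 4 bytes adds -6 to ack_seq_offset->out; fix_tcp_ack() then
+ // shifts the SEQ of every later outgoing packet by -6 (matching what the server really
+ // received) and shifts the ACK of every later incoming packet back by +6 before the
+ // verdict, so both endpoints keep a consistent view of the stream.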
private: - void perfrom_action(){ + void perfrom_action(bool do_serialize = true){ char buf[MNL_SOCKET_BUFFER_SIZE]; struct nlmsghdr *nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(res_id)); switch (action) { case FilterAction::ACCEPT: + if (need_tcp_fixing){ + if (do_serialize){ + fix_data_payload(); + reserialize(); + } + nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.data(), packet.size()); + } nfq_nlmsg_verdict_put(nlh_verdict, ntohl(packet_id), NF_ACCEPT ); break; case FilterAction::DROP: nfq_nlmsg_verdict_put(nlh_verdict, ntohl(packet_id), NF_DROP ); break; case FilterAction::MANGLE:{ - if (is_ipv6){ - nfq_nlmsg_verdict_put_pkt(nlh_verdict, ipv6->serialize().data(), ipv6->size()); - }else{ - nfq_nlmsg_verdict_put_pkt(nlh_verdict, ipv4->serialize().data(), ipv4->size()); + //If no custom data was provided, use the data already in the packet + if(do_serialize){ + reserialize(); + } + nfq_nlmsg_verdict_put_pkt(nlh_verdict, packet.data(), packet.size()); + #ifdef DEBUG + if (tcp){ + cerr << "[DEBUG] MANGLEDPKT " << (is_input?"-> IN ":"<- OUT") << " [SIZE: " << data_size() << "] FLAGS: " << (tcp->get_flag(Tins::TCP::FIN)?"FIN ":"") << (tcp->get_flag(Tins::TCP::SYN)?"SYN ":"") << (tcp->get_flag(Tins::TCP::RST)?"RST ":"") << (tcp->get_flag(Tins::TCP::ACK)?"ACK ":"") << (tcp->get_flag(Tins::TCP::PSH)?"PSH ":"") << endl; + cerr << "[SEQ: " << tcp->seq() << "] [ACK: " << tcp->ack_seq() << "]" << " [WIN: " << tcp->window() << "] [FLAGS: " << tcp->flags() << "]\n" << endl; + } + #endif + if (tcp && ack_seq_offset && packet.size() != _original_size){ + if (is_input){ + ack_seq_offset->in += data_size() - _data_original_size; + }else{ + ack_seq_offset->out += data_size() - _data_original_size; + } } nfq_nlmsg_verdict_put(nlh_verdict, ntohl(packet_id), NF_ACCEPT ); break; diff --git a/backend/binsrc/nfproxy.cpp b/backend/binsrc/nfproxy.cpp index 520292b..cbb6c38 100644 --- a/backend/binsrc/nfproxy.cpp +++ b/backend/binsrc/nfproxy.cpp @@ -1,67 +1,134 @@ #define PY_SSIZE_T_CLEAN #include -#include "proxytun/settings.cpp" -#include "proxytun/proxytun.cpp" +#include "pyproxy/settings.cpp" +#include "pyproxy/pyproxy.cpp" #include "classes/netfilter.cpp" -#include #include #include #include +#include +#include "utils.cpp" using namespace std; using namespace Firegex::PyProxy; using Firegex::NfQueue::MultiThreadQueue; -ssize_t read_check(int __fd, void *__buf, size_t __nbytes){ - ssize_t bytes = read(__fd, __buf, __nbytes); - if (bytes == 0){ - cerr << "[fatal] [updater] read() returned EOF" << endl; - throw invalid_argument("read() returned EOF"); - } - if (bytes < 0){ - cerr << "[fatal] [updater] read() returned an error" << bytes << endl; - throw invalid_argument("read() returned an error"); - } - return bytes; +/* + +How the python code is handled: + +User code example: +```python + +from firegex.nfproxy import DROP, ACCEPT, pyfilter + +@pyfilter +def invalid_curl_agent(http): + if "curl" in http.headers.get("User-Agent", ""): + return DROP + return ACCEPT + +``` + +The code is then edited, adding a header and an end statement: +```python + +__firegex_pyfilter_enabled = ["invalid_curl_agent", "func3"] # This list is dynamically generated by the firegex backend +__firegex_proto = "http" +import firegex.nfproxy.internals +firegex.nfproxy.internals.compile(globals(), locals()) # This function can save other global variables for the packet handler, and is generally used to check and optimize the code +``` +(The first lines are kept identical so that line numbers stay consistent in exception messages) + +This code will be executed only once, and is needed to build the global and local context to use. +The globals and locals generated here are copied for each connection, and are used to handle the packets. + +Using the C API, the following information will be injected into the global context: + +__firegex_packet_info = { + "data": b"raw data found on L4", + "raw_packet": b"raw packet", + "is_input": True, # If the packet is incoming from a client + "is_ipv6": False, # If the packet is ipv6 + "is_tcp": True, # If the packet is tcp }
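+For illustration, this is roughly the round trip performed for every packet, written as Python pseudo-code (the names follow this comment; "connection_globals" stands for the per-connection copy of the compiled context, and the payload shown is made up):
+```python
+glob = dict(connection_globals)
+glob["__firegex_packet_info"] = {
+    "data": b"GET / HTTP/1.1\r\nUser-Agent: curl/8.0\r\n\r\n",
+    "raw_packet": b"...", "is_input": True, "is_ipv6": False, "is_tcp": True,
+}
+exec("firegex.nfproxy.internals.handle_packet(globals())", glob, glob)
+result = glob["__firegex_pyfilter_result"]  # e.g. {"action": DROP, "matched_by": "invalid_curl_agent"}
+```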
"raw_packet" = b"raw packet", + "is_input" = True, # If the packet is incoming from a client + "is_ipv6" = False, # If the packet is ipv6 + "is_tcp" = True, # If the packet is tcp } +As result the packet handler is responsible to return a dictionary in the global context with the following dictionary: +__firegex_pyfilter_result = { + "action": REJECT, # One of PyFilterResponse + "matched_by": "invalid_curl_agent", # The function that matched the packet (used if action = DROP or REJECT or MANGLE) + "mangled_packet": b"new packet" # The new packet to send to the kernel (used if action = MANGLE) +} + +PyFilterResponse { + ACCEPT = 0, + DROP = 1, + REJECT = 2, + MANGLE = 3, + EXCEPTION = 4, + INVALID = 5 +}; + +Every time a packet is received, the packet handler will execute the following code: +```python +firegex.nfproxy.internals.handle_packet(globals()) +```` + +The TCP stream is sorted by libtins using c++ code, but the c++ code is not responsabile di buffer the stream, but only to sort those +So firegex handle_packet has to implement a way to limit memory usage, this dipends on what methods you choose to use to filter packets +firegex lib will give you all the needed possibilities to do this is many ways + +Final note: is not raccomanded to use variables that starts with __firegex_ in your code, because they may break the nfproxy +*/ + + + void config_updater (){ while (true){ + PyThreadState* state = PyEval_SaveThread(); // Release GIL while doing IO operation uint32_t code_size; - read_check(STDIN_FILENO, &code_size, 4); - vector code(code_size); - read_check(STDIN_FILENO, code.data(), code_size); + memcpy(&code_size, control_socket.recv(4).c_str(), 4); + code_size = be32toh(code_size); + string code = control_socket.recv(code_size); + #ifdef DEBUG + cerr << "[DEBUG] [updater] Received code: " << code << endl; + #endif cerr << "[info] [updater] Updating configuration" << endl; + PyEval_AcquireThread(state); //Restore GIL before executing python code try{ config.reset(new PyCodeConfig(code)); cerr << "[info] [updater] Config update done" << endl; - osyncstream(cout) << "ACK OK" << endl; + control_socket << "ACK OK" << endl; }catch(const std::exception& e){ cerr << "[error] [updater] Failed to build new configuration!" 
<< endl; - osyncstream(cout) << "ACK FAIL " << e.what() << endl; + control_socket << "ACK FAIL " << e.what() << endl; } } } -int main(int argc, char *argv[]){ + +int main(int argc, char *argv[]) { + // Connect to the python backend using the unix socket + init_control_socket(); + + // Initialize the python interpreter Py_Initialize(); atexit(Py_Finalize); + init_handle_packet_code(); //Compile the static code used to handle packets - if (freopen(nullptr, "rb", stdin) == nullptr){ // We need to read from stdin binary data - cerr << "[fatal] [main] Failed to reopen stdin in binary mode" << endl; - return 1; - } int n_of_threads = 1; char * n_threads_str = getenv("NTHREADS"); if (n_threads_str != nullptr) n_of_threads = ::atoi(n_threads_str); if(n_of_threads <= 0) n_of_threads = 1; config.reset(new PyCodeConfig()); + MultiThreadQueue queue(n_of_threads); - osyncstream(cout) << "QUEUE " << queue.queue_num() << endl; + control_socket << "QUEUE " << queue.queue_num() << endl; + cerr << "[info] [main] Queue: " << queue.queue_num() << " threads assigned: " << n_of_threads << endl; thread qthr([&](){ diff --git a/backend/binsrc/proxytun/proxytun.cpp b/backend/binsrc/proxytun/proxytun.cpp deleted file mode 100644 index 910a86b..0000000 --- a/backend/binsrc/proxytun/proxytun.cpp +++ /dev/null @@ -1,165 +0,0 @@ -#ifndef PROXY_TUNNEL_CLASS_CPP -#define PROXY_TUNNEL_CLASS_CPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../classes/netfilter.cpp" -#include "stream_ctx.cpp" -#include "settings.cpp" - -using Tins::TCPIP::Stream; -using Tins::TCPIP::StreamFollower; -using namespace std; - -namespace Firegex { -namespace PyProxy { - -class PyProxyQueue: public NfQueue::ThreadNfQueue { - public: - stream_ctx sctx; - StreamFollower follower; - - struct { - bool matching_has_been_called = false; - bool already_closed = false; - bool result; - NfQueue::PktRequest* pkt; - } match_ctx; - - void before_loop() override { - follower.new_stream_callback(bind(on_new_stream, placeholders::_1, this)); - follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, this)); - } - - bool filter_action(NfQueue::PktRequest* pkt){ - shared_ptr conf = config; - - auto stream_search = sctx.streams_ctx.find(pkt->sid); - pyfilter_ctx* stream_match; - if (stream_search == sctx.streams_ctx.end()){ - // TODO: New pyfilter_ctx - }else{ - stream_match = stream_search->second; - } - - bool has_matched = false; - //TODO exec filtering action - - if (has_matched){ - // Say to firegex what filter has matched - //osyncstream(cout) << "BLOCKED " << rules_vector[match_res.matched] << endl; - return false; - } - return true; - } - - //If the stream has already been matched, drop all data, and try to close the connection - static void keep_fin_packet(PyProxyQueue* pkt){ - pkt->match_ctx.matching_has_been_called = true; - pkt->match_ctx.already_closed = true; - } - - static void on_data_recv(Stream& stream, PyProxyQueue* pkt, string data) { - pkt->match_ctx.matching_has_been_called = true; - pkt->match_ctx.already_closed = false; - bool result = pkt->filter_action(pkt->match_ctx.pkt); - if (!result){ - pkt->sctx.clean_stream_by_id(pkt->match_ctx.pkt->sid); - stream.client_data_callback(bind(keep_fin_packet, pkt)); - stream.server_data_callback(bind(keep_fin_packet, pkt)); - } - pkt->match_ctx.result = result; - } - - //Input data filtering - static void on_client_data(Stream& stream, PyProxyQueue* pkt) { - 
on_data_recv(stream, pkt, string(stream.client_payload().begin(), stream.client_payload().end())); - } - - //Server data filtering - static void on_server_data(Stream& stream, PyProxyQueue* pkt) { - on_data_recv(stream, pkt, string(stream.server_payload().begin(), stream.server_payload().end())); - } - - // A stream was terminated. The second argument is the reason why it was terminated - static void on_stream_close(Stream& stream, PyProxyQueue* pkt) { - stream_id stream_id = stream_id::make_identifier(stream); - pkt->sctx.clean_stream_by_id(stream_id); - } - - static void on_new_stream(Stream& stream, PyProxyQueue* pkt) { - stream.auto_cleanup_payloads(true); - if (stream.is_partial_stream()) { - //TODO take a decision about this... - stream.enable_recovery_mode(10 * 1024); - } - stream.client_data_callback(bind(on_client_data, placeholders::_1, pkt)); - stream.server_data_callback(bind(on_server_data, placeholders::_1, pkt)); - stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, pkt)); - } - - - void handle_next_packet(NfQueue::PktRequest* pkt) override{ - if (pkt->l4_proto != NfQueue::L4Proto::TCP){ - throw invalid_argument("Only TCP and UDP are supported"); - } - Tins::PDU* application_layer = pkt->tcp->inner_pdu(); - u_int16_t payload_size = 0; - if (application_layer != nullptr){ - payload_size = application_layer->size(); - } - match_ctx.matching_has_been_called = false; - match_ctx.pkt = pkt; - if (pkt->is_ipv6){ - follower.process_packet(*pkt->ipv6); - }else{ - follower.process_packet(*pkt->ipv4); - } - // Do an action only is an ordered packet has been received - if (match_ctx.matching_has_been_called){ - bool empty_payload = payload_size == 0; - //In this 2 cases we have to remove all data about the stream - if (!match_ctx.result || match_ctx.already_closed){ - sctx.clean_stream_by_id(pkt->sid); - //If the packet has data, we have to remove it - if (!empty_payload){ - Tins::PDU* data_layer = pkt->tcp->release_inner_pdu(); - if (data_layer != nullptr){ - delete data_layer; - } - } - //For the first matched data or only for data packets, we set FIN bit - //This only for client packets, because this will trigger server to close the connection - //Packets will be filtered anyway also if client don't send packets - if ((!match_ctx.result || !empty_payload) && pkt->is_input){ - pkt->tcp->set_flag(Tins::TCP::FIN,1); - pkt->tcp->set_flag(Tins::TCP::ACK,1); - pkt->tcp->set_flag(Tins::TCP::SYN,0); - } - //Send the edited packet to the kernel - return pkt->mangle(); - } - } - return pkt->accept(); - } - - ~PyProxyQueue() { - sctx.clean(); - } - -}; - -}} -#endif // PROXY_TUNNEL_CLASS_CPP \ No newline at end of file diff --git a/backend/binsrc/proxytun/settings.cpp b/backend/binsrc/proxytun/settings.cpp deleted file mode 100644 index f4adae4..0000000 --- a/backend/binsrc/proxytun/settings.cpp +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef PROXY_TUNNEL_SETTINGS_CPP -#define PROXY_TUNNEL_SETTINGS_CPP - -#include -#include - -using namespace std; - -class PyCodeConfig{ - public: - const vector code; - public: - PyCodeConfig(vector pycode): code(pycode){} - PyCodeConfig(): code(vector()){} - - ~PyCodeConfig(){} -}; - -shared_ptr config; - -#endif // PROXY_TUNNEL_SETTINGS_CPP - diff --git a/backend/binsrc/proxytun/stream_ctx.cpp b/backend/binsrc/proxytun/stream_ctx.cpp deleted file mode 100644 index 3057ac8..0000000 --- a/backend/binsrc/proxytun/stream_ctx.cpp +++ /dev/null @@ -1,39 +0,0 @@ - -#ifndef STREAM_CTX_CPP -#define STREAM_CTX_CPP - -#include -#include -#include - -using 
namespace std; - -typedef Tins::TCPIP::StreamIdentifier stream_id; - -struct pyfilter_ctx { - void * pyglob; // TODO python glob??? - string pycode; -}; - -typedef map matching_map; - -struct stream_ctx { - matching_map streams_ctx; - - void clean_stream_by_id(stream_id sid){ - auto stream_search = streams_ctx.find(sid); - if (stream_search != streams_ctx.end()){ - auto stream_match = stream_search->second; - //DEALLOC PY GLOB TODO - delete stream_match; - } - } - void clean(){ - for (auto ele: streams_ctx){ - //TODO dealloc ele.second.pyglob - delete ele.second; - } - } -}; - -#endif // STREAM_CTX_CPP \ No newline at end of file diff --git a/backend/binsrc/pyproxy/pyproxy.cpp b/backend/binsrc/pyproxy/pyproxy.cpp new file mode 100644 index 0000000..2062866 --- /dev/null +++ b/backend/binsrc/pyproxy/pyproxy.cpp @@ -0,0 +1,266 @@ +#ifndef PROXY_TUNNEL_CLASS_CPP +#define PROXY_TUNNEL_CLASS_CPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../classes/netfilter.cpp" +#include "../classes/nfqueue.cpp" +#include "stream_ctx.cpp" +#include "settings.cpp" +#include + +using Tins::TCPIP::Stream; +using Tins::TCPIP::StreamFollower; +using namespace std; + +namespace Firegex { +namespace PyProxy { + +class PyProxyQueue: public NfQueue::ThreadNfQueue { + private: + u_int16_t latest_config_ver = 0; + public: + stream_ctx sctx; + StreamFollower follower; + PyThreadState * tstate = nullptr; + + PyInterpreterConfig py_thread_config = { + .use_main_obmalloc = 0, + .allow_fork = 0, + .allow_exec = 0, + .allow_threads = 0, + .allow_daemon_threads = 0, + .check_multi_interp_extensions = 1, + .gil = PyInterpreterConfig_OWN_GIL, + }; + NfQueue::PktRequest* pkt; + NfQueue::tcp_ack_seq_ctx* current_tcp_ack = nullptr; + + PyObject* handle_packet_code = nullptr; + + void before_loop() override { + PyStatus pystatus; + // Create a new interpreter for the thread + tstate = PyThreadState_New(PyInterpreterState_Main()); + PyEval_AcquireThread(tstate); + pystatus = Py_NewInterpreterFromConfig(&tstate, &py_thread_config); + if(tstate == nullptr){ + cerr << "[fatal] [main] Failed to create new interpreter" << endl; + throw invalid_argument("Failed to create new interpreter (null tstate)"); + } + if (PyStatus_Exception(pystatus)) { + cerr << "[fatal] [main] Failed to create new interpreter" << endl; + Py_ExitStatusException(pystatus); + throw invalid_argument("Failed to create new interpreter (pystatus exc)"); + } + + if(!PyGC_IsEnabled()){ + PyGC_Enable(); + } + + handle_packet_code = unmarshal_code(py_handle_packet_code); + // Setting callbacks for the stream follower + follower.new_stream_callback(bind(on_new_stream, placeholders::_1, this)); + follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, this)); + } + + inline void print_blocked_reason(const string& func_name){ + control_socket << "BLOCKED " << func_name << endl; + } + + inline void print_mangle_reason(const string& func_name){ + control_socket << "MANGLED " << func_name << endl; + } + + inline void print_exception_reason(){ + control_socket << "EXCEPTION" << endl; + } + + //If the stream has already been matched, drop all data, and try to close the connection + static void keep_fin_packet(PyProxyQueue* pyq){ + pyq->pkt->reject();// This is needed because the callback has to take the updated pkt pointer! 
+ } + + static void keep_dropped(PyProxyQueue* pyq){ + pyq->pkt->drop();// This is needed because the callback has to take the updated pkt pointer! + } + + void filter_action(NfQueue::PktRequest* pkt, Stream& stream, const string& data){ + auto stream_search = sctx.streams_ctx.find(pkt->sid); + pyfilter_ctx* stream_match; + if (stream_search == sctx.streams_ctx.end()){ + shared_ptr conf = config; + //If config is not set, ignore the stream + PyObject* compiled_code = conf->compiled_code(); + if (compiled_code == nullptr){ + stream.client_data_callback(nullptr); + stream.server_data_callback(nullptr); + stream.ignore_client_data(); + stream.ignore_server_data(); + return pkt->accept(); + }else{ + try{ + stream_match = new pyfilter_ctx(compiled_code, handle_packet_code); + }catch(invalid_argument& e){ + cerr << "[error] [filter_action] Failed to create the filter context" << endl; + print_exception_reason(); + sctx.clean_stream_by_id(pkt->sid); + stream.client_data_callback(nullptr); + stream.server_data_callback(nullptr); + stream.ignore_client_data(); + stream.ignore_server_data(); + return pkt->accept(); + } + sctx.streams_ctx.insert_or_assign(pkt->sid, stream_match); + } + }else{ + stream_match = stream_search->second; + } + + auto result = stream_match->handle_packet(pkt, data); + switch(result.action){ + case PyFilterResponse::ACCEPT: + return pkt->accept(); + case PyFilterResponse::DROP: + print_blocked_reason(*result.filter_match_by); + sctx.clean_stream_by_id(pkt->sid); + stream.client_data_callback(bind(keep_dropped, this)); + stream.server_data_callback(bind(keep_dropped, this)); + return pkt->drop(); + case PyFilterResponse::REJECT: + print_blocked_reason(*result.filter_match_by); + sctx.clean_stream_by_id(pkt->sid); + stream.client_data_callback(bind(keep_fin_packet, this)); + stream.server_data_callback(bind(keep_fin_packet, this)); + return pkt->reject(); + case PyFilterResponse::MANGLE: + pkt->mangle_custom_pkt(result.mangled_packet->c_str(), result.mangled_packet->size()); + if (pkt->get_action() == NfQueue::FilterAction::DROP){ + cerr << "[error] [filter_action] Failed to mangle: the packet sent is not serializable... the packet was dropped" << endl; + print_blocked_reason(*result.filter_match_by); + print_exception_reason(); + }else{ + print_mangle_reason(*result.filter_match_by); + } + return; + case PyFilterResponse::EXCEPTION: + case PyFilterResponse::INVALID: + print_exception_reason(); + sctx.clean_stream_by_id(pkt->sid); + //Free the packet data + stream.ignore_client_data(); + stream.ignore_server_data(); + stream.client_data_callback(nullptr); + stream.server_data_callback(nullptr); + return pkt->accept(); + } + } + + + static void on_data_recv(Stream& stream, PyProxyQueue* pyq, const string& data) { + pyq->pkt->fix_data_payload(); + pyq->filter_action(pyq->pkt, stream, data); //Only here is the reassembled TCP data available + } + + //Input data filtering + static void on_client_data(Stream& stream, PyProxyQueue* pyq) { + auto data = stream.client_payload(); + on_data_recv(stream, pyq, string((char*)data.data(), data.size())); + } + + //Server data filtering + static void on_server_data(Stream& stream, PyProxyQueue* pyq) { + auto data = stream.server_payload(); + on_data_recv(stream, pyq, string((char*)data.data(), data.size())); + } +
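+ // Flow recap: handle_next_packet() (below) feeds each raw packet to the libtins
+ // StreamFollower; once the follower has in-order data it fires on_client_data /
+ // on_server_data, which run filter_action() against the packet that completed that
+ // data. The verdict is therefore decided inside these callbacks, while
+ // handle_next_packet() only applies the fallback accept when no callback acted.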
+ // A stream was terminated. The second argument is the reason why it was terminated + static void on_stream_close(Stream& stream, PyProxyQueue* pyq) { + stream_id stream_id = stream_id::make_identifier(stream); + pyq->sctx.clean_stream_by_id(stream_id); + pyq->sctx.clean_tcp_ack_by_id(stream_id); + } + + static void on_new_stream(Stream& stream, PyProxyQueue* pyq) { + stream.auto_cleanup_payloads(true); + if (stream.is_partial_stream()) { + stream.enable_recovery_mode(10 * 1024); + } + + if (pyq->current_tcp_ack != nullptr){ + pyq->current_tcp_ack->reset(); + }else{ + pyq->current_tcp_ack = new NfQueue::tcp_ack_seq_ctx(); + pyq->sctx.tcp_ack_ctx.insert_or_assign(pyq->pkt->sid, pyq->current_tcp_ack); + pyq->pkt->ack_seq_offset = pyq->current_tcp_ack; // Set ack context + } + + //Should not happen, but this way we can be sure the offsets are reset + auto tcp_ack_search = pyq->sctx.tcp_ack_ctx.find(pyq->pkt->sid); + if (tcp_ack_search != pyq->sctx.tcp_ack_ctx.end()){ + tcp_ack_search->second->reset(); + } + + stream.client_data_callback(bind(on_client_data, placeholders::_1, pyq)); + stream.server_data_callback(bind(on_server_data, placeholders::_1, pyq)); + stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, pyq)); + } + + void handle_next_packet(NfQueue::PktRequest* _pkt) override{ + pkt = _pkt; // Setting packet context + + if (pkt->l4_proto != NfQueue::L4Proto::TCP){ + throw invalid_argument("Only TCP is supported"); + } + + auto tcp_ack_search = sctx.tcp_ack_ctx.find(pkt->sid); + if (tcp_ack_search != sctx.tcp_ack_ctx.end()){ + current_tcp_ack = tcp_ack_search->second; + pkt->ack_seq_offset = current_tcp_ack; + }else{ + current_tcp_ack = nullptr; + //If necessary, it will be created by the libtins new_stream callback + } + + pkt->fix_tcp_ack(); + + if (pkt->is_ipv6){ + follower.process_packet(*pkt->ipv6); + }else{ + follower.process_packet(*pkt->ipv4); + } + + //Fallback to the default action + if (pkt->get_action() == NfQueue::FilterAction::NOACTION){ + return pkt->accept(); + } + } + + ~PyProxyQueue() { + // Close the interpreter first + + Py_EndInterpreter(tstate); + PyEval_ReleaseThread(tstate); + PyThreadState_Clear(tstate); + PyThreadState_Delete(tstate); + Py_DECREF(handle_packet_code); + + sctx.clean(); + } + +}; + +}} +#endif // PROXY_TUNNEL_CLASS_CPP \ No newline at end of file diff --git a/backend/binsrc/pyproxy/settings.cpp b/backend/binsrc/pyproxy/settings.cpp new file mode 100644 index 0000000..6648994 --- /dev/null +++ b/backend/binsrc/pyproxy/settings.cpp @@ -0,0 +1,102 @@ +#ifndef PROXY_TUNNEL_SETTINGS_CPP +#define PROXY_TUNNEL_SETTINGS_CPP + +#include +#include +#include +#include +#include +#include "../utils.cpp" + +using namespace std; + +namespace Firegex { +namespace PyProxy { + +class PyCodeConfig; + +shared_ptr config; +UnixClientConnection control_socket; + +PyObject* unmarshal_code(string encoded_code){ + if (encoded_code.empty()) return nullptr; + return PyMarshal_ReadObjectFromString(encoded_code.c_str(), encoded_code.size()); +} + +class PyCodeConfig{ + public: + string encoded_code; + + PyCodeConfig(const string& pycode){ + PyObject* compiled_code = Py_CompileStringExFlags(pycode.c_str(), "", Py_file_input, NULL, 2); + if (compiled_code == nullptr){ + std::cerr << "[fatal] [main] Failed to compile the code" << endl; + throw invalid_argument("Failed to compile the code"); + } + PyObject* glob = PyDict_New(); + PyObject* result = PyEval_EvalCode(compiled_code, glob, glob); + Py_DECREF(glob); + if (PyErr_Occurred()){ + PyErr_Print(); + Py_DECREF(compiled_code); + std::cerr << 
"[fatal] [main] Failed to execute the code" << endl; + throw invalid_argument("Failed to execute the code, maybe an invalid filter code has been provided"); + } + Py_XDECREF(result); + PyObject* code_dump = PyMarshal_WriteObjectToString(compiled_code, 4); + Py_DECREF(compiled_code); + if (code_dump == nullptr){ + if (PyErr_Occurred()) + PyErr_Print(); + std::cerr << "[fatal] [main] Failed to dump the code" << endl; + throw invalid_argument("Failed to dump the code"); + } + if (!PyBytes_Check(code_dump)){ + std::cerr << "[fatal] [main] Failed to dump the code" << endl; + Py_DECREF(code_dump); + throw invalid_argument("Failed to dump the code"); + } + encoded_code = string(PyBytes_AsString(code_dump), PyBytes_Size(code_dump)); + Py_DECREF(code_dump); + } + + PyObject* compiled_code(){ + return unmarshal_code(encoded_code); + } + + PyCodeConfig(){} +}; + +void init_control_socket(){ + char * socket_path = getenv("FIREGEX_NFPROXY_SOCK"); + if (socket_path == nullptr) throw invalid_argument("FIREGEX_NFPROXY_SOCK not set"); + if (strlen(socket_path) >= 108) throw invalid_argument("FIREGEX_NFPROXY_SOCK too long"); + control_socket = UnixClientConnection(socket_path); +} + +string py_handle_packet_code; + +void init_handle_packet_code(){ + PyObject* compiled_code = Py_CompileStringExFlags( + "firegex.nfproxy.internals.handle_packet(globals())\n", "", + Py_file_input, NULL, 2); + PyObject* code_dump = PyMarshal_WriteObjectToString(compiled_code, 4); + Py_DECREF(compiled_code); + if (code_dump == nullptr){ + if (PyErr_Occurred()) + PyErr_Print(); + std::cerr << "[fatal] [main] Failed to dump the code" << endl; + throw invalid_argument("Failed to dump the code"); + } + if (!PyBytes_Check(code_dump)){ + std::cerr << "[fatal] [main] Failed to dump the code" << endl; + Py_DECREF(code_dump); + throw invalid_argument("Failed to dump the code"); + } + py_handle_packet_code = string(PyBytes_AsString(code_dump), PyBytes_Size(code_dump)); + Py_DECREF(code_dump); +} + +}} +#endif // PROXY_TUNNEL_SETTINGS_CPP + diff --git a/backend/binsrc/pyproxy/stream_ctx.cpp b/backend/binsrc/pyproxy/stream_ctx.cpp new file mode 100644 index 0000000..9c249bc --- /dev/null +++ b/backend/binsrc/pyproxy/stream_ctx.cpp @@ -0,0 +1,280 @@ + +#ifndef STREAM_CTX_CPP +#define STREAM_CTX_CPP + +#include +#include +#include +#include +#include "../classes/netfilter.cpp" +#include "../classes/nfqueue.cpp" +#include "settings.cpp" +#include "../utils.cpp" + +using namespace std; + + +namespace Firegex { +namespace PyProxy { + +class PyCodeConfig; +class PyProxyQueue; + +enum PyFilterResponse { + ACCEPT = 0, + DROP = 1, + REJECT = 2, + MANGLE = 3, + EXCEPTION = 4, + INVALID = 5 +}; + +const PyFilterResponse VALID_PYTHON_RESPONSE[4] = { + PyFilterResponse::ACCEPT, + PyFilterResponse::DROP, + PyFilterResponse::REJECT, + PyFilterResponse::MANGLE +}; + +struct py_filter_response { + PyFilterResponse action; + string* filter_match_by = nullptr; + string* mangled_packet = nullptr; + + py_filter_response(PyFilterResponse action, string* filter_match_by = nullptr, string* mangled_packet = nullptr): + action(action), filter_match_by(filter_match_by), mangled_packet(mangled_packet){} + + ~py_filter_response(){ + delete mangled_packet; + delete filter_match_by; + } +}; + +typedef Tins::TCPIP::StreamIdentifier stream_id; + +struct pyfilter_ctx { + + PyObject * glob = nullptr; + PyObject * py_handle_packet = nullptr; + + pyfilter_ctx(PyObject * compiled_code, PyObject * handle_packet_code){ + py_handle_packet = handle_packet_code; + 
Py_INCREF(py_handle_packet); + glob = PyDict_New(); + PyObject* result = PyEval_EvalCode(compiled_code, glob, glob); + Py_XDECREF(compiled_code); + if (PyErr_Occurred()){ + PyErr_Print(); + Py_XDECREF(glob); + std::cerr << "[fatal] [main] Failed to compile the code" << endl; + throw invalid_argument("Failed to execute the code, maybe an invalid filter code has been provided"); + } + Py_XDECREF(result); + } + + ~pyfilter_ctx(){ + Py_DECREF(glob); + Py_DECREF(py_handle_packet); + PyGC_Collect(); + } + + inline void set_item_to_glob(const char* key, PyObject* value){ + set_item_to_dict(glob, key, value); + } + + inline PyObject* get_item_from_glob(const char* key){ + return PyDict_GetItemString(glob, key); + } + + void del_item_from_glob(const char* key){ + if (PyDict_DelItemString(glob, key) != 0){ + if (PyErr_Occurred()) + PyErr_Print(); + throw invalid_argument("Failed to delete item from dict"); + } + } + + inline void set_item_to_dict(PyObject* dict, const char* key, PyObject* value){ + if (PyDict_SetItemString(dict, key, value) != 0){ + if (PyErr_Occurred()) + PyErr_Print(); + throw invalid_argument("Failed to set item to dict"); + } + Py_DECREF(value); + } + + py_filter_response handle_packet( + NfQueue::PktRequest* pkt, + const string& data + ){ + PyObject * packet_info = PyDict_New(); + + pkt->reserialize(); + set_item_to_dict(packet_info, "data", PyBytes_FromStringAndSize(data.c_str(), data.size())); + set_item_to_dict(packet_info, "l4_size", PyLong_FromLong(pkt->data_size())); + set_item_to_dict(packet_info, "raw_packet", PyBytes_FromStringAndSize(pkt->packet.c_str(), pkt->packet.size())); + set_item_to_dict(packet_info, "is_input", PyBool_FromLong(pkt->is_input)); + set_item_to_dict(packet_info, "is_ipv6", PyBool_FromLong(pkt->is_ipv6)); + set_item_to_dict(packet_info, "is_tcp", PyBool_FromLong(pkt->l4_proto == NfQueue::L4Proto::TCP)); + + // Set packet info to the global context + set_item_to_glob("__firegex_packet_info", packet_info); + PyObject * result = PyEval_EvalCode(py_handle_packet, glob, glob); + PyGC_Collect(); + del_item_from_glob("__firegex_packet_info"); + + if (PyErr_Occurred()){ + cerr << "[error] [handle_packet] Failed to execute the code " << result << endl; + PyErr_Print(); + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] Exception raised" << endl; + #endif + return py_filter_response(PyFilterResponse::EXCEPTION); + } + + Py_DECREF(result); + + result = get_item_from_glob("__firegex_pyfilter_result"); + if (result == nullptr){ + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] No result found" << endl; + #endif + return py_filter_response(PyFilterResponse::INVALID); + } + + if (!PyDict_Check(result)){ + if (PyErr_Occurred()){ + PyErr_Print(); + } + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] Result is not a dict" << endl; + #endif + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(PyFilterResponse::INVALID); + } + PyObject* action = PyDict_GetItemString(result, "action"); + if (action == nullptr){ + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] No result action found" << endl; + #endif + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(PyFilterResponse::INVALID); + } + if (!PyLong_Check(action)){ + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] Action is not a long" << endl; + #endif + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(PyFilterResponse::INVALID); + } + PyFilterResponse action_enum = (PyFilterResponse)PyLong_AsLong(action); + + //Check action_enum + bool 
valid = false; + for (auto valid_action: VALID_PYTHON_RESPONSE){ + if (action_enum == valid_action){ + valid = true; + break; + } + } + if (!valid){ + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] Invalid action" << endl; + #endif + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(PyFilterResponse::INVALID); + } + + if (action_enum == PyFilterResponse::ACCEPT){ + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(action_enum); + } + PyObject *func_name_py = PyDict_GetItemString(result, "matched_by"); + if (func_name_py == nullptr){ + del_item_from_glob("__firegex_pyfilter_result"); + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] No result matched_by found" << endl; + #endif + return py_filter_response(PyFilterResponse::INVALID); + } + if (!PyUnicode_Check(func_name_py)){ + del_item_from_glob("__firegex_pyfilter_result"); + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] matched_by is not a string" << endl; + #endif + return py_filter_response(PyFilterResponse::INVALID); + } + string* func_name = new string(PyUnicode_AsUTF8(func_name_py)); + if (action_enum == PyFilterResponse::DROP || action_enum == PyFilterResponse::REJECT){ + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(action_enum, func_name); + } + if (action_enum == PyFilterResponse::MANGLE){ + PyObject* mangled_packet = PyDict_GetItemString(result, "mangled_packet"); + if (mangled_packet == nullptr){ + del_item_from_glob("__firegex_pyfilter_result"); + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] No result mangled_packet found" << endl; + #endif + return py_filter_response(PyFilterResponse::INVALID); + } + if (!PyBytes_Check(mangled_packet)){ + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] mangled_packet is not a bytes" << endl; + #endif + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(PyFilterResponse::INVALID); + } + string* pkt_str = new string(PyBytes_AsString(mangled_packet), PyBytes_Size(mangled_packet)); + del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(PyFilterResponse::MANGLE, func_name, pkt_str); + } + + //Should never reach this point, but just in case of new action not managed... 
+ del_item_from_glob("__firegex_pyfilter_result"); + return py_filter_response(PyFilterResponse::INVALID); + } + +}; + +typedef map matching_map; + + +struct stream_ctx { + + matching_map streams_ctx; + NfQueue::tcp_ack_map tcp_ack_ctx; + + void clean_stream_by_id(stream_id sid){ + auto stream_search = streams_ctx.find(sid); + if (stream_search != streams_ctx.end()){ + auto stream_match = stream_search->second; + delete stream_match; + streams_ctx.erase(stream_search->first); + } + } + + void clean_tcp_ack_by_id(stream_id sid){ + auto tcp_ack_search = tcp_ack_ctx.find(sid); + if (tcp_ack_search != tcp_ack_ctx.end()){ + auto tcp_ack = tcp_ack_search->second; + delete tcp_ack; + tcp_ack_ctx.erase(tcp_ack_search->first); + } + } + + void clean(){ + for (auto ele: streams_ctx){ + delete ele.second; + } + for (auto ele: tcp_ack_ctx){ + delete ele.second; + } + tcp_ack_ctx.clear(); + streams_ctx.clear(); + } +}; + + +}} +#endif // STREAM_CTX_CPP \ No newline at end of file diff --git a/backend/binsrc/regex/regex_rules.cpp b/backend/binsrc/regex/regex_rules.cpp index 71ef786..83fd6dc 100644 --- a/backend/binsrc/regex/regex_rules.cpp +++ b/backend/binsrc/regex/regex_rules.cpp @@ -76,12 +76,11 @@ class RegexRules{ }else{ hs_free_database(db); } - } private: - static inline u_int16_t glob_seq = 0; - u_int16_t version; + static inline uint16_t glob_seq = 0; + uint16_t version; vector> decoded_input_rules; vector> decoded_output_rules; bool is_stream = true; @@ -96,9 +95,7 @@ class RegexRules{ input_ruleset.hs_db = nullptr; } } - - - + void fill_ruleset(vector> & decoded, regex_ruleset & ruleset){ size_t n_of_regex = decoded.size(); if (n_of_regex == 0){ @@ -150,7 +147,6 @@ class RegexRules{ public: RegexRules(vector raw_rules, bool is_stream){ this->is_stream = is_stream; - this->version = ++glob_seq; // 0 version is a invalid version (useful for some logics) for(string ele : raw_rules){ try{ decoded_regex rule = decode_regex(ele); @@ -170,6 +166,7 @@ class RegexRules{ free_dbs(); throw current_exception(); } + this->version = ++glob_seq; // 0 version is the null version } u_int16_t ver(){ diff --git a/backend/binsrc/regex/regexfilter.cpp b/backend/binsrc/regex/regexfilter.cpp index bfb2407..2aef9ff 100644 --- a/backend/binsrc/regex/regexfilter.cpp +++ b/backend/binsrc/regex/regexfilter.cpp @@ -20,6 +20,7 @@ #include "../classes/netfilter.cpp" #include "stream_ctx.cpp" #include "regex_rules.cpp" +#include "../utils.cpp" using namespace std; @@ -30,22 +31,14 @@ namespace Regex { using Tins::TCPIP::Stream; using Tins::TCPIP::StreamFollower; - - class RegexNfQueue : public NfQueue::ThreadNfQueue { public: stream_ctx sctx; u_int16_t latest_config_ver = 0; StreamFollower follower; - struct { - bool matching_has_been_called = false; - bool already_closed = false; - bool result; - NfQueue::PktRequest* pkt; - } match_ctx; - + NfQueue::PktRequest* pkt; - bool filter_action(NfQueue::PktRequest* pkt){ + bool filter_action(NfQueue::PktRequest* pkt, const string& data){ shared_ptr conf = regex_config; auto current_version = conf->ver(); @@ -91,12 +84,12 @@ public: stream_match = stream_search->second; } err = hs_scan_stream( - stream_match,pkt->data, pkt->data_size, + stream_match, data.c_str(), data.size(), 0, scratch_space, match_func, &match_res ); }else{ err = hs_scan( - regex_matcher,pkt->data, pkt->data_size, + regex_matcher, data.c_str(), data.size(), 0, scratch_space, match_func, &match_res ); } @@ -108,7 +101,7 @@ public: throw invalid_argument("Cannot close stream match on hyperscan"); } if (err != 
HS_SUCCESS && err != HS_SCAN_TERMINATED) { - cerr << "[error] [filter_callback] Error while matching the stream (hs)" << endl; + cerr << "[error] [filter_callback] Error while matching the stream (hs) " << err << endl; throw invalid_argument("Error while matching the stream with hyperscan"); } if (match_res.has_matched){ @@ -119,85 +112,30 @@ public: return true; } - void handle_next_packet(NfQueue::PktRequest* pkt) override{ - bool empty_payload = pkt->data_size == 0; - if (pkt->tcp){ - match_ctx.matching_has_been_called = false; - match_ctx.pkt = pkt; - - if (pkt->ipv4){ - follower.process_packet(*pkt->ipv4); - }else{ - follower.process_packet(*pkt->ipv6); - } - - // Do an action only is an ordered packet has been received - if (match_ctx.matching_has_been_called){ - - //In this 2 cases we have to remove all data about the stream - if (!match_ctx.result || match_ctx.already_closed){ - sctx.clean_stream_by_id(pkt->sid); - //If the packet has data, we have to remove it - if (!empty_payload){ - Tins::PDU* data_layer = pkt->tcp->release_inner_pdu(); - if (data_layer != nullptr){ - delete data_layer; - } - } - //For the first matched data or only for data packets, we set FIN bit - //This only for client packets, because this will trigger server to close the connection - //Packets will be filtered anyway also if client don't send packets - if ((!match_ctx.result || !empty_payload) && pkt->is_input){ - pkt->tcp->set_flag(Tins::TCP::FIN,1); - pkt->tcp->set_flag(Tins::TCP::ACK,1); - pkt->tcp->set_flag(Tins::TCP::SYN,0); - } - //Send the edited packet to the kernel - return pkt->mangle(); - } - } - return pkt->accept(); - }else{ - if (!pkt->udp){ - throw invalid_argument("Only TCP and UDP are supported"); - } - if(empty_payload){ - return pkt->accept(); - }else if (filter_action(pkt)){ - return pkt->accept(); - }else{ - return pkt->drop(); - } - } - } //If the stream has already been matched, drop all data, and try to close the connection static void keep_fin_packet(RegexNfQueue* nfq){ - nfq->match_ctx.matching_has_been_called = true; - nfq->match_ctx.already_closed = true; + nfq->pkt->reject(); // This is needed because the callback has to take the updated pkt pointer! 
} - static void on_data_recv(Stream& stream, RegexNfQueue* nfq, string data) { - nfq->match_ctx.matching_has_been_called = true; - nfq->match_ctx.already_closed = false; - nfq->match_ctx.pkt->data = data.data(); - nfq->match_ctx.pkt->data_size = data.size(); - bool result = nfq->filter_action(nfq->match_ctx.pkt); - if (!result){ - nfq->sctx.clean_stream_by_id(nfq->match_ctx.pkt->sid); + static void on_data_recv(Stream& stream, RegexNfQueue* nfq, const string& data) { + if (!nfq->filter_action(nfq->pkt, data)){ + nfq->sctx.clean_stream_by_id(nfq->pkt->sid); stream.client_data_callback(bind(keep_fin_packet, nfq)); stream.server_data_callback(bind(keep_fin_packet, nfq)); + nfq->pkt->reject(); } - nfq->match_ctx.result = result; } //Input data filtering static void on_client_data(Stream& stream, RegexNfQueue* nfq) { - on_data_recv(stream, nfq, string(stream.client_payload().begin(), stream.client_payload().end())); + auto data = stream.client_payload(); + on_data_recv(stream, nfq, string((char*)data.data(), data.size())); } //Server data filtering static void on_server_data(Stream& stream, RegexNfQueue* nfq) { - on_data_recv(stream, nfq, string(stream.server_payload().begin(), stream.server_payload().end())); + auto data = stream.server_payload(); + on_data_recv(stream, nfq, string((char*)data.data(), data.size())); } // A stream was terminated. The second argument is the reason why it was terminated @@ -216,6 +154,32 @@ public: stream.stream_closed_callback(bind(on_stream_close, placeholders::_1, nfq)); } + void handle_next_packet(NfQueue::PktRequest* _pkt) override{ + pkt = _pkt; // Setting packet context + if (pkt->tcp){ + if (pkt->ipv4){ + follower.process_packet(*pkt->ipv4); + }else{ + follower.process_packet(*pkt->ipv6); + } + //Fallback to the default action + if (pkt->get_action() == NfQueue::FilterAction::NOACTION){ + return pkt->accept(); + } + }else{ + if (!pkt->udp){ + throw invalid_argument("Only TCP and UDP are supported"); + } + if(pkt->data_size() == 0){ + return pkt->accept(); + }else if (filter_action(pkt, string(pkt->data(), pkt->data_size()))){ + return pkt->accept(); + }else{ + return pkt->drop(); + } + } + } + void before_loop() override{ follower.new_stream_callback(bind(on_new_stream, placeholders::_1, this)); follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, this)); diff --git a/backend/binsrc/regex/stream_ctx.cpp b/backend/binsrc/regex/stream_ctx.cpp index dc1c3fe..3ee6e3d 100644 --- a/backend/binsrc/regex/stream_ctx.cpp +++ b/backend/binsrc/regex/stream_ctx.cpp @@ -17,7 +17,6 @@ namespace Regex { typedef Tins::TCPIP::StreamIdentifier stream_id; typedef map matching_map; -#ifdef DEBUG ostream& operator<<(ostream& os, const Tins::TCPIP::StreamIdentifier::address_type &sid){ bool first_print = false; for (auto ele: sid){ @@ -33,7 +32,6 @@ ostream& operator<<(ostream& os, const stream_id &sid){ os << sid.max_address << ":" << sid.max_address_port << " -> " << sid.min_address << ":" << sid.min_address_port; return os; } -#endif struct stream_ctx { matching_map in_hs_streams; diff --git a/backend/binsrc/utils.cpp b/backend/binsrc/utils.cpp index a4d889a..59ca77b 100644 --- a/backend/binsrc/utils.cpp +++ b/backend/binsrc/utils.cpp @@ -1,10 +1,17 @@ +#ifndef UTILS_CPP +#define UTILS_CPP + #include #include #include #include - -#ifndef UTILS_CPP -#define UTILS_CPP +#include +#include +#include +#include +#include +#include +#include bool unhexlify(std::string const &hex, std::string &newString) { try{ @@ -22,6 +29,113 @@ bool unhexlify(std::string 
const &hex, std::string &newString) { } } +class UnixClientConnection { +public: + int sockfd = -1; + struct sockaddr_un addr; +private: + // Internal buffer to accumulate the output until flush + std::ostringstream streamBuffer; +public: + + UnixClientConnection(){}; + + UnixClientConnection(const char* path) { + sockfd = socket(AF_UNIX, SOCK_STREAM, 0); + if (sockfd == -1) { + throw std::runtime_error(std::string("socket error: ") + std::strerror(errno)); + } + memset(&addr, 0, sizeof(addr)); + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1); + if (connect(sockfd, reinterpret_cast(&addr), sizeof(addr)) != 0) { + throw std::runtime_error(std::string("connect error: ") + std::strerror(errno)); + } + } + + // Delete copy constructor and assignment operator to avoid resource duplication + UnixClientConnection(const UnixClientConnection&) = delete; + UnixClientConnection& operator=(const UnixClientConnection&) = delete; + + // Move constructor + UnixClientConnection(UnixClientConnection&& other) noexcept + : sockfd(other.sockfd), addr(other.addr) { + other.sockfd = -1; + } + + // Move assignment operator + UnixClientConnection& operator=(UnixClientConnection&& other) noexcept { + if (this != &other) { + if (sockfd != -1) { + close(sockfd); + } + sockfd = other.sockfd; + addr = other.addr; + other.sockfd = -1; + } + return *this; + } + + void send(const std::string& data) { + if (::write(sockfd, data.c_str(), data.size()) == -1) { + throw std::runtime_error(std::string("write error: ") + std::strerror(errno)); + } + } + + std::string recv(size_t size) { + std::string buffer(size, '\0'); + ssize_t bytesRead = ::read(sockfd, &buffer[0], size); + if (bytesRead <= 0) { + throw std::runtime_error(std::string("read error: ") + std::strerror(errno)); + } + buffer.resize(bytesRead); // resize to actual bytes read + return buffer; + } + + // Template overload for generic types + template + UnixClientConnection& operator<<(const T& data) { + streamBuffer << data; + return *this; + } + + // Overload for manipulators (e.g., std::endl) + UnixClientConnection& operator<<(std::ostream& (*manip)(std::ostream&)) { + // Check if the manipulator is std::endl (or equivalent flush) + if (manip == static_cast(std::endl)){ + streamBuffer << '\n'; // Add a newline + std::string packet = streamBuffer.str(); + streamBuffer.str(""); // Clear the buffer + // Send the accumulated data as one packet + send(packet); + } + if (static_cast(std::flush)) { + std::string packet = streamBuffer.str(); + streamBuffer.str(""); // Clear the buffer + // Send the accumulated data as one packet + send(packet); + } else { + // For other manipulators, simply pass them to the buffer + streamBuffer << manip; + } + return *this; + } + + // Overload operator<< to allow printing connection info + friend std::ostream& operator<<(std::ostream& os, const UnixClientConnection& conn) { + os << "UnixClientConnection(sockfd=" << conn.sockfd + << ", path=" << conn.addr.sun_path << ")"; + return os; + } + + ~UnixClientConnection() { + if (sockfd != -1) { + close(sockfd); + } + } +}; + + #ifdef USE_PIPES_FOR_BLOKING_QUEUE template diff --git a/backend/modules/firewall/nftables.py b/backend/modules/firewall/nftables.py index c27ab1c..6822fca 100644 --- a/backend/modules/firewall/nftables.py +++ b/backend/modules/firewall/nftables.py @@ -1,4 +1,4 @@ -from modules.firewall.models import * +from modules.firewall.models import FirewallSettings, Action, Rule, Protocol, Mode, Table from utils import nftables_int_to_json, 
ip_family, NFTableManager, is_ip_parse import copy @@ -9,7 +9,8 @@ class FiregexTables(NFTableManager): filter_table = "filter" mangle_table = "mangle" - def init_comands(self, policy:str=Action.ACCEPT, opt: FirewallSettings|None = None): + def init_comands(self, policy:str=Action.ACCEPT, opt: + FirewallSettings|None = None): rules = [ {"add":{"table":{"name":self.filter_table,"family":"ip"}}}, {"add":{"table":{"name":self.filter_table,"family":"ip6"}}}, @@ -41,7 +42,8 @@ class FiregexTables(NFTableManager): {"add":{"chain":{"family":"ip","table":self.mangle_table,"name":self.rules_chain_out}}}, {"add":{"chain":{"family":"ip6","table":self.mangle_table,"name":self.rules_chain_out}}}, ] - if opt is None: return rules + if opt is None: + return rules if opt.allow_loopback: rules.extend([ @@ -194,13 +196,18 @@ class FiregexTables(NFTableManager): def chain_to_firegex(self, chain:str, table:str): if table == self.filter_table: match chain: - case "INPUT": return self.rules_chain_in - case "OUTPUT": return self.rules_chain_out - case "FORWARD": return self.rules_chain_fwd + case "INPUT": + return self.rules_chain_in + case "OUTPUT": + return self.rules_chain_out + case "FORWARD": + return self.rules_chain_fwd elif table == self.mangle_table: match chain: - case "PREROUTING": return self.rules_chain_in - case "POSTROUTING": return self.rules_chain_out + case "PREROUTING": + return self.rules_chain_in + case "POSTROUTING": + return self.rules_chain_out return None def insert_firegex_chains(self): @@ -214,7 +221,8 @@ class FiregexTables(NFTableManager): if r.get("family") == family and r.get("table") == table and r.get("chain") == chain and r.get("expr") == rule_to_add: found = True break - if found: continue + if found: + continue yield { "add":{ "rule": { "family": family, "table": table, @@ -274,7 +282,7 @@ class FiregexTables(NFTableManager): ip_filters.append({"match": { "op": "==", "left": { "meta": { "key": "oifname" } }, "right": srv.dst} }) port_filters = [] - if not srv.proto in [Protocol.ANY, Protocol.BOTH]: + if srv.proto not in [Protocol.ANY, Protocol.BOTH]: if srv.port_src_from != 1 or srv.port_src_to != 65535: #Any Port port_filters.append({'match': {'left': {'payload': {'protocol': str(srv.proto), 'field': 'sport'}}, 'op': '>=', 'right': int(srv.port_src_from)}}) port_filters.append({'match': {'left': {'payload': {'protocol': str(srv.proto), 'field': 'sport'}}, 'op': '<=', 'right': int(srv.port_src_to)}}) diff --git a/backend/modules/nfproxy/firegex.py b/backend/modules/nfproxy/firegex.py index 37055c3..821f116 100644 --- a/backend/modules/nfproxy/firegex.py +++ b/backend/modules/nfproxy/firegex.py @@ -1,41 +1,58 @@ from modules.nfproxy.nftables import FiregexTables -from utils import run_func from modules.nfproxy.models import Service, PyFilter import os import asyncio -from utils import DEBUG import traceback from fastapi import HTTPException +import time +from utils import run_func +from utils import DEBUG nft = FiregexTables() +OUTSTREAM_BUFFER_SIZE = 1024*10 + class FiregexInterceptor: def __init__(self): self.srv:Service - self._stats_updater_cb:callable self.filter_map_lock:asyncio.Lock self.filter_map: dict[str, PyFilter] - self.pyfilters: set[PyFilter] self.update_config_lock:asyncio.Lock self.process:asyncio.subprocess.Process self.update_task: asyncio.Task + self.server_task: asyncio.Task + self.sock_path: str + self.unix_sock: asyncio.Server self.ack_arrived = False self.ack_status = None - self.ack_fail_what = "" + self.ack_fail_what = "Queue response timed-out" 
        self.ack_lock = asyncio.Lock()
-
-    async def _call_stats_updater_callback(self, filter: PyFilter):
-        if self._stats_updater_cb:
-            await run_func(self._stats_updater_cb(filter))
+        self.sock_reader:asyncio.StreamReader = None
+        self.sock_writer:asyncio.StreamWriter = None
+        self.sock_conn_lock:asyncio.Lock
+        self.last_time_exception = 0
+        self.outstream_function = None
+        self.exception_function = None
+        self.outstream_task: asyncio.Task
+        self.outstream_buffer = ""
 
     @classmethod
-    async def start(cls, srv: Service, stats_updater_cb:callable):
+    async def start(cls, srv: Service, outstream_func=None, exception_func=None):
         self = cls()
-        self._stats_updater_cb = stats_updater_cb
         self.srv = srv
         self.filter_map_lock = asyncio.Lock()
         self.update_config_lock = asyncio.Lock()
+        self.sock_conn_lock = asyncio.Lock()
+        self.outstream_function = outstream_func
+        self.exception_function = exception_func
+        if not self.sock_conn_lock.locked():
+            await self.sock_conn_lock.acquire()
+        self.sock_path = f"/tmp/firegex_nfproxy_{srv.id}.sock"
+        if os.path.exists(self.sock_path):
+            os.remove(self.sock_path)
+        self.unix_sock = await asyncio.start_unix_server(self._server_listener,path=self.sock_path)
+        self.server_task = asyncio.create_task(self.unix_sock.serve_forever())
         queue_range = await self._start_binary()
         self.update_task = asyncio.create_task(self.update_stats())
         nft.add(self.srv, queue_range)
@@ -43,19 +60,49 @@ class FiregexInterceptor:
         await self.ack_lock.acquire()
         return self
 
+    async def _stream_handler(self):
+        while True:
+            try:
+                out_data = (await self.process.stdout.read(OUTSTREAM_BUFFER_SIZE)).decode(errors="ignore")
+                if DEBUG:
+                    print(out_data, end="")
+            except asyncio.exceptions.LimitOverrunError:
+                self.outstream_buffer = ""
+                continue
+            except Exception as e:
+                self.ack_arrived = False
+                self.ack_status = False
+                self.ack_fail_what = "Can't read from nfq client"
+                self.ack_lock.release()
+                await self.stop()
+                traceback.print_exc() # Print the traceback explicitly: it would otherwise be lost when this task dies
+                raise HTTPException(status_code=500, detail="Can't read from nfq client") from e
+            self.outstream_buffer += out_data
+            if len(self.outstream_buffer) > OUTSTREAM_BUFFER_SIZE:
+                self.outstream_buffer = self.outstream_buffer[-OUTSTREAM_BUFFER_SIZE:]+"\n"
+            if self.outstream_function:
+                await run_func(self.outstream_function, self.srv.id, out_data)
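The handler above keeps only the tail of the subprocess output: OUTSTREAM_BUFFER_SIZE caps the retained log so that late-joining socket.io clients receive a bounded backlog. A quick, self-contained illustration of the truncation behaviour (plain Python, small cap chosen for the example):

    OUTSTREAM_BUFFER_SIZE = 16  # the real value is 1024*10

    buffer = ""
    for chunk in ["first line\n", "second line\n", "third line\n"]:
        buffer += chunk
        if len(buffer) > OUTSTREAM_BUFFER_SIZE:
            # keep only the most recent bytes, marking the cut with a newline
            buffer = buffer[-OUTSTREAM_BUFFER_SIZE:] + "\n"

    print(repr(buffer))  # only the tail of the stream survives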
     async def _start_binary(self):
-        proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cpproxy")
+        proxy_binary_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../cpproxy"))
         self.process = await asyncio.create_subprocess_exec(
-            proxy_binary_path,
-            stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
-            env={"NTHREADS": os.getenv("NTHREADS","1")},
+            proxy_binary_path, stdin=asyncio.subprocess.DEVNULL,
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.STDOUT,
+            env={
+                "NTHREADS": os.getenv("NTHREADS","1"),
+                "FIREGEX_NFQUEUE_FAIL_OPEN": "1" if self.srv.fail_open else "0",
+                "FIREGEX_NFPROXY_SOCK": self.sock_path
+            },
         )
-        line_fut = self.process.stdout.readuntil()
+        self.outstream_task = asyncio.create_task(self._stream_handler())
         try:
-            line_fut = await asyncio.wait_for(line_fut, timeout=3)
+            async with asyncio.timeout(3):
+                await self.sock_conn_lock.acquire()
+                line_fut = await self.sock_reader.readuntil()
         except asyncio.TimeoutError:
             self.process.kill()
-            raise Exception("Invalid binary output")
+            raise Exception("The binary did not report its queue number before the timeout")
         line = line_fut.decode()
         if line.startswith("QUEUE "):
             params = line.split()
@@ -64,24 +111,45 @@ class FiregexInterceptor:
             self.process.kill()
             raise Exception("Invalid binary output")
 
+    async def _server_listener(self, reader:asyncio.StreamReader, writer:asyncio.StreamWriter):
+        if self.sock_reader or self.sock_writer:
+            writer.write_eof() # Only a single client is expected; technically never reached
+            writer.close()
+            reader.feed_eof()
+            return
+        self.sock_reader = reader
+        self.sock_writer = writer
+        self.sock_conn_lock.release()
+
     async def update_stats(self):
         try:
             while True:
-                line = (await self.process.stdout.readuntil()).decode()
-                if DEBUG:
-                    print(line)
+                try:
+                    line = (await self.sock_reader.readuntil()).decode()
+                except Exception as e:
+                    self.ack_arrived = False
+                    self.ack_status = False
+                    self.ack_fail_what = "Can't read from nfq client"
+                    self.ack_lock.release()
+                    await self.stop()
+                    raise HTTPException(status_code=500, detail="Can't read from nfq client") from e
                 if line.startswith("BLOCKED "):
-                    filter_id = line.split()[1]
+                    filter_name = line.split()[1]
                     async with self.filter_map_lock:
-                        if filter_id in self.filter_map:
-                            self.filter_map[filter_id].blocked_packets+=1
-                            await self.filter_map[filter_id].update()
-                if line.startswith("EDITED "):
-                    filter_id = line.split()[1]
+                        if filter_name in self.filter_map:
+                            self.filter_map[filter_name].blocked_packets+=1
+                            await self.filter_map[filter_name].update()
+                if line.startswith("MANGLED "):
+                    filter_name = line.split()[1]
                     async with self.filter_map_lock:
-                        if filter_id in self.filter_map:
-                            self.filter_map[filter_id].edited_packets+=1
-                            await self.filter_map[filter_id].update()
+                        if filter_name in self.filter_map:
+                            self.filter_map[filter_name].edited_packets+=1
+                            await self.filter_map[filter_name].update()
+                if line.startswith("EXCEPTION"):
+                    self.last_time_exception = int(time.time()*1000) #ms timestamp
+                    if self.exception_function:
+                        await run_func(self.exception_function, self.srv.id, self.last_time_exception)
                 if line.startswith("ACK "):
                     self.ack_arrived = True
                     self.ack_status = line.split()[1].upper() == "OK"
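The interceptor and the cpproxy binary speak a newline-terminated text protocol over the unix socket: QUEUE at startup, then BLOCKED/MANGLED/EXCEPTION events and ACK replies to config pushes. A minimal sketch of a reader for that framing, assuming a standalone client and the same socket path convention (hypothetical, not part of the backend):

    import asyncio

    async def watch_events(sock_path: str):
        # Connect to the interceptor socket and decode the line-based protocol
        reader, writer = await asyncio.open_unix_connection(sock_path)
        try:
            while True:
                line = (await reader.readuntil()).decode()  # reads up to "\n"
                verb, _, rest = line.strip().partition(" ")
                if verb in ("BLOCKED", "MANGLED"):
                    print(f"filter {rest!r} matched ({verb.lower()})")
                elif verb == "EXCEPTION":
                    print("a pyfilter raised; check the service output stream")
                elif verb == "ACK":
                    print("config", "accepted" if rest.upper() == "OK" else "rejected")
        finally:
            writer.close()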
@@ -96,26 +164,44 @@ class FiregexInterceptor:
             traceback.print_exc()
 
     async def stop(self):
+        self.server_task.cancel()
         self.update_task.cancel()
+        self.unix_sock.close()
+        self.outstream_task.cancel()
+        if os.path.exists(self.sock_path):
+            os.remove(self.sock_path)
         if self.process and self.process.returncode is None:
             self.process.kill()
 
-    async def _update_config(self, filters_codes):
+    async def _update_config(self, code):
         async with self.update_config_lock:
-            # TODO write compiled code correctly
-            # self.process.stdin.write((" ".join(filters_codes)+"\n").encode())
-            await self.process.stdin.drain()
-            try:
-                async with asyncio.timeout(3):
-                    await self.ack_lock.acquire()
-            except TimeoutError:
-                pass
-            if not self.ack_arrived or not self.ack_status:
-                raise HTTPException(status_code=500, detail=f"NFQ error: {self.ack_fail_what}")
+            if self.sock_writer:
+                payload = code.encode() # encode first: the prefix must count bytes, not characters
+                self.sock_writer.write(len(payload).to_bytes(4, byteorder='big')+payload)
+                await self.sock_writer.drain()
+                try:
+                    async with asyncio.timeout(3):
+                        await self.ack_lock.acquire()
+                except TimeoutError:
+                    self.ack_fail_what = "Queue response timed-out"
+                if not self.ack_arrived or not self.ack_status:
+                    await self.stop()
+                    raise HTTPException(status_code=500, detail=f"NFQ error: {self.ack_fail_what}")
+            else:
+                raise HTTPException(status_code=400, detail="Socket not ready")
 
     async def reload(self, filters:list[PyFilter]):
         async with self.filter_map_lock:
-            self.filter_map = self.compile_filters(filters)
-            # TODO COMPILE CODE
-            #await self._update_config(filters_codes) TODO pass the compiled code
+            if os.path.exists(f"db/nfproxy_filters/{self.srv.id}.py"):
+                with open(f"db/nfproxy_filters/{self.srv.id}.py") as f:
+                    filter_file = f.read()
+            else:
+                filter_file = ""
+            self.filter_map = {ele.name: ele for ele in filters}
+            await self._update_config(
+                filter_file + "\n\n" +
+                "__firegex_pyfilter_enabled = [" + ", ".join([repr(f.name) for f in filters]) + "]\n" +
+                "__firegex_proto = " + repr(self.srv.proto) + "\n" +
+                "import firegex.nfproxy.internals\n" +
+                "firegex.nfproxy.internals.compile(globals())\n"
+            )
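reload() pushes the whole filter file plus a small bootstrap epilogue as a single frame: a 4-byte big-endian byte count followed by the payload. For illustration, the matching read side in Python (the real consumer is the cpproxy binary; this is only a sketch of the framing):

    import socket

    def read_config_frame(sock: socket.socket) -> str:
        """Read one length-prefixed frame: 4-byte big-endian size, then the payload."""
        header = sock.recv(4, socket.MSG_WAITALL)
        size = int.from_bytes(header, byteorder="big")
        payload = b""
        while len(payload) < size:            # recv may return short reads
            chunk = sock.recv(size - len(payload))
            if not chunk:
                raise ConnectionError("peer closed mid-frame")
            payload += chunk
        return payload.decode()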
diff --git a/backend/modules/nfproxy/firewall.py b/backend/modules/nfproxy/firewall.py
index 59002d9..045f9ab 100644
--- a/backend/modules/nfproxy/firewall.py
+++ b/backend/modules/nfproxy/firewall.py
@@ -3,6 +3,7 @@ from modules.nfproxy.firegex import FiregexInterceptor
 from modules.nfproxy.nftables import FiregexTables, FiregexFilter
 from modules.nfproxy.models import Service, PyFilter
 from utils.sqlite import SQLite
+from utils import run_func
 
 class STATUS:
     STOP = "stop"
@@ -11,22 +12,29 @@ class STATUS:
 nft = FiregexTables()
 
 class ServiceManager:
-    def __init__(self, srv: Service, db):
+    def __init__(self, srv: Service, db, outstream_func=None, exception_func=None):
         self.srv = srv
         self.db = db
         self.status = STATUS.STOP
-        self.filters: dict[int, FiregexFilter] = {}
+        self.filters: dict[str, FiregexFilter] = {}
         self.lock = asyncio.Lock()
         self.interceptor = None
+        self.outstream_function = outstream_func
+        self.last_exception_time = 0
+        async def excep_internal_handler(srv, exc_time):
+            self.last_exception_time = exc_time
+            if exception_func:
+                await run_func(exception_func, srv, exc_time)
+        self.exception_function = excep_internal_handler
 
     async def _update_filters_from_db(self):
         pyfilters = [
-            PyFilter.from_dict(ele) for ele in
+            PyFilter.from_dict(ele, self.db) for ele in
            self.db.query("SELECT * FROM pyfilter WHERE service_id = ? AND active=1;", self.srv.id)
         ]
         #Filter check
         old_filters = set(self.filters.keys())
-        new_filters = set([f.id for f in pyfilters])
+        new_filters = set([f.name for f in pyfilters])
         #remove old filters
         for f in old_filters:
             if f not in new_filters:
@@ -34,7 +42,7 @@ class ServiceManager:
         #add new filters
         for f in new_filters:
             if f not in old_filters:
-                self.filters[f] = [ele for ele in pyfilters if ele.id == f][0]
+                self.filters[f] = [ele for ele in pyfilters if ele.name == f][0]
 
         if self.interceptor:
             await self.interceptor.reload(self.filters.values())
@@ -43,24 +51,25 @@ class ServiceManager:
 
     async def next(self,to):
         async with self.lock:
-            if (self.status, to) == (STATUS.ACTIVE, STATUS.STOP):
+            if to == STATUS.STOP:
                 await self.stop()
-                self._set_status(to)
-            # PAUSE -> ACTIVE
-            elif (self.status, to) == (STATUS.STOP, STATUS.ACTIVE):
+            if to == STATUS.ACTIVE:
                 await self.restart()
 
-    def _stats_updater(self,filter:PyFilter):
-        self.db.query("UPDATE pyfilter SET blocked_packets = ?, edited_packets = ? WHERE filter_id = ?;", filter.blocked_packets, filter.edited_packets, filter.id)
-
     def _set_status(self,status):
         self.status = status
         self.__update_status_db(status)
 
+    def read_outstream_buffer(self):
+        if self.interceptor:
+            return self.interceptor.outstream_buffer
+        else:
+            return ""
+
     async def start(self):
         if not self.interceptor:
             nft.delete(self.srv)
-            self.interceptor = await FiregexInterceptor.start(self.srv, self._stats_updater)
+            self.interceptor = await FiregexInterceptor.start(self.srv, outstream_func=self.outstream_function, exception_func=self.exception_function)
             await self._update_filters_from_db()
             self._set_status(STATUS.ACTIVE)
 
@@ -69,6 +78,7 @@ class ServiceManager:
         if self.interceptor:
             await self.interceptor.stop()
             self.interceptor = None
+        self._set_status(STATUS.STOP)
 
     async def restart(self):
         await self.stop()
@@ -79,10 +89,12 @@ class ServiceManager:
         await self._update_filters_from_db()
 
 class FirewallManager:
-    def __init__(self, db:SQLite):
+    def __init__(self, db:SQLite, outstream_func=None, exception_func=None):
         self.db = db
         self.service_table: dict[str, ServiceManager] = {}
         self.lock = asyncio.Lock()
+        self.outstream_function = outstream_func
+        self.exception_function = exception_func
 
     async def close(self):
         for key in list(self.service_table.keys()):
@@ -104,7 +116,7 @@ class FirewallManager:
             srv = Service.from_dict(srv)
             if srv.id in self.service_table:
                 continue
-            self.service_table[srv.id] = ServiceManager(srv, self.db)
+            self.service_table[srv.id] = ServiceManager(srv, self.db, outstream_func=self.outstream_function, exception_func=self.exception_function)
             await self.service_table[srv.id].next(srv.status)
 
     def get(self,srv_id) -> ServiceManager:
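FirewallManager threads the two callbacks down to every ServiceManager; their signatures are implied by the call sites (run_func(outstream_func, service_id, data) and run_func(exception_func, service_id, timestamp)). The HTTP router later wires socket.io emitters here; for standalone use, compatible callbacks just need these shapes (hedged sketch, placeholder bodies):

    async def outstream_func(service_id: str, data: str):
        # receives raw chunks of the cpproxy stdout/stderr stream
        print(f"[{service_id}] {data}", end="")

    async def exception_func(service_id: str, timestamp: int):
        # receives a millisecond timestamp of the last pyfilter exception
        print(f"[{service_id}] pyfilter exception at {timestamp}")

    # manager = FirewallManager(db, outstream_func=outstream_func, exception_func=exception_func)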
diff --git a/backend/modules/nfproxy/models.py b/backend/modules/nfproxy/models.py
index ba048c4..bb691cd 100644
--- a/backend/modules/nfproxy/models.py
+++ b/backend/modules/nfproxy/models.py
@@ -1,12 +1,13 @@
 class Service:
-    def __init__(self, service_id: str, status: str, port: int, name: str, proto: str, ip_int: str, **other):
+    def __init__(self, service_id: str, status: str, port: int, name: str, proto: str, ip_int: str, fail_open: bool, **other):
         self.id = service_id
         self.status = status
         self.port = port
         self.name = name
         self.proto = proto
         self.ip_int = ip_int
+        self.fail_open = fail_open
 
     @classmethod
     def from_dict(cls, var: dict):
@@ -14,13 +15,19 @@ class Service:
 
 class PyFilter:
-    def __init__(self, filter_id:int, name: str, blocked_packets: int, edited_packets: int, active: bool, **other):
-        self.id = filter_id
+    def __init__(self, name: str, blocked_packets: int, edited_packets: int, active: bool, db, **other):
         self.name = name
         self.blocked_packets = blocked_packets
         self.edited_packets = edited_packets
         self.active = active
+        self.__db = db
+
+    async def update(self):
+        self.__db.query("UPDATE pyfilter SET blocked_packets = ?, edited_packets = ? WHERE name = ?;", self.blocked_packets, self.edited_packets, self.name)
+
+    def __repr__(self):
+        return f"<PyFilter name={self.name} blocked={self.blocked_packets} edited={self.edited_packets} active={self.active}>"
 
     @classmethod
-    def from_dict(cls, var: dict):
-        return cls(**var)
+    def from_dict(cls, var: dict, db):
+        return cls(**var, db=db)
diff --git a/backend/modules/nfproxy/nftables.py b/backend/modules/nfproxy/nftables.py
index eafa129..046d98d 100644
--- a/backend/modules/nfproxy/nftables.py
+++ b/backend/modules/nfproxy/nftables.py
@@ -1,6 +1,14 @@
 from modules.nfproxy.models import Service
 from utils import ip_parse, ip_family, NFTableManager, nftables_int_to_json
 
+def convert_protocol_to_l4(proto:str):
+    if proto == "tcp":
+        return "tcp"
+    elif proto == "http":
+        return "tcp"
+    else:
+        raise Exception("Invalid protocol")
+
 class FiregexFilter:
     def __init__(self, proto:str, port:int, ip_int:str, target:str, id:int):
         self.id = id
@@ -11,7 +19,7 @@ class FiregexFilter:
 
     def __eq__(self, o: object) -> bool:
         if isinstance(o, FiregexFilter) or isinstance(o, Service):
-            return self.port == o.port and self.proto == o.proto and ip_parse(self.ip_int) == ip_parse(o.ip_int)
+            return self.port == o.port and self.proto == convert_protocol_to_l4(o.proto) and ip_parse(self.ip_int) == ip_parse(o.ip_int)
         return False
 
 class FiregexTables(NFTableManager):
@@ -20,22 +28,22 @@ class FiregexTables(NFTableManager):
 
     def __init__(self):
         super().__init__([
-            {"add":{"chain":{
+            {"add":{"chain":{ #Input chain attached before conntrack sees the packet
                 "family":"inet",
                 "table":self.table_name,
                 "name":self.input_chain,
                 "type":"filter",
                 "hook":"prerouting",
-                "prio":-150,
+                "prio":-310,
                 "policy":"accept"
             }}},
-            {"add":{"chain":{
+            {"add":{"chain":{ #Output chain attached after conntrack saw it
                 "family":"inet",
                 "table":self.table_name,
                 "name":self.output_chain,
                 "type":"filter",
                 "hook":"postrouting",
-                "prio":-150,
+                "prio":-310,
                 "policy":"accept"
             }}}
         ],[
@@ -61,7 +69,7 @@ class FiregexTables(NFTableManager):
                 "chain": self.output_chain,
                 "expr": [
                     {'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'saddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}},
-                    {'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "sport"}}, "op": "==", "right": int(srv.port)}},
+                    {'match': {"left": { "payload": {"protocol": convert_protocol_to_l4(str(srv.proto)), "field": "sport"}}, "op": "==", "right": int(srv.port)}},
                     {"mangle": {"key": {"meta": {"key": "mark"}},"value": 0x1338}},
                     {"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}}
                 ]
@@ -72,7 +80,7 @@ class FiregexTables(NFTableManager):
                 "chain": self.input_chain,
                 "expr": [
                     {'match': {'left': {'payload': {'protocol': ip_family(srv.ip_int), 'field': 'daddr'}}, 'op': '==', 'right': nftables_int_to_json(srv.ip_int)}},
-                    {'match': {"left": { "payload": {"protocol": str(srv.proto), "field": "dport"}}, "op": "==", "right": int(srv.port)}},
+                    {'match': {"left": { "payload": {"protocol": convert_protocol_to_l4(str(srv.proto)), "field": "dport"}}, "op": "==", "right": int(srv.port)}},
                     {"mangle": {"key": {"meta": {"key": "mark"}},"value": 0x1337}},
                     {"queue": {"num": str(init) if init == end else {"range":[init, end] }, "flags": ["bypass"]}}
                 ]
diff --git 
a/backend/modules/nfregex/firegex.py b/backend/modules/nfregex/firegex.py index 3d14bda..701ca9d 100644 --- a/backend/modules/nfregex/firegex.py +++ b/backend/modules/nfregex/firegex.py @@ -79,7 +79,7 @@ class FiregexInterceptor: self.update_task: asyncio.Task self.ack_arrived = False self.ack_status = None - self.ack_fail_what = "" + self.ack_fail_what = "Queue response timed-out" self.ack_lock = asyncio.Lock() @classmethod @@ -158,8 +158,9 @@ class FiregexInterceptor: async with asyncio.timeout(3): await self.ack_lock.acquire() except TimeoutError: - pass + self.ack_fail_what = "Queue response timed-out" if not self.ack_arrived or not self.ack_status: + await self.stop() raise HTTPException(status_code=500, detail=f"NFQ error: {self.ack_fail_what}") diff --git a/backend/modules/nfregex/firewall.py b/backend/modules/nfregex/firewall.py index d0d5479..ec9231e 100644 --- a/backend/modules/nfregex/firewall.py +++ b/backend/modules/nfregex/firewall.py @@ -45,11 +45,9 @@ class ServiceManager: async def next(self,to): async with self.lock: - if (self.status, to) == (STATUS.ACTIVE, STATUS.STOP): + if to == STATUS.STOP: await self.stop() - self._set_status(to) - # PAUSE -> ACTIVE - elif (self.status, to) == (STATUS.STOP, STATUS.ACTIVE): + if to == STATUS.ACTIVE: await self.restart() def _stats_updater(self,filter:RegexFilter): @@ -71,6 +69,7 @@ class ServiceManager: if self.interceptor: await self.interceptor.stop() self.interceptor = None + self._set_status(STATUS.STOP) async def restart(self): await self.stop() diff --git a/backend/modules/nfregex/nftables.py b/backend/modules/nfregex/nftables.py index 34ed844..c352226 100644 --- a/backend/modules/nfregex/nftables.py +++ b/backend/modules/nfregex/nftables.py @@ -26,7 +26,7 @@ class FiregexTables(NFTableManager): "name":self.input_chain, "type":"filter", "hook":"prerouting", - "prio":-150, + "prio":-301, "policy":"accept" }}}, {"add":{"chain":{ @@ -35,7 +35,7 @@ class FiregexTables(NFTableManager): "name":self.output_chain, "type":"filter", "hook":"postrouting", - "prio":-150, + "prio":-301, "policy":"accept" }}} ],[ diff --git a/backend/modules/porthijack/nftables.py b/backend/modules/porthijack/nftables.py index 1d8dcde..0590b2f 100644 --- a/backend/modules/porthijack/nftables.py +++ b/backend/modules/porthijack/nftables.py @@ -28,7 +28,7 @@ class FiregexTables(NFTableManager): "name":self.prerouting_porthijack, "type":"filter", "hook":"prerouting", - "prio":-300, + "prio":-310, "policy":"accept" }}}, {"add":{"chain":{ @@ -37,7 +37,7 @@ class FiregexTables(NFTableManager): "name":self.postrouting_porthijack, "type":"filter", "hook":"postrouting", - "prio":-300, + "prio":-310, "policy":"accept" }}} ],[ diff --git a/backend/routers/nfproxy.py b/backend/routers/nfproxy.py index 4cbb825..96fffa2 100644 --- a/backend/routers/nfproxy.py +++ b/backend/routers/nfproxy.py @@ -7,6 +7,14 @@ from modules.nfproxy.firewall import STATUS, FirewallManager from utils.sqlite import SQLite from utils import ip_parse, refactor_name, socketio_emit, PortType from utils.models import ResetRequest, StatusMessageModel +import os +from firegex.nfproxy.internals import get_filter_names +from fastapi.responses import PlainTextResponse +from modules.nfproxy.nftables import convert_protocol_to_l4 +import asyncio +import traceback +from utils import DEBUG +import utils class ServiceModel(BaseModel): service_id: str @@ -18,12 +26,17 @@ class ServiceModel(BaseModel): n_filters: int edited_packets: int blocked_packets: int + fail_open: bool class RenameForm(BaseModel): 
name:str +class SettingsForm(BaseModel): + port: PortType|None = None + ip_int: str|None = None + fail_open: bool|None = None + class PyFilterModel(BaseModel): - filter_id: int name: str blocked_packets: int edited_packets: int @@ -34,12 +47,17 @@ class ServiceAddForm(BaseModel): port: PortType proto: str ip_int: str + fail_open: bool = True class ServiceAddResponse(BaseModel): status:str service_id: str|None = None -#app = APIRouter() Not released in this version +class SetPyFilterForm(BaseModel): + code: str + sid: str|None = None + +app = APIRouter() db = SQLite('db/nft-pyfilters.db', { 'services': { @@ -48,11 +66,12 @@ db = SQLite('db/nft-pyfilters.db', { 'port': 'INT NOT NULL CHECK(port > 0 and port < 65536)', 'name': 'VARCHAR(100) NOT NULL UNIQUE', 'proto': 'VARCHAR(3) NOT NULL CHECK (proto IN ("tcp", "http"))', + 'l4_proto': 'VARCHAR(3) NOT NULL CHECK (l4_proto IN ("tcp", "udp"))', 'ip_int': 'VARCHAR(100) NOT NULL', + 'fail_open': 'BOOLEAN NOT NULL CHECK (fail_open IN (0, 1)) DEFAULT 1', }, 'pyfilter': { - 'filter_id': 'INTEGER PRIMARY KEY', - 'name': 'VARCHAR(100) NOT NULL', + 'name': 'VARCHAR(100) PRIMARY KEY', 'blocked_packets': 'INTEGER UNSIGNED NOT NULL DEFAULT 0', 'edited_packets': 'INTEGER UNSIGNED NOT NULL DEFAULT 0', 'service_id': 'VARCHAR(100) NOT NULL', @@ -60,8 +79,8 @@ db = SQLite('db/nft-pyfilters.db', { 'FOREIGN KEY (service_id)':'REFERENCES services (service_id)', }, 'QUERY':[ - "CREATE UNIQUE INDEX IF NOT EXISTS unique_services ON services (port, ip_int, proto);", - "CREATE UNIQUE INDEX IF NOT EXISTS unique_pyfilter_service ON pyfilter (name, service_id);" + "CREATE UNIQUE INDEX IF NOT EXISTS unique_services ON services (port, ip_int, l4_proto);", + "CREATE UNIQUE INDEX IF NOT EXISTS unique_pyfilter_service ON pyfilter (name, service_id);" ] }) @@ -89,6 +108,10 @@ async def startup(): await firewall.init() except Exception as e: print("WARNING cannot start firewall:", e) + utils.socketio.on("nfproxy-outstream-join", join_outstream) + utils.socketio.on("nfproxy-outstream-leave", leave_outstream) + utils.socketio.on("nfproxy-exception-join", join_exception) + utils.socketio.on("nfproxy-exception-leave", leave_exception) async def shutdown(): db.backup() @@ -103,7 +126,13 @@ def gen_service_id(): break return res -firewall = FirewallManager(db) +async def outstream_func(service_id, data): + await utils.socketio.emit(f"nfproxy-outstream-{service_id}", data, room=f"nfproxy-outstream-{service_id}") + +async def exception_func(service_id, timestamp): + await utils.socketio.emit(f"nfproxy-exception-{service_id}", timestamp, room=f"nfproxy-exception-{service_id}") + +firewall = FirewallManager(db, outstream_func=outstream_func, exception_func=exception_func) @app.get('/services', response_model=list[ServiceModel]) async def get_service_list(): @@ -116,7 +145,8 @@ async def get_service_list(): s.name name, s.proto proto, s.ip_int ip_int, - COUNT(f.filter_id) n_filters, + s.fail_open fail_open, + COUNT(f.name) n_filters, COALESCE(SUM(f.blocked_packets),0) blocked_packets, COALESCE(SUM(f.edited_packets),0) edited_packets FROM services s LEFT JOIN pyfilter f ON s.service_id = f.service_id @@ -134,7 +164,8 @@ async def get_service_by_id(service_id: str): s.name name, s.proto proto, s.ip_int ip_int, - COUNT(f.filter_id) n_filters, + s.fail_open fail_open, + COUNT(f.name) n_filters, COALESCE(SUM(f.blocked_packets),0) blocked_packets, COALESCE(SUM(f.edited_packets),0) edited_packets FROM services s LEFT JOIN pyfilter f ON s.service_id = f.service_id @@ -163,6 +194,8 @@ async def 
service_delete(service_id: str):
     """Request the deletion of a specific service"""
     db.query('DELETE FROM services WHERE service_id = ?;', service_id)
     db.query('DELETE FROM pyfilter WHERE service_id = ?;', service_id)
+    if os.path.exists(f"db/nfproxy_filters/{service_id}.py"):
+        os.remove(f"db/nfproxy_filters/{service_id}.py")
     await firewall.remove(service_id)
     await refresh_frontend()
     return {'status': 'ok'}
@@ -180,6 +213,42 @@ async def service_rename(service_id: str, form: RenameForm):
     await refresh_frontend()
     return {'status': 'ok'}
 
+@app.put('/services/{service_id}/settings', response_model=StatusMessageModel)
+async def service_settings(service_id: str, form: SettingsForm):
+    """Request to change the settings of a specific service (will cause a restart)"""
+
+    if form.port is not None and (form.port < 1 or form.port > 65535):
+        raise HTTPException(status_code=400, detail="Invalid port")
+
+    if form.ip_int is not None:
+        try:
+            form.ip_int = ip_parse(form.ip_int)
+        except ValueError:
+            raise HTTPException(status_code=400, detail="Invalid address")
+
+    keys = []
+    values = []
+
+    for key, value in form.model_dump(exclude_none=True).items():
+        keys.append(key)
+        values.append(value)
+
+    if len(keys) == 0:
+        raise HTTPException(status_code=400, detail="No settings to change provided")
+
+    try:
+        db.query(f'UPDATE services SET {", ".join([f"{key}=?" for key in keys])} WHERE service_id = ?;', *values, service_id)
+    except sqlite3.IntegrityError:
+        raise HTTPException(status_code=400, detail="A service with these settings already exists")
+
+    old_status = firewall.get(service_id).status
+    await firewall.remove(service_id)
+    await firewall.reload()
+    await firewall.get(service_id).next(old_status)
+
+    await refresh_frontend()
+    return {'status': 'ok'}
+
 @app.get('/services/{service_id}/pyfilters', response_model=list[PyFilterModel])
 async def get_service_pyfilter_list(service_id: str):
     """Get the list of the pyfilters of a service"""
         raise HTTPException(status_code=400, detail="This service does not exist!")
     return db.query("""
         SELECT
-            filter_id, name, blocked_packets, edited_packets, active
+            name, blocked_packets, edited_packets, active
         FROM pyfilter WHERE service_id = ?;
     """, service_id)
 
-@app.get('/pyfilters/{filter_id}', response_model=PyFilterModel)
-async def get_pyfilter_by_id(filter_id: int):
+@app.get('/pyfilters/{filter_name}', response_model=PyFilterModel)
+async def get_pyfilter_by_id(filter_name: str):
     """Get pyfilter info by its name"""
     res = db.query("""
         SELECT
-            filter_id, name, blocked_packets, edited_packets, active
-        FROM pyfilter WHERE filter_id = ?;
-    """, filter_id)
+            name, blocked_packets, edited_packets, active
+        FROM pyfilter WHERE name = ?;
+    """, filter_name)
     if len(res) == 0:
         raise HTTPException(status_code=400, detail="This filter does not exist!")
     return res[0]
 
-@app.delete('/pyfilters/{filter_id}', response_model=StatusMessageModel)
-async def pyfilter_delete(filter_id: int):
-    """Delete a pyfilter using its id"""
-    res = db.query('SELECT * FROM pyfilter WHERE filter_id = ?;', filter_id)
-    if len(res) != 0:
-        db.query('DELETE FROM pyfilter WHERE filter_id = ?;', filter_id)
-        await firewall.get(res[0]["service_id"]).update_filters()
-        await refresh_frontend()
-
-    return {'status': 'ok'}
-
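service_settings above builds a partial SQL UPDATE from whichever optional fields the client actually sent, via model_dump(exclude_none=True). A small self-contained illustration of the pattern (hypothetical table; column names come from the validated model, which keeps the f-string interpolation safe):

    from pydantic import BaseModel

    class SettingsForm(BaseModel):
        port: int | None = None
        ip_int: str | None = None
        fail_open: bool | None = None

    form = SettingsForm(port=8080)                   # client sent only "port"
    changes = form.model_dump(exclude_none=True)     # {'port': 8080}
    keys, values = list(changes.keys()), list(changes.values())
    sql = f'UPDATE services SET {", ".join(f"{k}=?" for k in keys)} WHERE service_id = ?;'
    print(sql, values)  # UPDATE services SET port=? WHERE service_id = ?; [8080]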
-@app.post('/pyfilters/{filter_id}/enable', response_model=StatusMessageModel)
-async def pyfilter_enable(filter_id: int):
+@app.post('/pyfilters/{filter_name}/enable', response_model=StatusMessageModel)
+async def pyfilter_enable(filter_name: str):
     """Request the enabling of a pyfilter"""
-    res = db.query('SELECT * FROM pyfilter WHERE filter_id = ?;', filter_id)
+    res = db.query('SELECT * FROM pyfilter WHERE name = ?;', filter_name)
     if len(res) != 0:
-        db.query('UPDATE pyfilter SET active=1 WHERE filter_id = ?;', filter_id)
+        db.query('UPDATE pyfilter SET active=1 WHERE name = ?;', filter_name)
         await firewall.get(res[0]["service_id"]).update_filters()
         await refresh_frontend()
     return {'status': 'ok'}
 
-@app.post('/pyfilters/{filter_id}/disable', response_model=StatusMessageModel)
-async def pyfilter_disable(filter_id: int):
+@app.post('/pyfilters/{filter_name}/disable', response_model=StatusMessageModel)
+async def pyfilter_disable(filter_name: str):
     """Request the deactivation of a pyfilter"""
-    res = db.query('SELECT * FROM pyfilter WHERE filter_id = ?;', filter_id)
+    res = db.query('SELECT * FROM pyfilter WHERE name = ?;', filter_name)
     if len(res) != 0:
-        db.query('UPDATE pyfilter SET active=0 WHERE filter_id = ?;', filter_id)
+        db.query('UPDATE pyfilter SET active=0 WHERE name = ?;', filter_name)
         await firewall.get(res[0]["service_id"]).update_filters()
         await refresh_frontend()
     return {'status': 'ok'}
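Both toggles are idempotent: they only touch an existing row and then ask the owning service to reload its filter set. A hypothetical client session against a local instance (the base URL and mount point are assumptions; authentication is omitted):

    import requests

    BASE = "http://127.0.0.1:4444/api/nfproxy"   # assumed mount point for this router
    s = requests.Session()

    # toggle a filter by name rather than by numeric id, matching the new schema
    s.post(f"{BASE}/pyfilters/block_flags/disable").raise_for_status()
    s.post(f"{BASE}/pyfilters/block_flags/enable").raise_for_status()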
@@ -246,14 +304,95 @@ async def add_new_service(form: ServiceAddForm):
     srv_id = None
     try:
         srv_id = gen_service_id()
-        db.query("INSERT INTO services (service_id ,name, port, status, proto, ip_int) VALUES (?, ?, ?, ?, ?, ?)",
-                    srv_id, refactor_name(form.name), form.port, STATUS.STOP, form.proto, form.ip_int)
+        db.query("INSERT INTO services (service_id ,name, port, status, proto, ip_int, fail_open, l4_proto) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
+                    srv_id, refactor_name(form.name), form.port, STATUS.STOP, form.proto, form.ip_int, form.fail_open, convert_protocol_to_l4(form.proto))
     except sqlite3.IntegrityError:
         raise HTTPException(status_code=400, detail="This type of service already exists")
     await firewall.reload()
     await refresh_frontend()
     return {'status': 'ok', 'service_id': srv_id}
 
-#TODO check all the APIs and add
-# 1. API to change the python filter file
-# 2. a socketio mechanism to lock the previous feature
\ No newline at end of file
+@app.put('/services/{service_id}/pyfilters/code', response_model=StatusMessageModel)
+async def set_pyfilters(service_id: str, form: SetPyFilterForm):
+    """Set the python filter for a service"""
+    service = db.query("SELECT service_id, proto FROM services WHERE service_id = ?;", service_id)
+    if len(service) == 0:
+        raise HTTPException(status_code=400, detail="This service does not exist!")
+    service = service[0]
+    service_id = service["service_id"]
+    srv_proto = service["proto"]
+
+    try:
+        async with asyncio.timeout(8):
+            try:
+                found_filters = get_filter_names(form.code, srv_proto)
+            except Exception as e:
+                if DEBUG:
+                    traceback.print_exc()
+                raise HTTPException(status_code=400, detail="Compile error: "+str(e))
+
+            # Remove filters that are not in the new code
+            existing_filters = db.query("SELECT name FROM pyfilter WHERE service_id = ?;", service_id)
+            existing_filters = [ele["name"] for ele in existing_filters]
+            for filter in existing_filters:
+                if filter not in found_filters:
+                    db.query("DELETE FROM pyfilter WHERE name = ?;", filter)
+
+            # Add filters that are in the new code but not in the database
+            for filter in found_filters:
+                if not db.query("SELECT 1 FROM pyfilter WHERE service_id = ? AND name = ?;", service_id, filter):
+                    db.query("INSERT INTO pyfilter (name, service_id) VALUES (?, ?);", filter, service["service_id"])
+
+            # Filters whose code changed will be reloaded below
+            os.makedirs("db/nfproxy_filters", exist_ok=True)
+            with open(f"db/nfproxy_filters/{service_id}.py", "w") as f:
+                f.write(form.code)
+            await firewall.get(service_id).update_filters()
+            await refresh_frontend()
+    except asyncio.TimeoutError:
+        if DEBUG:
+            traceback.print_exc()
+        raise HTTPException(status_code=400, detail="The operation took too long")
+
+    return {'status': 'ok'}
+
+@app.get('/services/{service_id}/pyfilters/code', response_class=PlainTextResponse)
+async def get_pyfilters(service_id: str):
+    """Get the python filter for a service"""
+    if not db.query("SELECT 1 FROM services s WHERE s.service_id = ?;", service_id):
+        raise HTTPException(status_code=400, detail="This service does not exist!")
+    try:
+        with open(f"db/nfproxy_filters/{service_id}.py") as f:
+            return f.read()
+    except FileNotFoundError:
+        return ""
+
+#Socket io events
+async def join_outstream(sid, data):
+    """Client joins a room."""
+    srv = data.get("service")
+    if srv:
+        room = f"nfproxy-outstream-{srv}"
+        await utils.socketio.enter_room(sid, room)
+        await utils.socketio.emit(room, firewall.get(srv).read_outstream_buffer(), room=sid)
+
+async def leave_outstream(sid, data):
+    """Client leaves a room."""
+    srv = data.get("service")
+    if srv:
+        await utils.socketio.leave_room(sid, f"nfproxy-outstream-{srv}")
+
+async def join_exception(sid, data):
+    """Client joins a room."""
+    srv = data.get("service")
+    if srv:
+        room = f"nfproxy-exception-{srv}"
+        await utils.socketio.enter_room(sid, room)
+        await utils.socketio.emit(room, firewall.get(srv).last_exception_time, room=sid)
+
+async def leave_exception(sid, data):
+    """Client leaves a room."""
+    srv = data.get("service")
+    if srv:
+        await utils.socketio.leave_room(sid, f"nfproxy-exception-{srv}")
+
diff --git a/backend/utils/__init__.py b/backend/utils/__init__.py
index 1d9c23a..c4fc13d 100644
--- a/backend/utils/__init__.py
+++ b/backend/utils/__init__.py
@@ -8,15 +8,22 @@ import nftables
 from socketio import AsyncServer
 from fastapi import Path
 from typing import Annotated
+from functools import wraps
+from pydantic import BaseModel, ValidationError
+import traceback
+from utils.models import StatusMessageModel
+from typing import List
 
 LOCALHOST_IP = socket.gethostbyname(os.getenv("LOCALHOST_IP","127.0.0.1"))
 socketio:AsyncServer = None
+sid_list:set = set()
 ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
 ROUTERS_DIR = os.path.join(ROOT_DIR,"routers")
 ON_DOCKER = "DOCKER" in sys.argv
 DEBUG = "DEBUG" in sys.argv
+NORELOAD = "NORELOAD" in sys.argv
 FIREGEX_PORT = int(os.getenv("PORT","4444"))
 JWT_ALGORITHM: str = "HS256"
 API_VERSION = "{{VERSION_PLACEHOLDER}}" if "{" not in "{{VERSION_PLACEHOLDER}}" else "0.0.0"
@@ -153,4 +160,50 @@ class NFTableManager(Singleton):
 
     def raw_list(self):
         return self.cmd({"list": {"ruleset": None}})["nftables"]
-
+def _json_like(obj: BaseModel|List[BaseModel], unset=False, convert_keys:dict[str, str]=None, exclude:list[str]=None, mode:str="json"):
+    res = obj.model_dump(mode=mode, exclude_unset=not unset)
+    if convert_keys:
+        for from_k, to_k in convert_keys.items():
+            if from_k in res:
+                res[to_k] = res.pop(from_k)
+    if exclude:
+        for ele in exclude:
+            if ele in res:
+                del res[ele]
+    return res
+
+def json_like(obj: BaseModel|List[BaseModel], unset=False, convert_keys:dict[str, str]=None, exclude:list[str]=None, 
mode:str="json") -> dict: + if isinstance(obj, list): + return [_json_like(ele, unset=unset, convert_keys=convert_keys, exclude=exclude, mode=mode) for ele in obj] + return _json_like(obj, unset=unset, convert_keys=convert_keys, exclude=exclude, mode=mode) + +def register_event(sio_server: AsyncServer, event_name: str, model: BaseModel, response_model: BaseModel|None = None): + def decorator(func): + @sio_server.on(event_name) # Automatically registers the event + @wraps(func) + async def wrapper(sid, data): + try: + # Parse and validate incoming data + parsed_data = model.model_validate(data) + except ValidationError: + return json_like(StatusMessageModel(status=f"Invalid {event_name} request")) + + # Call the original function with the parsed data + result = await func(sid, parsed_data) + # If a response model is provided, validate the output + if response_model: + try: + parsed_result = response_model.model_validate(result) + except ValidationError: + traceback.print_exc() + return json_like(StatusMessageModel(status=f"SERVER ERROR: Invalid {event_name} response")) + else: + parsed_result = result + # Emit the validated result + if parsed_result: + if isinstance(parsed_result, BaseModel): + return json_like(parsed_result) + return parsed_result + return wrapper + return decorator + diff --git a/backend/utils/loader.py b/backend/utils/loader.py index 435c8c2..d13b9d3 100644 --- a/backend/utils/loader.py +++ b/backend/utils/loader.py @@ -7,6 +7,8 @@ from starlette.responses import StreamingResponse from fastapi.responses import FileResponse from utils import DEBUG, ON_DOCKER, ROUTERS_DIR, list_files, run_func from utils.models import ResetRequest +import asyncio +import traceback REACT_BUILD_DIR: str = "../frontend/build/" if not ON_DOCKER else "frontend/" REACT_HTML_PATH: str = os.path.join(REACT_BUILD_DIR,"index.html") @@ -69,6 +71,7 @@ def get_router_modules(): name=route )) except Exception as e: + traceback.print_exc() print(f"Router {route} failed to load: {e}") return res @@ -87,12 +90,9 @@ def load_routers(app): if router.shutdown: shutdowns.append(router.shutdown) async def reset(reset_option:ResetRequest): - for func in resets: - await run_func(func, reset_option) + await asyncio.gather(*[run_func(func, reset_option) for func in resets]) async def startup(): - for func in startups: - await run_func(func) + await asyncio.gather(*[run_func(func) for func in startups]) async def shutdown(): - for func in shutdowns: - await run_func(func) + await asyncio.gather(*[run_func(func) for func in shutdowns]) return reset, startup, shutdown diff --git a/docs/FiregexInternals.png b/docs/FiregexInternals.png index 6a19f3c..7ecc3e0 100644 Binary files a/docs/FiregexInternals.png and b/docs/FiregexInternals.png differ diff --git a/docs/Firegex_Screenshot.png b/docs/Firegex_Screenshot.png index 7960b95..2532aba 100644 Binary files a/docs/Firegex_Screenshot.png and b/docs/Firegex_Screenshot.png differ diff --git a/proxy-client/MANIFEST.in b/fgex-lib/MANIFEST.in similarity index 100% rename from proxy-client/MANIFEST.in rename to fgex-lib/MANIFEST.in diff --git a/proxy-client/README.md b/fgex-lib/README.md similarity index 100% rename from proxy-client/README.md rename to fgex-lib/README.md diff --git a/proxy-client/fgex-pip/fgex/__main__.py b/fgex-lib/fgex old mode 100644 new mode 100755 similarity index 100% rename from proxy-client/fgex-pip/fgex/__main__.py rename to fgex-lib/fgex diff --git a/proxy-client/fgex-pip/README.md b/fgex-lib/fgex-pip/README.md similarity index 100% rename from 
proxy-client/fgex-pip/README.md rename to fgex-lib/fgex-pip/README.md diff --git a/proxy-client/fgex-pip/fgex/__init__.py b/fgex-lib/fgex-pip/fgex/__init__.py similarity index 100% rename from proxy-client/fgex-pip/fgex/__init__.py rename to fgex-lib/fgex-pip/fgex/__init__.py diff --git a/proxy-client/firegex/__main__.py b/fgex-lib/fgex-pip/fgex/__main__.py similarity index 71% rename from proxy-client/firegex/__main__.py rename to fgex-lib/fgex-pip/fgex/__main__.py index adcf48a..810291c 100644 --- a/proxy-client/firegex/__main__.py +++ b/fgex-lib/fgex-pip/fgex/__main__.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -# TODO implement cli start function from firegex.cli import run if __name__ == "__main__": diff --git a/proxy-client/fgex-pip/setup.py b/fgex-lib/fgex-pip/setup.py similarity index 92% rename from proxy-client/fgex-pip/setup.py rename to fgex-lib/fgex-pip/setup.py index b4bf8ce..37c85b2 100644 --- a/proxy-client/fgex-pip/setup.py +++ b/fgex-lib/fgex-pip/setup.py @@ -5,11 +5,11 @@ with open("README.md", "r", encoding="utf-8") as fh: setuptools.setup( name="fgex", - version="0.0.0", + version="0.0.1", author="Pwnzer0tt1", author_email="pwnzer0tt1@poliba.it", py_modules=["fgex"], - install_requires=["fgex"], + install_requires=["firegex"], include_package_data=True, description="Firegex client", long_description=long_description, diff --git a/proxy-client/firegex/__init__.py b/fgex-lib/firegex/__init__.py similarity index 86% rename from proxy-client/firegex/__init__.py rename to fgex-lib/firegex/__init__.py index 5f63222..fc84752 100644 --- a/proxy-client/firegex/__init__.py +++ b/fgex-lib/firegex/__init__.py @@ -2,6 +2,4 @@ __version__ = "{{VERSION_PLACEHOLDER}}" if "{" not in "{{VERSION_PLACEHOLDER}}" else "0.0.0" #Exported functions -__all__ = [ - -] \ No newline at end of file +__all__ = [] diff --git a/proxy-client/fgex b/fgex-lib/firegex/__main__.py old mode 100755 new mode 100644 similarity index 71% rename from proxy-client/fgex rename to fgex-lib/firegex/__main__.py index adcf48a..810291c --- a/proxy-client/fgex +++ b/fgex-lib/firegex/__main__.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -# TODO implement cli start function from firegex.cli import run if __name__ == "__main__": diff --git a/fgex-lib/firegex/cli.py b/fgex-lib/firegex/cli.py new file mode 100644 index 0000000..99b1207 --- /dev/null +++ b/fgex-lib/firegex/cli.py @@ -0,0 +1,76 @@ + +#!/usr/bin/env python3 + +import typer +from rich import print +from rich.markup import escape +from typer import Exit +from firegex import __version__ +from firegex.nfproxy.proxysim import run_proxy_simulation +from firegex.nfproxy.models import Protocols +import os +import socket + +app = typer.Typer( + no_args_is_help=True, + context_settings={"help_option_names": ["-h", "--help"]} +) + +def close_cli(code:int=1): + raise Exit(code) + +DEV_MODE = __version__ == "0.0.0" + +def test_connection(host, port, use_ipv6=False): + family = socket.AF_INET6 if use_ipv6 else socket.AF_INET + sock = socket.socket(family, socket.SOCK_STREAM) + + try: + sock.settimeout(3) + sock.connect((host, port)) + return True + except Exception: + return False + finally: + sock.close() + +@app.command(help="Run an nfproxy simulation") +def nfproxy( + filter_file: str = typer.Argument(..., help="The path to the filter file"), + address: str = typer.Argument(..., help="The address of the target to proxy"), + port: int = typer.Argument(..., help="The port of the target to proxy"), + + proto: Protocols = typer.Option(Protocols.TCP.value, help="The protocol to 
proxy"), + from_address: str = typer.Option(None, help="The address of the local server"), + from_port: int = typer.Option(7474, help="The port of the local server"), + ipv6: bool = typer.Option(False, "-6", help="Use IPv6 for the connection"), +): + if from_address is None: + from_address = "::1" if ipv6 else "127.0.0.1" + if not os.path.isfile(filter_file): + print(f"[bold red]'{escape(os.path.abspath(filter_file))}' not found[/]") + close_cli() + if not test_connection(address, port, ipv6): + print(f"[bold red]Can't connect to {escape(address)}:{port}[/]") + close_cli() + run_proxy_simulation(filter_file, proto.value, address, port, from_address, from_port, ipv6) + +def version_callback(verison: bool): + if verison: + print(__version__, "Development Mode" if DEV_MODE else "Release") + raise typer.Exit() + +@app.callback() +def main( + verison: bool = typer.Option(False, "--version", "-v", help="Show the version of the client", callback=version_callback), +): + pass + +def run(): + try: + app() + except KeyboardInterrupt: + print("[bold yellow]Operation cancelled[/]") + +if __name__ == "__main__": + run() diff --git a/fgex-lib/firegex/nfproxy/__init__.py b/fgex-lib/firegex/nfproxy/__init__.py new file mode 100644 index 0000000..4bcbeed --- /dev/null +++ b/fgex-lib/firegex/nfproxy/__init__.py @@ -0,0 +1,39 @@ +import functools +from firegex.nfproxy.models import RawPacket, TCPInputStream, TCPOutputStream, TCPClientStream, TCPServerStream +from firegex.nfproxy.internals.models import Action, FullStreamAction + +ACCEPT = Action.ACCEPT +DROP = Action.DROP +REJECT = Action.REJECT +UNSTABLE_MANGLE = Action.MANGLE + +def pyfilter(func): + """ + Decorator to mark functions that will be used in the proxy. + Stores the function reference in a global registry. 
diff --git a/fgex-lib/firegex/nfproxy/__init__.py b/fgex-lib/firegex/nfproxy/__init__.py
new file mode 100644
index 0000000..4bcbeed
--- /dev/null
+++ b/fgex-lib/firegex/nfproxy/__init__.py
@@ -0,0 +1,39 @@
+import functools
+from firegex.nfproxy.models import RawPacket, TCPInputStream, TCPOutputStream, TCPClientStream, TCPServerStream
+from firegex.nfproxy.internals.models import Action, FullStreamAction
+
+ACCEPT = Action.ACCEPT
+DROP = Action.DROP
+REJECT = Action.REJECT
+UNSTABLE_MANGLE = Action.MANGLE
+
+def pyfilter(func):
+    """
+    Decorator to mark functions that will be used in the proxy.
+    Stores the function reference in a global registry.
+    """
+    if not hasattr(pyfilter, "registry"):
+        pyfilter.registry = set()
+
+    pyfilter.registry.add(func.__name__)
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        return func(*args, **kwargs)
+
+    return wrapper
+
+def get_pyfilters():
+    """Returns the list of functions marked with @pyfilter."""
+    return list(pyfilter.registry)
+
+def clear_pyfilter_registry():
+    """Clears the pyfilter registry."""
+    if hasattr(pyfilter, "registry"):
+        pyfilter.registry.clear()
+
+__all__ = [
+    "ACCEPT", "DROP", "REJECT", "UNSTABLE_MANGLE",
+    "Action", "FullStreamAction", "pyfilter",
+    "RawPacket", "TCPInputStream", "TCPOutputStream", "TCPClientStream", "TCPServerStream"
+]
\ No newline at end of file
diff --git a/fgex-lib/firegex/nfproxy/internals/__init__.py b/fgex-lib/firegex/nfproxy/internals/__init__.py
new file mode 100644
index 0000000..9dbd689
--- /dev/null
+++ b/fgex-lib/firegex/nfproxy/internals/__init__.py
@@ -0,0 +1,160 @@
+from inspect import signature
+from firegex.nfproxy.internals.models import Action, FullStreamAction
+from firegex.nfproxy.internals.models import FilterHandler, PacketHandlerResult
+import functools
+from firegex.nfproxy.internals.data import DataStreamCtx
+from firegex.nfproxy.internals.exceptions import NotReadyToRun, StreamFullReject, DropPacket, RejectConnection, StreamFullDrop
+from firegex.nfproxy.internals.data import RawPacket
+
+def context_call(glob, func, *args, **kargs):
+    glob["__firegex_tmp_args"] = args
+    glob["__firegex_tmp_kargs"] = kargs
+    glob["__firegex_tmp_call"] = func
+    res = eval("__firegex_tmp_call(*__firegex_tmp_args, **__firegex_tmp_kargs)", glob, glob)
+    if "__firegex_tmp_args" in glob.keys():
+        del glob["__firegex_tmp_args"]
+    if "__firegex_tmp_kargs" in glob.keys():
+        del glob["__firegex_tmp_kargs"]
+    if "__firegex_tmp_call" in glob.keys():
+        del glob["__firegex_tmp_call"]
+    return res
+
+def generate_filter_structure(filters: list[str], proto:str, glob:dict) -> list[FilterHandler]:
+    from firegex.nfproxy.models import type_annotations_associations
+    if proto not in type_annotations_associations.keys():
+        raise Exception("Invalid protocol")
+    res = []
+    valid_annotation_type = type_annotations_associations[proto]
+    def add_func_to_list(func):
+        if not callable(func):
+            raise Exception(f"{func} is not a function")
+        sig = signature(func)
+        params_function = {}
+
+        for k, v in sig.parameters.items():
+            if v.annotation in valid_annotation_type.keys():
+                params_function[v.annotation] = valid_annotation_type[v.annotation]
+            else:
+                raise Exception(f"Invalid type annotation {v.annotation} for function {func.__name__}")
+
+        res.append(
+            FilterHandler(
+                func=func,
+                name=func.__name__,
+                params=params_function,
+                proto=proto
+            )
+        )
+
+    for filter in filters:
+        if not isinstance(filter, str):
+            raise Exception("Invalid filter list: must be a list of strings")
+        if filter in glob.keys():
+            add_func_to_list(glob[filter])
+        else:
+            raise Exception(f"Filter {filter} not found")
+    return res
+
+def get_filters_info(code:str, proto:str) -> list[FilterHandler]:
+    glob = {}
+    exec("import firegex.nfproxy", glob, glob)
+    exec("firegex.nfproxy.clear_pyfilter_registry()", glob, glob)
+    exec(code, glob, glob)
+    filters = eval("firegex.nfproxy.get_pyfilters()", glob, glob)
+    try:
+        return generate_filter_structure(filters, proto, glob)
+    finally:
+        exec("firegex.nfproxy.clear_pyfilter_registry()", glob, glob)
+
+
+def get_filter_names(code:str, proto:str) -> list[str]:
+    return [ele.name for ele in get_filters_info(code, proto)]
+
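Putting the decorator and the compile pipeline together, a filter module that get_filter_names() would accept looks roughly like this. It is a sketch: the function body and names are invented, and stream.data is an assumption about TCPInputStream's attributes (its exact interface lives in firegex.nfproxy.models.tcp); the decorator, annotation dispatch, and return values come from the API above:

    from firegex.nfproxy import pyfilter, ACCEPT, DROP
    from firegex.nfproxy.models import TCPInputStream

    @pyfilter
    def block_flag_exfil(stream: TCPInputStream):
        # the parameter annotation decides what the engine passes in
        # (see generate_filter_structure above)
        if b"FLAG{" in stream.data:   # .data is assumed here for illustration
            return DROP
        return ACCEPT                  # returning None also means accept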
+def handle_packet(glob: dict) -> None:
+    internal_data = DataStreamCtx(glob)
+
+    cache_call = {} # Cache of the data handler calls
+    cache_call[RawPacket] = internal_data.current_pkt
+
+    final_result = Action.ACCEPT
+    result = PacketHandlerResult(glob)
+
+    func_name = None
+    mangled_packet = None
+    for filter in internal_data.filter_call_info:
+        final_params = []
+        skip_call = False
+        for data_type, data_func in filter.params.items():
+            if data_type not in cache_call.keys():
+                try:
+                    cache_call[data_type] = data_func(internal_data)
+                except NotReadyToRun:
+                    cache_call[data_type] = None
+                    skip_call = True
+                    break
+                except StreamFullDrop:
+                    result.action = Action.DROP
+                    result.matched_by = "@MAX_STREAM_SIZE_REACHED"
+                    return result.set_result()
+                except StreamFullReject:
+                    result.action = Action.REJECT
+                    result.matched_by = "@MAX_STREAM_SIZE_REACHED"
+                    return result.set_result()
+                except DropPacket:
+                    result.action = Action.DROP
+                    result.matched_by = filter.name
+                    return result.set_result()
+                except RejectConnection:
+                    result.action = Action.REJECT
+                    result.matched_by = filter.name
+                    return result.set_result()
+            if cache_call[data_type] is None:
+                skip_call = True
+                break
+            final_params.append(cache_call[data_type])
+
+        if skip_call:
+            continue
+
+        res = context_call(glob, filter.func, *final_params)
+
+        if res is None:
+            continue #ACCEPTED
+        if not isinstance(res, Action):
+            raise Exception(f"Invalid return type {type(res)} for function {filter.name}")
+        if res == Action.MANGLE:
+            mangled_packet = internal_data.current_pkt.raw_packet
+        if res != Action.ACCEPT:
+            func_name = filter.name
+            final_result = res
+            break
+
+    result.action = final_result
+    result.matched_by = func_name
+    result.mangled_packet = mangled_packet
+
+    return result.set_result()
+
+
+def compile(glob:dict) -> None:
+    internal_data = DataStreamCtx(glob, init_pkt=False)
+
+    glob["print"] = functools.partial(print, flush = True)
+
+    filters = glob["__firegex_pyfilter_enabled"]
+    proto = glob["__firegex_proto"]
+
+    internal_data.filter_call_info = generate_filter_structure(filters, proto, glob)
+
+    if "FGEX_STREAM_MAX_SIZE" in glob and int(glob["FGEX_STREAM_MAX_SIZE"]) > 0:
+        internal_data.stream_max_size = int(glob["FGEX_STREAM_MAX_SIZE"])
+    else:
+        internal_data.stream_max_size = 2**20 # 1MiB default value
+
+    if "FGEX_FULL_STREAM_ACTION" in glob and isinstance(glob["FGEX_FULL_STREAM_ACTION"], FullStreamAction):
+        internal_data.full_stream_action = glob["FGEX_FULL_STREAM_ACTION"]
+    else:
+        internal_data.full_stream_action = FullStreamAction.FLUSH
+
+    PacketHandlerResult(glob).reset_result()
+
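compile() reads its configuration from module globals, so a filter file can tune the stream buffer inline. The recognized knobs are taken literally from the code above; the values below are just examples:

    from firegex.nfproxy.internals.models import FullStreamAction

    # Optional globals honoured by firegex.nfproxy.internals.compile():
    FGEX_STREAM_MAX_SIZE = 2**20                     # cap the reassembled stream at 1 MiB
    FGEX_FULL_STREAM_ACTION = FullStreamAction.DROP  # what to do when the cap is hit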
diff --git a/fgex-lib/firegex/nfproxy/internals/data.py b/fgex-lib/firegex/nfproxy/internals/data.py
new file mode 100644
index 0000000..ce5062c
--- /dev/null
+++ b/fgex-lib/firegex/nfproxy/internals/data.py
@@ -0,0 +1,142 @@
+from firegex.nfproxy.internals.models import FilterHandler
+from firegex.nfproxy.internals.models import FullStreamAction
+
+class RawPacket:
+    """
+    Class representation of the nfqueue packet sent in this context by the C++ core
+    """
+
+    def __init__(self,
+            data: bytes,
+            raw_packet: bytes,
+            is_input: bool,
+            is_ipv6: bool,
+            is_tcp: bool,
+            l4_size: int,
+        ):
+        self.__data = bytes(data)
+        self.__raw_packet = bytes(raw_packet)
+        self.__is_input = bool(is_input)
+        self.__is_ipv6 = bool(is_ipv6)
+        self.__is_tcp = bool(is_tcp)
+        self.__l4_size = int(l4_size)
+        self.__raw_packet_header_size = len(self.__raw_packet)-self.__l4_size
+
+    @property
+    def is_input(self) -> bool:
+        return self.__is_input
+
+    @property
+    def is_ipv6(self) -> bool:
+        return self.__is_ipv6
+
+    @property
+    def is_tcp(self) -> bool:
+        return self.__is_tcp
+
+    @property
+    def data(self) -> bytes:
+        return self.__data
+
+    @property
+    def l4_size(self) -> int:
+        return self.__l4_size
+
+    @property
+    def raw_packet_header_len(self) -> int:
+        return self.__raw_packet_header_size
+
+    @property
+    def l4_data(self) -> bytes:
+        return self.__raw_packet[self.raw_packet_header_len:]
+
+    @l4_data.setter
+    def l4_data(self, v:bytes):
+        if not isinstance(v, bytes):
+            raise Exception("Invalid data type, data MUST be of type bytes")
+        #if len(v) != self.__l4_size:
+        #    raise Exception("Invalid data size, must be equal to the original packet header size (due to a technical limitation)")
+        self.raw_packet = self.__raw_packet[:self.raw_packet_header_len]+v
+
+    @property
+    def raw_packet(self) -> bytes:
+        return self.__raw_packet
+
+    @raw_packet.setter
+    def raw_packet(self, v:bytes):
+        if not isinstance(v, bytes):
+            raise Exception("Invalid data type, data MUST be of type bytes")
+        if len(v) > 2**16:
+            raise Exception("Invalid data size, must be less than 2^16 bytes")
+        #if len(v) != len(self.__raw_packet):
+        #    raise Exception("Invalid data size, must be equal to the original packet size (due to a technical limitation)")
+        if len(v) < self.raw_packet_header_len:
+            raise Exception("Invalid data size, must be greater than the original packet header size")
+        self.__raw_packet = v
+        self.__l4_size = len(v)-self.raw_packet_header_len
+
+    @classmethod
+    def _fetch_packet(cls, internal_data:"DataStreamCtx"):
+        if not isinstance(internal_data, DataStreamCtx):
+            if isinstance(internal_data, dict):
+                internal_data = DataStreamCtx(internal_data)
+            else:
+                raise Exception("Invalid data type, data MUST be of type DataStreamCtx or a glob dict")
+
+        if "__firegex_packet_info" not in internal_data.filter_glob.keys():
+            raise Exception("Packet info not found")
+        return cls(**internal_data.filter_glob["__firegex_packet_info"])
+
+    def __repr__(self):
+        return f"RawPacket(data={self.data}, raw_packet={self.raw_packet}, is_input={self.is_input}, is_ipv6={self.is_ipv6}, is_tcp={self.is_tcp}, l4_size={self.l4_size})"
+
+
+class DataStreamCtx:
+
+    def __init__(self, glob: dict, init_pkt: bool = True):
+        if "__firegex_pyfilter_ctx" not in glob.keys():
+            glob["__firegex_pyfilter_ctx"] = {}
+        self.__data = glob["__firegex_pyfilter_ctx"]
+        self.filter_glob = glob
+        self.current_pkt = RawPacket._fetch_packet(self) if init_pkt else None
+
+    @property
+    def filter_call_info(self) -> list[FilterHandler]:
+        if "filter_call_info" not in self.__data.keys():
+            self.__data["filter_call_info"] = []
+        return self.__data.get("filter_call_info")
+
+    @filter_call_info.setter
+    def filter_call_info(self, v: list[FilterHandler]):
+        self.__data["filter_call_info"] = v
+
+    @property
+    def stream_max_size(self) -> int:
+        if "stream_max_size" not in self.__data.keys():
+            self.__data["stream_max_size"] = 2**20
+        return self.__data.get("stream_max_size")
+
+    @stream_max_size.setter
+    def stream_max_size(self, v: int):
+        self.__data["stream_max_size"] = v
+
+    @property
+    def full_stream_action(self) -> FullStreamAction:
+        if "full_stream_action" not in self.__data.keys():
+            self.__data["full_stream_action"] = FullStreamAction.FLUSH
+        return self.__data.get("full_stream_action")
+
+    @full_stream_action.setter
+    def full_stream_action(self, v: FullStreamAction):
+        self.__data["full_stream_action"] = v
+
+    @property
+    def data_handler_context(self) -> dict:
+        if "data_handler_context" not in self.__data.keys():
self.__data["data_handler_context"] = {} + return self.__data.get("data_handler_context") + + @data_handler_context.setter + def data_handler_context(self, v: dict): + self.__data["data_handler_context"] = v + diff --git a/fgex-lib/firegex/nfproxy/internals/exceptions.py b/fgex-lib/firegex/nfproxy/internals/exceptions.py new file mode 100644 index 0000000..6c953c3 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/internals/exceptions.py @@ -0,0 +1,15 @@ + +class NotReadyToRun(Exception): + "raise this exception if the stream state is not ready to parse this object, the call will be skipped" + +class DropPacket(Exception): + "raise this exception if you want to drop the packet" + +class StreamFullDrop(Exception): + "raise this exception if you want to drop the packet due to full stream" + +class RejectConnection(Exception): + "raise this exception if you want to reject the connection" + +class StreamFullReject(Exception): + "raise this exception if you want to reject the connection due to full stream" diff --git a/fgex-lib/firegex/nfproxy/internals/models.py b/fgex-lib/firegex/nfproxy/internals/models.py new file mode 100644 index 0000000..86c1819 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/internals/models.py @@ -0,0 +1,40 @@ +from dataclasses import dataclass, field +from enum import Enum + +class Action(Enum): + ACCEPT = 0 + DROP = 1 + REJECT = 2 + MANGLE = 3 + +class FullStreamAction(Enum): + FLUSH = 0 + ACCEPT = 1 + REJECT = 2 + DROP = 3 + +@dataclass +class FilterHandler: + func: callable + name: str + params: dict[type, callable] + proto: str + +@dataclass +class PacketHandlerResult: + glob: dict = field(repr=False) + action: Action = Action.ACCEPT + matched_by: str = None + mangled_packet: bytes = None + + def set_result(self) -> None: + self.glob["__firegex_pyfilter_result"] = { + "action": self.action.value, + "matched_by": self.matched_by, + "mangled_packet": self.mangled_packet + } + + def reset_result(self) -> None: + self.glob["__firegex_pyfilter_result"] = None + + diff --git a/fgex-lib/firegex/nfproxy/models/__init__.py b/fgex-lib/firegex/nfproxy/models/__init__.py new file mode 100644 index 0000000..9da4c6a --- /dev/null +++ b/fgex-lib/firegex/nfproxy/models/__init__.py @@ -0,0 +1,31 @@ +from firegex.nfproxy.models.tcp import TCPInputStream, TCPOutputStream, TCPClientStream, TCPServerStream +from firegex.nfproxy.models.http import HttpRequest, HttpResponse, HttpRequestHeader, HttpResponseHeader +from firegex.nfproxy.internals.data import RawPacket +from enum import Enum + +type_annotations_associations = { + "tcp": { + RawPacket: RawPacket._fetch_packet, + TCPInputStream: TCPInputStream._fetch_packet, + TCPOutputStream: TCPOutputStream._fetch_packet, + }, + "http": { + RawPacket: RawPacket._fetch_packet, + TCPInputStream: TCPInputStream._fetch_packet, + TCPOutputStream: TCPOutputStream._fetch_packet, + HttpRequest: HttpRequest._fetch_packet, + HttpResponse: HttpResponse._fetch_packet, + HttpRequestHeader: HttpRequestHeader._fetch_packet, + HttpResponseHeader: HttpResponseHeader._fetch_packet, + } +} + +class Protocols(Enum): + TCP = "tcp" + HTTP = "http" + +__all__ = [ + "RawPacket", + "TCPInputStream", "TCPOutputStream", "TCPClientStream", "TCPServerStream", + "HttpRequest", "HttpResponse", "HttpRequestHeader", "HttpResponseHeader", "Protocols" +] \ No newline at end of file diff --git a/fgex-lib/firegex/nfproxy/models/http.py b/fgex-lib/firegex/nfproxy/models/http.py new file mode 100644 index 0000000..9cbc5f0 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/models/http.py @@ 
-0,0 +1,320 @@ +import pyllhttp +from firegex.nfproxy.internals.exceptions import NotReadyToRun +from firegex.nfproxy.internals.data import DataStreamCtx +from firegex.nfproxy.internals.exceptions import StreamFullDrop, StreamFullReject +from firegex.nfproxy.internals.models import FullStreamAction + +class InternalCallbackHandler(): + + url: str|None = None + _url_buffer: bytes = b"" + headers: dict[str, str] = {} + lheaders: dict[str, str] = {} # Lowercase headers + _header_fields: dict[bytes, bytes] = {} + has_begun: bool = False + body: bytes = None + _body_buffer: bytes = b"" + headers_complete: bool = False + message_complete: bool = False + status: str|None = None + _status_buffer: bytes = b"" + _current_header_field = b"" + _current_header_value = b"" + _save_body = True + total_size = 0 + + def on_message_begin(self): + self.has_begun = True + + def on_url(self, url): + self.total_size += len(url) + self._url_buffer += url + + def on_url_complete(self): + self.url = self._url_buffer.decode(errors="ignore") + self._url_buffer = None + + def on_header_field(self, field): + self.total_size += len(field) + self._current_header_field += field + + def on_header_field_complete(self): + self._current_header_field = self._current_header_field + + def on_header_value(self, value): + self.total_size += len(value) + self._current_header_value += value + + def on_header_value_complete(self): + if self._current_header_value is not None and self._current_header_field is not None: + self._header_fields[self._current_header_field.decode(errors="ignore")] = self._current_header_value.decode(errors="ignore") + self._current_header_field = b"" + self._current_header_value = b"" + + def on_headers_complete(self): + self.headers_complete = True + self.headers = self._header_fields + self.lheaders = {k.lower(): v for k, v in self._header_fields.items()} + self._header_fields = {} + self._current_header_field = b"" + self._current_header_value = b"" + + def on_body(self, body: bytes): + if self._save_body: + self.total_size += len(body) + self._body_buffer += body + + def on_message_complete(self): + self.body = self._body_buffer + self._body_buffer = b"" + try: + if "gzip" in self.content_encoding.lower(): + import gzip + import io + with gzip.GzipFile(fileobj=io.BytesIO(self.body)) as f: + self.body = f.read() + except Exception as e: + print(f"Error decompressing gzip: {e}: skipping", flush=True) + self.message_complete = True + + def on_status(self, status: bytes): + self.total_size += len(status) + self._status_buffer += status + + def on_status_complete(self): + self.status = self._status_buffer.decode(errors="ignore") + self._status_buffer = b"" + + @property + def user_agent(self) -> str: + return self.lheaders.get("user-agent", "") + + @property + def content_encoding(self) -> str: + return self.lheaders.get("content-encoding", "") + + @property + def content_type(self) -> str: + return self.lheaders.get("content-type", "") + + @property + def keep_alive(self) -> bool: + return self.should_keep_alive + + @property + def should_upgrade(self) -> bool: + return self.is_upgrading + + @property + def http_version(self) -> str: + return f"{self.major}.{self.minor}" + + @property + def method_parsed(self) -> str: + return self.method.decode(errors="ignore") + + @property + def content_length_parsed(self) -> int: + return self.content_length + + +class InternalHttpRequest(InternalCallbackHandler, pyllhttp.Request): + def __init__(self): + super(InternalCallbackHandler, self).__init__() + 
super(pyllhttp.Request, self).__init__() + +class InternalHttpResponse(InternalCallbackHandler, pyllhttp.Response): + def __init__(self): + super(InternalCallbackHandler, self).__init__() + super(pyllhttp.Response, self).__init__() + +class InternalBasicHttpMetaClass: + + def __init__(self): + self._parser: InternalHttpRequest|InternalHttpResponse + self._headers_were_set = False + self.stream = b"" + self.raised_error = False + + @property + def total_size(self) -> int: + return self._parser.total_size + + @property + def url(self) -> str|None: + return self._parser.url + + @property + def headers(self) -> dict[str, str]: + return self._parser.headers + + @property + def user_agent(self) -> str: + return self._parser.user_agent + + @property + def content_encoding(self) -> str: + return self._parser.content_encoding + + @property + def has_begun(self) -> bool: + return self._parser.has_begun + + @property + def body(self) -> bytes: + return self._parser.body + + @property + def headers_complete(self) -> bool: + return self._parser.headers_complete + + @property + def message_complete(self) -> bool: + return self._parser.message_complete + + @property + def http_version(self) -> str: + return self._parser.http_version + + @property + def keep_alive(self) -> bool: + return self._parser.keep_alive + + @property + def should_upgrade(self) -> bool: + return self._parser.should_upgrade + + @property + def content_length(self) -> int|None: + return self._parser.content_length_parsed + + def get_header(self, header: str, default=None) -> str: + return self._parser.lheaders.get(header.lower(), default) + + def _packet_to_stream(self, internal_data: DataStreamCtx): + return self.should_upgrade and self._parser._save_body + + def _fetch_current_packet(self, internal_data: DataStreamCtx): + if self._packet_to_stream(internal_data): # This is a websocket upgrade! 
+ self._parser.total_size += len(internal_data.current_pkt.data) + self.stream += internal_data.current_pkt.data + else: + try: + self._parser.execute(internal_data.current_pkt.data) + if not self._parser.message_complete and self._parser.headers_complete and len(self._parser._body_buffer) == self._parser.content_length_parsed: + self._parser.on_message_complete() + except Exception as e: + self.raised_error = True + print(f"Error parsing HTTP packet: {e} {internal_data.current_pkt}", self, flush=True) + raise e + + # Called the first time once the headers are complete, and a second time once the body is complete + def _after_fetch_callable_checks(self, internal_data: DataStreamCtx): + if self._parser.headers_complete and not self._headers_were_set: + self._headers_were_set = True + return True + return self._parser.message_complete or self.should_upgrade + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return True + + def _trigger_remove_data(self, internal_data: DataStreamCtx): + return self.message_complete and not self.should_upgrade + + @classmethod + def _fetch_packet(cls, internal_data: DataStreamCtx): + if internal_data.current_pkt is None or internal_data.current_pkt.is_tcp is False: + raise NotReadyToRun() + + datahandler:InternalBasicHttpMetaClass = internal_data.data_handler_context.get(cls, None) + if datahandler is None or datahandler.raised_error: + datahandler = cls() + internal_data.data_handler_context[cls] = datahandler + + if not datahandler._before_fetch_callable_checks(internal_data): + raise NotReadyToRun() + + # Memory size management + if datahandler.total_size+len(internal_data.current_pkt.data) > internal_data.stream_max_size: + match internal_data.full_stream_action: + case FullStreamAction.FLUSH: + datahandler = cls() + internal_data.data_handler_context[cls] = datahandler + case FullStreamAction.REJECT: + raise StreamFullReject() + case FullStreamAction.DROP: + raise StreamFullDrop() + case FullStreamAction.ACCEPT: + raise NotReadyToRun() + + datahandler._fetch_current_packet(internal_data) + + if not datahandler._after_fetch_callable_checks(internal_data): + raise NotReadyToRun() + + if datahandler._trigger_remove_data(internal_data): + if internal_data.data_handler_context.get(cls): + del internal_data.data_handler_context[cls] + + return datahandler + +class HttpRequest(InternalBasicHttpMetaClass): + def __init__(self): + super().__init__() + # These are used by the base class + self._parser: InternalHttpRequest = InternalHttpRequest() + self._headers_were_set = False + + @property + def method(self) -> str: + return self._parser.method_parsed + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return internal_data.current_pkt.is_input + + def __repr__(self): + return f"" + +class HttpResponse(InternalBasicHttpMetaClass): + def __init__(self): + super().__init__() + self._parser: InternalHttpResponse = InternalHttpResponse() + self._headers_were_set = False + + @property + def status_code(self) -> int: + return self._parser.status + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return not internal_data.current_pkt.is_input + + def __repr__(self): + return f"" + +class HttpRequestHeader(HttpRequest): + def __init__(self): + super().__init__() + self._parser._save_body = False + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return internal_data.current_pkt.is_input and not self._headers_were_set + + def _after_fetch_callable_checks(self, internal_data:
DataStreamCtx): + if self._parser.headers_complete and not self._headers_were_set: + self._headers_were_set = True + return True + return False + +class HttpResponseHeader(HttpResponse): + def __init__(self): + super().__init__() + self._parser._save_body = False + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return not internal_data.current_pkt.is_input and not self._headers_were_set + + def _after_fetch_callable_checks(self, internal_data: DataStreamCtx): + if self._parser.headers_complete and not self._headers_were_set: + self._headers_were_set = True + return True + return False \ No newline at end of file diff --git a/fgex-lib/firegex/nfproxy/models/tcp.py b/fgex-lib/firegex/nfproxy/models/tcp.py new file mode 100644 index 0000000..fc46431 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/models/tcp.py @@ -0,0 +1,80 @@ +from firegex.nfproxy.internals.data import DataStreamCtx +from firegex.nfproxy.internals.exceptions import NotReadyToRun, StreamFullDrop, StreamFullReject +from firegex.nfproxy.internals.models import FullStreamAction + +class InternalTCPStream: + def __init__(self, + data: bytes, + is_ipv6: bool, + ): + self.data = bytes(data) + self.__is_ipv6 = bool(is_ipv6) + self.__total_stream_size = len(data) + + @property + def is_ipv6(self) -> bool: + return self.__is_ipv6 + + @property + def total_stream_size(self) -> int: + return self.__total_stream_size + + def _push_new_data(self, data: bytes): + self.data += data + self.__total_stream_size += len(data) + + @classmethod + def _fetch_packet(cls, internal_data:DataStreamCtx, is_input:bool=False): + if internal_data.current_pkt is None or internal_data.current_pkt.is_tcp is False: + raise NotReadyToRun() + if internal_data.current_pkt.is_input != is_input: + raise NotReadyToRun() + datahandler: InternalTCPStream = internal_data.data_handler_context.get(cls, None) + if datahandler is None: + datahandler = cls(internal_data.current_pkt.data, internal_data.current_pkt.is_ipv6) + internal_data.data_handler_context[cls] = datahandler + else: + if datahandler.total_stream_size+len(internal_data.current_pkt.data) > internal_data.stream_max_size: + match internal_data.full_stream_action: + case FullStreamAction.FLUSH: + datahandler = cls(internal_data.current_pkt.data, internal_data.current_pkt.is_ipv6) + internal_data.data_handler_context[cls] = datahandler + case FullStreamAction.REJECT: + raise StreamFullReject() + case FullStreamAction.DROP: + raise StreamFullDrop() + case FullStreamAction.ACCEPT: + raise NotReadyToRun() + else: + datahandler._push_new_data(internal_data.current_pkt.data) + return datahandler + +class TCPInputStream(InternalTCPStream): + """ + This data model assembles the TCP input stream from the data sent by the client. + Functions that use this data model are called when: + - The packet is TCP + - At least 1 packet has been sent + - A new client packet has been received + """ + + @classmethod + def _fetch_packet(cls, internal_data:DataStreamCtx): + return super()._fetch_packet(internal_data, is_input=True) + +TCPClientStream = TCPInputStream + +class TCPOutputStream(InternalTCPStream): + """ + This data model assembles the TCP output stream from the data sent by the server.
+ Functions that use this data model are called when: + - The packet is TCP + - At least 1 packet has been sent + - A new server packet has been sent + """ + + @classmethod + def _fetch_packet(cls, internal_data:DataStreamCtx): + return super()._fetch_packet(internal_data, is_input=False) + +TCPServerStream = TCPOutputStream diff --git a/fgex-lib/firegex/nfproxy/proxysim/__init__.py b/fgex-lib/firegex/nfproxy/proxysim/__init__.py new file mode 100644 index 0000000..d604cd1 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/proxysim/__init__.py @@ -0,0 +1,303 @@ +import socket +import os +from firegex.nfproxy.internals import get_filter_names +import traceback +from multiprocessing import Process +from firegex.nfproxy import ACCEPT, DROP, REJECT, UNSTABLE_MANGLE +from rich.markup import escape +from rich import print +import asyncio +from watchfiles import awatch, Change + +fake_ip_header = b"FAKE:IP:TCP:HEADERS:" +fake_ip_header_len = len(fake_ip_header) + +MANGLE_WARNING = True + +class LogLevels: + INFO = "INFO" + WARNING = "WARNING" + ERROR = "ERROR" + DEBUG = "DEBUG" + +def load_level_str(level:str): + if level is None: + return "" + match level: + case LogLevels.INFO: + return "[chartreuse4 bold]\\[INFO][/]" + case LogLevels.WARNING: + return "[yellow bold]\\[WARNING][/]" + case LogLevels.ERROR: + return "[red bold]\\[ERROR][/]" + case LogLevels.DEBUG: + return "[blue bold]\\[DEBUG][/]" + case _: + return f"\\[[red bold]{escape(level)}[/]]" + +def log_print(module:str, *args, level:str = LogLevels.INFO, **kwargs): + return print(f"{load_level_str(level)}[deep_pink4 bold]\\[nfproxy][/][medium_orchid3 bold]\\[{escape(module)}][/]", *args, **kwargs) + +async def _watch_filter_file(filter_file: str, reload_action): + abs_path = os.path.abspath(filter_file) + directory = os.path.dirname(abs_path) + # Immediately call the reload action on startup. + if reload_action is not None: + reload_action() + log_print("observer", f"Listening for changes on {escape(abs_path)}") + try: + # Monitor the directory; set recursive=False since we only care about the specific file. + async for changes in awatch(directory, recursive=False): + # Process events and filter for our file. + for change in changes: + event, path = change + if os.path.abspath(path) == abs_path: + # Optionally, you can check the event type: + if event in {Change.modified, Change.deleted}: + if reload_action is not None: + reload_action() + except asyncio.CancelledError: + log_print("observer", "Watcher cancelled, stopping.") + +async def _forward_and_filter(filter_ctx: dict, + reader: asyncio.StreamReader, + writer: asyncio.StreamWriter, + is_input: bool, + is_ipv6: bool, + is_tcp: bool, + has_to_filter: bool = True): + """Asynchronously forward data from reader to writer applying filters.""" + try: + has_to_drop = False + while True: + try: + data = await reader.read(4096) + except Exception: + break + if not data: + break + if has_to_drop: + continue + if has_to_filter: + filter_ctx["__firegex_packet_info"] = { + "data": data, + "l4_size": len(data), + "raw_packet": fake_ip_header + data, + "is_input": is_input, + "is_ipv6": is_ipv6, + "is_tcp": is_tcp + } + try: + exec("firegex.nfproxy.internals.handle_packet(globals())", filter_ctx, filter_ctx) + except Exception as e: + log_print("packet-handling", + f"Error while executing filter: {escape(str(e))}, forwarding normally from now", + level=LogLevels.ERROR) + traceback.print_exc() + # Stop filtering and forward the packet as is.
+ has_to_filter = False + writer.write(data) + await writer.drain() + continue + finally: + filter_ctx.pop("__firegex_packet_info", None) + + result = filter_ctx.pop("__firegex_pyfilter_result", None) + if result is None or not isinstance(result, dict): + log_print("filter-parsing", "No result found", level=LogLevels.ERROR) + has_to_filter = False + writer.write(data) + await writer.drain() + continue + + action = result.get("action") + if action is None or not isinstance(action, int): + log_print("filter-parsing", "No action found", level=LogLevels.ERROR) + has_to_filter = False + writer.write(data) + await writer.drain() + continue + + if action == ACCEPT.value: + writer.write(data) + await writer.drain() + continue + + filter_name = result.get("matched_by") + if filter_name is None or not isinstance(filter_name, str): + log_print("filter-parsing", "No matched_by found", level=LogLevels.ERROR) + has_to_filter = False + writer.write(data) + await writer.drain() + continue + + if action == DROP.value: + log_print("drop-action", f"Dropping connection caused by {escape(filter_name)} pyfilter") + has_to_drop = True + continue + + if action == REJECT.value: + log_print("reject-action", f"Rejecting connection caused by {escape(filter_name)} pyfilter") + writer.close() + await writer.wait_closed() + return + + elif action == UNSTABLE_MANGLE.value: + mangled_packet = result.get("mangled_packet") + if mangled_packet is None or not isinstance(mangled_packet, bytes): + log_print("filter-parsing", "No mangled_packet found", level=LogLevels.ERROR) + has_to_filter = False + writer.write(data) + await writer.drain() + continue + log_print("mangle", f"Mangling packet caused by {escape(filter_name)} pyfilter") + if MANGLE_WARNING: + log_print("mangle", + "In the real execution mangling is not as stable as in the simulation; l4_data can differ from data", + level=LogLevels.WARNING) + writer.write(mangled_packet[fake_ip_header_len:]) + await writer.drain() + continue + else: + log_print("filter-parsing", f"Invalid action {action} found", level=LogLevels.ERROR) + has_to_filter = False + writer.write(data) + await writer.drain() + continue + else: + writer.write(data) + await writer.drain() + except Exception as exc: + log_print("forward_and_filter", f"Exception occurred: {escape(str(exc))}", level=LogLevels.ERROR) + finally: + writer.close() + try: + await writer.wait_closed() + except Exception: + pass + +async def _handle_connection( + reader: asyncio.StreamReader, writer: asyncio.StreamWriter, filter_code: str, + target_ip: str, target_port: int, ipv6: bool): + """Handle a new incoming connection and create a remote connection.""" + addr = writer.get_extra_info('peername') + log_print("listener", f"Accepted connection from {escape(addr[0])}:{addr[1]}") + try: + remote_reader, remote_writer = await asyncio.open_connection( + target_ip, target_port, + family=socket.AF_INET6 if ipv6 else socket.AF_INET) + except Exception as e: + log_print("listener", + f"Could not connect to remote {escape(target_ip)}:{target_port}: {escape(str(e))}", + level=LogLevels.ERROR) + writer.close() + await writer.wait_closed() + return + + try: + filter_ctx = {} + exec(filter_code, filter_ctx, filter_ctx) + except Exception as e: + log_print("listener", + f"Error while compiling filter context: {escape(str(e))}, forwarding normally", + level=LogLevels.ERROR) + traceback.print_exc() + filter_ctx = {} + # Create asynchronous tasks for bidirectional forwarding.
+ task1 = asyncio.create_task(_forward_and_filter(filter_ctx, reader, remote_writer, True, ipv6, True, True)) + task2 = asyncio.create_task(_forward_and_filter(filter_ctx, remote_reader, writer, False, ipv6, True, True)) + try: + await asyncio.gather(task1, task2) + except (KeyboardInterrupt, asyncio.CancelledError): + task1.cancel() + task2.cancel() + await asyncio.gather(task1, task2) + finally: + remote_writer.close() + await remote_writer.wait_closed() + +async def _execute_proxy( + filter_code: str, + target_ip: str, target_port: int, + local_ip: str = "127.0.0.1", local_port: int = 7474, + ipv6: bool = False +): + """Start the asyncio-based TCP proxy server.""" + addr_family = socket.AF_INET6 if ipv6 else socket.AF_INET + server = await asyncio.start_server( + lambda r, w: _handle_connection(r, w, filter_code, target_ip, target_port, ipv6), + local_ip, local_port, family=addr_family) + log_print("listener", f"TCP proxy listening on {escape(local_ip)}:{local_port} and forwarding to -> {escape(target_ip)}:{target_port}") + async with server: + await server.serve_forever() + + +def _proxy_asyncio_runner(filter_code: str, target_ip: str, target_port: int, local_ip: str, local_port: int, ipv6: bool): + try: + return asyncio.run(_execute_proxy(filter_code, target_ip, target_port, local_ip, local_port, ipv6)) + except KeyboardInterrupt: + log_print("listener", "Proxy server stopped", level=LogLevels.WARNING) + +def _build_filter(filepath:str, proto:str): + if os.path.isfile(filepath) is False: + raise Exception(f"Filter file {filepath} not found") + + with open(filepath, "r") as f: + filter_code = f.read() + + filters = get_filter_names(filter_code, proto) + filter_code += ( + "\n\n__firegex_pyfilter_enabled = [" + ", ".join([repr(f) for f in filters]) + "]\n" + "__firegex_proto = " + repr(proto) + "\n" + "import firegex.nfproxy.internals\n" + "firegex.nfproxy.internals.compile(globals())\n" + ) + + filter_glob = {} + exec(filter_code, filter_glob, filter_glob) # test compilation of filters + return filter_code + + +def run_proxy_simulation(filter_file:str, proto:str, target_ip:str, target_port:int, local_ip:str = None, local_port:int = 7474, ipv6:bool = False): + + if local_ip is None: + if ipv6: + local_ip = "::1" + else: + local_ip = "127.0.0.1" + + if os.path.isfile(filter_file) is False: + raise Exception(f"\\[nfproxy]\\[init] Filter file {filter_file} not found") + else: + filter_file = os.path.abspath(filter_file) + + proxy_process:Process|None = None + + def reload_proxy_proc(): + nonlocal proxy_process + if proxy_process is not None: + log_print("RELOADING", "Proxy reload triggered", level=LogLevels.WARNING) + proxy_process.kill() + proxy_process.join() + proxy_process = None + + compiled_filter = None + try: + compiled_filter = _build_filter(filter_file, proto) + except Exception: + log_print("reloader", f"Failed to build filter {escape(filter_file)}!", level=LogLevels.ERROR) + traceback.print_exc() + if compiled_filter is not None: + proxy_process = Process(target=_proxy_asyncio_runner, args=(compiled_filter, target_ip, target_port, local_ip, local_port, ipv6)) + proxy_process.start() + + try: + asyncio.run(_watch_filter_file(filter_file, reload_proxy_proc)) + except KeyboardInterrupt: + pass + finally: + if proxy_process is not None: + proxy_process.kill() + proxy_process.join() + + diff --git a/fgex-lib/requirements.txt b/fgex-lib/requirements.txt new file mode 100644 index 0000000..15e2e37 --- /dev/null +++ b/fgex-lib/requirements.txt @@ -0,0 +1,6 @@ +typer==0.15.2 
+pydantic>=2 +typing-extensions>=4.7.1 +watchfiles +fgex +pyllhttp diff --git a/proxy-client/setup.py b/fgex-lib/setup.py similarity index 100% rename from proxy-client/setup.py rename to fgex-lib/setup.py diff --git a/frontend/bun.lock b/frontend/bun.lock index 526f399..b28c77a 100644 --- a/frontend/bun.lock +++ b/frontend/bun.lock @@ -5,17 +5,19 @@ "name": "firegex-frontend", "dependencies": { "@hello-pangea/dnd": "^16.6.0", - "@mantine/core": "^7.16.2", - "@mantine/form": "^7.16.2", - "@mantine/hooks": "^7.16.2", - "@mantine/modals": "^7.16.2", - "@mantine/notifications": "^7.16.2", + "@mantine/code-highlight": "^7.17.0", + "@mantine/core": "^7.16.3", + "@mantine/form": "^7.16.3", + "@mantine/hooks": "^7.16.3", + "@mantine/modals": "^7.16.3", + "@mantine/notifications": "^7.16.3", "@tanstack/react-query": "^4.36.1", "@types/jest": "^27.5.2", - "@types/node": "^20.17.16", + "@types/node": "^20.17.17", "@types/react": "^18.3.18", "@types/react-dom": "^18.3.5", "buffer": "^6.0.3", + "install": "^0.13.0", "react": "^19.0.0", "react-dom": "^19.0.0", "react-icons": "^5.4.0", @@ -141,17 +143,19 @@ "@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.25", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ=="], - "@mantine/core": ["@mantine/core@7.16.3", "", { "dependencies": { "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", "react-number-format": "^5.4.3", "react-remove-scroll": "^2.6.2", "react-textarea-autosize": "8.5.6", "type-fest": "^4.27.0" }, "peerDependencies": { "@mantine/hooks": "7.16.3", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "sha512-cxhIpfd2i0Zmk9TKdejYAoIvWouMGhzK3OOX+VRViZ5HEjnTQCGl2h3db56ThqB6NfVPCno6BPbt5lwekTtmuQ=="], + "@mantine/code-highlight": ["@mantine/code-highlight@7.17.0", "", { "dependencies": { "clsx": "^2.1.1", "highlight.js": "^11.10.0" }, "peerDependencies": { "@mantine/core": "7.17.0", "@mantine/hooks": "7.17.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "sha512-i6MvxW+PtdRNYHCm8Qa/aiMkLr47EYS0+12rf5XhDVdYZy+0+XiRkwBsxnvzQfKqv0QtH2dchBJDEBMmPB/nVw=="], - "@mantine/form": ["@mantine/form@7.16.3", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "klona": "^2.0.6" }, "peerDependencies": { "react": "^18.x || ^19.x" } }, "sha512-GqomUG2Ri5adxYsTU1S5IhKRPcqTG5JkPvMERns8PQAcUz/lvzsnk3wY1v4K5CEbCAdpimle4bSsZTM9g697vg=="], + "@mantine/core": ["@mantine/core@7.17.0", "", { "dependencies": { "@floating-ui/react": "^0.26.28", "clsx": "^2.1.1", "react-number-format": "^5.4.3", "react-remove-scroll": "^2.6.2", "react-textarea-autosize": "8.5.6", "type-fest": "^4.27.0" }, "peerDependencies": { "@mantine/hooks": "7.17.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "sha512-AU5UFewUNzBCUXIq5Jk6q402TEri7atZW61qHW6P0GufJ2W/JxGHRvgmHOVHTVIcuWQRCt9SBSqZoZ/vHs9LhA=="], - "@mantine/hooks": ["@mantine/hooks@7.16.3", "", { "peerDependencies": { "react": "^18.x || ^19.x" } }, "sha512-B94FBWk5Sc81tAjV+B3dGh/gKzfqzpzVC/KHyBRWOOyJRqeeRbI/FAaJo4zwppyQo1POSl5ArdyjtDRrRIj2SQ=="], + "@mantine/form": ["@mantine/form@7.17.0", "", { "dependencies": { "fast-deep-equal": "^3.1.3", "klona": "^2.0.6" }, "peerDependencies": { "react": "^18.x || ^19.x" } }, "sha512-LONdeb+wL8h9fvyQ339ZFLxqrvYff+b+H+kginZhnr45OBTZDLXNVAt/YoKVFEkynF9WDJjdBVrXKcOZvPgmrA=="], - "@mantine/modals": ["@mantine/modals@7.16.3", "", { "peerDependencies": { "@mantine/core": "7.16.3", "@mantine/hooks": 
"7.16.3", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "sha512-BJuDzRugK6xLbuFTTo8NLJumVvVmSYsNVcEtmlXOWTE3NkDGktBXGKo8V1B0XfJ9/d/rZw7HCE0p4i76MtA+bQ=="], + "@mantine/hooks": ["@mantine/hooks@7.17.0", "", { "peerDependencies": { "react": "^18.x || ^19.x" } }, "sha512-vo3K49mLy1nJ8LQNb5KDbJgnX0xwt3Y8JOF3ythjB5LEFMptdLSSgulu64zj+QHtzvffFCsMb05DbTLLpVP/JQ=="], - "@mantine/notifications": ["@mantine/notifications@7.16.3", "", { "dependencies": { "@mantine/store": "7.16.3", "react-transition-group": "4.4.5" }, "peerDependencies": { "@mantine/core": "7.16.3", "@mantine/hooks": "7.16.3", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "sha512-wtEME9kSYfXWYmAmQUZ8c+rwNmhdWRBaW1mlPdQsPkzMqkv4q6yy0IpgwcnuHStSG9EHaQBXazmVxMZJdEAWBQ=="], + "@mantine/modals": ["@mantine/modals@7.17.0", "", { "peerDependencies": { "@mantine/core": "7.17.0", "@mantine/hooks": "7.17.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "sha512-4sfiFxIxMxfm2RH4jXMN+cr8tFS5AexXG4TY7TRN/ySdkiWtFVvDe5l2/KRWWeWwDUb7wQhht8Ompj5KtexlEA=="], - "@mantine/store": ["@mantine/store@7.16.3", "", { "peerDependencies": { "react": "^18.x || ^19.x" } }, "sha512-6M2M5+0BrRtnVv+PUmr04tY1RjPqyapaHplo90uK1NMhP/1EIqrwTL9KoEtCNCJ5pog1AQtu0bj0QPbqUvxwLg=="], + "@mantine/notifications": ["@mantine/notifications@7.17.0", "", { "dependencies": { "@mantine/store": "7.17.0", "react-transition-group": "4.4.5" }, "peerDependencies": { "@mantine/core": "7.17.0", "@mantine/hooks": "7.17.0", "react": "^18.x || ^19.x", "react-dom": "^18.x || ^19.x" } }, "sha512-xejr1WW02NrrrE4HPDoownILJubcjLLwCDeTk907ZeeHKBEPut7RukEq6gLzOZBhNhKdPM+vCM7GcbXdaLZq/Q=="], + + "@mantine/store": ["@mantine/store@7.17.0", "", { "peerDependencies": { "react": "^18.x || ^19.x" } }, "sha512-nhWRYRLqvAjrD/ApKCXxuHyTWg2b5dC06Z5gmO8udj4pBgndNf9nmCl+Of90H6bgOa56moJA7UQyXoF1SfxqVg=="], "@rollup/pluginutils": ["@rollup/pluginutils@5.1.4", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", "picomatch": "^4.0.2" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "optionalPeers": ["rollup"] }, "sha512-USm05zrsFxYLPdWWq+K3STlWiT/3ELn3RcV5hJMghpeAIhxfsUIg6mt12CBJBInWMV4VneoV7SfGv8xIwo2qNQ=="], @@ -205,7 +209,7 @@ "@types/jest": ["@types/jest@27.5.2", "", { "dependencies": { "jest-matcher-utils": "^27.0.0", "pretty-format": "^27.0.0" } }, "sha512-mpT8LJJ4CMeeahobofYWIjFo0xonRS/HfxnVEPMPFSQdGUt1uHCnoPT7Zhb+sjDU2wz0oKV0OLUR0WzrHNgfeA=="], - "@types/node": ["@types/node@20.17.17", "", { "dependencies": { "undici-types": "~6.19.2" } }, "sha512-/WndGO4kIfMicEQLTi/mDANUu/iVUhT7KboZPdEqqHQ4aTS+3qT3U5gIqWDFV+XouorjfgGqvKILJeHhuQgFYg=="], + "@types/node": ["@types/node@20.17.19", "", { "dependencies": { "undici-types": "~6.19.2" } }, "sha512-LEwC7o1ifqg/6r2gn9Dns0f1rhK+fPFDoMiceTJ6kWmVk6bgXBI/9IOWfVan4WiAavK9pIVWdX0/e3J+eEUh5A=="], "@types/prop-types": ["@types/prop-types@15.7.14", "", {}, "sha512-gNMvNH49DJ7OJYv+KAKn0Xp45p8PLl6zo2YnvDIbTd4J6MER2BmWN49TG7n9LvkyihINxeKW8+3bfS2yDC9dzQ=="], @@ -295,12 +299,16 @@ "has-flag": ["has-flag@4.0.0", "", {}, "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="], + "highlight.js": ["highlight.js@11.11.1", "", {}, "sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w=="], + "hoist-non-react-statics": ["hoist-non-react-statics@3.3.2", "", { "dependencies": { "react-is": "^16.7.0" } }, "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw=="], "ieee754": 
["ieee754@1.2.1", "", {}, "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="], "import-fresh": ["import-fresh@3.3.1", "", { "dependencies": { "parent-module": "^1.0.0", "resolve-from": "^4.0.0" } }, "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ=="], + "install": ["install@0.13.0", "", {}, "sha512-zDml/jzr2PKU9I8J/xyZBQn8rPCAY//UOYNmR01XwNwyfhEWObo2SWfSl1+0tm1u6PhxLwDnfsT/6jB7OUxqFA=="], + "is-arrayish": ["is-arrayish@0.2.1", "", {}, "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="], "is-what": ["is-what@4.1.16", "", {}, "sha512-ZhMwEosbFJkA0YhFnNDgTM4ZxDRsS6HqTo7qsZM08fehyRYIYa0yHu5R6mgo1n/8MgaPBXiPimPD77baVFYg+A=="], @@ -365,7 +373,7 @@ "react-dom": ["react-dom@19.0.0", "", { "dependencies": { "scheduler": "^0.25.0" }, "peerDependencies": { "react": "^19.0.0" } }, "sha512-4GV5sHFG0e/0AD4X+ySy6UJd3jVl1iNsNHdpad0qhABJ11twS3TTBnseqsKurKcsNqCEFeGL3uLpVChpIO3QfQ=="], - "react-icons": ["react-icons@5.4.0", "", { "peerDependencies": { "react": "*" } }, "sha512-7eltJxgVt7X64oHh6wSWNwwbKTCtMfK35hcjvJS0yxEAhPM8oUKdS3+kqaW1vicIltw+kR2unHaa12S9pPALoQ=="], + "react-icons": ["react-icons@5.5.0", "", { "peerDependencies": { "react": "*" } }, "sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw=="], "react-is": ["react-is@18.3.1", "", {}, "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg=="], @@ -379,9 +387,9 @@ "react-remove-scroll-bar": ["react-remove-scroll-bar@2.3.8", "", { "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react"] }, "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q=="], - "react-router": ["react-router@7.1.5", "", { "dependencies": { "@types/cookie": "^0.6.0", "cookie": "^1.0.1", "set-cookie-parser": "^2.6.0", "turbo-stream": "2.4.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" }, "optionalPeers": ["react-dom"] }, "sha512-8BUF+hZEU4/z/JD201yK6S+UYhsf58bzYIDq2NS1iGpwxSXDu7F+DeGSkIXMFBuHZB21FSiCzEcUb18cQNdRkA=="], + "react-router": ["react-router@7.2.0", "", { "dependencies": { "@types/cookie": "^0.6.0", "cookie": "^1.0.1", "set-cookie-parser": "^2.6.0", "turbo-stream": "2.4.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" }, "optionalPeers": ["react-dom"] }, "sha512-fXyqzPgCPZbqhrk7k3hPcCpYIlQ2ugIXDboHUzhJISFVy2DEPsmHgN588MyGmkIOv3jDgNfUE3kJi83L28s/LQ=="], - "react-router-dom": ["react-router-dom@7.1.5", "", { "dependencies": { "react-router": "7.1.5" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" } }, "sha512-/4f9+up0Qv92D3bB8iN5P1s3oHAepSGa9h5k6tpTFlixTTskJZwKGhJ6vRJ277tLD1zuaZTt95hyGWV1Z37csQ=="], + "react-router-dom": ["react-router-dom@7.2.0", "", { "dependencies": { "react-router": "7.2.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" } }, "sha512-cU7lTxETGtQRQbafJubvZKHEn5izNABxZhBY0Jlzdv0gqQhCPQt2J8aN5ZPjS6mQOXn5NnirWNh+FpE8TTYN0Q=="], "react-style-singleton": ["react-style-singleton@2.2.3", "", { "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, 
"sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ=="], diff --git a/frontend/package.json b/frontend/package.json index 071420d..0d7cdd3 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -5,21 +5,23 @@ "private": true, "dependencies": { "@hello-pangea/dnd": "^16.6.0", - "@mantine/core": "^7.16.3", - "@mantine/form": "^7.16.3", - "@mantine/hooks": "^7.16.3", - "@mantine/modals": "^7.16.3", - "@mantine/notifications": "^7.16.3", + "@mantine/code-highlight": "^7.17.0", + "@mantine/core": "^7.17.0", + "@mantine/form": "^7.17.0", + "@mantine/hooks": "^7.17.0", + "@mantine/modals": "^7.17.0", + "@mantine/notifications": "^7.17.0", "@tanstack/react-query": "^4.36.1", "@types/jest": "^27.5.2", - "@types/node": "^20.17.17", + "@types/node": "^20.17.19", "@types/react": "^18.3.18", "@types/react-dom": "^18.3.5", "buffer": "^6.0.3", + "install": "^0.13.0", "react": "^19.0.0", "react-dom": "^19.0.0", - "react-icons": "^5.4.0", - "react-router-dom": "^7.1.5", + "react-icons": "^5.5.0", + "react-router-dom": "^7.2.0", "socket.io-client": "^4.8.1", "typescript": "^5.7.3", "web-vitals": "^2.1.4", diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index 7fc33a5..375cdcd 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -5,16 +5,14 @@ import { ImCross } from 'react-icons/im'; import { Outlet, Route, Routes } from 'react-router-dom'; import MainLayout from './components/MainLayout'; import { PasswordSend, ServerStatusResponse } from './js/models'; -import { DEV_IP_BACKEND, errorNotify, getstatus, HomeRedirector, IS_DEV, login, setpassword } from './js/utils'; +import { errorNotify, getstatus, HomeRedirector, IS_DEV, login, setpassword, socketio } from './js/utils'; import NFRegex from './pages/NFRegex'; -import io from 'socket.io-client'; import ServiceDetailsNFRegex from './pages/NFRegex/ServiceDetails'; import PortHijack from './pages/PortHijack'; import { Firewall } from './pages/Firewall'; import { useQueryClient } from '@tanstack/react-query'; - - -const socket = IS_DEV?io("ws://"+DEV_IP_BACKEND, {transports: ["websocket"], path:"/sock/socket.io" }):io({transports: ["websocket"], path:"/sock/socket.io"}); +import NFProxy from './pages/NFProxy'; +import ServiceDetailsNFProxy from './pages/NFProxy/ServiceDetails'; function App() { @@ -23,33 +21,20 @@ function App() { const [reqError, setReqError] = useState() const [error, setError] = useState() const [loadinBtn, setLoadingBtn] = useState(false); - const queryClient = useQueryClient() + const getStatus = () =>{ - getstatus().then( res =>{ - setSystemStatus(res) - setReqError(undefined) - setLoading(false) - }).catch(err=>{ - setReqError(err.toString()) - setLoading(false) - setTimeout(getStatus, 500) - }) + getstatus().then( res =>{ + setSystemStatus(res) + setReqError(undefined) + }).catch(err=>{ + setReqError(err.toString()) + setTimeout(getStatus, 500) + }).finally( ()=>setLoading(false) ) } useEffect(()=>{ getStatus() - socket.on("update", (data) => { - queryClient.invalidateQueries({ queryKey: data }) - }) - socket.on("connect_error", (err) => { - errorNotify("Socket.Io connection failed! ",`Error message: [${err.message}]`) - getStatus() - }); - return () => { - socket.off("update") - socket.off("connect_error") - } },[]) const form = useForm({ @@ -143,16 +128,7 @@ function App() { :null} }else if (systemStatus.status === "run" && systemStatus.loggined){ - return - }> - } > - } /> - - } /> - } /> - } /> - - + return }else{ return Error launching Firegex! 
🔥 @@ -162,4 +138,41 @@ function App() { } } +const PageRouting = ({ getStatus }:{ getStatus:()=>void }) => { + + const queryClient = useQueryClient() + + + useEffect(()=>{ + getStatus() + socketio.on("update", (data) => { + queryClient.invalidateQueries({ queryKey: data }) + }) + socketio.on("connect_error", (err) => { + errorNotify("Socket.Io connection failed! ",`Error message: [${err.message}]`) + getStatus() + }); + return () => { + socketio.off("update") + socketio.off("connect_error") + } +},[]) + + return + }> + } > + } /> + + } > + } /> + + } /> + } /> + } /> + + +} + + + export default App; diff --git a/frontend/src/components/AddNewRegex.tsx b/frontend/src/components/AddNewRegex.tsx index 86d08c7..5437ec5 100644 --- a/frontend/src/components/AddNewRegex.tsx +++ b/frontend/src/components/AddNewRegex.tsx @@ -2,8 +2,9 @@ import { Button, Group, Space, TextInput, Notification, Switch, Modal, Select } import { useForm } from '@mantine/form'; import { useState } from 'react'; import { RegexAddForm } from '../js/models'; -import { b64decode, b64encode, getapiobject, okNotify } from '../js/utils'; +import { b64decode, b64encode, okNotify } from '../js/utils'; import { ImCross } from "react-icons/im" +import { nfregex } from './NFRegex/utils'; type RegexAddInfo = { regex:string, @@ -47,7 +48,7 @@ function AddNewRegex({ opened, onClose, service }:{ opened:boolean, onClose:()=> active: !values.deactive } setSubmitLoading(false) - getapiobject().regexesadd(request).then( res => { + nfregex.regexesadd(request).then( res => { if (!res){ setSubmitLoading(false) close(); diff --git a/frontend/src/components/DocsButton.tsx b/frontend/src/components/DocsButton.tsx new file mode 100644 index 0000000..6be3537 --- /dev/null +++ b/frontend/src/components/DocsButton.tsx @@ -0,0 +1,38 @@ +import { ActionIcon, ActionIconProps, Box, Modal, ScrollArea, Title, Tooltip } from "@mantine/core"; +import { useState } from "react"; +import { FaBookBookmark } from "react-icons/fa6"; +import { NFRegexDocs } from "./NFRegex/NFRegexDocs"; +import { NFProxyDocs } from "./NFProxy/NFProxyDocs"; +import { PortHijackDocs } from "./PortHijack/PortHijackDocs"; +import { EnumToPrimitiveUnion } from "../js/utils"; + +export enum DocType{ + NFREGEX = "nfregex", + NFPROXY = "nfproxy", + PORTHIJACK = "porthijack", +} + + +export const DocsButton = ({ doc, ...props }: { doc: EnumToPrimitiveUnion } & ActionIconProps) => { + const [open, setOpen] = useState(false); + + return + + setOpen(true)} size="lg" radius="md" variant="filled" {...props}> + + setOpen(false)} fullScreen title={ + Firegex Docs 📕 + } scrollAreaComponent={ScrollArea.Autosize}> + { + doc == DocType.NFREGEX ? + : + doc == DocType.NFPROXY ? + : + doc == DocType.PORTHIJACK ? 
+ : + Docs not found + } + + +} + diff --git a/frontend/src/components/Firewall/utils.ts b/frontend/src/components/Firewall/utils.ts index c051df8..54fc4cc 100644 --- a/frontend/src/components/Firewall/utils.ts +++ b/frontend/src/components/Firewall/utils.ts @@ -90,4 +90,4 @@ export const firewall = { ruleset: async (data:RuleAddForm) => { return await postapi("firewall/rules", data) as ServerResponseListed; } -} \ No newline at end of file +} diff --git a/frontend/src/components/Header/index.tsx b/frontend/src/components/Header/index.tsx index 938c8fb..02f5f6a 100644 --- a/frontend/src/components/Header/index.tsx +++ b/frontend/src/components/Header/index.tsx @@ -31,8 +31,6 @@ function HeaderPage(props: any) { const [changePasswordModal, setChangePasswordModal] = useState(false); const [resetFiregexModal, setResetFiregexModal] = useState(false); - const [tooltipHomeOpened, setTooltipHomeOpened] = useState(false); - const [tooltipLogoutOpened,setTooltipLogoutOpened] = useState(false); return } onClick={() => setResetFiregexModal(true)}>Reset Firegex - + setTooltipHomeOpened(false)} onBlur={() => setTooltipHomeOpened(false)} - onMouseEnter={() => setTooltipHomeOpened(true)} onMouseLeave={() => setTooltipHomeOpened(false)}> + onClick={go_to_home}> - - setTooltipLogoutOpened(false)} onBlur={() => setTooltipLogoutOpened(false)} - onMouseEnter={() => setTooltipLogoutOpened(true)} onMouseLeave={() => setTooltipLogoutOpened(false)}> + + + setChangePasswordModal(false)} /> setResetFiregexModal(false)} /> diff --git a/frontend/src/components/ModalLog.tsx b/frontend/src/components/ModalLog.tsx new file mode 100644 index 0000000..4c07760 --- /dev/null +++ b/frontend/src/components/ModalLog.tsx @@ -0,0 +1,17 @@ +import { Code, Modal, ScrollArea } from "@mantine/core" + +export const ModalLog = ( + { title, opened, close, data }: + { + title: string, + opened: boolean, + close: () => void, + data: string, + } +) => { + return + + {data} + + +} \ No newline at end of file diff --git a/frontend/src/components/NFProxy/AddEditService.tsx b/frontend/src/components/NFProxy/AddEditService.tsx new file mode 100644 index 0000000..daa5585 --- /dev/null +++ b/frontend/src/components/NFProxy/AddEditService.tsx @@ -0,0 +1,139 @@ +import { Button, Group, Space, TextInput, Notification, Modal, Switch, SegmentedControl, Box, Tooltip } from '@mantine/core'; +import { useForm } from '@mantine/form'; +import { useEffect, useState } from 'react'; +import { okNotify, regex_ipv4, regex_ipv6 } from '../../js/utils'; +import { ImCross } from "react-icons/im" +import { nfproxy, Service } from './utils'; +import PortAndInterface from '../PortAndInterface'; +import { IoMdInformationCircleOutline } from "react-icons/io"; +import { ServiceAddForm as ServiceAddFormOriginal } from './utils'; + +type ServiceAddForm = ServiceAddFormOriginal & {autostart: boolean} + +function AddEditService({ opened, onClose, edit }:{ opened:boolean, onClose:()=>void, edit?:Service }) { + + const initialValues = { + name: "", + port:edit?.port??8080, + ip_int:edit?.ip_int??"", + proto:edit?.proto??"tcp", + fail_open: edit?.fail_open??false, + autostart: true + } + + const form = useForm({ + initialValues: initialValues, + validate:{ + name: (value) => edit? null : value !== "" ? null : "Service name is required", + port: (value) => (value>0 && value<65536) ? null : "Invalid port", + proto: (value) => ["tcp","http"].includes(value) ? null : "Invalid protocol", + ip_int: (value) => (value.match(regex_ipv6) || value.match(regex_ipv4)) ? 
null : "Invalid IP address", + } + }) + + useEffect(() => { + if (opened){ + form.setInitialValues(initialValues) + form.reset() + } + }, [opened]) + + const close = () =>{ + onClose() + form.reset() + setError(null) + } + + const [submitLoading, setSubmitLoading] = useState(false) + const [error, setError] = useState(null) + + const submitRequest = ({ name, port, autostart, proto, ip_int, fail_open }:ServiceAddForm) =>{ + setSubmitLoading(true) + if (edit){ + nfproxy.settings(edit.service_id, { port, ip_int, fail_open }).then( res => { + if (!res){ + setSubmitLoading(false) + close(); + okNotify(`Service ${name} settings updated`, `Successfully updated settings for service ${name}`) + } + }).catch( err => { + setSubmitLoading(false) + setError("Request Failed! [ "+err+" ]") + }) + }else{ + nfproxy.servicesadd({ name, port, proto, ip_int, fail_open }).then( res => { + if (res.status === "ok" && res.service_id){ + setSubmitLoading(false) + close(); + if (autostart) nfproxy.servicestart(res.service_id) + okNotify(`Service ${name} has been added`, `Successfully added service with port ${port}`) + }else{ + setSubmitLoading(false) + setError("Invalid request! [ "+res.status+" ]") + } + }).catch( err => { + setSubmitLoading(false) + setError("Request Failed! [ "+err+" ]") + }) + } + } + + + return +
+ {!edit?:null} + + + + + + + {!edit?:null} + + + Enable fail-open nfqueue + + + Firegex use internally nfqueue to handle packets
enabling this option will allow packets to pass through the firewall
if the filtering is too slow or too much traffic is coming
+ }> + +
+
} + {...form.getInputProps('fail_open', { type: 'checkbox' })} + /> +
+ + {edit?null:} + + + + + + + {error?<> + + } color="red" onClose={()=>{setError(null)}}> + Error: {error} + + :null} + + +
+ +} + +export default AddEditService; diff --git a/frontend/src/components/NFProxy/ExceptionWarning.tsx b/frontend/src/components/NFProxy/ExceptionWarning.tsx new file mode 100644 index 0000000..45115d1 --- /dev/null +++ b/frontend/src/components/NFProxy/ExceptionWarning.tsx @@ -0,0 +1,44 @@ +import { IoIosWarning } from "react-icons/io" +import { socketio, WARNING_NFPROXY_TIME_LIMIT } from "../../js/utils" +import { Tooltip } from "@mantine/core" +import { useEffect, useState } from "react" +import { round } from "@mantine/core/lib/components/ColorPicker/converters/parsers" + + +export const ExceptionWarning = ({ service_id }: { service_id: string }) => { + const [lastExceptionTimestamp, setLastExceptionTimestamp] = useState(0) + + useEffect(() => { + socketio.emit("nfproxy-exception-join", { service: service_id }); + socketio.on(`nfproxy-exception-${service_id}`, (data) => { + setLastExceptionTimestamp(data) + }); + return () => { + socketio.emit("nfproxy-exception-leave", { service: service_id }); + } + }, []) + + const [_time, setTime] = useState(new Date()); + + useEffect(() => { + const interval = setInterval(() => { + setTime(new Date()); + }, 1000); + + return () => clearInterval(interval); + }, []); + + const deltaTime = new Date().getTime()-lastExceptionTimestamp + const minutes = Math.floor(deltaTime/(1000*60)) + const seconds = Math.floor(deltaTime/1000)%60 + + const deltaStringTime = `${minutes.toString().length>1?minutes:"0"+minutes}:${seconds.toString().length>1?seconds:"0"+seconds}` + + return <> + {(new Date().getTime()-lastExceptionTimestamp <= WARNING_NFPROXY_TIME_LIMIT)? + + + + :null} + +} \ No newline at end of file diff --git a/frontend/src/components/NFProxy/NFProxyDocs.tsx b/frontend/src/components/NFProxy/NFProxyDocs.tsx new file mode 100644 index 0000000..8f4a8a4 --- /dev/null +++ b/frontend/src/components/NFProxy/NFProxyDocs.tsx @@ -0,0 +1,410 @@ +import { CodeHighlight } from "@mantine/code-highlight"; +import { Container, Title, Text, List, Code, Space, Badge, Box } from "@mantine/core"; +import { CgEditBlackPoint } from "react-icons/cg"; +import { EXAMPLE_PYFILTER } from "./utils"; + +const IMPORT_CODE_EXAMPLE = `from firegex.nfproxy import pyfilter, ACCEPT, REJECT` + +const FOO_FILTER_CODE = `from firegex.nfproxy import pyfilter, ACCEPT, REJECT + +# This is NOT a filter +def useless_function() -> int: + print("This is a useless function") + return 42 + +@pyfilter +def none_filter(): # This is a filter that does nothing + useless_function() + return ACCEPT + +` + + +const TYPING_ARGS_EXAMPLE = `from firegex.nfproxy import pyfilter, ACCEPT, REJECT +from firegex.nfproxy.models import HttpRequest + +@pyfilter +def filter_with_args(http_request: HttpRequest) -> int: + if http_request.body: + if b"ILLEGAL" in http_request.body: + return REJECT +` + +const IMPORT_FULL_ACTION_STREAM = `from firegex.nfproxy import FullStreamAction` + +export const HELP_NFPROXY_SIM = `➤ fgex nfproxy -h + + Usage: fgex nfproxy [OPTIONS] FILTER_FILE ADDRESS PORT + + Run an nfproxy simulation + +╭─ Arguments ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +│ * filter_file TEXT The path to the filter file [default: None] [required] │ +│ * address TEXT The address of the target to proxy [default: None] [required] │ +│ * port INTEGER The port of the target to proxy [default: None] [required] │ 
+╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯ +╭─ Options ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮ +│ --proto [tcp|http] The protocol to proxy [default: tcp] │ +│ --from-address TEXT The address of the local server [default: None] │ +│ --from-port INTEGER The port of the local server [default: 7474] │ +│ -6 Use IPv6 for the connection │ +│ --help -h Show this message and exit. │ +╰─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯` + +const HttpBadge = () => { + return HTTP +} + +const TCPBadge = () => { + return TCP +} + + +export const NFProxyDocs = () => { + return ( + + 🌐 Netfilter Proxy Documentation + + 📖 Overview + + Netfilter Proxy is a simulated proxy that leverages nfqueue to intercept network packets. + It follows a similar workflow to NFRegex but introduces Python-based filtering capabilities, + providing users with the flexibility to upload custom filters. + + + ⚙️ How to use Netfilter Proxy + + To use Netfilter Proxy, simply create and upload a Python filter. The filter is passed to the C++ binary, + which then processes packets using the provided logic. This allows you to tailor the filtering behavior + to your needs. + + 💡 How to write pyfilters? + + First of all install the firegex lib and update it running pip install -U fgex. + After that you can use firegex module. + + With this code we imported the pyfilter decorator and the ACCEPT and REJECT statements.
+ Let's create a first (useless) filter to see the syntax: + As you can see, the filter must be decorated with the pyfilter decorator and must return a statement describing how to handle that packet. +
+ You can save any data about the current flow in global variables: the code you write is executed only once per flow, and the globals are isolated between flows. + For each packet, the filter functions are called with the required parameters, reusing the same globals as before. +
+ Saving data in the globals of other modules is not recommended: that memory is shared by all the flows managed by the same thread, and doing so can lead to unexpected behavior. +
+ Global variables that start with '__firegex' are reserved for internal use; don't use them. +
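To make the per-flow globals concrete, here is a minimal sketch (the counter name and the threshold are made up for illustration; pyfilter, ACCEPT, REJECT and RawPacket are the exports shown in this diff):

```python
from firegex.nfproxy import pyfilter, ACCEPT, REJECT
from firegex.nfproxy.models import RawPacket

packet_count = 0  # per-flow state: this module's globals are isolated per flow

@pyfilter
def limit_flow_packets(pkt: RawPacket):
    global packet_count
    packet_count += 1
    if packet_count > 1000:  # hypothetical threshold
        return REJECT
    return ACCEPT
```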
+ You can control when a function is called, and which data it receives, by annotating its parameters with the data-model types described below. + Default parameter values are ignored, and so are kwargs. +
+ Functions with no type annotations are considered invalid pyfilters! +
+ + In this code we are filtering all the HTTP requests that contain the word "ILLEGAL" in the body. All the other packets are accepted (the default behavior). + The function is called once the HTTP request headers have been parsed internally, and again once the body has been parsed. +
+ If a function takes multiple parameters, it is called only when all of them can be built from the packets that have arrived, as in the sketch below. +
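A hedged sketch of a multi-parameter filter (the matched URL and payload strings are illustrative, not part of the library):

```python
from firegex.nfproxy import pyfilter, REJECT
from firegex.nfproxy.models import HttpRequest, TCPInputStream

@pyfilter
def block_admin_probes(req: HttpRequest, stream: TCPInputStream):
    # Called only once both the HTTP request and the input stream can be built
    if req.url and "/admin" in req.url and b"secret" in stream.data:
        return REJECT
    # Returning None is equivalent to ACCEPT
```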
+ 🔧 How can I test the filter? + + You can test your filter using the fgex command installed by the firegex lib: this runs a local proxy to a remote destination with the filter you specified. +
+ This can be done by running, for instance: fgex nfproxy test_http.py 127.0.0.1 8080 --proto http + + You don't need to restart the proxy every time you change the filter: it is reloaded automatically. +
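The same simulation can also be driven from Python through run_proxy_simulation, which this diff defines in firegex.nfproxy.proxysim; a sketch assuming a local test_http.py filter file:

```python
from firegex.nfproxy.proxysim import run_proxy_simulation

# Equivalent to: fgex nfproxy test_http.py 127.0.0.1 8080 --proto http
run_proxy_simulation(
    filter_file="test_http.py",  # hypothetical filter file
    proto="http",
    target_ip="127.0.0.1",
    target_port=8080,
)  # listens on 127.0.0.1:7474 by default and hot-reloads the filter on change
```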
+ 📦 Packet Statements + + Here are all the statements you can return from a filter: + + ACCEPT: The packet will be accepted and forwarded to the destination. (default if None is returned) + REJECT: The connection will be closed and all the packets will be dropped. + DROP: This packet and all the following will be dropped. (This does not simulate a connection closure) + UNSTABLE_MANGLE: The packet will be modified and forwarded. You can edit the packet only with the RawPacket data handler. (This is an unstable statement, use it carefully) + + + ⚙️ Data Structures + + Here are all the data structures you can use for your filters: + + + <CgEditBlackPoint style={{marginBottom: -3}}/> RawPacket + + This data is the raw packet processed by nfqueue. It contains: + + + + + data: The raw packet data assembled by libtins (read only). + + + is_input: It's true if the packet is incoming, false if it's outgoing. (read only) + + + is_ipv6: It's true if the packet is IPv6, false if it's IPv4. (read only) + + + is_tcp: It's true if the packet is TCP, false if it's UDP. (read only) + + + l4_size: The size of the L4 payload (read only) + + + raw_packet_header_len: The size of the raw packet header (read only) + + + raw_packet: The raw packet data with IP and TCP headers. You can edit all the packet content and it will be modified if you send + the UNSTABLE_MANGLE statement. Be careful, because the associated layer-4 data can differ from the 'data' field, which instead comes from libtins. + When you edit this field, l4_size and l4_data will be updated automatically. + + + l4_data: The L4 payload data, taken directly from the raw packet. You can edit all the packet content and it will be modified if you send + the UNSTABLE_MANGLE statement. Be careful, because the associated layer-4 data can differ from the 'data' field, which instead comes from libtins. When you edit this field, l4_size and raw_packet will be updated automatically. + + + + + <CgEditBlackPoint style={{marginBottom: -3}}/> TCPInputStream (alias: TCPClientStream) + + This data is the TCP input stream: this handler is called only on is_input=True packets. The filters that handle this data will be called only in this case. + + + + + data: The entire stream in the input direction. (read only) + + + total_stream_size: The size of the entire stream in the input direction. (read only) + + + is_ipv6: It's true if the stream is IPv6, false if it's IPv4. (read only) + + + + + <CgEditBlackPoint style={{marginBottom: -3}}/> TCPOutputStream (alias TCPServerStream) + + This data is the TCP output stream: this handler is called only on is_input=False packets. The filters that handle this data will be called only in this case. + + + + + data: The entire stream in the output direction. (read only) + + + total_stream_size: The size of the entire stream in the output direction. (read only) + + + is_ipv6: It's true if the stream is IPv6, false if it's IPv4. (read only) + + + + + <CgEditBlackPoint style={{marginBottom: -3}}/> HttpRequest + + This data is the HTTP request processed by nfqueue. This handler can be called twice per request: once when the HTTP headers are complete, and once when the body is complete. + If the HTTP data arrives in a single TCP packet, this handler is called only once. + + + + + url: The URL of the request (read only) + + + headers: The headers of the request (read only). The keys and values are exactly the same as the original request (case sensitive).
+ + + get_header(key:str, default = None): A function that returns the value of a header: it matches the key case-insensitively. If the header is not found, it returns the default value. + + + user_agent: The user agent of the request (read only) + + + content_encoding: The content encoding of the request (read only) + + + content_length: The content length of the request (read only) + + + body: The body of the request (read only). It's None if the body has not arrived yet. + + + http_version: The HTTP version of the request (read only) + + + keep_alive: It's true if the connection was marked for keep-alive, false if it's not. (read only) + + + should_upgrade: It's true if the connection should be upgraded, false if it's not. (read only) + + + method: The method of the request (read only) + + + has_begun: It's true if the request has begun, false if it's not. (read only) + + + headers_complete: It's true if the headers are complete, false if they are not. (read only) + + + message_complete: It's true if the message is complete, false if it's not. (read only) + + + total_size: The size of the entire HTTP request (read only) + + + stream: It's the buffer that contains the stream of the websocket traffic in input. This is used only if should_upgrade is True. (read only) + + + + + <CgEditBlackPoint style={{marginBottom: -3}}/> HttpRequestHeader + + Same as HttpRequest, but this handler is called only when the headers are complete and the body is not buffered. Body will always be None. + + <CgEditBlackPoint style={{marginBottom: -3}}/> HttpResponse + + This data is the HTTP response processed by nfqueue. This handler can be called twice per response: once when the HTTP headers are complete, and once when the body is complete. + If the HTTP data arrives in a single TCP packet, this handler is called only once. + + + + + url: The URL of the response (read only) + + + headers: The headers of the response (read only). The keys and values are exactly the same as the original response (case sensitive). + + + get_header(key:str, default = None): A function that returns the value of a header: it matches the key case-insensitively. If the header is not found, it returns the default value. + + + user_agent: The user agent of the response (read only) + + + content_encoding: The content encoding of the response (read only) + + + content_length: The content length of the response (read only) + + + body: The body of the response (read only). It's None if the body has not arrived yet. + + + http_version: The HTTP version of the response (read only) + + + keep_alive: It's true if the connection was marked for keep-alive, false if it's not. (read only) + + + should_upgrade: It's true if the connection should be upgraded, false if it's not. (read only) + + + status_code: The status code of the response (read only) (int) + + + has_begun: It's true if the response has begun, false if it's not. (read only) + + + headers_complete: It's true if the headers are complete, false if they are not. (read only) + + + message_complete: It's true if the message is complete, false if it's not. (read only) + + + total_size: The size of the entire HTTP response (read only) + + + stream: It's the buffer that contains the stream of the websocket traffic in output. This is used only if should_upgrade is True. (read only) + + + + + <CgEditBlackPoint style={{marginBottom: -3}}/> HttpResponseHeader + + Same as HttpResponse, but this handler is called only when the headers are complete and the body is not buffered.
Body will always be None. + ⚠️ Stream Limiter + + What happens if a specific TCP stream carries a lot of data? The stream limiter is activated and an action is taken. + You can configure the action performed by setting some options in the globals: +
+ First import the FullStreamAction enum: + + Then you can set these options in the globals (a configuration sketch follows this list): + + + FGEX_STREAM_MAX_SIZE: Sets the maximum size of the stream. If the stream exceeds this size, the FGEX_FULL_STREAM_ACTION will be performed. (this limit is applied to the single stream related to the single data handler). + For example, if TCPInputStream has reached the limit but HttpResponse has not, the action will be performed only on the TCPInputStream. The default is 1MB. + + + FGEX_FULL_STREAM_ACTION: Sets the action performed when the stream exceeds the FGEX_STREAM_MAX_SIZE. The default is FullStreamAction.FLUSH. + + + Every action you can set is explained here: + + + FLUSH: Flush the stream and continue to acquire new packets (default) + + + DROP: Drop the next stream packets - like a DROP action by filter + + + REJECT: Reject the stream and close the connection - like a REJECT action by filter + + + ACCEPT: Stop calling pyfilters and accept the traffic + + +
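As a minimal sketch of this configuration (the global names and the FullStreamAction import path are the ones documented above and used by the commented example filter later in this PR):

```python
# Minimal sketch of stream-limiter tuning inside a pyfilter file.
# FGEX_STREAM_MAX_SIZE and FGEX_FULL_STREAM_ACTION are the globals
# documented above; the import path follows the example filter in this PR.
from firegex.nfproxy import FullStreamAction

# Per data handler: once a buffered stream exceeds this size,
# FGEX_FULL_STREAM_ACTION is applied to that stream only.
FGEX_STREAM_MAX_SIZE = 512 * 1024                 # 512KB instead of the 1MB default
FGEX_FULL_STREAM_ACTION = FullStreamAction.FLUSH  # keep acquiring new packets
```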
+ 🚀 How It Works + + The proxy is built on a multi-threaded architecture and integrates Python for dynamic filtering: + + + + + Packet Interception: + The nfqueue kernel module (a netfilter component) intercepts network packets 🔍
+ The rules that attach nfqueue to the network traffic are created by the Python manager through the nftables library's JSON APIs. +
+
+ + + Packet Reading: + A dedicated thread reads packets from nfqueue. 🧵 + + + + + Multi-threaded Analysis: + The C++ binary launches multiple threads, each starting its own Python interpreter. + Thanks to Python 3.12’s support for a per-interpreter GIL, real multithreading is achieved. + Traffic is distributed among threads based on IP addresses and port hashing, ensuring that + packets belonging to the same flow are processed by the same thread (see the sketch after this list). ⚡️ + + + + + Python Filter Integration: + Users can upload custom Python filters which are then executed by the interpreter, + allowing for dynamic and flexible packet handling. 🐍 + + + + + HTTP Parsing: + A Python wrapper for llhttp (forked and adapted to work with multiple interpreters) is used to parse HTTP connections, making it easier to handle + and analyze HTTP traffic. 📡 + + +
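The flow-to-thread distribution can be pictured with a small sketch (illustrative only: the real logic lives in the C++ binary, and its exact hash function is not shown in this diff):

```python
# Illustrative sketch of per-flow thread distribution (not the actual C++ code).
# Hashing the sorted endpoint pair means both directions of a connection
# land on the same thread, keeping per-stream state consistent.

def thread_for_flow(src_ip: str, src_port: int,
                    dst_ip: str, dst_port: int, n_threads: int) -> int:
    endpoints = sorted([(src_ip, src_port), (dst_ip, dst_port)])
    return hash(tuple(endpoints)) % n_threads

# Both directions of the same flow map to the same thread:
assert thread_for_flow("10.0.0.1", 40000, "10.0.0.2", 80, 4) == \
       thread_for_flow("10.0.0.2", 80, "10.0.0.1", 40000, 4)
```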
+ + 📚 Additional Resources + + Here's a commented pyfilter code example: + + +
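For a quick feel of the API before the full commented example, a minimal filter could look like the sketch below (it assumes HttpRequest is importable from firegex.nfproxy.models alongside RawPacket, as the data-structure list above suggests; the "/admin" path and the Authorization check are made up for illustration):

```python
# Minimal pyfilter sketch (illustrative; see the full commented example
# shipped with the frontend for the complete API walkthrough).
from firegex.nfproxy import pyfilter, REJECT, ACCEPT
from firegex.nfproxy.models import HttpRequest  # assumed location, next to RawPacket

@pyfilter
def block_unauthenticated_admin(req: HttpRequest):
    # get_header matches the header name case-insensitively (see docs above);
    # the "/admin" path and Authorization check are hypothetical examples.
    if "/admin" in (req.url or "") and req.get_header("Authorization") is None:
        return REJECT  # close the connection
    return ACCEPT      # default if None is returned
```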
+ ); +}; diff --git a/frontend/src/components/NFProxy/ServiceRow/RenameForm.tsx b/frontend/src/components/NFProxy/ServiceRow/RenameForm.tsx new file mode 100644 index 0000000..fec56b6 --- /dev/null +++ b/frontend/src/components/NFProxy/ServiceRow/RenameForm.tsx @@ -0,0 +1,68 @@ +import { Button, Group, Space, TextInput, Notification, Modal } from '@mantine/core'; +import { useForm } from '@mantine/form'; +import { useEffect, useState } from 'react'; +import { okNotify } from '../../../js/utils'; +import { ImCross } from "react-icons/im" +import { nfproxy, Service } from '../utils'; + +function RenameForm({ opened, onClose, service }:{ opened:boolean, onClose:()=>void, service:Service }) { + + const form = useForm({ + initialValues: { name:service.name }, + validate:{ name: (value) => value !== ""? null : "Service name is required" } + }) + + const close = () =>{ + onClose() + form.reset() + setError(null) + } + + useEffect(()=> form.setFieldValue("name", service.name),[opened]) + + const [submitLoading, setSubmitLoading] = useState(false) + const [error, setError] = useState(null) + + const submitRequest = ({ name }:{ name:string }) => { + setSubmitLoading(true) + nfproxy.servicerename(service.service_id, name).then( res => { + if (!res){ + setSubmitLoading(false) + close(); + okNotify(`Service ${service.name} has been renamed to ${ name }`, `Successfully renamed service on port ${service.port}`) + }else{ + setSubmitLoading(false) + setError("Error: [ "+res+" ]") + } + }).catch( err => { + setSubmitLoading(false) + setError("Request Failed! [ "+err+" ]") + }) + + } + + + return +
+ + + + + + {error?<> + + } color="red" onClose={()=>{setError(null)}}> + Error: {error} + + :null} + + +
+ +} + +export default RenameForm; diff --git a/frontend/src/components/NFProxy/ServiceRow/index.tsx b/frontend/src/components/NFProxy/ServiceRow/index.tsx new file mode 100644 index 0000000..f90627e --- /dev/null +++ b/frontend/src/components/NFProxy/ServiceRow/index.tsx @@ -0,0 +1,164 @@ +import { ActionIcon, Badge, Box, Divider, Menu, Space, Title, Tooltip } from '@mantine/core'; +import { useState } from 'react'; +import { FaPlay, FaStop } from 'react-icons/fa'; +import { nfproxy, Service, serviceQueryKey } from '../utils'; +import { MdDoubleArrow, MdOutlineArrowForwardIos } from "react-icons/md" +import YesNoModal from '../../YesNoModal'; +import { errorNotify, isMediumScreen, okNotify, regex_ipv4 } from '../../../js/utils'; +import { BsTrashFill } from 'react-icons/bs'; +import { BiRename } from 'react-icons/bi' +import RenameForm from './RenameForm'; +import { MenuDropDownWithButton } from '../../MainLayout'; +import { useQueryClient } from '@tanstack/react-query'; +import { TbPlugConnected } from "react-icons/tb"; +import { FaFilter } from "react-icons/fa"; +import { IoSettingsSharp } from 'react-icons/io5'; +import AddEditService from '../AddEditService'; +import { FaPencilAlt } from "react-icons/fa"; +import { ExceptionWarning } from '../ExceptionWarning'; + +export default function ServiceRow({ service, onClick }:{ service:Service, onClick?:()=>void }) { + + let status_color = "gray"; + switch(service.status){ + case "stop": status_color = "red"; break; + case "active": status_color = "teal"; break; + } + + const queryClient = useQueryClient() + const [buttonLoading, setButtonLoading] = useState(false) + const [deleteModal, setDeleteModal] = useState(false) + const [renameModal, setRenameModal] = useState(false) + const [editModal, setEditModal] = useState(false) + const isMedium = isMediumScreen() + + const stopService = async () => { + setButtonLoading(true) + + await nfproxy.servicestop(service.service_id).then(res => { + if(!res){ + okNotify(`Service ${service.name} stopped successfully!`,`The service on ${service.port} has been stopped!`) + queryClient.invalidateQueries(serviceQueryKey) + }else{ + errorNotify(`An error has occurred while stopping the service ${service.port}`,`Error: ${res}`) + } + }).catch(err => { + errorNotify(`An error has occurred while stopping the service ${service.port}`,`Error: ${err}`) + }) + setButtonLoading(false); + } + + const startService = async () => { + setButtonLoading(true) + await nfproxy.servicestart(service.service_id).then(res => { + if(!res){ + okNotify(`Service ${service.name} started successfully!`,`The service on ${service.port} has been started!`) + queryClient.invalidateQueries(serviceQueryKey) + }else{ + errorNotify(`An error has occurred while starting the service ${service.port}`,`Error: ${res}`) + } + }).catch(err => { + errorNotify(`An error has occurred while starting the service ${service.port}`,`Error: ${err}`) + }) + setButtonLoading(false) + } + + const deleteService = () => { + nfproxy.servicedelete(service.service_id).then(res => { + if (!res){ + okNotify("Service deletion completed!",`The service ${service.name} has been deleted!`) + queryClient.invalidateQueries(serviceQueryKey) + }else + errorNotify("An error occurred while deleting a service",`Error: ${res}`) + }).catch(err => { + errorNotify("An error occurred while deleting a service",`Error: ${err}`) + }) + + } + + return <> + + + + + + + {service.name} + + + + {service.status} + + :{service.port} + + + {isMedium?null:} + + + + + 
{service.ip_int} on {service.proto} + + + {service.blocked_packets} + + {service.edited_packets} + + {service.n_filters} + + + {isMedium?:} + + + + + Edit service + } onClick={()=>setEditModal(true)}>Service Settings + } onClick={()=>setRenameModal(true)}>Change service name + + Danger zone + } onClick={()=>setDeleteModal(true)}>Delete Service + + + + + + + + + + + + + + {isMedium?:} + {onClick? + + :null} + + + + + setDeleteModal(false) } + action={deleteService} + opened={deleteModal} + /> + setRenameModal(false)} + opened={renameModal} + service={service} + /> + setEditModal(false)} + edit={service} + /> + +} diff --git a/frontend/src/components/NFProxy/UploadFilterModal.tsx b/frontend/src/components/NFProxy/UploadFilterModal.tsx new file mode 100644 index 0000000..1208393 --- /dev/null +++ b/frontend/src/components/NFProxy/UploadFilterModal.tsx @@ -0,0 +1,54 @@ +import { Button, FileButton, Group, Modal, Notification, Space } from "@mantine/core"; +import { nfproxy, Service } from "./utils"; +import { useEffect, useState } from "react"; +import { ImCross } from "react-icons/im"; +import { okNotify } from "../../js/utils"; + +export const UploadFilterModal = ({ opened, onClose, service }: { opened: boolean, onClose: () => void, service?: Service }) => { + const close = () =>{ + onClose() + setError(null) + } + + const [submitLoading, setSubmitLoading] = useState(false) + const [error, setError] = useState(null) + const [file, setFile] = useState(null); + + useEffect(() => { + if (opened && file){ + file.text().then( code => { + setSubmitLoading(true) + nfproxy.setpyfilterscode(service?.service_id??"",code).then( res => { + if (!res){ + setSubmitLoading(false) + close(); + okNotify(`Service ${service?.name} code updated`, `Successfully updated code for service ${service?.name}`) + }else{ + setSubmitLoading(false) + setError("Error: "+res) + } + }).catch( err => { + setSubmitLoading(false) + setError("Error: "+err) + }) + }) + } + }, [opened, file]) + + return + + + + {(props) => } + + + + {error?<> + + } color="red" onClose={()=>{setError(null)}}> + Error: {error} + + :null} + + + + +} \ No newline at end of file diff --git a/frontend/src/components/NFProxy/utils.ts b/frontend/src/components/NFProxy/utils.ts new file mode 100644 index 0000000..541f3c7 --- /dev/null +++ b/frontend/src/components/NFProxy/utils.ts @@ -0,0 +1,175 @@ +import { PyFilter, ServerResponse } from "../../js/models" +import { deleteapi, getapi, postapi, putapi } from "../../js/utils" +import { useQuery } from "@tanstack/react-query" + +export type Service = { + service_id:string, + name:string, + status:string, + port:number, + proto: string, + ip_int: string, + n_filters:number, + edited_packets:number, + blocked_packets:number, + fail_open:boolean, +} + +export type ServiceAddForm = { + name:string, + port:number, + proto:string, + ip_int:string, + fail_open: boolean, +} + +export type ServiceSettings = { + port?:number, + ip_int?:string, + fail_open?: boolean, +} + +export type ServiceAddResponse = { + status: string, + service_id?: string, +} + +export const serviceQueryKey = ["nfproxy","services"] + +export const nfproxyServiceQuery = () => useQuery({queryKey:serviceQueryKey, queryFn:nfproxy.services}) +export const nfproxyServicePyfiltersQuery = (service_id:string) => useQuery({ + queryKey:[...serviceQueryKey,service_id,"pyfilters"], + queryFn:() => nfproxy.servicepyfilters(service_id) +}) + +export const nfproxyServiceFilterCodeQuery = (service_id:string) => useQuery({ + queryKey:[...serviceQueryKey,service_id,"pyfilters","code"], + queryFn:() 
=> nfproxy.getpyfilterscode(service_id) +}) + +export const nfproxy = { + services: async () => { + return await getapi("nfproxy/services") as Service[]; + }, + serviceinfo: async (service_id:string) => { + return await getapi(`nfproxy/services/${service_id}`) as Service; + }, + pyfilterenable: async (filter_name:string) => { + const { status } = await postapi(`nfproxy/pyfilters/${filter_name}/enable`) as ServerResponse; + return status === "ok"?undefined:status + }, + pyfilterdisable: async (filter_name:string) => { + const { status } = await postapi(`nfproxy/pyfilters/${filter_name}/disable`) as ServerResponse; + return status === "ok"?undefined:status + }, + servicestart: async (service_id:string) => { + const { status } = await postapi(`nfproxy/services/${service_id}/start`) as ServerResponse; + return status === "ok"?undefined:status + }, + servicerename: async (service_id:string, name: string) => { + const { status } = await putapi(`nfproxy/services/${service_id}/rename`,{ name }) as ServerResponse; + return status === "ok"?undefined:status + }, + servicestop: async (service_id:string) => { + const { status } = await postapi(`nfproxy/services/${service_id}/stop`) as ServerResponse; + return status === "ok"?undefined:status + }, + servicesadd: async (data:ServiceAddForm) => { + return await postapi("nfproxy/services",data) as ServiceAddResponse; + }, + servicedelete: async (service_id:string) => { + const { status } = await deleteapi(`nfproxy/services/${service_id}`) as ServerResponse; + return status === "ok"?undefined:status + }, + servicepyfilters: async (service_id:string) => { + return await getapi(`nfproxy/services/${service_id}/pyfilters`) as PyFilter[]; + }, + settings: async (service_id:string, data:ServiceSettings) => { + const { status } = await putapi(`nfproxy/services/${service_id}/settings`,data) as ServerResponse; + return status === "ok"?undefined:status + }, + getpyfilterscode: async (service_id:string) => { + return await getapi(`nfproxy/services/${service_id}/pyfilters/code`) as string; + }, + setpyfilterscode: async (service_id:string, code:string) => { + const { status } = await putapi(`nfproxy/services/${service_id}/pyfilters/code`,{ code }) as ServerResponse; + return status === "ok"?undefined:status + } +} + + +export const EXAMPLE_PYFILTER = `# This is an example of a filter file for the http protocol + +# From here we can import the DataTypes that we want to use: +# The data type must be specified in the filter functions +# and will also internally be used to decide when to call some filters and how to aggregate data +from firegex.nfproxy.models import RawPacket, HttpRequest + +# The global context in this execution is dedicated to a single TCP stream +# - This code will be executed once at the TCP stream start +# - The filter will be called for each packet in the stream +# - You can store some data you need in the global context, but storing too much data could be dangerous +# - At the end of the stream the global context will be destroyed + +from firegex.nfproxy import pyfilter +# pyfilter is a decorator: it makes the function an effective filter, and the function must have parameters with a specified type + +from firegex.nfproxy import REJECT, ACCEPT, UNSTABLE_MANGLE, DROP +# - The filter must return one of the following values: +# - ACCEPT: The packet will be accepted +# - REJECT: The packet will be rejected (a mechanism will be activated to send a FIN packet and drop all data in the stream) +# - UNSTABLE_MANGLE: The packet will be mangled and accepted +# - DROP: All the packets in 
this stream will be simply dropped + +# If you want, you can use print to debug your filters, but this could slow down the filter + +# Filter names must be unique and are specified by the name of the function wrapped by the decorator +@pyfilter +# This function will handle only a RawPacket object, which is the lowest level of packet abstraction +def strange_filter(packet:RawPacket): + # Mangling packets can be dangerous, due to the instability of the internal TCP state caused by mangling like that done in the filter below + # Also, it is not guaranteed that l4_data is the same as the packet data: + # packet data is the assembled TCP stream, l4_data is the TCP payload of the packet in the nfqueue + # Out-of-order TCP packets are accepted by default, and python is not called in this case + # For this reason mangling is only available for RawPacket: higher level data abstractions will be read-only + if b"TEST_MANGLING" in packet.l4_data: + # It's possible to change the raw_packet and l4_data values to mangle the packet; data is immutable instead + packet.l4_data = packet.l4_data.replace(b"TEST", b"UNSTABLE") + return UNSTABLE_MANGLE + # Drops the traffic + if b"BAD DATA 1" in packet.data: + return DROP + # Rejects the traffic + if b"BAD DATA 2" in packet.data: + return REJECT + # Accepts the traffic (default if None is returned) + return ACCEPT + +# Example with a higher level of abstraction (HttpRequest is imported above) +@pyfilter +def http_filter(http:HttpRequest): + if http.method == "GET" and "test" in http.url: + return REJECT + +# ADVANCED OPTIONS +# You can specify some additional options for the streaming management +# pyproxy will automatically store all the packets (already ordered by the c++ binary): +# +# If the stream is too big, you can specify what actions to take: +# This can be done by defining some variables in the global context +# - FGEX_STREAM_MAX_SIZE: The maximum size of the stream in bytes (default 1MB) +# NOTE: the stream size is calculated and managed independently by the data type handling system +# Only types required by at least 1 filter will be stored. +# - FGEX_FULL_STREAM_ACTION: The action to take when the stream is full +# - FullStreamAction.FLUSH: Flush the stream and continue to acquire new packets (default) +# - FullStreamAction.DROP: Drop the next stream packets - like a DROP action by filter +# - FullStreamAction.REJECT: Reject the stream and close the connection - like a REJECT action by filter +# - FullStreamAction.ACCEPT: Stop calling pyfilters and accept the traffic + +from firegex.nfproxy import FullStreamAction + +# Example of a global context +FGEX_STREAM_MAX_SIZE = 4096 +FGEX_FULL_STREAM_ACTION = FullStreamAction.REJECT +# This could be an ideal configuration if we expect streams to normally have a maximum size of 4KB of traffic +` diff --git a/frontend/src/components/NFRegex/NFRegexDocs.tsx b/frontend/src/components/NFRegex/NFRegexDocs.tsx new file mode 100644 index 0000000..aee093b --- /dev/null +++ b/frontend/src/components/NFRegex/NFRegexDocs.tsx @@ -0,0 +1,69 @@ +import { Container, Title, Text, List } from "@mantine/core"; + +export const NFRegexDocs = () => { + return ( + + 📡 Netfilter Regex Documentation + + 📖 Overview + + Netfilter Regex is a powerful feature that enables filtering of network packets using regular expressions. This capability is especially useful when you need to inspect packet content and match specific strings or patterns. + + + ⚙️ How to Use Netfilter Regex + + To get started, create a service and attach a regular expression to it. 
Once the service is configured, apply it to a network interface to dynamically filter packets based on the defined regex. + + + 🚀 How It Works + + The packet filtering process is implemented in C++ and involves several key steps: + + + + + Packet Interception: + The nfqueue kernel module (a netfilter component) intercepts network packets 🔍
+ The rules that attach nfqueue to the network traffic are created by the Python manager through the nftables library's JSON APIs (see the sketch after this list). +
+
+ + + Packet Reading: + A dedicated thread reads packets from nfqueue. 🧵 + + + + + Packet Parsing: + Intercepted packets are parsed by libtins, a C++ library that extracts the payload from each packet. 📄 + + + + + Multi-threaded Analysis: + Multiple threads analyze packets concurrently. + While the nfqueue module balances network + load based solely on IP addresses (which would leave a single thread handling all traffic in NAT environments + like CTF networks), firegex manages these threads at user level in a different way. + Traffic is routed to threads based on IP addresses combined with port hashing, + ensuring a more balanced workload and that each flow is analyzed by the same thread. ⚡️ + + + + + TCP Handling: + For TCP connections, libtins employs a TCP follower to order packets received from the kernel. 📈 + + + + + Regex Matching: + The extracted payload is processed using vectorscan, a fork of hyperscan that also runs on arm64. + For UDP packets, matching occurs on a per-packet basis while saving only the match context rather than the full payload. 🎯 + +
+
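A rough idea of what driving nftables from Python over its JSON API looks like (a sketch: the table and chain names and the queue number here are hypothetical, not the ones the firegex manager actually generates):

```python
# Hypothetical sketch of attaching an nfqueue via the nftables JSON API.
# Table/chain names and the queue number are made up; the rule assumes
# the table and chain already exist.
import nftables

nft = nftables.Nftables()
rule = {"nftables": [{"add": {"rule": {
    "family": "inet",
    "table": "firegex_demo",   # hypothetical table
    "chain": "input_demo",     # hypothetical chain
    "expr": [
        # match packets directed to tcp port 8080 ...
        {"match": {"op": "==",
                   "left": {"payload": {"protocol": "tcp", "field": "dport"}},
                   "right": 8080}},
        # ... and hand them to userspace queue 0
        {"queue": {"num": 0}},
    ]}}}]}
rc, output, error = nft.json_cmd(rule)
```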
+ ); +}; diff --git a/frontend/src/components/NFRegex/ServiceRow/index.tsx b/frontend/src/components/NFRegex/ServiceRow/index.tsx index 53a5c1b..9086d54 100644 --- a/frontend/src/components/NFRegex/ServiceRow/index.tsx +++ b/frontend/src/components/NFRegex/ServiceRow/index.tsx @@ -25,7 +25,6 @@ export default function ServiceRow({ service, onClick }:{ service:Service, onCli const queryClient = useQueryClient() const [buttonLoading, setButtonLoading] = useState(false) - const [tooltipStopOpened, setTooltipStopOpened] = useState(false); const [deleteModal, setDeleteModal] = useState(false) const [renameModal, setRenameModal] = useState(false) const [editModal, setEditModal] = useState(false) @@ -115,13 +114,11 @@ export default function ServiceRow({ service, onClick }:{ service:Service, onCli } onClick={()=>setDeleteModal(true)}>Delete Service - + setTooltipStopOpened(false)} onBlur={() => setTooltipStopOpened(false)} - onMouseEnter={() => setTooltipStopOpened(true)} onMouseLeave={() => setTooltipStopOpened(false)}> + aria-describedby="tooltip-stop-id"> diff --git a/frontend/src/components/NFRegex/utils.ts b/frontend/src/components/NFRegex/utils.ts index faa67c4..0fd8b3a 100644 --- a/frontend/src/components/NFRegex/utils.ts +++ b/frontend/src/components/NFRegex/utils.ts @@ -36,7 +36,6 @@ export type ServiceAddResponse = { } export const serviceQueryKey = ["nfregex","services"] -export const statsQueryKey = ["nfregex","stats"] export const nfregexServiceQuery = () => useQuery({queryKey:serviceQueryKey, queryFn:nfregex.services}) export const nfregexServiceRegexesQuery = (service_id:string) => useQuery({ diff --git a/frontend/src/components/NavBar/index.tsx b/frontend/src/components/NavBar/index.tsx index 456e12b..ca00a25 100644 --- a/frontend/src/components/NavBar/index.tsx +++ b/frontend/src/components/NavBar/index.tsx @@ -1,12 +1,12 @@ -import { Collapse, Divider, Group, MantineColor, ScrollArea, Text, ThemeIcon, Title, UnstyledButton, Box, AppShell } from "@mantine/core"; +import { Divider, Group, MantineColor, ScrollArea, Text, ThemeIcon, Title, UnstyledButton, Box, AppShell } from "@mantine/core"; import { useState } from "react"; -import { IoMdGitNetwork } from "react-icons/io"; -import { MdOutlineExpandLess, MdOutlineExpandMore, MdTransform } from "react-icons/md"; +import { TbPlugConnected } from "react-icons/tb"; import { useNavigate } from "react-router-dom"; import { GrDirections } from "react-icons/gr"; import { PiWallLight } from "react-icons/pi"; import { useNavbarStore } from "../../js/store"; import { getMainPath } from "../../js/utils"; +import { BsRegex } from "react-icons/bs"; function NavBarButton({ navigate, closeNav, name, icon, color, disabled, onClick }: { navigate?: string, closeNav: () => void, name:string, icon:any, color:MantineColor, disabled?:boolean, onClick?:CallableFunction }) { @@ -36,9 +36,15 @@ export default function NavBar() {
- } /> - } /> - } /> + } /> + } /> + } /> + + Experimental Features 🧪 + + + + } /> diff --git a/frontend/src/components/PortHijack/PortHijackDocs.tsx b/frontend/src/components/PortHijack/PortHijackDocs.tsx new file mode 100644 index 0000000..1ddd48b --- /dev/null +++ b/frontend/src/components/PortHijack/PortHijackDocs.tsx @@ -0,0 +1,37 @@ +import { CodeHighlight } from "@mantine/code-highlight" +import { Code, Container, Space, Text, Title } from "@mantine/core" +import { HELP_NFPROXY_SIM } from "../NFProxy/NFProxyDocs" + + +export const PortHijackDocs = () => { + return + ⚡️ Hijack port to proxy + + 📖 Overview + + 'Hijack port to proxy' uses nftables to redirect the traffic from an external IP to a localhost server. + You are responsible for running this server, your proxy, and keeping it alive. The original service will be accessible using loopback (127.0.0.1). + In this way you can run your custom proxy without touching the service configuration. + + + ⚙️ How to use Hijack port to proxy + + To use this feature, simply create your proxy, run it, then create a new service and set the proxy port and the external ip and port. + The traffic will be redirected to your proxy, which will still be able to contact the original service using loopback. + The responses of your proxy will be redirected to the original client, and the proxy will see the requests as if they were made by the original client. +
+ For instance, you can use the proxy simulator of the nfproxy feature of firegex and run it with your nfproxy filters. This gives you an advantage if, for instance, you need to mangle the traffic: + changing packets with nfproxy is possible but not reliable, while the simulator can change the packets normally (in the simulator, RawPacket data is always == l4_data; check the nfproxy docs for more info) +
+ You will need to install the firegex library with pip install -U fgex and then use the simulator command, + + for instance: fgex nfproxy test_http.py 127.0.0.1 8080 --proto http --from-port 13377 +
+ 🚀 How It Works + + This module works in a simple way: the only thing done is to change the destination and source IPs using nftables rules, so that the kernel sees the request as made to the proxy port, + while externally the packets still appear as connections to the original service. This mangling is applied only to external packets arriving from the indicated external IP; localhost traffic won't be touched. + +
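The proxy you run on the hijacked port can be any local server that talks back to the original service over loopback. A minimal sketch (the ports are hypothetical: the proxy listens on 9999 while the original service stays on 8080):

```python
# Minimal sketch of a hand-rolled proxy for a hijacked port.
# Ports are hypothetical: hijacked traffic arrives on 9999, the original
# service still listens on 8080 and stays reachable via loopback.
import asyncio

SERVICE_ADDR = ("127.0.0.1", 8080)  # original service, via loopback

async def pipe(reader, writer):
    try:
        while data := await reader.read(4096):
            # A real proxy would inspect or mangle `data` here.
            writer.write(data)
            await writer.drain()
    finally:
        writer.close()

async def handle(client_r, client_w):
    srv_r, srv_w = await asyncio.open_connection(*SERVICE_ADDR)
    await asyncio.gather(pipe(client_r, srv_w), pipe(srv_r, client_w))

async def main():
    server = await asyncio.start_server(handle, "127.0.0.1", 9999)
    async with server:
        await server.serve_forever()

asyncio.run(main())
```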
+} \ No newline at end of file diff --git a/frontend/src/components/PortHijack/ServiceRow/index.tsx b/frontend/src/components/PortHijack/ServiceRow/index.tsx index 2f4a136..7b29996 100644 --- a/frontend/src/components/PortHijack/ServiceRow/index.tsx +++ b/frontend/src/components/PortHijack/ServiceRow/index.tsx @@ -1,5 +1,5 @@ -import { ActionIcon, Badge, Box, Divider, Grid, Menu, Space, Title, Tooltip } from '@mantine/core'; -import React, { useState } from 'react'; +import { ActionIcon, Badge, Box, Divider, Menu, Space, Title, Tooltip } from '@mantine/core'; +import { useState } from 'react'; import { FaPlay, FaStop } from 'react-icons/fa'; import { porthijack, Service } from '../utils'; import YesNoModal from '../../YesNoModal'; @@ -17,11 +17,9 @@ export default function ServiceRow({ service }:{ service:Service }) { let status_color = service.active ? "teal": "red" const [buttonLoading, setButtonLoading] = useState(false) - const [tooltipStopOpened, setTooltipStopOpened] = useState(false); const [deleteModal, setDeleteModal] = useState(false) const [renameModal, setRenameModal] = useState(false) const [changeDestModal, setChangeDestModal] = useState(false) - const portInputRef = React.createRef() const isMedium = isMediumScreen() const form = useForm({ @@ -113,13 +111,11 @@ export default function ServiceRow({ service }:{ service:Service }) { } onClick={()=>setDeleteModal(true)}>Delete Service - + setTooltipStopOpened(false)} onBlur={() => setTooltipStopOpened(false)} - onMouseEnter={() => setTooltipStopOpened(true)} onMouseLeave={() => setTooltipStopOpened(false)}> + aria-describedby="tooltip-stop-id"> diff --git a/frontend/src/components/PyFilterView/index.tsx b/frontend/src/components/PyFilterView/index.tsx new file mode 100644 index 0000000..f1c647f --- /dev/null +++ b/frontend/src/components/PyFilterView/index.tsx @@ -0,0 +1,44 @@ +import { Text, Badge, Space, ActionIcon, Tooltip, Box } from '@mantine/core'; +import { useState } from 'react'; +import { PyFilter } from '../../js/models'; +import { errorNotify, isMediumScreen, okNotify } from '../../js/utils'; +import { FaPause, FaPlay } from 'react-icons/fa'; +import { FaFilter } from "react-icons/fa"; +import { nfproxy } from '../NFProxy/utils'; +import { FaPencilAlt } from 'react-icons/fa'; + +export default function PyFilterView({ filterInfo }:{ filterInfo:PyFilter }) { + + const isMedium = isMediumScreen() + + const changeRegexStatus = () => { + (filterInfo.active?nfproxy.pyfilterdisable:nfproxy.pyfilterenable)(filterInfo.name).then(res => { + if(!res){ + okNotify(`Filter ${filterInfo.name} ${filterInfo.active?"deactivated":"activated"} successfully!`,`Filter '${filterInfo.name}' has been ${filterInfo.active?"deactivated":"activated"}!`) + }else{ + errorNotify(`Filter ${filterInfo.name} ${filterInfo.active?"deactivation":"activation"} failed!`,`Error: ${res}`) + } + }).catch( err => errorNotify(`Filter ${filterInfo.name} ${filterInfo.active?"deactivation":"activation"} failed!`,`Error: ${err}`)) + } + + return + + + + {filterInfo.name} + + + {isMedium?<> + {filterInfo.blocked_packets} + + {filterInfo.edited_packets} + + :null} + + + {filterInfo.active?:} + + + + +} diff --git a/frontend/src/components/RegexView/index.tsx b/frontend/src/components/RegexView/index.tsx index e5b0821..787ae1b 100644 --- a/frontend/src/components/RegexView/index.tsx +++ b/frontend/src/components/RegexView/index.tsx @@ -1,13 +1,14 @@ import { Text, Title, Badge, Space, ActionIcon, Tooltip, Box } from '@mantine/core'; import { useState } from 'react'; 
import { RegexFilter } from '../../js/models'; -import { b64decode, errorNotify, getapiobject, isMediumScreen, okNotify } from '../../js/utils'; +import { b64decode, errorNotify, isMediumScreen, okNotify } from '../../js/utils'; import { BsTrashFill } from "react-icons/bs" import YesNoModal from '../YesNoModal'; import { FaPause, FaPlay } from 'react-icons/fa'; import { useClipboard } from '@mantine/hooks'; import { FaFilter } from "react-icons/fa"; -import { VscRegex } from "react-icons/vsc"; + +import { nfregex } from '../NFRegex/utils'; function RegexView({ regexInfo }:{ regexInfo:RegexFilter }) { @@ -18,13 +19,10 @@ function RegexView({ regexInfo }:{ regexInfo:RegexFilter }) { let regex_expr = b64decode(regexInfo.regex); const [deleteModal, setDeleteModal] = useState(false); - const [deleteTooltipOpened, setDeleteTooltipOpened] = useState(false); - const [statusTooltipOpened, setStatusTooltipOpened] = useState(false); const clipboard = useClipboard({ timeout: 500 }); - const isMedium = isMediumScreen(); const deleteRegex = () => { - getapiobject().regexdelete(regexInfo.id).then(res => { + nfregex.regexdelete(regexInfo.id).then(res => { if(!res){ okNotify(`Regex ${regex_expr} deleted successfully!`,`Regex '${regex_expr}' ID:${regexInfo.id} has been deleted!`) }else{ @@ -34,9 +32,9 @@ function RegexView({ regexInfo }:{ regexInfo:RegexFilter }) { } const changeRegexStatus = () => { - (regexInfo.active?getapiobject().regexdisable:getapiobject().regexenable)(regexInfo.id).then(res => { + (regexInfo.active?nfregex.regexdisable:nfregex.regexenable)(regexInfo.id).then(res => { if(!res){ - okNotify(`Regex ${regex_expr} ${regexInfo.active?"deactivated":"activated"} successfully!`,`Regex '${regex_expr}' ID:${regexInfo.id} has been ${regexInfo.active?"deactivated":"activated"}!`) + okNotify(`Regex ${regex_expr} ${regexInfo.active?"deactivated":"activated"} successfully!`,`Regex with id '${regexInfo.id}' has been ${regexInfo.active?"deactivated":"activated"}!`) }else{ errorNotify(`Regex ${regex_expr} ${regexInfo.active?"deactivation":"activation"} failed!`,`Error: ${res}`) } @@ -53,18 +51,14 @@ function RegexView({ regexInfo }:{ regexInfo:RegexFilter }) { }}>{regex_expr} - + setStatusTooltipOpened(false)} onBlur={() => setStatusTooltipOpened(false)} - onMouseEnter={() => setStatusTooltipOpened(true)} onMouseLeave={() => setStatusTooltipOpened(false)} >{regexInfo.active?:} - - setDeleteModal(true)} size="xl" radius="md" variant="filled" - onFocus={() => setDeleteTooltipOpened(false)} onBlur={() => setDeleteTooltipOpened(false)} - onMouseEnter={() => setDeleteTooltipOpened(true)} onMouseLeave={() => setDeleteTooltipOpened(false)} - > + + setDeleteModal(true)} size="xl" radius="md" variant="filled"> + diff --git a/frontend/src/index.css b/frontend/src/index.css index df910b8..192a8f7 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -96,6 +96,20 @@ body { opacity: 0.8; } +.firegex__regexview__pyfilter_text{ + padding: 6px; + padding-left: 15px; + padding-right: 15px; + background-color: var(--fourth_color); + border: 1px solid #444; + overflow-x: hidden; + border-radius: 8px; +} + +.firegex__regexview__pyfilter_text:hover{ + overflow-x: auto; +} + .firegex__porthijack__servicerow__row{ width: 95%; padding: 15px 0px; diff --git a/frontend/src/index.tsx b/frontend/src/index.tsx index b636476..b3212c8 100644 --- a/frontend/src/index.tsx +++ b/frontend/src/index.tsx @@ -9,6 +9,7 @@ import { import { queryClient } from './js/utils'; import '@mantine/core/styles.css'; import 
'@mantine/notifications/styles.css'; +import '@mantine/code-highlight/styles.css'; import './index.css'; const root = ReactDOM.createRoot( diff --git a/frontend/src/js/models.ts b/frontend/src/js/models.ts index 3f9804a..1c1a128 100644 --- a/frontend/src/js/models.ts +++ b/frontend/src/js/models.ts @@ -48,4 +48,11 @@ export type RegexAddForm = { is_case_sensitive:boolean, mode:string, // C->S S->C BOTH, active: boolean +} + +export type PyFilter = { + name:string, + blocked_packets:number, + edited_packets:number, + active:boolean } \ No newline at end of file diff --git a/frontend/src/js/utils.tsx b/frontend/src/js/utils.tsx index 0197205..56297fd 100644 --- a/frontend/src/js/utils.tsx +++ b/frontend/src/js/utils.tsx @@ -2,11 +2,11 @@ import { showNotification } from "@mantine/notifications"; import { ImCross } from "react-icons/im"; import { TiTick } from "react-icons/ti" import { Navigate } from "react-router-dom"; -import { nfregex } from "../components/NFRegex/utils"; import { ChangePassword, IpInterface, LoginResponse, PasswordSend, ServerResponse, ServerResponseToken, ServerStatusResponse } from "./models"; import { Buffer } from "buffer" import { QueryClient, useQuery } from "@tanstack/react-query"; import { useMediaQuery } from "@mantine/hooks"; +import { io } from "socket.io-client"; export const IS_DEV = import.meta.env.DEV @@ -18,6 +18,31 @@ export const regex_port = "^([1-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}| export const regex_range_port = "^(([1-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])(-([1-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])?)?)?$" export const DEV_IP_BACKEND = "127.0.0.1:4444" +export const WARNING_NFPROXY_TIME_LIMIT = 1000*60*10 // 10 minutes + +export type EnumToPrimitiveUnion = `${T & string}` | ParseNumber<`${T & number}`>; +type ParseNumber = T extends `${infer U extends number}` ? U : never; + +export function typeCastEnum(value: EnumToPrimitiveUnion): E { + return value as E; +} + +export const socketio = import.meta.env.DEV? 
+ io("ws://"+DEV_IP_BACKEND, { + path:"/sock/socket.io", + transports: ['websocket'], + auth: { + token: localStorage.getItem("access_token") + } + }): + io({ + path:"/sock/socket.io", + transports: ['websocket'], + auth: { + token: localStorage.getItem("access_token") + } + }) + export const queryClient = new QueryClient({ defaultOptions: { queries: { staleTime: Infinity } }}) @@ -71,9 +96,14 @@ export async function genericapi(method:string,path:string,data:any = undefined, const errorDefault = res.statusText return res.json().then( res => reject(getErrorMessageFromServerResponse(res, errorDefault)) ).catch( _err => reject(errorDefault)) } - res.json().then( res => resolve(res) ).catch( err => reject(err)) - }) - .catch(err => { + res.text().then(t => { + try{ + resolve(JSON.parse(t)) + }catch(e){ + resolve(t) + } + }).catch( err => reject(err)) + }).catch(err => { reject(err) }) }); @@ -101,14 +131,6 @@ export function getMainPath(){ return "" } -export function getapiobject(){ - switch(getMainPath()){ - case "nfregex": - return nfregex - } - throw new Error('No api for this tool!'); -} - export function HomeRedirector(){ const section = sessionStorage.getItem("home_section") const path = section?`/${section}`:`/nfregex` diff --git a/frontend/src/pages/Firewall/index.tsx b/frontend/src/pages/Firewall/index.tsx index 07594ca..65a7fef 100644 --- a/frontend/src/pages/Firewall/index.tsx +++ b/frontend/src/pages/Firewall/index.tsx @@ -1,4 +1,4 @@ -import { ActionIcon, Badge, Box, Divider, FloatingIndicator, LoadingOverlay, Space, Switch, Table, Tabs, TextInput, Title, Tooltip, useMantineTheme } from "@mantine/core" +import { ActionIcon, Badge, Box, Divider, FloatingIndicator, LoadingOverlay, Space, Switch, Table, Tabs, TextInput, ThemeIcon, Title, Tooltip, useMantineTheme } from "@mantine/core" import { useEffect, useState } from "react"; import { BsPlusLg, BsTrashFill } from "react-icons/bs" import { rem } from '@mantine/core'; @@ -20,16 +20,12 @@ import { LuArrowBigRightDash } from "react-icons/lu" import { ImCheckmark, ImCross } from "react-icons/im"; import { IoSettingsSharp } from "react-icons/io5"; import { SettingsModal } from "./SettingsModal"; - +import { FaDirections } from "react-icons/fa"; +import { PiWallLight } from "react-icons/pi"; export const Firewall = () => { - const [tooltipAddOpened, setTooltipAddOpened] = useState(false); - const [tooltipRefreshOpened, setTooltipRefreshOpened] = useState(false); - const [tooltipApplyOpened, setTooltipApplyOpened] = useState(false); - const [tooltipSettingsOpened, setTooltipSettingsOpened] = useState(false); const [currentPolicy, setCurrentPolicy] = useState(ActionType.ACCEPT) - const [tooltipAddRulOpened, setTooltipAddRulOpened] = useState(false) const queryClient = useQueryClient() const rules = firewallRulesQuery() const [state, handlers] = useListState([]); @@ -346,7 +342,7 @@ export const Firewall = () => { - Firewall Rules + <ThemeIcon radius="md" size="md" variant='filled' color='red' ><PiWallLight size={20} /></ThemeIcon><Space w="xs" />Firewall Rules {isMedium?:} Enabled: @@ -361,33 +357,24 @@ export const Firewall = () => { {isMedium?:} - Rules: {rules.isLoading?0:rules.data?.rules.length} - - - setTooltipAddOpened(false)} onBlur={() => setTooltipAddOpened(false)} - onMouseEnter={() => setTooltipAddOpened(true)} onMouseLeave={() => setTooltipAddOpened(false)}> + Rules: {rules.isLoading?0:rules.data?.rules.length} + + + - + queryClient.invalidateQueries(["firewall"])} size="lg" radius="md" variant="filled" - 
loading={rules.isFetching} - onFocus={() => setTooltipRefreshOpened(false)} onBlur={() => setTooltipRefreshOpened(false)} - onMouseEnter={() => setTooltipRefreshOpened(true)} onMouseLeave={() => setTooltipRefreshOpened(false)}> + loading={rules.isFetching}> - - setSettingsModal(true)} size="lg" radius="md" variant="filled" - onFocus={() => setTooltipSettingsOpened(false)} onBlur={() => setTooltipSettingsOpened(false)} - onMouseEnter={() => setTooltipSettingsOpened(true)} onMouseLeave={() => setTooltipSettingsOpened(false)}> + + setSettingsModal(true)} size="lg" radius="md" variant="filled"> - - setTooltipApplyOpened(false)} onBlur={() => setTooltipApplyOpened(false)} - onMouseEnter={() => setTooltipApplyOpened(true)} onMouseLeave={() => setTooltipApplyOpened(false)} - disabled={!valuesChanged} - > + + + @@ -420,16 +407,20 @@ export const Firewall = () => { )} :<> - No rule found! Add one clicking the "+" buttons - - - - setTooltipAddRulOpened(false)} onBlur={() => setTooltipAddRulOpened(false)} - onMouseEnter={() => setTooltipAddRulOpened(true)} onMouseLeave={() => setTooltipAddRulOpened(false)}> - - -} + + + Firewall Rules allow you to use nftables through a web interface + + Add new rules, sort them and enable the firewall: be careful, wrong rules could also lock you out of firegex + + + + + + + + + } s.service_id == srv) + const filtersList = nfproxyServicePyfiltersQuery(srv??"") + const [deleteModal, setDeleteModal] = useState(false) + const [renameModal, setRenameModal] = useState(false) + const [editModal, setEditModal] = useState(false) + const [buttonLoading, setButtonLoading] = useState(false) + const queryClient = useQueryClient() + const filterCode = nfproxyServiceFilterCodeQuery(srv??"") + const navigate = useNavigate() + const isMedium = isMediumScreen() + const [openLogModal, setOpenLogModal] = useState(false) + const [logData, logDataSetters] = useListState([]); + + + useEffect(()=>{ + if (srv){ + if (openLogModal){ + logDataSetters.setState([]) + socketio.emit("nfproxy-outstream-join", { service: srv }); + socketio.on(`nfproxy-outstream-${srv}`, (data) => { + logDataSetters.append(data) + }); + }else{ + socketio.emit("nfproxy-outstream-leave", { service: srv }); + socketio.off(`nfproxy-outstream-${srv}`); + logDataSetters.setState([]) + } + return () => { + socketio.emit("nfproxy-outstream-leave", { service: srv }); + socketio.off(`nfproxy-outstream-${srv}`); + logDataSetters.setState([]) + } + } + }, [openLogModal, srv]) + + if (services.isLoading) return + if (!srv || !serviceInfo || filtersList.isError) return + + let status_color = "gray"; + switch(serviceInfo.status){ + case "stop": status_color = "red"; break; + case "active": status_color = "teal"; break; + } + + const startService = async () => { + setButtonLoading(true) + await nfproxy.servicestart(serviceInfo.service_id).then(res => { + if(!res){ + okNotify(`Service ${serviceInfo.name} started successfully!`,`The service on ${serviceInfo.port} has been started!`) + queryClient.invalidateQueries(serviceQueryKey) + }else{ + errorNotify(`An error has occurred while starting the service ${serviceInfo.port}`,`Error: ${res}`) + } + }).catch(err => { + errorNotify(`An error has occurred while starting the service ${serviceInfo.port}`,`Error: ${err}`) + }) + setButtonLoading(false) + } + + const deleteService = () => { + nfproxy.servicedelete(serviceInfo.service_id).then(res => { + if (!res){ + okNotify("Service deletion completed!",`The service ${serviceInfo.name} has been deleted!`) +
queryClient.invalidateQueries(serviceQueryKey) + }else + errorNotify("An error occurred while deleting a service",`Error: ${res}`) + }).catch(err => { + errorNotify("An error occurred while deleting a service",`Error: ${err}`) + }) + } + + const stopService = async () => { + setButtonLoading(true) + + await nfproxy.servicestop(serviceInfo.service_id).then(res => { + if(!res){ + okNotify(`Service ${serviceInfo.name} stopped successfully!`,`The service on ${serviceInfo.port} has been stopped!`) + queryClient.invalidateQueries(serviceQueryKey) + }else{ + errorNotify(`An error has occurred while stopping the service ${serviceInfo.port}`,`Error: ${res}`) + } + }).catch(err => { + errorNotify(`An error has occurred while stopping the service ${serviceInfo.port}`,`Error: ${err}`) + }) + setButtonLoading(false); + } + + return <> + + + + + <Box className="center-flex"> + <MdDoubleArrow /><Space w="sm" />{serviceInfo.name} + </Box> + + + {isMedium?null:} + + + + + {serviceInfo.status} + + + :{serviceInfo.port} + + + + Edit service + } onClick={()=>setEditModal(true)}>Service Settings + } onClick={()=>setRenameModal(true)}>Change service name + + Danger zone + } onClick={()=>setDeleteModal(true)}>Delete Service + + + + setOpenLogModal(true)} loading={buttonLoading} variant="filled"> + + + + + + {isMedium?null:} + + + + {serviceInfo.edited_packets} + + {serviceInfo.blocked_packets} + + {serviceInfo.n_filters} + + {isMedium?:} + {serviceInfo.ip_int} on {serviceInfo.proto} + + {isMedium?null:} + + + navigate("/")} size="xl" radius="md" variant="filled" + aria-describedby="tooltip-back-id"> + + + + + + + + + + + + + + + + + + + + + {filterCode.data?<> + <FaPython style={{ marginBottom: -3 }} size={30} /><Space w="xs" />Filter code + + : null} + + {(!filtersList.data || filtersList.data.length == 0)?<> + + No filters found! 
To create some proxy filters, install the firegex client:<Space w="xs" /><Code mb={-4} >pip install -U fgex</Code> + + Read the documentation for more information<Space w="sm" /><DocsButton doc='nfproxy'/> + + Then create a new filter file with the following syntax and upload it here (using the button above) + :<>{filtersList.data?.map( (filterInfo) => )} + } + setDeleteModal(false) } + action={deleteService} + opened={deleteModal} + /> + setRenameModal(false)} + opened={renameModal} + service={serviceInfo} + /> + setEditModal(false)} + edit={serviceInfo} + /> + setOpenLogModal(false)} + title={`Logs for service ${serviceInfo.name}`} + data={logData.join("")} + /> + +} diff --git a/frontend/src/pages/NFProxy/index.tsx b/frontend/src/pages/NFProxy/index.tsx new file mode 100644 index 0000000..0529fa8 --- /dev/null +++ b/frontend/src/pages/NFProxy/index.tsx @@ -0,0 +1,172 @@ +import { ActionIcon, Badge, Box, Code, LoadingOverlay, Space, ThemeIcon, Title, Tooltip } from '@mantine/core'; +import { useEffect, useState } from 'react'; +import { BsPlusLg } from "react-icons/bs"; +import { useNavigate, useParams } from 'react-router-dom'; +import ServiceRow from '../../components/NFProxy/ServiceRow'; +import { errorNotify, getErrorMessage, isMediumScreen } from '../../js/utils'; +import AddEditService from '../../components/NFProxy/AddEditService'; +import { useQueryClient } from '@tanstack/react-query'; +import { TbPlugConnected, TbReload } from 'react-icons/tb'; +import { EXAMPLE_PYFILTER, nfproxy, nfproxyServiceQuery } from '../../components/NFProxy/utils'; +import { FaFilter, FaPencilAlt, FaServer } from 'react-icons/fa'; +import { MdUploadFile } from "react-icons/md"; +import { notifications } from '@mantine/notifications'; +import { useFileDialog } from '@mantine/hooks'; +import { CodeHighlight } from '@mantine/code-highlight'; +import { DocsButton } from '../../components/DocsButton'; + + +export default function NFProxy({ children }: { children: any }) { + + const navigator = useNavigate() + const [open, setOpen] = useState(false); + const {srv} = useParams() + const queryClient = useQueryClient() + const isMedium = isMediumScreen() + const services = nfproxyServiceQuery() + const fileDialog = useFileDialog({ + accept: ".py", + multiple: false, + resetOnOpen: true, + onChange: (files) => { + if ((files?.length??0) > 0) + setFile(files![0]) + } + }); + const [file, setFile] = useState(null); + useEffect(() => { + if (!srv) return + const service = services.data?.find(s => s.service_id === srv) + if (!service) return + if (file){ + console.log("Uploading code") + const notify_id = notifications.show( + { + title: "Uploading code", + message: `Uploading code for service ${service.name}`, + color: "blue", + icon: , + autoClose: false, + loading: true, + } + ) + file.text() + .then( code => nfproxy.setpyfilterscode(service?.service_id??"",code.toString())) + .then( res => { + if (!res){ + notifications.update({ + id: notify_id, + title: "Code uploaded", + message: `Successfully uploaded code for service ${service.name}`, + color: "green", + icon: , + autoClose: 5000, + loading: false, + }) + }else{ + notifications.update({ + id: notify_id, + title: "Code upload failed", + message: `Error: ${res}`, + color: "red", + icon: , + autoClose: 5000, + loading: false, + }) + } + }).catch( err => { + notifications.update({ + id: notify_id, + title: "Code upload failed", + message: `Error: ${err}`, + color: "red", + icon: , + autoClose: 5000, + loading: false, + }) + }).finally(()=>{setFile(null)}) + } + },
[file]) + + useEffect(()=> { + if(services.isError) + errorNotify("NFProxy Update failed!", getErrorMessage(services.error)) + },[services.isError]) + + const closeModal = () => {setOpen(false);} + + return <> + + + <ThemeIcon radius="md" size="md" variant='filled' color='lime' ><TbPlugConnected size={20} /></ThemeIcon><Space w="xs" />Netfilter Proxy + {isMedium?:} + + {isMedium?"General stats:":null} + + Services: {services.isLoading?0:services.data?.length} + + {services.isLoading?0:services.data?.reduce((acc, s)=> acc+=s.blocked_packets, 0)} + + {services.isLoading?0:services.data?.reduce((acc, s)=> acc+=s.edited_packets, 0)} + + {services.isLoading?0:services.data?.reduce((acc, s)=> acc+=s.n_filters, 0)} + + + {isMedium?null:} + + { srv? + + + + + + : + setOpen(true)} size="lg" radius="md" variant="filled"> + + + + } + + + queryClient.invalidateQueries(["nfproxy"])} size="lg" radius="md" variant="filled" loading={services.isFetching}> + + + + + + + + + + {srv?null:<> + + {(services.data && services.data?.length > 0)?services.data.map( srv => { + navigator("/nfproxy/"+srv.service_id) + }} />):<> + + + Netfilter proxy is a simulated proxy written in python with a c++ core + + Filters are created using a simple python syntax; in fact, the first thing you need to do is to install the firegex lib:<Space w="xs" /><Code mb={-4} >pip install -U fgex</Code> + + Then you can create a new service and write custom filters for the service + + + + setOpen(true)} size="xl" radius="md" variant="filled"> + + + + + + + } + } + + {srv?children:null} + {!srv? + :null + } + +} + diff --git a/frontend/src/pages/NFRegex/ServiceDetails.tsx b/frontend/src/pages/NFRegex/ServiceDetails.tsx index bb6255f..7b3b806 100644 --- a/frontend/src/pages/NFRegex/ServiceDetails.tsx +++ b/frontend/src/pages/NFRegex/ServiceDetails.tsx @@ -27,15 +27,12 @@ export default function ServiceDetailsNFRegex() { const [open, setOpen] = useState(false) const services = nfregexServiceQuery() const serviceInfo = services.data?.find(s => s.service_id == srv) - const [tooltipAddRegexOpened, setTooltipAddRegexOpened] = useState(false) const regexesList = nfregexServiceRegexesQuery(srv??"") const [deleteModal, setDeleteModal] = useState(false) const [renameModal, setRenameModal] = useState(false) const [editModal, setEditModal] = useState(false) const [buttonLoading, setButtonLoading] = useState(false) const queryClient = useQueryClient() - const [tooltipStopOpened, setTooltipStopOpened] = useState(false); - const [tooltipBackOpened, setTooltipBackOpened] = useState(false); const navigate = useNavigate() const isMedium = isMediumScreen() @@ -133,23 +130,19 @@ export default function ServiceDetailsNFRegex() { {isMedium?null:} - + navigate("/")} size="xl" radius="md" variant="filled" - aria-describedby="tooltip-back-id" - onFocus={() => setTooltipBackOpened(false)} onBlur={() => setTooltipBackOpened(false)} - onMouseEnter={() => setTooltipBackOpened(true)} onMouseLeave={() => setTooltipBackOpened(false)}> + aria-describedby="tooltip-back-id"> - + setTooltipStopOpened(false)} onBlur={() => setTooltipStopOpened(false)} - onMouseEnter={() => setTooltipStopOpened(true)} onMouseLeave={() => setTooltipStopOpened(false)}> + aria-describedby="tooltip-stop-id"> @@ -168,11 +161,9 @@ export default function ServiceDetailsNFRegex() { No regex found for this service! 
Add one by clicking the "+" buttons - + setOpen(true)} size="xl" radius="md" variant="filled" - aria-describedby="tooltip-AddRegex-id" - onFocus={() => setTooltipAddRegexOpened(false)} onBlur={() => setTooltipAddRegexOpened(false)} - onMouseEnter={() => setTooltipAddRegexOpened(true)} onMouseLeave={() => setTooltipAddRegexOpened(false)}> + aria-describedby="tooltip-AddRegex-id"> : diff --git a/frontend/src/pages/NFRegex/index.tsx b/frontend/src/pages/NFRegex/index.tsx index 6153458..f51ed03 100644 --- a/frontend/src/pages/NFRegex/index.tsx +++ b/frontend/src/pages/NFRegex/index.tsx @@ -1,6 +1,6 @@ -import { ActionIcon, Badge, Box, LoadingOverlay, Space, Title, Tooltip } from '@mantine/core'; +import { ActionIcon, Badge, Box, LoadingOverlay, Space, ThemeIcon, Title, Tooltip } from '@mantine/core'; import { useEffect, useState } from 'react'; -import { BsPlusLg } from "react-icons/bs"; +import { BsPlusLg, BsRegex } from "react-icons/bs"; import { useNavigate, useParams } from 'react-router-dom'; import ServiceRow from '../../components/NFRegex/ServiceRow'; import { nfregexServiceQuery } from '../../components/NFRegex/utils'; @@ -9,7 +9,10 @@ import AddEditService from '../../components/NFRegex/AddEditService'; import AddNewRegex from '../../components/AddNewRegex'; import { useQueryClient } from '@tanstack/react-query'; import { TbReload } from 'react-icons/tb'; - +import { FaFilter } from 'react-icons/fa'; +import { FaServer } from "react-icons/fa6"; +import { VscRegex } from "react-icons/vsc"; +import { DocsButton } from '../../components/DocsButton'; function NFRegex({ children }: { children: any }) { @@ -17,9 +20,6 @@ function NFRegex({ children }: { children: any }) { const [open, setOpen] = useState(false); const {srv} = useParams() const queryClient = useQueryClient() - const [tooltipRefreshOpened, setTooltipRefreshOpened] = useState(false); - const [tooltipAddServOpened, setTooltipAddServOpened] = useState(false); - const [tooltipAddOpened, setTooltipAddOpened] = useState(false); const isMedium = isMediumScreen() const services = nfregexServiceQuery() @@ -33,37 +33,35 @@ function NFRegex({ children }: { children: any }) { return <> - Netfilter Regex + <ThemeIcon radius="md" size="md" variant='filled' color='grape' ><BsRegex size={20} /></ThemeIcon><Space w="xs" />Netfilter Regex {isMedium?:} - Services: {services.isLoading?0:services.data?.length} + {isMedium?"General stats:":null} - Filtered Connections: {services.isLoading?0:services.data?.reduce((acc, s)=> acc+=s.n_packets, 0)} + Services: {services.isLoading?0:services.data?.length} - Regexes: {services.isLoading?0:services.data?.reduce((acc, s)=> acc+=s.n_regex, 0)} + {services.isLoading?0:services.data?.reduce((acc, s)=> acc+=s.n_packets, 0)} + + {services.isLoading?0:services.data?.reduce((acc, s)=> acc+=s.n_regex, 0)} {isMedium?null:} { srv? 
- - setOpen(true)} size="lg" radius="md" variant="filled" - onFocus={() => setTooltipAddOpened(false)} onBlur={() => setTooltipAddOpened(false)} - onMouseEnter={() => setTooltipAddOpened(true)} onMouseLeave={() => setTooltipAddOpened(false)}> + + setOpen(true)} size="lg" radius="md" variant="filled"> - : - setOpen(true)} size="lg" radius="md" variant="filled" - onFocus={() => setTooltipAddOpened(false)} onBlur={() => setTooltipAddOpened(false)} - onMouseEnter={() => setTooltipAddOpened(true)} onMouseLeave={() => setTooltipAddOpened(false)}> + : + setOpen(true)} size="lg" radius="md" variant="filled"> } - + queryClient.invalidateQueries(["nfregex"])} size="lg" radius="md" variant="filled" - loading={services.isFetching} - onFocus={() => setTooltipRefreshOpened(false)} onBlur={() => setTooltipRefreshOpened(false)} - onMouseEnter={() => setTooltipRefreshOpened(true)} onMouseLeave={() => setTooltipRefreshOpened(false)}> + loading={services.isFetching}> + + @@ -72,13 +70,21 @@ function NFRegex({ children }: { children: any }) { {(services.data && services.data?.length > 0)?services.data.map( srv => { navigator("/nfregex/"+srv.service_id) - }} />):<> No services found! Add one clicking the "+" buttons - - - setOpen(true)} size="xl" radius="md" variant="filled" - onFocus={() => setTooltipAddServOpened(false)} onBlur={() => setTooltipAddServOpened(false)} - onMouseEnter={() => setTooltipAddServOpened(true)} onMouseLeave={() => setTooltipAddServOpened(false)}> - + }} />):<> + + + Netfilter Regex allows you to filter traffic using regexes + + Start a service, add your regexes and you're done! + + + + setOpen(true)} size="xl" radius="md" variant="filled"> + + + + + } } diff --git a/frontend/src/pages/PortHijack/index.tsx b/frontend/src/pages/PortHijack/index.tsx index e4cdc08..288fa63 100644 --- a/frontend/src/pages/PortHijack/index.tsx +++ b/frontend/src/pages/PortHijack/index.tsx @@ -1,4 +1,4 @@ -import { ActionIcon, Badge, Box, Divider, LoadingOverlay, Space, Title, Tooltip } from '@mantine/core'; +import { ActionIcon, Badge, Box, Divider, LoadingOverlay, Space, ThemeIcon, Title, Tooltip } from '@mantine/core'; import { useEffect, useState } from 'react'; import { BsPlusLg } from "react-icons/bs"; import ServiceRow from '../../components/PortHijack/ServiceRow'; @@ -7,15 +7,15 @@ import { errorNotify, getErrorMessage, isMediumScreen } from '../../js/utils'; import AddNewService from '../../components/PortHijack/AddNewService'; import { useQueryClient } from '@tanstack/react-query'; import { TbReload } from 'react-icons/tb'; +import { FaServer } from 'react-icons/fa'; +import { GrDirections } from 'react-icons/gr'; +import { DocsButton } from '../../components/DocsButton'; function PortHijack() { const [open, setOpen] = useState(false); - const [tooltipAddServOpened, setTooltipAddServOpened] = useState(false); - const [tooltipAddOpened, setTooltipAddOpened] = useState(false); const queryClient = useQueryClient() - const [tooltipRefreshOpened, setTooltipRefreshOpened] = useState(false); const isMedium = isMediumScreen() const services = porthijackServiceQuery() @@ -30,36 +30,43 @@ function PortHijack() { return <> - Hijack port to proxy + <ThemeIcon radius="md" size="md" variant='filled' color='blue' ><GrDirections size={20} /></ThemeIcon><Space w="xs" />Hijack port to proxy {isMedium?:} - Services: {services.isLoading?0:services.data?.length} + Services: {services.isLoading?0:services.data?.length} - - setOpen(true)} size="lg" radius="md" variant="filled" - onFocus={() => 
setTooltipAddOpened(false)} onBlur={() => setTooltipAddOpened(false)} - onMouseEnter={() => setTooltipAddOpened(true)} onMouseLeave={() => setTooltipAddOpened(false)}> + + setOpen(true)} size="lg" radius="md" variant="filled"> - + queryClient.invalidateQueries(["porthijack"])} size="lg" radius="md" variant="filled" - loading={services.isFetching} - onFocus={() => setTooltipRefreshOpened(false)} onBlur={() => setTooltipRefreshOpened(false)} - onMouseEnter={() => setTooltipRefreshOpened(true)} onMouseLeave={() => setTooltipRefreshOpened(false)}> + loading={services.isFetching}> + + {(services.data && services.data.length > 0) ?services.data.map( srv => ):<> - No services found! Add one clicking the "+" buttons - - - setOpen(true)} size="xl" radius="md" variant="filled" - onFocus={() => setTooltipAddServOpened(false)} onBlur={() => setTooltipAddServOpened(false)} - onMouseEnter={() => setTooltipAddServOpened(true)} onMouseLeave={() => setTooltipAddServOpened(false)}> - + + + Hijack Port to Proxy is a feature that allows you to run your custom proxy without touching the service config + + It hijacks the traffic to a secondary port, where you can run your proxy, which will still be able to contact the original service using loopback + + Start using port hijacking by creating a new service and routing the traffic to your proxy without changing the original service configs + + + + setOpen(true)} size="xl" radius="md" variant="filled"> + + + + + } diff --git a/proxy-client/requirements.txt b/proxy-client/requirements.txt deleted file mode 100644 index 593817c..0000000 --- a/proxy-client/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -typer==0.12.3 -requests>=2.32.3 -python-dateutil==2.9.0.post0 -pydantic >= 2 -typing-extensions >= 4.7.1 -textual==0.89.1 -toml==0.10.2 -psutil==6.0.0 -dirhash==0.5.0 -requests-toolbelt==1.0.0 -python-socketio[client]==5.11.4 -orjson - -# TODO choose dependencies \ No newline at end of file diff --git a/tests/benchmark.py b/tests/benchmark.py index 02b5aaa..64ace98 100644 --- a/tests/benchmark.py +++ b/tests/benchmark.py @@ -35,7 +35,7 @@ else: def exit_test(code): if service_id: - server.stop() + server.kill() if(firegex.nf_delete_service(service_id)): puts("Sucessfully deleted service ✔", color=colors.green) else: diff --git a/tests/nf_test.py b/tests/nf_test.py index 1fb8260..5ce58a8 100644 --- a/tests/nf_test.py +++ b/tests/nf_test.py @@ -43,6 +43,11 @@ def exit_test(code): exit_test(1) exit(code) +srvs = firegex.nf_get_services() +for ele in srvs: + if ele['name'] == args.service_name: + firegex.nf_delete_service(ele['service_id']) + service_id = firegex.nf_add_service(args.service_name, args.port, args.proto , "::1" if args.ipv6 else "127.0.0.1" ) if service_id: puts(f"Sucessfully created service {service_id} ✔", color=colors.green) @@ -64,7 +69,7 @@ try: else: puts("Test Failed: Data was corrupted ", color=colors.red) exit_test(1) -except Exception as e: +except Exception: puts("Test Failed: Couldn't send data to the server ", color=colors.red) exit_test(1) #Add new regex @@ -220,10 +225,24 @@ else: exit_test(1) #Check if service was renamed correctly -for services in firegex.nf_get_services(): - if services["name"] == f"{args.service_name}2": - puts("Checked that service was renamed correctly ✔", color=colors.green) - exit_test(0) +service = firegex.nf_get_service(service_id) +if service["name"] == f"{args.service_name}2": + puts("Checked that service was renamed correctly ✔", color=colors.green) +else: + puts("Test Failed: Service wasn't renamed correctly ✗", 
color=colors.red) + exit_test(1) -puts("Test Failed: Service wasn't renamed correctly ✗", color=colors.red) -exit_test(1) +#Change settings +opposite_proto = "udp" if args.proto == "tcp" else "tcp" +if(firegex.nf_settings_service(service_id, 1338, opposite_proto, "::dead:beef" if args.ipv6 else "123.123.123.123", True)): + srv_updated = firegex.nf_get_service(service_id) + if srv_updated["port"] == 1338 and srv_updated["proto"] == opposite_proto and ("::dead:beef" if args.ipv6 else "123.123.123.123") in srv_updated["ip_int"] and srv_updated["fail_open"]: + puts("Successfully changed service settings ✔", color=colors.green) + else: + puts("Test Failed: Service settings weren't updated correctly ✗", color=colors.red) + exit_test(1) +else: + puts("Test Failed: Couldn't change service settings ✗", color=colors.red) + exit_test(1) + +exit_test(0) diff --git a/tests/ph_test.py b/tests/ph_test.py index 7e1df9c..c54dae2 100644 --- a/tests/ph_test.py +++ b/tests/ph_test.py @@ -42,6 +42,11 @@ def exit_test(code): exit_test(1) exit(code) +srvs = firegex.ph_get_services() +for ele in srvs: + if ele['name'] == args.service_name: + firegex.ph_delete_service(ele['service_id']) + #Create and start serivce service_id = firegex.ph_add_service(args.service_name, args.port, args.port+1, args.proto , "::1" if args.ipv6 else "127.0.0.1", "::1" if args.ipv6 else "127.0.0.1") if service_id: puts(f"Sucessfully created service {service_id} ✔", color=colors.green) diff --git a/tests/run_tests.sh b/tests/run_tests.sh index cfe00d9..8812f44 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -27,5 +27,10 @@ python3 ph_test.py -p $PASSWORD -m udp || ERROR=1 echo "Running Port Hijack UDP ipv6" python3 ph_test.py -p $PASSWORD -m udp -6 || ERROR=1 +if [[ "$ERROR" == "0" ]]; then + python3 benchmark.py -p $PASSWORD -r 5 -d 1 -s 10 || ERROR=1 +fi + + exit $ERROR diff --git a/tests/utils/firegexapi.py b/tests/utils/firegexapi.py index 2e878c5..1324923 100644 --- a/tests/utils/firegexapi.py +++ b/tests/utils/firegexapi.py @@ -101,6 +101,10 @@ class FiregexAPI: def nf_rename_service(self,service_id: str, newname: str): req = self.s.put(f"{self.address}api/nfregex/services/{service_id}/rename" , json={"name":newname}) return verify(req) + + def nf_settings_service(self,service_id: str, port: int, proto: str, ip_int: str, fail_open: bool): + req = self.s.put(f"{self.address}api/nfregex/services/{service_id}/settings" , json={"port":port, "proto":proto, "ip_int":ip_int, "fail_open":fail_open}) + return verify(req) def nf_get_service_regexes(self,service_id: str): req = self.s.get(f"{self.address}api/nfregex/services/{service_id}/regexes") @@ -127,9 +131,9 @@ class FiregexAPI: def nf_add_regex(self, service_id: str, regex: str, mode: str, active: bool, is_case_sensitive: bool): req = self.s.post(f"{self.address}api/nfregex/regexes" , json={"service_id": service_id, "regex": regex, "mode": mode, "active": active, "is_case_sensitive": is_case_sensitive}) return verify(req) - def nf_add_service(self, name: str, port: int, proto: str, ip_int: str): + def nf_add_service(self, name: str, port: int, proto: str, ip_int: str, fail_open: bool = False): req = self.s.post(f"{self.address}api/nfregex/services" , - json={"name":name,"port":port, "proto": proto, "ip_int": ip_int}) + json={"name":name,"port":port, "proto": proto, "ip_int": ip_int, "fail_open": fail_open}) return req.json()["service_id"] if verify(req) else False def nf_get_metrics(self):