From 072745cc06b8027a1df46047c947613df534de71 Mon Sep 17 00:00:00 2001 From: Domingo Dirutigliano Date: Mon, 3 Mar 2025 20:25:36 +0100 Subject: [PATCH] code push --- backend/binsrc/classes/nfqueue.cpp | 4 +- backend/binsrc/nfproxy.cpp | 2 +- backend/binsrc/pyproxy/pyproxy.cpp | 37 +- backend/binsrc/pyproxy/settings.cpp | 40 +- backend/binsrc/pyproxy/stream_ctx.cpp | 34 +- backend/modules/nfproxy/firegex.py | 2 +- fgex-lib/firegex/nfproxy/__init__.py | 25 +- .../{internals.py => internals/__init__.py} | 97 ++--- fgex-lib/firegex/nfproxy/internals/data.py | 190 +++++++++ .../firegex/nfproxy/internals/exceptions.py | 3 + fgex-lib/firegex/nfproxy/internals/models.py | 40 ++ fgex-lib/firegex/nfproxy/models/__init__.py | 28 ++ fgex-lib/firegex/nfproxy/models/http.py | 385 ++++++++++++++++++ fgex-lib/firegex/nfproxy/models/tcp.py | 113 +++++ fgex-lib/firegex/nfproxy/params.py | 79 ---- fgex-lib/requirements.txt | 3 +- frontend/src/components/ModalLog.tsx | 4 +- .../components/NFProxy/ExceptionWarning.tsx | 19 +- frontend/src/components/NFProxy/utils.ts | 74 ++++ frontend/src/components/NavBar/index.tsx | 7 +- frontend/src/pages/NFProxy/ServiceDetails.tsx | 17 +- frontend/src/pages/NFProxy/index.tsx | 23 +- 22 files changed, 1020 insertions(+), 206 deletions(-) rename fgex-lib/firegex/nfproxy/{internals.py => internals/__init__.py} (69%) create mode 100644 fgex-lib/firegex/nfproxy/internals/data.py create mode 100644 fgex-lib/firegex/nfproxy/internals/exceptions.py create mode 100644 fgex-lib/firegex/nfproxy/internals/models.py create mode 100644 fgex-lib/firegex/nfproxy/models/__init__.py create mode 100644 fgex-lib/firegex/nfproxy/models/http.py create mode 100644 fgex-lib/firegex/nfproxy/models/tcp.py delete mode 100644 fgex-lib/firegex/nfproxy/params.py diff --git a/backend/binsrc/classes/nfqueue.cpp b/backend/binsrc/classes/nfqueue.cpp index 36d4d9e..02de950 100644 --- a/backend/binsrc/classes/nfqueue.cpp +++ b/backend/binsrc/classes/nfqueue.cpp @@ -364,9 +364,9 @@ class PktRequest { #endif if (tcp && ack_seq_offset && packet.size() != _original_size){ if (is_input){ - ack_seq_offset->in += packet.size() - _original_size; + ack_seq_offset->in += data_size() - _data_original_size; }else{ - ack_seq_offset->out += packet.size() - _original_size; + ack_seq_offset->out += data_size() - _data_original_size; } } nfq_nlmsg_verdict_put(nlh_verdict, ntohl(packet_id), NF_ACCEPT ); diff --git a/backend/binsrc/nfproxy.cpp b/backend/binsrc/nfproxy.cpp index 1d44efc..cbb6c38 100644 --- a/backend/binsrc/nfproxy.cpp +++ b/backend/binsrc/nfproxy.cpp @@ -72,7 +72,7 @@ PyFilterResponse { Every time a packet is received, the packet handler will execute the following code: ```python -firegex.nfproxy.internals.handle_packet() +firegex.nfproxy.internals.handle_packet(globals()) ``` The TCP stream is sorted by libtins using c++ code, but the c++ code is not responsible for buffering the stream, only for sorting the packets diff --git a/backend/binsrc/pyproxy/pyproxy.cpp b/backend/binsrc/pyproxy/pyproxy.cpp index 93e7714..2062866 100644 --- a/backend/binsrc/pyproxy/pyproxy.cpp +++ b/backend/binsrc/pyproxy/pyproxy.cpp @@ -34,7 +34,7 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue { public: stream_ctx sctx; StreamFollower follower; - PyThreadState * gtstate = nullptr; + PyThreadState * tstate = nullptr; PyInterpreterConfig py_thread_config = { .use_main_obmalloc = 0, .allow_fork = 0, .allow_exec = 0, .allow_threads = 1, .allow_daemon_threads = 0, .check_multi_interp_extensions = 1, .gil = PyInterpreterConfig_OWN_GIL, }; -
PyThreadState *tstate = NULL; NfQueue::PktRequest* pkt; NfQueue::tcp_ack_seq_ctx* current_tcp_ack = nullptr; + PyObject* handle_packet_code = nullptr; + void before_loop() override { PyStatus pystatus; // Create a new interpreter for the thread - gtstate = PyThreadState_New(PyInterpreterState_Main()); - PyEval_AcquireThread(gtstate); + tstate = PyThreadState_New(PyInterpreterState_Main()); + PyEval_AcquireThread(tstate); pystatus = Py_NewInterpreterFromConfig(&tstate, &py_thread_config); if(tstate == nullptr){ cerr << "[fatal] [main] Failed to create new interpreter" << endl; @@ -64,6 +65,12 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue { Py_ExitStatusException(pystatus); throw invalid_argument("Failed to create new interpreter (pystatus exc)"); } + + if(!PyGC_IsEnabled()){ + PyGC_Enable(); + } + + handle_packet_code = unmarshal_code(py_handle_packet_code); // Setting callbacks for the stream follower follower.new_stream_callback(bind(on_new_stream, placeholders::_1, this)); follower.stream_termination_callback(bind(on_stream_close, placeholders::_1, this)); @@ -100,11 +107,24 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue { if (compiled_code == nullptr){ stream.client_data_callback(nullptr); stream.server_data_callback(nullptr); + stream.ignore_client_data(); + stream.ignore_server_data(); return pkt->accept(); + }else{ + try{ + stream_match = new pyfilter_ctx(compiled_code, handle_packet_code); + }catch(invalid_argument& e){ + cerr << "[error] [filter_action] Failed to create the filter context" << endl; + print_exception_reason(); + sctx.clean_stream_by_id(pkt->sid); + stream.client_data_callback(nullptr); + stream.server_data_callback(nullptr); + stream.ignore_client_data(); + stream.ignore_server_data(); + return pkt->accept(); + } + sctx.streams_ctx.insert_or_assign(pkt->sid, stream_match); } - stream_match = new pyfilter_ctx(compiled_code); - Py_DECREF(compiled_code); - sctx.streams_ctx.insert_or_assign(pkt->sid, stream_match); }else{ stream_match = stream_search->second; } @@ -140,6 +160,8 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue { print_exception_reason(); sctx.clean_stream_by_id(pkt->sid); //Free the packet data + stream.ignore_client_data(); + stream.ignore_server_data(); stream.client_data_callback(nullptr); stream.server_data_callback(nullptr); return pkt->accept(); @@ -233,6 +255,7 @@ class PyProxyQueue: public NfQueue::ThreadNfQueue { PyEval_ReleaseThread(tstate); PyThreadState_Clear(tstate); PyThreadState_Delete(tstate); + Py_DECREF(handle_packet_code); sctx.clean(); } diff --git a/backend/binsrc/pyproxy/settings.cpp b/backend/binsrc/pyproxy/settings.cpp index 91b8cc2..6648994 100644 --- a/backend/binsrc/pyproxy/settings.cpp +++ b/backend/binsrc/pyproxy/settings.cpp @@ -16,9 +16,13 @@ namespace PyProxy { class PyCodeConfig; shared_ptr<PyCodeConfig> config; -PyObject* py_handle_packet_code = nullptr; UnixClientConnection control_socket; +PyObject* unmarshal_code(string encoded_code){ + if (encoded_code.empty()) return nullptr; + return PyMarshal_ReadObjectFromString(encoded_code.c_str(), encoded_code.size()); +} + class PyCodeConfig{ public: string encoded_code; @@ -32,22 +36,24 @@ class PyCodeConfig{ PyObject* glob = PyDict_New(); PyObject* result = PyEval_EvalCode(compiled_code, glob, glob); Py_DECREF(glob); - if (!result){ + if (PyErr_Occurred()){ PyErr_Print(); Py_DECREF(compiled_code); std::cerr << "[fatal] [main] Failed to execute the code" << endl; throw invalid_argument("Failed to execute the code, maybe an invalid filter code has been provided"); } -
Py_DECREF(result); + Py_XDECREF(result); PyObject* code_dump = PyMarshal_WriteObjectToString(compiled_code, 4); Py_DECREF(compiled_code); if (code_dump == nullptr){ - PyErr_Print(); + if (PyErr_Occurred()) + PyErr_Print(); std::cerr << "[fatal] [main] Failed to dump the code" << endl; throw invalid_argument("Failed to dump the code"); } if (!PyBytes_Check(code_dump)){ std::cerr << "[fatal] [main] Failed to dump the code" << endl; + Py_DECREF(code_dump); throw invalid_argument("Failed to dump the code"); } encoded_code = string(PyBytes_AsString(code_dump), PyBytes_Size(code_dump)); @@ -55,8 +61,7 @@ } PyObject* compiled_code(){ - if (encoded_code.empty()) return nullptr; - return PyMarshal_ReadObjectFromString(encoded_code.c_str(), encoded_code.size()); + return unmarshal_code(encoded_code); } PyCodeConfig(){} @@ -69,16 +74,27 @@ void init_control_socket(){ control_socket = UnixClientConnection(socket_path); } +string py_handle_packet_code; void init_handle_packet_code(){ - py_handle_packet_code = Py_CompileStringExFlags( - "firegex.nfproxy.internals.handle_packet()\n", "", + PyObject* compiled_code = Py_CompileStringExFlags( + "firegex.nfproxy.internals.handle_packet(globals())\n", "", Py_file_input, NULL, 2); - - if (py_handle_packet_code == nullptr){ - std::cerr << "[fatal] [main] Failed to compile the utility python code (strange behaviour, probably a bug)" << endl; - throw invalid_argument("Failed to compile the code"); + PyObject* code_dump = PyMarshal_WriteObjectToString(compiled_code, 4); + Py_DECREF(compiled_code); + if (code_dump == nullptr){ + if (PyErr_Occurred()) + PyErr_Print(); + std::cerr << "[fatal] [main] Failed to dump the code" << endl; + throw invalid_argument("Failed to dump the code"); } + if (!PyBytes_Check(code_dump)){ + std::cerr << "[fatal] [main] Failed to dump the code" << endl; + Py_DECREF(code_dump); + throw invalid_argument("Failed to dump the code"); + } + py_handle_packet_code = string(PyBytes_AsString(code_dump), PyBytes_Size(code_dump)); + Py_DECREF(code_dump); } }} diff --git a/backend/binsrc/pyproxy/stream_ctx.cpp b/backend/binsrc/pyproxy/stream_ctx.cpp index ba7db5c..f9e2cb4 100644 --- a/backend/binsrc/pyproxy/stream_ctx.cpp +++ b/backend/binsrc/pyproxy/stream_ctx.cpp @@ -55,11 +55,15 @@ typedef Tins::TCPIP::StreamIdentifier stream_id; struct pyfilter_ctx { PyObject * glob = nullptr; + PyObject * py_handle_packet = nullptr; - pyfilter_ctx(PyObject * compiled_code){ + pyfilter_ctx(PyObject * compiled_code, PyObject * handle_packet_code){ + py_handle_packet = handle_packet_code; + Py_INCREF(py_handle_packet); glob = PyDict_New(); PyObject* result = PyEval_EvalCode(compiled_code, glob, glob); - if (!result){ + Py_XDECREF(compiled_code); + if (PyErr_Occurred()){ PyErr_Print(); Py_XDECREF(glob); std::cerr << "[fatal] [main] Failed to execute the filter code" << endl; @@ -69,7 +73,10 @@ } ~pyfilter_ctx(){ + cerr << "[info] [pyfilter_ctx] Cleaning pyfilter_ctx" << endl; Py_DECREF(glob); + Py_DECREF(py_handle_packet); + PyGC_Collect(); } inline void set_item_to_glob(const char* key, PyObject* value){ @@ -82,14 +89,16 @@ void del_item_from_glob(const char* key){ if (PyDict_DelItemString(glob, key) != 0){ - PyErr_Print(); + if (PyErr_Occurred()) + PyErr_Print(); throw invalid_argument("Failed to delete item from dict"); } } inline void set_item_to_dict(PyObject* dict, const char* key, PyObject* value){ if (PyDict_SetItemString(dict, key, value) != 0){ - PyErr_Print(); + if (PyErr_Occurred()) +
PyErr_Print(); throw invalid_argument("Failed to set item to dict"); } Py_DECREF(value); } @@ -111,11 +120,18 @@ struct pyfilter_ctx { // Set packet info to the global context set_item_to_glob("__firegex_packet_info", packet_info); - PyObject * result = PyEval_EvalCode(py_handle_packet_code, glob, glob); + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] Calling python with a data of " << data.size() << endl; + #endif + PyObject * result = PyEval_EvalCode(py_handle_packet, glob, glob); + PyGC_Collect(); + #ifdef DEBUG + cerr << "[DEBUG] [handle_packet] End of python call" << endl; + #endif del_item_from_glob("__firegex_packet_info"); - Py_DECREF(packet_info); - if (!result){ + if (PyErr_Occurred()){ + cerr << "[error] [handle_packet] Failed to execute the filter code" << endl; PyErr_Print(); #ifdef DEBUG cerr << "[DEBUG] [handle_packet] Exception raised" << endl; #endif @@ -134,7 +150,9 @@ } if (!PyDict_Check(result)){ - PyErr_Print(); + if (PyErr_Occurred()){ + PyErr_Print(); + } #ifdef DEBUG cerr << "[DEBUG] [handle_packet] Result is not a dict" << endl; #endif diff --git a/backend/modules/nfproxy/firegex.py b/backend/modules/nfproxy/firegex.py index 22c189a..5febad6 100644 --- a/backend/modules/nfproxy/firegex.py +++ b/backend/modules/nfproxy/firegex.py @@ -78,7 +78,7 @@ class FiregexInterceptor: await run_func(self.outstrem_function, self.srv.id, line) async def _start_binary(self): - proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cpproxy") + proxy_binary_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../cpproxy")) self.process = await asyncio.create_subprocess_exec( proxy_binary_path, stdin=asyncio.subprocess.DEVNULL, stdout=asyncio.subprocess.PIPE, diff --git a/fgex-lib/firegex/nfproxy/__init__.py b/fgex-lib/firegex/nfproxy/__init__.py index 8948e4c..aaee250 100644 --- a/fgex-lib/firegex/nfproxy/__init__.py +++ b/fgex-lib/firegex/nfproxy/__init__.py @@ -1,23 +1,11 @@ import functools -from firegex.nfproxy.params import RawPacket -from enum import Enum - -class Action(Enum): - ACCEPT = 0 - DROP = 1 - REJECT = 2 - MANGLE = 3 - -class FullStreamAction(Enum): - FLUSH = 0 - ACCEPT = 1 - REJECT = 2 - DROP = 3 +from firegex.nfproxy.models import RawPacket, TCPInputStream, TCPOutputStream, TCPClientStream, TCPServerStream, TCPStreams +from firegex.nfproxy.internals.models import Action, FullStreamAction ACCEPT = Action.ACCEPT DROP = Action.DROP REJECT = Action.REJECT -MANGLE = Action.MANGLE +UNSTABLE_MANGLE = Action.MANGLE def pyfilter(func): """ @@ -45,8 +33,7 @@ def clear_pyfilter_registry(): pyfilter.registry.clear() __all__ = [ - "ACCEPT", "DROP", "REJECT", "MANGLE", "EXCEPTION", "INVALID", - "Action", "FullStreamAction", - "pyfilter", - "RawPacket" + "ACCEPT", "DROP", "REJECT", "UNSTABLE_MANGLE", + "Action", "FullStreamAction", "pyfilter", + "RawPacket", "TCPInputStream", "TCPOutputStream", "TCPClientStream", "TCPServerStream", "TCPStreams" ] \ No newline at end of file diff --git a/fgex-lib/firegex/nfproxy/internals.py b/fgex-lib/firegex/nfproxy/internals/__init__.py similarity index 69% rename from fgex-lib/firegex/nfproxy/internals.py rename to fgex-lib/firegex/nfproxy/internals/__init__.py index cfa9169..cf6d7a5 100644 --- a/fgex-lib/firegex/nfproxy/internals.py +++ b/fgex-lib/firegex/nfproxy/internals/__init__.py @@ -1,60 +1,23 @@ from inspect import signature -from firegex.nfproxy.params import RawPacket, NotReadyToRun -from firegex.nfproxy import Action, FullStreamAction -from dataclasses import dataclass, field
+from firegex.nfproxy.internals.models import Action, FullStreamAction +from firegex.nfproxy.internals.models import FilterHandler, PacketHandlerResult +import functools +from firegex.nfproxy.internals.data import DataStreamCtx +from firegex.nfproxy.internals.exceptions import NotReadyToRun +from firegex.nfproxy.internals.data import RawPacket -type_annotations_associations = { - "tcp": { - RawPacket: RawPacket.fetch_from_global - }, - "http": { - RawPacket: RawPacket.fetch_from_global - } -} - -@dataclass -class FilterHandler: - func: callable - name: str - params: dict[type, callable] - proto: str - -class internal_data: - filter_call_info: list[FilterHandler] = [] - stream: list[RawPacket] = [] - stream_size: int = 0 - stream_max_size: int = 1*8e20 - full_stream_action: str = "flush" - filter_glob: dict = {} - -@dataclass -class PacketHandlerResult: - glob: dict = field(repr=False) - action: Action = Action.ACCEPT - matched_by: str = None - mangled_packet: bytes = None - - def set_result(self) -> None: - self.glob["__firegex_pyfilter_result"] = { - "action": self.action.value, - "matched_by": self.matched_by, - "mangled_packet": self.mangled_packet - } - - def reset_result(self) -> None: - self.glob["__firegex_pyfilter_result"] = None - -def context_call(func, *args, **kargs): - internal_data.filter_glob["__firegex_tmp_args"] = args - internal_data.filter_glob["__firegex_tmp_kargs"] = kargs - internal_data.filter_glob["__firege_tmp_call"] = func - res = eval("__firege_tmp_call(*__firegex_tmp_args, **__firegex_tmp_kargs)", internal_data.filter_glob, internal_data.filter_glob) - del internal_data.filter_glob["__firegex_tmp_args"] - del internal_data.filter_glob["__firegex_tmp_kargs"] - del internal_data.filter_glob["__firege_tmp_call"] +def context_call(glob, func, *args, **kargs): + glob["__firegex_tmp_args"] = args + glob["__firegex_tmp_kargs"] = kargs + glob["__firegex_tmp_call"] = func + res = eval("__firegex_tmp_call(*__firegex_tmp_args, **__firegex_tmp_kargs)", glob, glob) + del glob["__firegex_tmp_args"] + del glob["__firegex_tmp_kargs"] + del glob["__firegex_tmp_call"] return res def generate_filter_structure(filters: list[str], proto:str, glob:dict) -> list[FilterHandler]: + from firegex.nfproxy.models import type_annotations_associations if proto not in type_annotations_associations.keys(): raise Exception("Invalid protocol") res = [] @@ -103,22 +66,27 @@ def get_filters_info(code:str, proto:str) -> list[FilterHandler]: def get_filter_names(code:str, proto:str) -> list[str]: return [ele.name for ele in get_filters_info(code, proto)] -def handle_packet() -> None: +def handle_packet(glob: dict) -> None: + internal_data = DataStreamCtx(glob) cache_call = {} # Cache of the data handler calls - pkt_info = RawPacket.fetch_from_global(internal_data.filter_glob) + pkt_info = RawPacket._fetch_packet(internal_data) + internal_data.current_pkt = pkt_info cache_call[RawPacket] = pkt_info final_result = Action.ACCEPT data_size = len(pkt_info.data) - result = PacketHandlerResult(internal_data.filter_glob) + result = PacketHandlerResult(glob) if internal_data.stream_size+data_size > internal_data.stream_max_size: match internal_data.full_stream_action: case FullStreamAction.FLUSH: internal_data.stream = [] internal_data.stream_size = 0 + for func in internal_data.flush_action_set: + func() case FullStreamAction.ACCEPT: result.action = Action.ACCEPT return result.set_result() @@ -138,17 +106,19 @@ mangled_packet = None for filter in
internal_data.filter_call_info: final_params = [] + skip_call = False for data_type, data_func in filter.params.items(): if data_type not in cache_call.keys(): try: - cache_call[data_type] = data_func(internal_data.filter_glob) + cache_call[data_type] = data_func(internal_data) except NotReadyToRun: cache_call[data_type] = None - if cache_call[data_type] is None: - continue # Parsing raised NotReadyToRun, skip filter + skip_call = True + break final_params.append(cache_call[data_type]) - - res = context_call(filter.func, *final_params) + if skip_call: + continue + res = context_call(glob, filter.func, *final_params) if res is None: continue #ACCEPTED @@ -168,8 +138,10 @@ return result.set_result() -def compile(glob:dict) -> None: - internal_data.filter_glob = glob +def compile(glob:dict) -> None: + internal_data = DataStreamCtx(glob) + + glob["print"] = functools.partial(print, flush = True) filters = glob["__firegex_pyfilter_enabled"] proto = glob["__firegex_proto"] @@ -187,3 +159,4 @@ internal_data.full_stream_action = FullStreamAction.FLUSH PacketHandlerResult(glob).reset_result() + diff --git a/fgex-lib/firegex/nfproxy/internals/data.py b/fgex-lib/firegex/nfproxy/internals/data.py new file mode 100644 index 0000000..0f4ff00 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/internals/data.py @@ -0,0 +1,190 @@ +from firegex.nfproxy.internals.models import FilterHandler +from typing import Callable + +class RawPacket: + """ + class representation of the nfqueue packet sent in this context by the c++ core + """ + + def __init__(self, + data: bytes, + raw_packet: bytes, + is_input: bool, + is_ipv6: bool, + is_tcp: bool, + l4_size: int, + ): + self.__data = bytes(data) + self.__raw_packet = bytes(raw_packet) + self.__is_input = bool(is_input) + self.__is_ipv6 = bool(is_ipv6) + self.__is_tcp = bool(is_tcp) + self.__l4_size = int(l4_size) + self.__raw_packet_header_size = len(self.__raw_packet)-self.__l4_size + + @property + def is_input(self) -> bool: + return self.__is_input + + @property + def is_ipv6(self) -> bool: + return self.__is_ipv6 + + @property + def is_tcp(self) -> bool: + return self.__is_tcp + + @property + def data(self) -> bytes: + return self.__data + + @property + def l4_size(self) -> int: + return self.__l4_size + + @property + def raw_packet_header_len(self) -> int: + return self.__raw_packet_header_size + + @property + def l4_data(self) -> bytes: + return self.__raw_packet[self.raw_packet_header_len:] + + @l4_data.setter + def l4_data(self, v:bytes): + if not isinstance(v, bytes): + raise Exception("Invalid data type, data MUST be of type bytes") + #if len(v) != self.__l4_size: + # raise Exception("Invalid data size, must be equal to the original packet header size (due to a technical limitation)") + self.__raw_packet = self.__raw_packet[:self.raw_packet_header_len]+v + self.__l4_size = len(v) + + @property + def raw_packet(self) -> bytes: + return self.__raw_packet + + @raw_packet.setter + def raw_packet(self, v:bytes): + if not isinstance(v, bytes): + raise Exception("Invalid data type, data MUST be of type bytes") + #if len(v) != len(self.__raw_packet): + # raise Exception("Invalid data size, must be equal to the original packet size (due to a technical limitation)") + if len(v) < self.raw_packet_header_len: + raise Exception("Invalid data size, must be greater than the original packet header size") + self.__raw_packet = v + self.__l4_size = len(v)-self.raw_packet_header_len + + @classmethod + def
_fetch_packet(cls, internal_data): + from firegex.nfproxy.internals.data import DataStreamCtx + if not isinstance(internal_data, DataStreamCtx): + if isinstance(internal_data, dict): + internal_data = DataStreamCtx(internal_data) + else: + raise Exception("Invalid data type, data MUST be of type DataStreamCtx or a glob dict") + + if "__firegex_packet_info" not in internal_data.filter_glob.keys(): + raise Exception("Packet info not found") + return cls(**internal_data.filter_glob["__firegex_packet_info"]) + + def __repr__(self): + return f"RawPacket(data={self.data}, raw_packet={self.raw_packet}, is_input={self.is_input}, is_ipv6={self.is_ipv6}, is_tcp={self.is_tcp}, l4_size={self.l4_size})" + + +class DataStreamCtx: + + def __init__(self, glob: dict): + if "__firegex_pyfilter_ctx" not in glob.keys(): + glob["__firegex_pyfilter_ctx"] = {} + self.__data = glob["__firegex_pyfilter_ctx"] + self.filter_glob = glob + + @property + def filter_call_info(self) -> list[FilterHandler]: + if "filter_call_info" not in self.__data.keys(): + self.__data["filter_call_info"] = [] + return self.__data.get("filter_call_info") + + @filter_call_info.setter + def filter_call_info(self, v: list[FilterHandler]): + self.__data["filter_call_info"] = v + + @property + def stream(self) -> list[RawPacket]: + if "stream" not in self.__data.keys(): + self.__data["stream"] = [] + return self.__data.get("stream") + + @stream.setter + def stream(self, v: list[RawPacket]): + self.__data["stream"] = v + + @property + def stream_size(self) -> int: + if "stream_size" not in self.__data.keys(): + self.__data["stream_size"] = 0 + return self.__data.get("stream_size") + + @stream_size.setter + def stream_size(self, v: int): + self.__data["stream_size"] = v + + @property + def stream_max_size(self) -> int: + if "stream_max_size" not in self.__data.keys(): + self.__data["stream_max_size"] = 1*8e20 + return self.__data.get("stream_max_size") + + @stream_max_size.setter + def stream_max_size(self, v: int): + self.__data["stream_max_size"] = v + + @property + def full_stream_action(self) -> str: + if "full_stream_action" not in self.__data.keys(): + self.__data["full_stream_action"] = "flush" + return self.__data.get("full_stream_action") + + @full_stream_action.setter + def full_stream_action(self, v: str): + self.__data["full_stream_action"] = v + + @property + def current_pkt(self) -> RawPacket: + return self.__data.get("current_pkt", None) + + @current_pkt.setter + def current_pkt(self, v: RawPacket): + self.__data["current_pkt"] = v + + @property + def http_data_objects(self) -> dict: + if "http_data_objects" not in self.__data.keys(): + self.__data["http_data_objects"] = {} + return self.__data.get("http_data_objects") + + @http_data_objects.setter + def http_data_objects(self, v: dict): + self.__data["http_data_objects"] = v + + @property + def save_http_data_in_streams(self) -> bool: + if "save_http_data_in_streams" not in self.__data.keys(): + self.__data["save_http_data_in_streams"] = False + return self.__data.get("save_http_data_in_streams") + + @save_http_data_in_streams.setter + def save_http_data_in_streams(self, v: bool): + self.__data["save_http_data_in_streams"] = v + + @property + def flush_action_set(self) -> set[Callable]: + if "flush_action_set" not in self.__data.keys(): + self.__data["flush_action_set"] = set() + return self.__data.get("flush_action_set") + + @flush_action_set.setter + def flush_action_set(self, v: set[Callable]): + self.__data["flush_action_set"] = v + + diff --git
a/fgex-lib/firegex/nfproxy/internals/exceptions.py b/fgex-lib/firegex/nfproxy/internals/exceptions.py new file mode 100644 index 0000000..6084bf1 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/internals/exceptions.py @@ -0,0 +1,3 @@ + +class NotReadyToRun(Exception): + "Raise this exception when the stream state is not ready to parse this object; the filter call will be skipped" diff --git a/fgex-lib/firegex/nfproxy/internals/models.py b/fgex-lib/firegex/nfproxy/internals/models.py new file mode 100644 index 0000000..86c1819 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/internals/models.py @@ -0,0 +1,40 @@ +from dataclasses import dataclass, field +from enum import Enum + +class Action(Enum): + ACCEPT = 0 + DROP = 1 + REJECT = 2 + MANGLE = 3 + +class FullStreamAction(Enum): + FLUSH = 0 + ACCEPT = 1 + REJECT = 2 + DROP = 3 + +@dataclass +class FilterHandler: + func: callable + name: str + params: dict[type, callable] + proto: str + +@dataclass +class PacketHandlerResult: + glob: dict = field(repr=False) + action: Action = Action.ACCEPT + matched_by: str = None + mangled_packet: bytes = None + + def set_result(self) -> None: + self.glob["__firegex_pyfilter_result"] = { + "action": self.action.value, + "matched_by": self.matched_by, + "mangled_packet": self.mangled_packet + } + + def reset_result(self) -> None: + self.glob["__firegex_pyfilter_result"] = None + + diff --git a/fgex-lib/firegex/nfproxy/models/__init__.py b/fgex-lib/firegex/nfproxy/models/__init__.py new file mode 100644 index 0000000..dc28830 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/models/__init__.py @@ -0,0 +1,28 @@ +from firegex.nfproxy.models.tcp import TCPInputStream, TCPOutputStream, TCPClientStream, TCPServerStream, TCPStreams +from firegex.nfproxy.models.http import HttpRequest, HttpResponse, HttpRequestHeader, HttpResponseHeader +from firegex.nfproxy.internals.data import RawPacket + +type_annotations_associations = { + "tcp": { + RawPacket: RawPacket._fetch_packet, + TCPInputStream: TCPInputStream._fetch_packet, + TCPOutputStream: TCPOutputStream._fetch_packet, + TCPStreams: TCPStreams._fetch_packet, + }, + "http": { + RawPacket: RawPacket._fetch_packet, + TCPInputStream: TCPInputStream._fetch_packet, + TCPOutputStream: TCPOutputStream._fetch_packet, + TCPStreams: TCPStreams._fetch_packet, + HttpRequest: HttpRequest._fetch_packet, + HttpResponse: HttpResponse._fetch_packet, + HttpRequestHeader: HttpRequestHeader._fetch_packet, + HttpResponseHeader: HttpResponseHeader._fetch_packet, + } +} + +__all__ = [ + "RawPacket", + "TCPInputStream", "TCPOutputStream", "TCPClientStream", "TCPServerStream", "TCPStreams", + "HttpRequest", "HttpResponse", "HttpRequestHeader", "HttpResponseHeader", +] \ No newline at end of file diff --git a/fgex-lib/firegex/nfproxy/models/http.py b/fgex-lib/firegex/nfproxy/models/http.py new file mode 100644 index 0000000..bfe1c05 --- /dev/null +++ b/fgex-lib/firegex/nfproxy/models/http.py @@ -0,0 +1,385 @@ +import pyllhttp +from firegex.nfproxy.internals.exceptions import NotReadyToRun +from firegex.nfproxy.internals.data import DataStreamCtx + +class InternalCallbackHandler(): + + url: str|None = None + _url_buffer: bytes = b"" + headers: dict[str, str] = {} + _header_fields: dict[str, str] = {} + has_begun: bool = False + body: bytes = None + _body_buffer: bytes = b"" + headers_complete: bool = False + message_complete: bool = False + status: str|None = None + _status_buffer: bytes = b"" + current_header_field = None + current_header_value = None + _save_body = True + + def on_message_begin(self):
self.has_begun = True + + def on_url(self, url): + self._url_buffer += url + + def on_url_complete(self): + self.url = self._url_buffer.decode(errors="ignore") + self._url_buffer = None + + def on_header_field(self, field): + if self.current_header_field is None: + self.current_header_field = bytearray(field) + else: + self.current_header_field += field + + def on_header_field_complete(self): + pass + + def on_header_value(self, value): + if self.current_header_value is None: + self.current_header_value = bytearray(value) + else: + self.current_header_value += value + + def on_header_value_complete(self): + if self.current_header_value is not None and self.current_header_field is not None: + self._header_fields[self.current_header_field.decode(errors="ignore")] = self.current_header_value.decode(errors="ignore") + self.current_header_field = None + self.current_header_value = None + + def on_headers_complete(self): + self.headers_complete = True + self.headers = self._header_fields + self._header_fields = {} + self.current_header_field = None + self.current_header_value = None + + def on_body(self, body: bytes): + if self._save_body: + self._body_buffer += body + + def on_message_complete(self): + self.body = self._body_buffer + self._body_buffer = b"" + self.message_complete = True + + def on_status(self, status: bytes): + self._status_buffer += status + + def on_status_complete(self): + self.status = self._status_buffer.decode(errors="ignore") + self._status_buffer = b"" + + @property + def keep_alive(self) -> bool: + return self.should_keep_alive + + @property + def should_upgrade(self) -> bool: + return self.is_upgrading + + @property + def http_version(self) -> str: + return f"{self.major}.{self.minor}" + + @property + def method_parsed(self) -> str: + return self.method.decode(errors="ignore") + + @property + def content_length_parsed(self) -> int: + return self.content_length + + +class InternalHttpRequest(InternalCallbackHandler, pyllhttp.Request): + def __init__(self): + super(pyllhttp.Request, self).__init__() + super(InternalCallbackHandler, self).__init__() + +class InternalHttpResponse(InternalCallbackHandler, pyllhttp.Response): + def __init__(self): + super(pyllhttp.Response, self).__init__() + super(InternalCallbackHandler, self).__init__() + +class InternalBasicHttpMetaClass: + + def __init__(self): + self._parser: InternalHttpRequest|InternalHttpResponse + self._headers_were_set = False + self.stream = b"" + self.raised_error = False + + @property + def url(self) -> str|None: + return self._parser.url + + @property + def headers(self) -> dict[str, str]: + return self._parser.headers + + @property + def has_begun(self) -> bool: + return self._parser.has_begun + + @property + def body(self) -> bytes: + return self._parser.body + + @property + def headers_complete(self) -> bool: + return self._parser.headers_complete + + @property + def message_complete(self) -> bool: + return self._parser.message_complete + + @property + def http_version(self) -> str: + return self._parser.http_version + + @property + def keep_alive(self) -> bool: + return self._parser.keep_alive + + @property + def should_upgrade(self) -> bool: + return self._parser.should_upgrade + + @property + def content_length(self) -> int|None: + return self._parser.content_length_parsed + + @property + def method(self) -> str|None: + return self._parser.method_parsed + + def _fetch_current_packet(self, internal_data: DataStreamCtx): + # TODO: if an error is triggered
should I reject the connection? + if internal_data.save_http_data_in_streams: # This is a websocket upgrade! + self.stream += internal_data.current_pkt.data + else: + try: + self._parser.execute(internal_data.current_pkt.data) + if self._parser.headers_complete and len(self._parser._body_buffer) == self._parser.content_length_parsed: + self._parser.on_message_complete() + except Exception as e: + self.raised_error = True + raise e + + # Called the first time once the headers are complete, and a second time when the message is complete + def _callable_checks(self, internal_data: DataStreamCtx): + if self._parser.headers_complete and not self._headers_were_set: + self._headers_were_set = True + return True + return self._parser.message_complete or internal_data.save_http_data_in_streams + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return True + + def _trigger_remove_data(self, internal_data: DataStreamCtx): + return self.message_complete + + @classmethod + def _fetch_packet(cls, internal_data: DataStreamCtx): + if internal_data.current_pkt is None or internal_data.current_pkt.is_tcp is False: + raise NotReadyToRun() + + datahandler:InternalBasicHttpMetaClass = internal_data.http_data_objects.get(cls, None) + if datahandler is None or datahandler.raised_error: + datahandler = cls() + internal_data.http_data_objects[cls] = datahandler + + if not datahandler._before_fetch_callable_checks(internal_data): + raise NotReadyToRun() + datahandler._fetch_current_packet(internal_data) + if not datahandler._callable_checks(internal_data): + raise NotReadyToRun() + + if datahandler.should_upgrade: + internal_data.save_http_data_in_streams = True + + if datahandler._trigger_remove_data(internal_data): + if internal_data.http_data_objects.get(cls): + del internal_data.http_data_objects[cls] + + return datahandler + +class HttpRequest(InternalBasicHttpMetaClass): + def __init__(self): + super().__init__() + # These will be used in the base class + self._parser: InternalHttpRequest = InternalHttpRequest() + self._headers_were_set = False + + @property + def method(self) -> bytes: + return self._parser.method + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return internal_data.current_pkt.is_input + + def __repr__(self): + return f"<HttpRequest method={self.method} url={self.url}>" + +class HttpResponse(InternalBasicHttpMetaClass): + def __init__(self): + super().__init__() + self._parser: InternalHttpResponse = InternalHttpResponse() + self._headers_were_set = False + + @property + def status_code(self) -> str|None: + return self._parser.status + + def _before_fetch_callable_checks(self, internal_data: DataStreamCtx): + return not internal_data.current_pkt.is_input + + def __repr__(self): + return f"<HttpResponse status_code={self.status_code}>" + +class HttpRequestHeader(HttpRequest): + def __init__(self): + super().__init__() + self._parser._save_body = False + + def _callable_checks(self, internal_data: DataStreamCtx): + if self._parser.headers_complete and not self._headers_were_set: + self._headers_were_set = True + return True + return False + +class HttpResponseHeader(HttpResponse): + def __init__(self): + super().__init__() + self._parser._save_body = False + + def _callable_checks(self, internal_data: DataStreamCtx): + if self._parser.headers_complete and not self._headers_were_set: + self._headers_were_set = True + return True + return False + +""" +#TODO include this?
+ +import codecs + +# Null bytes; no need to recreate these on each call to guess_json_utf +_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 +_null2 = _null * 2 +_null3 = _null * 3 + +def guess_json_utf(data): + "" + :rtype: str + "" + # JSON always starts with two ASCII characters, so detection is as + # easy as counting the nulls and from their location and count + # determine the encoding. Also detect a BOM, if present. + sample = data[:4] + if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): + return "utf-32" # BOM included + if sample[:3] == codecs.BOM_UTF8: + return "utf-8-sig" # BOM included, MS style (discouraged) + if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): + return "utf-16" # BOM included + nullcount = sample.count(_null) + if nullcount == 0: + return "utf-8" + if nullcount == 2: + if sample[::2] == _null2: # 1st and 3rd are null + return "utf-16-be" + if sample[1::2] == _null2: # 2nd and 4th are null + return "utf-16-le" + # Did not detect 2 valid UTF-16 ascii-range characters + if nullcount == 3: + if sample[:3] == _null3: + return "utf-32-be" + if sample[1:] == _null3: + return "utf-32-le" + # Did not detect a valid UTF-32 ascii-range character + return None + +from http_parser.pyparser import HttpParser +import json +from urllib.parse import parse_qsl +from dataclasses import dataclass + +@dataclass +class HttpMessage(): + fragment: str + headers: dict + method: str + parameters: dict + path: str + query_string: str + raw_body: bytes + status_code: int + url: str + version: str + +class HttpMessageParser(HttpParser): + def __init__(self, data:bytes, decompress_body=True): + super().__init__(decompress = decompress_body) + self.execute(data, len(data)) + self._parameters = {} + try: + self._parse_parameters() + except Exception as e: + print("Error in parameters parsing:", data) + print("Exception:", str(e)) + + def get_raw_body(self): + return b"\r\n".join(self._body) + + def _parse_query_string(self, raw_string): + parameters = parse_qsl(raw_string) + for key,value in parameters: + try: + key = key.decode() + value = value.decode() + except: + pass + if self._parameters.get(key): + if isinstance(self._parameters[key], list): + self._parameters[key].append(value) + else: + self._parameters[key] = [self._parameters[key], value] + else: + self._parameters[key] = value + + def _parse_parameters(self): + if self._method == "POST": + body = self.get_raw_body() + if len(body) == 0: + return + content_type = self.get_headers().get("Content-Type") + if not content_type or "x-www-form-urlencoded" in content_type: + try: + self._parse_query_string(body.decode()) + except: + pass + elif "json" in content_type: + self._parameters = json.loads(body) + elif self._method == "GET": + self._parse_query_string(self._query_string) + + def get_parameters(self): + ""returns parameters parsed from query string or body"" + return self._parameters + + def get_version(self): + if self._version: + return ".".join([str(x) for x in self._version]) + return None + + def to_message(self): + return HttpMessage(self._fragment, self._headers, self._method, + self._parameters, self._path, self._query_string, + self.get_raw_body(), self._status_code, + self._url, self.get_version() + ) +""" \ No newline at end of file diff --git a/fgex-lib/firegex/nfproxy/models/tcp.py b/fgex-lib/firegex/nfproxy/models/tcp.py new file mode 100644 index 0000000..e18671a --- /dev/null +++ b/fgex-lib/firegex/nfproxy/models/tcp.py @@ -0,0 +1,113 @@ +from firegex.nfproxy.internals.data import 
DataStreamCtx +from firegex.nfproxy.internals.exceptions import NotReadyToRun + +class TCPStreams: + """ + This data model assembles the TCP streams from the input and output data. + Functions that use this data model will be called when: + - The packet is TCP + - At least 1 packet has been sent + """ + + def __init__(self, + input_data: bytes, + output_data: bytes, + is_ipv6: bool, + ): + self.__input_data = bytes(input_data) + self.__output_data = bytes(output_data) + self.__is_ipv6 = bool(is_ipv6) + + @property + def input_data(self) -> bytes: + return self.__input_data + + @property + def output_data(self) -> bytes: + return self.__output_data + + @property + def is_ipv6(self) -> bool: + return self.__is_ipv6 + + @classmethod + def _fetch_packet(cls, internal_data:DataStreamCtx): + + if internal_data.current_pkt is None or internal_data.current_pkt.is_tcp is False: + raise NotReadyToRun() + return cls( + input_data=b"".join([ele.data for ele in internal_data.stream if ele.is_input]), + output_data=b"".join([ele.data for ele in internal_data.stream if not ele.is_input]), + is_ipv6=internal_data.current_pkt.is_ipv6, + ) + + +class TCPInputStream: + """ + This data model assembles the TCP input stream from the data sent by the client. + Functions that use this data model will be called when: + - The packet is TCP + - At least 1 packet has been sent + - A new client packet has been received + """ + def __init__(self, + data: bytes, + is_ipv6: bool, + ): + self.__data = bytes(data) + self.__is_ipv6 = bool(is_ipv6) + + @property + def data(self) -> bytes: + return self.__data + + @property + def is_ipv6(self) -> bool: + return self.__is_ipv6 + + @classmethod + def _fetch_packet(cls, internal_data:DataStreamCtx): + if internal_data.current_pkt is None or internal_data.current_pkt.is_tcp is False or internal_data.current_pkt.is_input is False: + raise NotReadyToRun() + return cls( + data=b"".join([ele.data for ele in internal_data.stream if ele.is_input]), + is_ipv6=internal_data.current_pkt.is_ipv6, + ) + +TCPClientStream = TCPInputStream + +class TCPOutputStream: + """ + This data model assembles the TCP output stream from the data sent by the server.
+ Functions that use this data model will be called when: + - The packet is TCP + - At least 1 packet has been sent + - A new server packet has been sent + """ + + + def __init__(self, + data: bytes, + is_ipv6: bool, + ): + self.__data = bytes(data) + self.__is_ipv6 = bool(is_ipv6) + + @property + def data(self) -> bytes: + return self.__data + + @property + def is_ipv6(self) -> bool: + return self.__is_ipv6 + + @classmethod + def _fetch_packet(cls, internal_data:DataStreamCtx): + if internal_data.current_pkt is None or internal_data.current_pkt.is_tcp is False or internal_data.current_pkt.is_input is True: + raise NotReadyToRun() + return cls( + data=b"".join([ele.data for ele in internal_data.stream if not ele.is_input]), + is_ipv6=internal_data.current_pkt.is_ipv6, + ) + +TCPServerStream = TCPOutputStream diff --git a/fgex-lib/firegex/nfproxy/params.py b/fgex-lib/firegex/nfproxy/params.py deleted file mode 100644 index 025ff3e..0000000 --- a/fgex-lib/firegex/nfproxy/params.py +++ /dev/null @@ -1,79 +0,0 @@ - -class NotReadyToRun(Exception): # raise this exception if the stream state is not ready to parse this object, the call will be skipped - pass - -class RawPacket: - def __init__(self, - data: bytes, - raw_packet: bytes, - is_input: bool, - is_ipv6: bool, - is_tcp: bool, - l4_size: int, - ): - self.__data = bytes(data) - self.__raw_packet = bytes(raw_packet) - self.__is_input = bool(is_input) - self.__is_ipv6 = bool(is_ipv6) - self.__is_tcp = bool(is_tcp) - self.__l4_size = int(l4_size) - self.__raw_packet_header_size = len(self.__raw_packet)-self.__l4_size - - @property - def is_input(self) -> bool: - return self.__is_input - - @property - def is_ipv6(self) -> bool: - return self.__is_ipv6 - - @property - def is_tcp(self) -> bool: - return self.__is_tcp - - @property - def data(self) -> bytes: - return self.__data - - @property - def l4_size(self) -> int: - return self.__l4_size - - @property - def raw_packet_header_len(self) -> int: - return self.__raw_packet_header_size - - @property - def l4_data(self) -> bytes: - return self.__raw_packet[self.raw_packet_header_len:] - - @l4_data.setter - def l4_data(self, v:bytes): - if not isinstance(v, bytes): - raise Exception("Invalid data type, data MUST be of type bytes") - #if len(v) != self.__l4_size: - # raise Exception("Invalid data size, must be equal to the original packet header size (due to a technical limitation)") - self.__raw_packet = self.__raw_packet[:self.raw_packet_header_len]+v - self.__l4_size = len(v) - - @property - def raw_packet(self) -> bytes: - return self.__raw_packet - - @raw_packet.setter - def raw_packet(self, v:bytes): - if not isinstance(v, bytes): - raise Exception("Invalid data type, data MUST be of type bytes") - #if len(v) != len(self.__raw_packet): - # raise Exception("Invalid data size, must be equal to the original packet size (due to a technical limitation)") - if len(v) < self.raw_packet_header_len: - raise Exception("Invalid data size, must be greater than the original packet header size") - self.__raw_packet = v - self.__l4_size = len(v)-self.raw_packet_header_len - - @classmethod - def fetch_from_global(cls, glob): - if "__firegex_packet_info" not in glob.keys(): - raise Exception("Packet info not found") - return cls(**glob["__firegex_packet_info"]) - diff --git a/fgex-lib/requirements.txt b/fgex-lib/requirements.txt index f7771d8..2cad7f7 100644 --- a/fgex-lib/requirements.txt +++ b/fgex-lib/requirements.txt @@ -7,4 +7,5 @@ textual==2.1.0 python-socketio[client]==5.12.1 fgex orjson - +httptools +pyllhttp diff --git
a/frontend/src/components/ModalLog.tsx b/frontend/src/components/ModalLog.tsx index d57c515..4c07760 100644 --- a/frontend/src/components/ModalLog.tsx +++ b/frontend/src/components/ModalLog.tsx @@ -10,8 +10,8 @@ export const ModalLog = ( } ) => { return - - {data} + + {data} } \ No newline at end of file diff --git a/frontend/src/components/NFProxy/ExceptionWarning.tsx b/frontend/src/components/NFProxy/ExceptionWarning.tsx index 64886b3..45115d1 100644 --- a/frontend/src/components/NFProxy/ExceptionWarning.tsx +++ b/frontend/src/components/NFProxy/ExceptionWarning.tsx @@ -2,6 +2,7 @@ import { IoIosWarning } from "react-icons/io" import { socketio, WARNING_NFPROXY_TIME_LIMIT } from "../../js/utils" import { Tooltip } from "@mantine/core" import { useEffect, useState } from "react" export const ExceptionWarning = ({ service_id }: { service_id: string }) => { @@ -17,9 +18,25 @@ export const ExceptionWarning = ({ service_id }: { service_id: string }) => { } }, []) + const [_time, setTime] = useState(new Date()); + + useEffect(() => { + const interval = setInterval(() => { + setTime(new Date()); + }, 1000); + + return () => clearInterval(interval); + }, []); + + const deltaTime = new Date().getTime()-lastExceptionTimestamp + const minutes = Math.floor(deltaTime/(1000*60)) + const seconds = Math.floor(deltaTime/1000)%60 + + const deltaStringTime = `${minutes.toString().length>1?minutes:"0"+minutes}:${seconds.toString().length>1?seconds:"0"+seconds}` + return <> {(new Date().getTime()-lastExceptionTimestamp <= WARNING_NFPROXY_TIME_LIMIT)? - + :null} diff --git a/frontend/src/components/NFProxy/utils.ts b/frontend/src/components/NFProxy/utils.ts index f5287ca..55ddec6 100644 --- a/frontend/src/components/NFProxy/utils.ts +++ b/frontend/src/components/NFProxy/utils.ts @@ -96,3 +96,77 @@ export const nfproxy = { return status === "ok"?undefined:status } } + + +export const EXAMPLE_PYFILTER = `# This is an example of a filter file for the http protocol + +# From here we can import the DataTypes that we want to use: +# The data type must be specified in the filter functions +# and will also internally be used to decide when to call each filter and how to aggregate data +from firegex.nfproxy import RawPacket + +# global context in this execution is dedicated to a single TCP stream +# - This code will be executed once at the TCP stream start +# - The filter will be called for each packet in the stream +# - You can store any data you need in the global context, but storing too much data could be dangerous +# - At the end of the stream the global context will be destroyed + +from firegex.nfproxy import pyfilter +# pyfilter is a decorator: it makes the function an effective filter; the function parameters must have a specified type + +from firegex.nfproxy import REJECT, ACCEPT, UNSTABLE_MANGLE, DROP +# - The filter must return one of the following values: +# - ACCEPT: The packet will be accepted +# - REJECT: The packet will be rejected (a mechanism will be activated to send a FIN packet and drop all further data in the stream) +# - UNSTABLE_MANGLE: The packet will be mangled and accepted +# - DROP: All the packets in this stream will be simply dropped + +# If you want, you can use print to debug your filters, but this could slow down the filter + +# Filter names must be unique and are specified by the name of the function wrapped by the decorator +@pyfilter +# This function will handle only a RawPacket object, this is the
lowest level of the packet abstraction +def strange_filter(packet:RawPacket): + # Mangling packets can be dangerous, due to the instability of the internal TCP state caused by the mangling done by the filter below + # Also, it is not guaranteed that l4_data is the same as the packet data: + # packet data is the assembled TCP stream, l4_data is the TCP payload of the packet in the nfqueue + # Out-of-order TCP packets are accepted by default, and python is not called in this case + # For this reason mangling is only available for RawPacket: higher level data abstractions will be read-only + if b"TEST_MANGLING" in packet.l4_data: + # It's possible to change the raw_packet and l4_data values to mangle the packet; data is immutable instead + packet.l4_data = packet.l4_data.replace(b"TEST", b"UNSTABLE") + return UNSTABLE_MANGLE + # Drops the traffic + if b"BAD DATA 1" in packet.data: + return DROP + # Rejects the traffic + if b"BAD DATA 2" in packet.data: + return REJECT + # Accepts the traffic (default if None is returned) + return ACCEPT + +# Example with a higher level of abstraction +from firegex.nfproxy.models import HttpRequest +@pyfilter +def http_filter(http:HttpRequest): + if http.method == "GET" and "test" in http.url: + return REJECT + +# ADVANCED OPTIONS +# You can specify some additional options for the stream management +# pyproxy will automatically store all the packets (already ordered by the c++ binary): +# +# If the stream is too big, you can specify what actions to take: +# This can be done by defining some variables in the global context +# - FGEX_STREAM_MAX_SIZE: The maximum size of the stream in bytes (default 1MB) +# NOTE: the stream size is calculated as the sum of the sizes of the packets in the stream (both directions) +# - FGEX_FULL_STREAM_ACTION: The action to take when the stream is full +# - FLUSH: Flush the stream and continue to acquire new packets (default) +# - DROP: Drop the next stream packets - like a DROP action by a filter +# - REJECT: Reject the stream and close the connection - like a REJECT action by a filter +# - ACCEPT: Stop calling pyfilters and accept the traffic + +# Example of a global context +FGEX_STREAM_MAX_SIZE = 4096 +FGEX_FULL_STREAM_ACTION = REJECT +# This could be an ideal configuration if we expect to normally have streams with a maximum size of 4KB of traffic +` diff --git a/frontend/src/components/NavBar/index.tsx b/frontend/src/components/NavBar/index.tsx index 68a4f5f..ca00a25 100644 --- a/frontend/src/components/NavBar/index.tsx +++ b/frontend/src/components/NavBar/index.tsx @@ -37,9 +37,14 @@ export default function NavBar() { } /> - } /> } /> } /> + + Experimental Features 🧪 + + + + } /> diff --git a/frontend/src/pages/NFProxy/ServiceDetails.tsx b/frontend/src/pages/NFProxy/ServiceDetails.tsx index d642541..40edf80 100644 --- a/frontend/src/pages/NFProxy/ServiceDetails.tsx +++ b/frontend/src/pages/NFProxy/ServiceDetails.tsx @@ -3,7 +3,7 @@ import { Navigate, useNavigate, useParams } from 'react-router-dom'; import { Badge, Divider, Menu } from '@mantine/core'; import { useEffect, useState } from 'react'; import { FaFilter, FaPencilAlt, FaPlay, FaStop } from 'react-icons/fa'; -import { nfproxy, nfproxyServiceFilterCodeQuery, nfproxyServicePyfiltersQuery, nfproxyServiceQuery, serviceQueryKey } from '../../components/NFProxy/utils'; +import { EXAMPLE_PYFILTER, nfproxy, nfproxyServiceFilterCodeQuery, nfproxyServicePyfiltersQuery, nfproxyServiceQuery, serviceQueryKey } from '../../components/NFProxy/utils'; import { MdDoubleArrow } from "react-icons/md" import YesNoModal from '../../components/YesNoModal';
import { errorNotify, isMediumScreen, okNotify, regex_ipv4, socketio } from '../../js/utils'; @@ -45,17 +45,20 @@ export default function ServiceDetailsNFProxy() { useEffect(()=>{ if (srv){ if (openLogModal){ + logDataSetters.setState([]) socketio.emit("nfproxy-outstream-join", { service: srv }); socketio.on(`nfproxy-outstream-${srv}`, (data) => { logDataSetters.append(data) }); }else{ - logDataSetters.setState([]) socketio.emit("nfproxy-outstream-leave", { service: srv }); + socketio.off(`nfproxy-outstream-${srv}`); + logDataSetters.setState([]) } return () => { - logDataSetters.setState([]) socketio.emit("nfproxy-outstream-leave", { service: srv }); + socketio.off(`nfproxy-outstream-${srv}`); + logDataSetters.setState([]) } } }, [openLogModal, srv]) @@ -199,11 +202,13 @@ export default function ServiceDetailsNFProxy() { {(!filtersList.data || filtersList.data.length == 0)?<> - No filters found! Edit the proxy file + No filters found! To edit the proxy file, install the firegex client:<Space w="xs" /><Code mb={-4} >pip install fgex</Code> - Install the firegex client:<Space w="xs" /><Code mb={-4} >pip install fgex</Code> + Then create a new filter file with the following syntax and upload it here (using the button above) - Then run the command:<Space w="xs" /><Code mb={-4} >fgex nfproxy</Code> + Before uploading the filter, you can test it using the fgex command installed by the python lib + + :<>{filtersList.data?.map( (filterInfo) => )} } {(services.data && services.data?.length > 0)?services.data.map( srv => { navigator("/nfproxy/"+srv.service_id) - }} />):<> No services found! Add one clicking the "+" buttons - + }} />):<> + + + Netfilter proxy is a simulated proxy written in python with a c++ core + + Filters are created using a simple python syntax; in fact, the first thing you need to do is to install the firegex lib:<Space w="xs" /><Code mb={-4} >pip install firegex</Code> + + Then you can create a new service and write custom filters for the service + + Below there is a description and an example of how a pyfilter is composed (this example is replicated in every empty service) + + + + Add your first service + setOpen(true)} size="xl" radius="md" variant="filled"> + } }
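
For reference, a minimal usage sketch of the pyfilter API introduced by this patch, assuming only the exports added above in fgex-lib/firegex/nfproxy/__init__.py and fgex-lib/firegex/nfproxy/models/__init__.py; the filter names and the matched strings are illustrative, not part of the patch:

```python
from firegex.nfproxy import pyfilter, ACCEPT, REJECT
from firegex.nfproxy.models import HttpRequest, TCPInputStream

@pyfilter
def block_marker(stream: TCPInputStream):
    # TCPInputStream exposes the reassembled client-to-server data as bytes;
    # the filter is skipped (NotReadyToRun) for server packets
    if b"SOME_BAD_DATA" in stream.data:
        return REJECT
    return ACCEPT  # returning None would also accept

@pyfilter
def block_admin(http: HttpRequest):
    # HttpRequest filters run once when the headers are complete and
    # again when the whole message is complete (see _callable_checks)
    if http.url is not None and http.url.startswith("/admin"):
        return REJECT
```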