diff --git a/Dockerfile b/Dockerfile
index c576406..dd7cc14 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -16,7 +16,7 @@ RUN bun run build
 #Building main conteiner
 FROM --platform=$TARGETARCH debian:stable-slim AS base
 RUN apt-get update -qq && apt-get upgrade -qq && \
-    apt-get install -qq python3-pip \
+    apt-get install -qq python3-pip build-essential \
     libnetfilter-queue-dev libnfnetlink-dev libmnl-dev libcap2-bin\
     nftables libvectorscan-dev libtins-dev python3-nftables
diff --git a/backend/binsrc/classes/netfilter.cpp b/backend/binsrc/classes/netfilter.cpp
index 89b0417..b226842 100644
--- a/backend/binsrc/classes/netfilter.cpp
+++ b/backend/binsrc/classes/netfilter.cpp
@@ -17,12 +17,42 @@ using Tins::TCPIP::Stream;
 using Tins::TCPIP::StreamFollower;
 using namespace std;
-
 #ifndef NETFILTER_CLASSES_HPP
 #define NETFILTER_CLASSES_HPP
 
 typedef Tins::TCPIP::StreamIdentifier stream_id;
 typedef map<stream_id, hs_stream_t*> matching_map;
 
+/* Considering to use unordered_map using this hash of stream_id
+
+namespace std {
+    template<>
+    struct hash<stream_id> {
+        size_t operator()(const stream_id& sid) const
+        {
+            return std::hash<size_t>()(sid.max_address[0] + sid.max_address[1] + sid.max_address[2] + sid.max_address[3] + sid.max_address_port + sid.min_address[0] + sid.min_address[1] + sid.min_address[2] + sid.min_address[3] + sid.min_address_port);
+        }
+    };
+}
+
+*/
+
+#ifdef DEBUG
+ostream& operator<<(ostream& os, const Tins::TCPIP::StreamIdentifier::address_type &sid){
+    bool first_print = false;
+    for (auto ele: sid){
+        if (first_print || ele){
+            first_print = true;
+            os << (int)ele << ".";
+        }
+    }
+    return os;
+}
+
+ostream& operator<<(ostream& os, const stream_id &sid){
+    os << sid.max_address << ":" << sid.max_address_port << " -> " << sid.min_address << ":" << sid.min_address_port;
+    return os;
+}
+#endif
 
 struct packet_info;
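The commented-out block above keys an unordered_map on stream_id but folds all address bytes and ports with a plain sum, so different endpoints whose bytes are permutations of each other hash identically. Below is a small, hypothetical sketch of a mixing-based alternative; the struct is a stand-in with the same fields the snippet touches (byte-array addresses plus the two ports), and the boost-style mixing constant is an assumption, not something taken from this patch:

#include <array>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <unordered_map>

// Stand-in for Tins::TCPIP::StreamIdentifier: two byte-array addresses and two ports.
struct fake_stream_id {
    std::array<uint8_t, 16> min_address{};
    std::array<uint8_t, 16> max_address{};
    uint16_t min_address_port = 0;
    uint16_t max_address_port = 0;
    bool operator==(const fake_stream_id& o) const {
        return min_address == o.min_address && max_address == o.max_address &&
               min_address_port == o.min_address_port && max_address_port == o.max_address_port;
    }
};

// boost-style hash_combine: order-sensitive mixing instead of a plain sum.
static inline void hash_combine(std::size_t& seed, std::size_t value) {
    seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

namespace std {
    template<>
    struct hash<fake_stream_id> {
        std::size_t operator()(const fake_stream_id& sid) const {
            std::size_t seed = 0;
            for (auto b : sid.min_address) hash_combine(seed, b);
            for (auto b : sid.max_address) hash_combine(seed, b);
            hash_combine(seed, sid.min_address_port);
            hash_combine(seed, sid.max_address_port);
            return seed;
        }
    };
}

// Usage: the container the comment block is considering.
std::unordered_map<fake_stream_id, int> example_map;

Note that unordered_map also needs key equality, while the ordered map used above only relies on the identifier's ordering.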
@@ -52,6 +82,60 @@ struct stream_ctx {
             in_scratch = nullptr;
         }
     }
+
+    void clean_stream_by_id(stream_id sid){
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.clean_stream_by_id] Cleaning stream context of " << sid << endl;
+        #endif
+        auto stream_search = in_hs_streams.find(sid);
+        hs_stream_t* stream_match;
+        if (stream_search != in_hs_streams.end()){
+            stream_match = stream_search->second;
+            if (hs_close_stream(stream_match, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                throw invalid_argument("Cannot close stream match on hyperscan");
+            }
+            in_hs_streams.erase(stream_search);
+        }
+
+        stream_search = out_hs_streams.find(sid);
+        if (stream_search != out_hs_streams.end()){
+            stream_match = stream_search->second;
+            if (hs_close_stream(stream_match, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                throw invalid_argument("Cannot close stream match on hyperscan");
+            }
+            out_hs_streams.erase(stream_search);
+        }
+    }
+
+    void clean(){
+
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.clean] Cleaning stream context" << endl;
+        #endif
+
+        if (in_scratch){
+            for(auto ele: in_hs_streams){
+                if (hs_close_stream(ele.second, in_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                    cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                    throw invalid_argument("Cannot close stream match on hyperscan");
+                }
+            }
+            in_hs_streams.clear();
+        }
+
+        if (out_scratch){
+            for(auto ele: out_hs_streams){
+                if (hs_close_stream(ele.second, out_scratch, nullptr, nullptr) != HS_SUCCESS) {
+                    cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
+                    throw invalid_argument("Cannot close stream match on hyperscan");
+                }
+            }
+            out_hs_streams.clear();
+        }
+        clean_scratches();
+    }
 };
 
 struct packet_info {
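The new stream_ctx::clean() and clean_stream_by_id() encode a Hyperscan rule: every hs_stream_t opened with hs_open_stream() has to be closed with hs_close_stream() against a still-valid scratch, and only afterwards may the scratch itself be freed. A self-contained sketch of that lifecycle (one illustrative pattern, one direction), not taken from the firegex sources:

#include <hs/hs.h>
#include <iostream>

// Fires once per match; returning non-zero would stop the scan.
static int on_match(unsigned int id, unsigned long long from,
                    unsigned long long to, unsigned int, void*) {
    std::cout << "match for pattern " << id << " ending at offset " << to << std::endl;
    return 0;
}

int main() {
    hs_database_t* db = nullptr;
    hs_compile_error_t* err = nullptr;
    // Stream-mode database, mirroring HS_MODE_STREAM in fill_ruleset().
    if (hs_compile("flag\\{[a-z]+\\}", HS_FLAG_DOTALL, HS_MODE_STREAM,
                   nullptr, &db, &err) != HS_SUCCESS) {
        std::cerr << err->message << std::endl;
        hs_free_compile_error(err);
        return 1;
    }
    hs_scratch_t* scratch = nullptr;
    hs_alloc_scratch(db, &scratch);

    // One hs_stream_t per direction, which is what in_hs_streams/out_hs_streams hold.
    hs_stream_t* stream = nullptr;
    hs_open_stream(db, 0, &stream);

    // Payload may be split across packets; the stream object keeps the matching state.
    hs_scan_stream(stream, "fla", 3, 0, scratch, on_match, nullptr);
    hs_scan_stream(stream, "g{abc}", 6, 0, scratch, on_match, nullptr);

    // Every open stream must be closed against a live scratch.
    hs_close_stream(stream, scratch, on_match, nullptr);
    hs_free_scratch(scratch);
    hs_free_database(db);
    return 0;
}

This ordering is exactly why clean() walks in_hs_streams and out_hs_streams before calling clean_scratches().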
@@ -139,76 +223,59 @@ class NetfilterQueue {
     }
 
-    //Input data filtering
-    void on_client_data(Stream& stream) {
-        string data(stream.client_payload().begin(), stream.client_payload().end());
-        sctx.tcp_match_util.pkt_info->is_input = true;
-        sctx.tcp_match_util.matching_has_been_called = true;
-        bool result = callback_func(*sctx.tcp_match_util.pkt_info);
-        if (result){
-            clean_stream_by_id(sctx.tcp_match_util.pkt_info->sid);
+    static void on_data_recv(Stream& stream, stream_ctx* sctx, string data, bool is_input) {
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_data_recv] data: " << data << endl;
+        #endif
+        sctx->tcp_match_util.pkt_info->is_input = is_input;
+        sctx->tcp_match_util.matching_has_been_called = true;
+        bool result = callback_func(*sctx->tcp_match_util.pkt_info);
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_data_recv] result: " << result << endl;
+        #endif
+        if (!result){
+            #ifdef DEBUG
+            cerr << "[DEBUG] [NetfilterQueue.on_data_recv] Stream matched, removing all data about it" << endl;
+            #endif
+            sctx->clean_stream_by_id(sctx->tcp_match_util.pkt_info->sid);
             stream.ignore_client_data();
             stream.ignore_server_data();
         }
-        sctx.tcp_match_util.result = result;
+        sctx->tcp_match_util.result = result;
+    }
+
+    //Input data filtering
+    static void on_client_data(Stream& stream, stream_ctx* sctx) {
+        on_data_recv(stream, sctx, string(stream.client_payload().begin(), stream.client_payload().end()), true);
     }
 
     //Server data filtering
-    void on_server_data(Stream& stream) {
-        string data(stream.server_payload().begin(), stream.server_payload().end());
-        sctx.tcp_match_util.pkt_info->is_input = false;
-        sctx.tcp_match_util.matching_has_been_called = true;
-        bool result = callback_func(*sctx.tcp_match_util.pkt_info);
-        if (result){
-            clean_stream_by_id(sctx.tcp_match_util.pkt_info->sid);
-            stream.ignore_client_data();
-            stream.ignore_server_data();
-        }
-        this->sctx.tcp_match_util.result = result;
+    static void on_server_data(Stream& stream, stream_ctx* sctx) {
+        on_data_recv(stream, sctx, string(stream.server_payload().begin(), stream.server_payload().end()), false);
     }
 
-    void on_new_stream(Stream& stream) {
+    static void on_new_stream(Stream& stream, stream_ctx* sctx) {
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_new_stream] New stream detected" << endl;
+        #endif
         if (stream.is_partial_stream()) {
+            #ifdef DEBUG
+            cerr << "[DEBUG] [NetfilterQueue.on_new_stream] Partial stream detected, skipping" << endl;
+            #endif
             return;
         }
-        cerr << "[+] New connection!" << endl;
         stream.auto_cleanup_payloads(true);
-        stream.client_data_callback(
-            [&](auto a){this->on_client_data(a);}
-        );
-        stream.server_data_callback(
-            [&](auto a){this->on_server_data(a);}
-        );
-    }
-
-    void clean_stream_by_id(stream_id stream_id){
-        auto stream_search = this->sctx.in_hs_streams.find(stream_id);
-        hs_stream_t* stream_match;
-        if (stream_search != this->sctx.in_hs_streams.end()){
-            stream_match = stream_search->second;
-            if (hs_close_stream(stream_match, sctx.in_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-            this->sctx.in_hs_streams.erase(stream_search);
-        }
-
-        stream_search = this->sctx.out_hs_streams.find(stream_id);
-        if (stream_search != this->sctx.out_hs_streams.end()){
-            stream_match = stream_search->second;
-            if (hs_close_stream(stream_match, sctx.out_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-            this->sctx.out_hs_streams.erase(stream_search);
-        }
+        stream.client_data_callback(bind(on_client_data, placeholders::_1, sctx));
+        stream.server_data_callback(bind(on_server_data, placeholders::_1, sctx));
     }
 
     // A stream was terminated. The second argument is the reason why it was terminated
-    void on_stream_terminated(Stream& stream, StreamFollower::TerminationReason reason) {
+    static void on_stream_terminated(Stream& stream, StreamFollower::TerminationReason reason, stream_ctx* sctx) {
         stream_id stream_id = stream_id::make_identifier(stream);
-        cerr << "[+] Connection closed: " << &stream_id << endl;
-        this->clean_stream_by_id(stream_id);
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.on_stream_terminated] Stream terminated, deleting all data" << endl;
+        #endif
+        sctx->clean_stream_by_id(stream_id);
     }
@@ -220,12 +287,10 @@ class NetfilterQueue {
         */
         int ret = 1;
         mnl_socket_setsockopt(sctx.nl, NETLINK_NO_ENOBUFS, &ret, sizeof(int));
-        sctx.follower.new_stream_callback(
-            [&](auto a){this->on_new_stream(a);}
-        );
-        sctx.follower.stream_termination_callback(
-            [&](auto a, auto b){this->on_stream_terminated(a, b);}
-        );
+
+        sctx.follower.new_stream_callback(bind(on_new_stream, placeholders::_1, &sctx));
+        sctx.follower.stream_termination_callback(bind(on_stream_terminated, placeholders::_1, placeholders::_2, &sctx));
+
         for (;;) {
             ret = recv_packet();
             if (ret == -1) {
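Making the data callbacks static removes their dependency on a live NetfilterQueue instance: the stream_ctx they need is bound into the callable itself, so the same registration works from run() where only &sctx is available. A small sketch of the two equivalent registration styles, using stand-in Stream and stream_ctx types rather than the libtins/firegex ones:

#include <functional>
#include <iostream>

// Stand-ins for libtins' Stream and firegex's stream_ctx, only to keep the sketch self-contained.
struct Stream { const char* name = "client->server"; };
struct stream_ctx { int callbacks_seen = 0; };

// A static/free callback receives its context explicitly, as in the patched on_client_data.
static void on_client_data(Stream& stream, stream_ctx* sctx) {
    sctx->callbacks_seen++;
    std::cout << stream.name << ": " << sctx->callbacks_seen << " callbacks" << std::endl;
}

int main() {
    stream_ctx sctx;
    Stream stream;

    // Old style: capture the context in a lambda (the original captured `this`).
    std::function<void(Stream&)> via_lambda = [&](Stream& s){ on_client_data(s, &sctx); };

    // New style: bind the context pointer as the trailing argument, forward the Stream through _1.
    std::function<void(Stream&)> via_bind = std::bind(on_client_data, std::placeholders::_1, &sctx);

    via_lambda(stream);
    via_bind(stream);   // both invocations are equivalent
    return 0;
}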
@@ -241,6 +306,9 @@ class NetfilterQueue {
     }
 
     ~NetfilterQueue() {
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.~NetfilterQueue] Destructor called" << endl;
+        #endif
         send_config_cmd(NFQNL_CFG_CMD_UNBIND);
         _clear();
     }
@@ -263,23 +331,9 @@ class NetfilterQueue {
         }
         mnl_socket_close(sctx.nl);
         sctx.nl = nullptr;
-        sctx.clean_scratches();
-
-        for(auto ele: sctx.in_hs_streams){
-            if (hs_close_stream(ele.second, sctx.in_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-        }
-        sctx.in_hs_streams.clear();
-        for(auto ele: sctx.out_hs_streams){
-            if (hs_close_stream(ele.second, sctx.out_scratch, nullptr, nullptr) != HS_SUCCESS) {
-                cerr << "[error] [NetfilterQueue.clean_stream_by_id] Error closing the stream matcher (hs)" << endl;
-                throw invalid_argument("Cannot close stream match on hyperscan");
-            }
-        }
-        sctx.out_hs_streams.clear();
+        sctx.clean();
     }
+
     template <typename T>
     static void build_verdict(T packet, uint8_t *payload, uint16_t plen, nlmsghdr *nlh_verdict, nfqnl_msg_packet_hdr *ph, stream_ctx* sctx){
         Tins::TCP* tcp = packet.template find_pdu<Tins::TCP>();
@@ -300,7 +354,17 @@ class NetfilterQueue {
         };
         sctx->tcp_match_util.matching_has_been_called = false;
         sctx->tcp_match_util.pkt_info = &pktinfo;
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.build_verdict] TCP Packet received " << packet.src_addr() << ":" << tcp->sport() << " -> " << packet.dst_addr() << ":" << tcp->dport() << ", sending to libtins StreamFollower" << endl;
+        #endif
         sctx->follower.process_packet(packet);
+        #ifdef DEBUG
+        if (sctx->tcp_match_util.matching_has_been_called){
+            cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has called matching functions" << endl;
+        }else{
+            cerr << "[DEBUG] [NetfilterQueue.build_verdict] StreamFollower has NOT called matching functions" << endl;
+        }
+        #endif
         if (sctx->tcp_match_util.matching_has_been_called && !sctx->tcp_match_util.result){
             Tins::PDU* data_layer = tcp->release_inner_pdu();
             if (data_layer != nullptr){
@@ -317,7 +381,7 @@ class NetfilterQueue {
             if (!udp){
                 throw invalid_argument("Only TCP and UDP are supported");
            }
-            Tins::PDU* application_layer = tcp->inner_pdu();
+            Tins::PDU* application_layer = udp->inner_pdu();
             u_int16_t payload_size = 0;
             if (application_layer != nullptr){
                 payload_size = application_layer->size();
@@ -369,6 +433,13 @@ class NetfilterQueue {
 
         nlh_verdict = nfq_nlmsg_put(buf, NFQNL_MSG_VERDICT, ntohs(nfg->res_id));
 
+        #ifdef DEBUG
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet received" << endl;
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Packet ID: " << ntohl(ph->packet_id) << endl;
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Payload size: " << plen << endl;
+        cerr << "[DEBUG] [NetfilterQueue.queue_cb] Payload: " << string(payload, payload+plen) << endl;
+        #endif
+
         // Check IP protocol version
         if ( (payload[0] & 0xf0) == 0x40 ){
             build_verdict(Tins::IP(payload, plen), payload, plen, nlh_verdict, ph, sctx);
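The queue_cb context above dispatches on the first payload byte: the high nibble of the first octet of a raw IP packet is the version field, 4 for IPv4 (parsed as Tins::IP) and 6 for IPv6. A tiny illustration of that check with made-up names, independent of libtins:

#include <cstdint>
#include <cstdio>

// The version lives in the high nibble of the first octet: 0x4? is IPv4, 0x6? is IPv6.
enum class IpVersion { v4, v6, unknown };

IpVersion detect_ip_version(const uint8_t* payload, uint16_t plen) {
    if (plen == 0) return IpVersion::unknown;
    switch (payload[0] & 0xf0) {
        case 0x40: return IpVersion::v4;   // would be handed to Tins::IP
        case 0x60: return IpVersion::v6;   // would be handed to Tins::IPv6
        default:   return IpVersion::unknown;
    }
}

int main() {
    const uint8_t v4_start[] = {0x45, 0x00};   // version 4, IHL 5
    const uint8_t v6_start[] = {0x60, 0x00};
    std::printf("%d %d\n", static_cast<int>(detect_ip_version(v4_start, 2)),
                           static_cast<int>(detect_ip_version(v6_start, 2)));
    return 0;
}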
diff --git a/backend/binsrc/classes/regex_rules.cpp b/backend/binsrc/classes/regex_rules.cpp
index 08a62ce..c01b2a2 100644
--- a/backend/binsrc/classes/regex_rules.cpp
+++ b/backend/binsrc/classes/regex_rules.cpp
@@ -78,9 +78,9 @@ class RegexRules{
         if (n_of_regex == 0){
             return;
         }
-        const char* regex_match_rules[n_of_regex];
-        unsigned int regex_array_ids[n_of_regex];
-        unsigned int regex_flags[n_of_regex];
+        vector<const char*> regex_match_rules(n_of_regex);
+        vector<unsigned int> regex_array_ids(n_of_regex);
+        vector<unsigned int> regex_flags(n_of_regex);
         for(int i = 0; i < n_of_regex; i++){
             regex_match_rules[i] = decoded[i].second.regex.c_str();
             regex_array_ids[i] = i;
@@ -89,17 +89,25 @@ class RegexRules{
                 regex_flags[i] |= HS_FLAG_CASELESS;
             }
         }
-
-        hs_database_t* rebuilt_db;
-        hs_compile_error_t *compile_err;
+        #ifdef DEBUG
+        cerr << "[DEBUG] [RegexRules.fill_ruleset] compiling " << n_of_regex << " regexes..." << endl;
+        for (int i = 0; i < n_of_regex; i++){
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex[" << i << "]: " << decoded[i].first << " " << decoded[i].second.regex << endl;
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex_match_rules[" << i << "]: " << regex_match_rules[i] << endl;
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex_flags[" << i << "]: " << regex_flags[i] << endl;
+            cerr << "[DEBUG] [RegexRules.fill_ruleset] regex_array_ids[" << i << "]: " << regex_array_ids[i] << endl;
+        }
+        #endif
+        hs_database_t* rebuilt_db = nullptr;
+        hs_compile_error_t *compile_err = nullptr;
         if (
             hs_compile_multi(
-                regex_match_rules,
-                regex_flags,
-                regex_array_ids,
+                regex_match_rules.data(),
+                regex_flags.data(),
+                regex_array_ids.data(),
                 n_of_regex,
                 is_stream?HS_MODE_STREAM:HS_MODE_BLOCK,
-                nullptr,&rebuilt_db, &compile_err
+                nullptr, &rebuilt_db, &compile_err
             ) != HS_SUCCESS
         ) {
             cerr << "[warning] [RegexRules.fill_ruleset] hs_db failed to compile: '" << compile_err->message << "' skipping..." << endl;
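Replacing the variable-length arrays with vectors keeps the call shape unchanged, since vector::data() hands hs_compile_multi() the same contiguous arrays of patterns, flags and ids. A standalone sketch of the call with two example patterns (the patterns, flags and ids here are illustrative, not firegex rules):

#include <hs/hs.h>
#include <iostream>
#include <vector>

// Build a Hyperscan database from parallel vectors, the shape fill_ruleset() now uses.
int main() {
    std::vector<const char*>  patterns = {"^GET /admin", "flag\\{"};
    std::vector<unsigned int> flags    = {0, HS_FLAG_CASELESS};
    std::vector<unsigned int> ids      = {0, 1};

    bool is_stream = true;                     // mirrors is_stream?HS_MODE_STREAM:HS_MODE_BLOCK
    hs_database_t* db = nullptr;
    hs_compile_error_t* compile_err = nullptr;

    if (hs_compile_multi(patterns.data(), flags.data(), ids.data(),
                         static_cast<unsigned int>(patterns.size()),
                         is_stream ? HS_MODE_STREAM : HS_MODE_BLOCK,
                         nullptr, &db, &compile_err) != HS_SUCCESS) {
        // compile_err->message names the offending pattern; the error object must be freed.
        std::cerr << "compile failed: " << compile_err->message << std::endl;
        hs_free_compile_error(compile_err);
        return 1;
    }
    hs_free_database(db);
    return 0;
}

The hs_free_compile_error() call is part of the library's documented error contract and is shown here for completeness; this hunk itself only initialises rebuilt_db/compile_err and switches the argument sources to the vectors.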
diff --git a/backend/binsrc/nfqueue.cpp b/backend/binsrc/nfqueue.cpp
index d961807..bd7b6b5 100644
--- a/backend/binsrc/nfqueue.cpp
+++ b/backend/binsrc/nfqueue.cpp
@@ -54,10 +54,16 @@ struct matched_data{
     bool has_matched = false;
 };
 
-bool filter_callback(packet_info & info){
+
+bool filter_callback(packet_info& info){
     shared_ptr conf = regex_config;
-    if (conf->ver() != info.sctx->latest_config_ver){
-        info.sctx->clean_scratches();
+    auto current_version = conf->ver();
+    if (current_version != info.sctx->latest_config_ver){
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Configuration has changed (" << current_version << "!=" << info.sctx->latest_config_ver << "), cleaning scratch spaces" << endl;
+        #endif
+        info.sctx->clean();
+        info.sctx->latest_config_ver = current_version;
     }
     scratch_setup(conf->input_ruleset, info.sctx->in_scratch);
     scratch_setup(conf->output_ruleset, info.sctx->out_scratch);
@@ -66,6 +72,12 @@ bool filter_callback(packet_info & info){
     if (regex_matcher == nullptr){
         return true;
     }
+
+    #ifdef DEBUG
+    cerr << "[DEBUG] [filter_callback] Matching packet with " << (info.is_input ? "input" : "output") << " ruleset" << endl;
+    cerr << "[DEBUG] [filter_callback] Packet: " << info.payload << endl;
+    #endif
+
     matched_data match_res;
     hs_error_t err;
     hs_scratch_t* scratch_space = info.is_input ? info.sctx->in_scratch: info.sctx->out_scratch;
@@ -77,25 +89,40 @@ bool filter_callback(packet_info & info){
     };
     hs_stream_t* stream_match;
     if (conf->stream_mode()){
-        matching_map match_map = info.is_input ? info.sctx->in_hs_streams : info.sctx->out_hs_streams;
-        auto stream_search = match_map.find(info.sid);
+        matching_map* match_map = info.is_input ? &info.sctx->in_hs_streams : &info.sctx->out_hs_streams;
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Dumping match_map " << match_map << endl;
+        for (auto ele: *match_map){
+            cerr << "[DEBUG] [filter_callback] " << ele.first << " -> " << ele.second << endl;
+        }
+        cerr << "[DEBUG] [filter_callback] End of match_map" << endl;
+        #endif
+        auto stream_search = match_map->find(info.sid);
 
-        if (stream_search == match_map.end()){
+        if (stream_search == match_map->end()){
+
+            #ifdef DEBUG
+            cerr << "[DEBUG] [filter_callback] Creating new stream matcher for " << info.sid << endl;
+            #endif
             if (hs_open_stream(regex_matcher, 0, &stream_match) != HS_SUCCESS) {
                 cerr << "[error] [filter_callback] Error opening the stream matcher (hs)" << endl;
                 throw invalid_argument("Cannot open stream match on hyperscan");
            }
-            if (info.is_tcp){
-                match_map[info.sid] = stream_match;
-            }
+            match_map->insert_or_assign(info.sid, stream_match);
         }else{
             stream_match = stream_search->second;
         }
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Matching as a stream" << endl;
+        #endif
         err = hs_scan_stream(
             stream_match,info.payload.c_str(), info.payload.length(),
             0, scratch_space, match_func, &match_res
         );
     }else{
+        #ifdef DEBUG
+        cerr << "[DEBUG] [filter_callback] Matching as a block" << endl;
+        #endif
         err = hs_scan(
             regex_matcher,info.payload.c_str(), info.payload.length(),
             0, scratch_space, match_func, &match_res
@@ -105,13 +132,6 @@ bool filter_callback(packet_info & info){
         cerr << "[error] [filter_callback] Error while matching the stream (hs)" << endl;
         throw invalid_argument("Error while matching the stream with hyperscan");
     }
-    if (
-        !info.is_tcp && conf->stream_mode() &&
-        hs_close_stream(stream_match, scratch_space, nullptr, nullptr) != HS_SUCCESS
-    ){
-        cerr << "[error] [filter_callback] Error closing the stream matcher (hs)" << endl;
-        throw invalid_argument("Cannot close stream match on hyperscan");
-    }
     if (match_res.has_matched){
         auto rules_vector = info.is_input ? conf->input_ruleset.regexes : conf->output_ruleset.regexes;
         stringstream msg;
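The significant fix in this hunk is switching from copying the per-direction map to taking a pointer at it: with the old `matching_map match_map = ...`, the freshly opened hs_stream_t was stored in a temporary copy, so it never reached the shared stream_ctx and could not be found or closed later. A minimal illustration of the pitfall, with a plain std::map standing in for in_hs_streams:

#include <cassert>
#include <map>

// Why filter_callback now takes a pointer to the per-direction map: assigning a
// container to a plain local makes a copy, so inserts never reach the original.
int main() {
    std::map<int, int> streams;               // stands in for sctx->in_hs_streams

    std::map<int, int> copy = streams;        // old code: a copy of the map
    copy[42] = 1;                             // the insert lands on the copy and is lost
    assert(streams.find(42) == streams.end());

    std::map<int, int>* ptr = &streams;       // patched code: pointer to the real map
    ptr->insert_or_assign(42, 1);
    assert(streams.find(42) != streams.end());
    return 0;
}

insert_or_assign also covers the case where an entry already exists for the same id, which matches the patched call.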
diff --git a/backend/modules/nfregex/firegex.py b/backend/modules/nfregex/firegex.py
index 762e817..6e1e9bf 100644
--- a/backend/modules/nfregex/firegex.py
+++ b/backend/modules/nfregex/firegex.py
@@ -80,7 +80,8 @@ class FiregexInterceptor:
         proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../cppqueue")
         self.process = await asyncio.create_subprocess_exec(
             proxy_binary_path,
-            stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE
+            stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE,
+            env={"MATCH_MODE": "stream" if self.srv.proto == "tcp" else "block", "NTHREADS": os.getenv("NTHREADS","1")},
         )
         line_fut = self.process.stdout.readuntil()
         try:
diff --git a/frontend/src/pages/NFRegex/ServiceDetails.tsx b/frontend/src/pages/NFRegex/ServiceDetails.tsx
index 584117f..e1bdbdd 100644
--- a/frontend/src/pages/NFRegex/ServiceDetails.tsx
+++ b/frontend/src/pages/NFRegex/ServiceDetails.tsx
@@ -16,6 +16,7 @@ function ServiceDetailsNFRegex() {
     const [tooltipAddRegexOpened, setTooltipAddRegexOpened] = useState(false)
     const regexesList = nfregexServiceRegexesQuery(srv??"")
 
+    if (services.isLoading) return
     if (!srv || !serviceInfo || regexesList.isError) return
 
     return <>
diff --git a/tests/nf_test.py b/tests/nf_test.py
index 034b80a..cc780bd 100644
--- a/tests/nf_test.py
+++ b/tests/nf_test.py
@@ -88,7 +88,7 @@ def checkRegex(regex, should_work=True, upper=False):
         if r["regex"] == regex:
             #Test the regex
             s = base64.b64decode(regex).upper() if upper else base64.b64decode(regex)
-            if not server.sendCheckData(secrets.token_bytes(200) + s + secrets.token_bytes(200)):
+            if not server.sendCheckData(secrets.token_bytes(40) + s + secrets.token_bytes(40)):
                 puts("The malicious request was successfully blocked ✔", color=colors.green)
                 n_blocked += 1
                 time.sleep(1)
@@ -104,7 +104,7 @@ def checkRegex(regex, should_work=True, upper=False):
        puts("Test Failed: The regex wasn't found ✗", color=colors.red)
        exit_test(1)
    else:
-        if server.sendCheckData(secrets.token_bytes(200) + base64.b64decode(regex) + secrets.token_bytes(200)):
+        if server.sendCheckData(secrets.token_bytes(40) + base64.b64decode(regex) + secrets.token_bytes(40)):
            puts("The request wasn't blocked ✔", color=colors.green)
        else:
            puts("Test Failed: The request was blocked when it shouldn't have", color=colors.red)
@@ -186,18 +186,6 @@ checkRegex(regex)
 
 clear_regexes()
 
-#Create Server regex and verify that should not matches
-if(firegex.nf_add_regex(service_id,regex,"S",active=True, is_case_sensitive=True)):
-    puts(f"Sucessfully added server to client regex {str(secret)} ✔", color=colors.green)
-else:
-    puts(f"Test Failed: Coulnd't add server to client regex {str(secret)} ✗", color=colors.red)
-    exit_test(1)
-
-checkRegex(regex, should_work=False)
-
-#Delete regex
-clear_regexes()
-
 #Rename service
 if(firegex.nf_rename_service(service_id,f"{args.service_name}2")):
     puts(f"Sucessfully renamed service to {args.service_name}2 ✔", color=colors.green)
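Because env= on asyncio.create_subprocess_exec replaces the child's environment rather than extending it, the Python side forwards NTHREADS explicitly and adds MATCH_MODE ("stream" for TCP services, "block" for UDP). The diff does not show how the cppqueue binary consumes these variables; the following is a hypothetical reader using std::getenv, with defaults chosen to mirror the Python side:

#include <cstdlib>
#include <iostream>
#include <string>

// Hypothetical consuming side: firegex.py only exports MATCH_MODE and NTHREADS,
// so the variable names are from the patch but this reader is an assumption.
int main() {
    const char* mode_env    = std::getenv("MATCH_MODE");
    const char* threads_env = std::getenv("NTHREADS");

    bool stream_mode = mode_env && std::string(mode_env) == "stream";  // "stream" for TCP, "block" for UDP
    int n_threads    = threads_env ? std::atoi(threads_env) : 1;       // defaults to 1, as on the Python side

    std::cout << "stream_mode=" << stream_mode << " threads=" << n_threads << std::endl;
    return 0;
}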
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 47bf7e2..cfe00d9 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -9,23 +9,23 @@ ERROR=0
 pip3 install -r requirements.txt
 
 echo "Running standard API test"
-#python3 api_test.py -p $PASSWORD || ERROR=1
+python3 api_test.py -p $PASSWORD || ERROR=1
 echo "Running Netfilter Regex TCP ipv4"
 python3 nf_test.py -p $PASSWORD -m tcp || ERROR=1
 echo "Running Netfilter Regex TCP ipv6"
-#python3 nf_test.py -p $PASSWORD -m tcp -6 || ERROR=1
+python3 nf_test.py -p $PASSWORD -m tcp -6 || ERROR=1
 echo "Running Netfilter Regex UDP ipv4"
-#python3 nf_test.py -p $PASSWORD -m udp || ERROR=1
+python3 nf_test.py -p $PASSWORD -m udp || ERROR=1
 echo "Running Netfilter Regex UDP ipv6"
-#python3 nf_test.py -p $PASSWORD -m udp -6 || ERROR=1
+python3 nf_test.py -p $PASSWORD -m udp -6 || ERROR=1
 echo "Running Port Hijack TCP ipv4"
-#python3 ph_test.py -p $PASSWORD -m tcp || ERROR=1
+python3 ph_test.py -p $PASSWORD -m tcp || ERROR=1
 echo "Running Port Hijack TCP ipv6"
-#python3 ph_test.py -p $PASSWORD -m tcp -6 || ERROR=1
+python3 ph_test.py -p $PASSWORD -m tcp -6 || ERROR=1
 echo "Running Port Hijack UDP ipv4"
-#python3 ph_test.py -p $PASSWORD -m udp || ERROR=1
+python3 ph_test.py -p $PASSWORD -m udp || ERROR=1
 echo "Running Port Hijack UDP ipv6"
-#python3 ph_test.py -p $PASSWORD -m udp -6 || ERROR=1
+python3 ph_test.py -p $PASSWORD -m udp -6 || ERROR=1
 
 exit $ERROR