Python integration with C++ binary (not fully working yet)
@@ -1,12 +1,11 @@
-from typing import List
-from pypacker import interceptor
-from pypacker.layer3 import ip, ip6
-from pypacker.layer4 import tcp, udp
+from typing import Dict, List, Set
from ipaddress import ip_interface
from modules.iptables import IPTables
-import os, traceback

from modules.sqlite import Service
+import re, os, asyncio
+import traceback

+from modules.sqlite import Regex

class FilterTypes:
    INPUT = "FIREGEX-INPUT"
@@ -15,14 +14,13 @@ class FilterTypes:
QUEUE_BASE_NUM = 1000

class FiregexFilter():
-    def __init__(self, proto:str, port:int, ip_int:str, queue=None, target=None, id=None, func=None):
+    def __init__(self, proto:str, port:int, ip_int:str, queue=None, target=None, id=None):
        self.target = target
        self.id = int(id) if id else None
        self.queue = queue
        self.proto = proto
        self.port = int(port)
        self.ip_int = str(ip_int)
-        self.func = func

    def __eq__(self, o: object) -> bool:
        if isinstance(o, FiregexFilter):
@@ -35,16 +33,6 @@ class FiregexFilter():
    def ipv4(self):
        return ip_interface(self.ip_int).version == 4

-    def input_func(self):
-        def none(pkt): return True
-        def wrap(pkt): return self.func(pkt, True)
-        return wrap if self.func else none

-    def output_func(self):
-        def none(pkt): return True
-        def wrap(pkt): return self.func(pkt, False)
-        return wrap if self.func else none

class FiregexTables(IPTables):

    def __init__(self, ipv6=False):
@@ -108,9 +96,9 @@ class FiregexTables(IPTables):
            ))
        return res

-    def add(self, filter:FiregexFilter):
+    async def add(self, filter:FiregexFilter):
        if filter in self.get(): return None
-        return FiregexInterceptor( iptables=self, filter=filter, n_threads=int(os.getenv("N_THREADS_NFQUEUE","1")))
+        return await FiregexInterceptor.start( iptables=self, filter=filter, n_queues=int(os.getenv("N_THREADS_NFQUEUE","1")))

    def delete_all(self):
        for filter_type in [FilterTypes.INPUT, FilterTypes.OUTPUT]:
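A minimal usage sketch for the new coroutine-style add() (the service values below are made up; it assumes only the FiregexTables/FiregexFilter definitions above):

    async def protect_service():
        tables = FiregexTables()  # IPv4 table; pass ipv6=True for the IPv6 one
        flt = FiregexFilter(proto="tcp", port=1234, ip_int="10.0.0.1/32")
        # returns a started FiregexInterceptor, or None if an equal filter already exists
        return await tables.add(flt)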
@@ -120,52 +108,143 @@ class FiregexTables(IPTables):
        for filter in self.get():
            if filter.port == srv.port and filter.proto == srv.proto and ip_interface(filter.ip_int) == ip_interface(srv.ip_int):
                self.delete_rule(filter.target, filter.id)


+class RegexFilter:
+    def __init__(
+        self, regex,
+        is_case_sensitive=True,
+        is_blacklist=True,
+        input_mode=False,
+        output_mode=False,
+        blocked_packets=0,
+        id=None,
+        update_func = None
+    ):
+        self.regex = regex
+        self.is_case_sensitive = is_case_sensitive
+        self.is_blacklist = is_blacklist
+        if input_mode == output_mode: input_mode = output_mode = True # (False, False) == (True, True)
+        self.input_mode = input_mode
+        self.output_mode = output_mode
+        self.blocked = blocked_packets
+        self.id = id
+        self.update_func = update_func
+        self.compiled_regex = self.compile()

+    @classmethod
+    def from_regex(cls, regex:Regex, update_func = None):
+        return cls(
+            id=regex.id, regex=regex.regex, is_case_sensitive=regex.is_case_sensitive,
+            is_blacklist=regex.is_blacklist, blocked_packets=regex.blocked_packets,
+            input_mode = regex.mode in ["C","B"], output_mode = regex.mode in ["S","B"],
+            update_func = update_func
+        )

+    def compile(self):
+        if isinstance(self.regex, str): self.regex = self.regex.encode()
+        if not isinstance(self.regex, bytes): raise Exception("Invalid Regex Parameter")
+        re.compile(self.regex) # raises re.error if it's invalid!
+        case_sensitive = "1" if self.is_case_sensitive else "0"
+        if self.input_mode:
+            yield case_sensitive + "C" + self.regex.hex() if self.is_blacklist else case_sensitive + "c" + self.regex.hex()
+        if self.output_mode:
+            yield case_sensitive + "S" + self.regex.hex() if self.is_blacklist else case_sensitive + "s" + self.regex.hex()

+    async def update(self):
+        if self.update_func:
+            if asyncio.iscoroutinefunction(self.update_func): await self.update_func(self)
+            else: self.update_func(self)
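A worked example of the codes compile() yields, here for a case-sensitive blacklist regex that applies to both directions:

    f = RegexFilter(b"flag{", is_case_sensitive=True, is_blacklist=True)
    list(f.compile())
    # -> ['1C666c61677b', '1S666c61677b']
    #    '1'        -> case sensitive ('0' would mean case insensitive)
    #    'C' / 'S'  -> blacklist on input / output (lowercase 'c'/'s' would mean whitelist)
    #    '666c61677b' -> hex encoding of b"flag{"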

class FiregexInterceptor:

-    def __init__(self, iptables: FiregexTables, filter: FiregexFilter, n_threads:int = 1):

+    def __init__(self):
+        self.filter:FiregexFilter
+        self.ipv6:bool
+        self.filter_map_lock:asyncio.Lock
+        self.filter_map: Dict[str, RegexFilter]
+        self.regex_filters: Set[RegexFilter]
+        self.update_config_lock:asyncio.Lock
+        self.process:asyncio.subprocess.Process
+        self.n_queues:int
+        self.update_task: asyncio.Task
+        self.iptables:FiregexTables

+    @classmethod
+    async def start(cls, iptables: FiregexTables, filter: FiregexFilter, n_queues:int = 1):
+        self = cls()
+        self.filter = filter
+        self.n_queues = n_queues
+        self.iptables = iptables
+        self.ipv6 = self.filter.ipv6()
-        self.itor_input, codes = self._start_queue(filter.input_func(), n_threads)
-        iptables.add_input(queue_range=codes, proto=self.filter.proto, port=self.filter.port, ip_int=self.filter.ip_int)
-        self.itor_output, codes = self._start_queue(filter.output_func(), n_threads)
-        iptables.add_output(queue_range=codes, proto=self.filter.proto, port=self.filter.port, ip_int=self.filter.ip_int)
+        self.filter_map_lock = asyncio.Lock()
+        self.update_config_lock = asyncio.Lock()
+        input_range, output_range = await self._start_binary()
+        self.update_task = asyncio.create_task(self.update_blocked())
+        self.iptables.add_input(queue_range=input_range, proto=self.filter.proto, port=self.filter.port, ip_int=self.filter.ip_int)
+        self.iptables.add_output(queue_range=output_range, proto=self.filter.proto, port=self.filter.port, ip_int=self.filter.ip_int)
+        return self

+    async def _start_binary(self):
+        proxy_binary_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),"./cppqueue")
+        self.process = await asyncio.create_subprocess_exec(
+            proxy_binary_path, str(self.n_queues),
+            stdout=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE
+        )
+        line_fut = self.process.stdout.readuntil()
+        try:
+            line_fut = await asyncio.wait_for(line_fut, timeout=1)
+        except asyncio.TimeoutError:
+            self.process.kill()
+            raise Exception("Invalid binary output")
+        line = line_fut.decode()
+        if line.startswith("QUEUES "):
+            params = line.split()
+            return (int(params[2]), int(params[3])), (int(params[5]), int(params[6]))
+        else:
+            self.process.kill()
+            raise Exception("Invalid binary output")
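The parsing above implies a single handshake line on the binary's stdout carrying the two queue ranges at token positions 2-3 and 5-6, e.g. (the INPUT/OUTPUT labels are assumptions, only the positions matter to this code):

    QUEUES INPUT 1000 1003 OUTPUT 1004 1007
    # -> input queues (1000, 1003), output queues (1004, 1007)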

-    def _start_queue(self,func,n_threads):
-        def func_wrap(ll_data, ll_proto_id, data, ctx, *args):
-            pkt_parsed = ip6.IP6(data) if self.ipv6 else ip.IP(data)
-            try:
-                pkt_data = None
-                if not pkt_parsed[tcp.TCP] is None:
-                    pkt_data = pkt_parsed[tcp.TCP].body_bytes
-                elif not pkt_parsed[udp.UDP] is None:
-                    pkt_data = pkt_parsed[udp.UDP].body_bytes
-                if pkt_data:
-                    if func(pkt_data):
-                        return data, interceptor.NF_ACCEPT
-                    elif pkt_parsed[tcp.TCP]:
-                        pkt_parsed[tcp.TCP].flags &= 0x00
-                        pkt_parsed[tcp.TCP].flags |= tcp.TH_FIN | tcp.TH_ACK
-                        pkt_parsed[tcp.TCP].body_bytes = b""
-                        return pkt_parsed.bin(), interceptor.NF_ACCEPT
-                    else: return b"", interceptor.NF_DROP
-                else: return data, interceptor.NF_ACCEPT
-            except Exception:
-                traceback.print_exc()
-                return data, interceptor.NF_ACCEPT

-        ictor = interceptor.Interceptor()
-        starts = QUEUE_BASE_NUM
-        while True:
-            if starts >= 65536:
-                raise Exception("Netfilter queue is full!")
-            queue_ids = list(range(starts,starts+n_threads))
-            try:
-                ictor.start(func_wrap, queue_ids=queue_ids)
-                break
-            except interceptor.UnableToBindException as e:
-                starts = e.queue_id + 1
-        return ictor, (starts, starts+n_threads-1)

+    async def update_blocked(self):
+        try:
+            while True:
+                line = (await self.process.stdout.readuntil()).decode()
+                if line.startswith("BLOCKED"):
+                    regex_id = line.split()[1]
+                    async with self.filter_map_lock:
+                        if regex_id in self.filter_map:
+                            self.filter_map[regex_id].blocked+=1
+                            await self.filter_map[regex_id].update()
+        except asyncio.CancelledError: pass
+        except asyncio.IncompleteReadError: pass
+        except Exception:
+            traceback.print_exc()
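update_blocked() then expects the binary to report every match on stdout, one line per blocked packet, keyed by the same filter code that was sent in the configuration; a sketch of the assumed format:

    BLOCKED 1C666c61677b
    # -> filter_map['1C666c61677b'].blocked += 1, then its update_func is invoked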

-    def stop(self):
-        self.itor_input.stop()
-        self.itor_output.stop()
+    async def stop(self):
+        self.update_task.cancel()
+        self.process.kill()

+    async def _update_config(self, filters_codes):
+        async with self.update_config_lock:
+            self.process.stdin.write((" ".join(filters_codes)+"\n").encode())
+            await self.process.stdin.drain()

+    async def reload(self, filters:List[RegexFilter]):
+        async with self.filter_map_lock:
+            self.filter_map = self.compile_filters(filters)
+            filters_codes = self.get_filter_codes()
+            await self._update_config(filters_codes)

+    def get_filter_codes(self):
+        filters_codes = list(self.filter_map.keys())
+        filters_codes.sort(key=lambda a: self.filter_map[a].blocked, reverse=True)
+        return filters_codes

+    def compile_filters(self, filters:List[RegexFilter]):
+        res = {}
+        for filter_obj in filters:
+            try:
+                raw_filters = filter_obj.compile()
+                for filter in raw_filters:
+                    res[filter] = filter_obj
+            except Exception: pass
+        return res
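Putting it together, a reload() pushes one whitespace-joined line of filter codes (most-blocked first) to the binary's stdin via _update_config(); e.g. for two hypothetical codes following the compile() format above it would write:

    1C666c61677b 0s70617373776f7264\n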
@@ -1,6 +1,6 @@
-import traceback, asyncio, pcre
+import traceback, asyncio
from typing import Dict
-from modules.firegex import FiregexFilter, FiregexTables
+from modules.firegex import FiregexFilter, FiregexTables, RegexFilter
from modules.sqlite import Regex, SQLite, Service

class STATUS:
@@ -12,17 +12,8 @@ class FirewallManager:
        self.db = db
        self.proxy_table: Dict[str, ServiceManager] = {}
        self.lock = asyncio.Lock()
-        self.updater_task = None

-    def init_updater(self, callback = None):
-        if not self.updater_task:
-            self.updater_task = asyncio.create_task(self._stats_updater(callback))

-    def close_updater(self):
-        if self.updater_task: self.updater_task.cancel()

    async def close(self):
-        self.close_updater()
-        if self.updater_task: self.updater_task.cancel()
        for key in list(self.proxy_table.keys()):
            await self.remove(key)
@@ -33,8 +24,7 @@ class FirewallManager:
            await self.proxy_table[srv_id].next(STATUS.STOP)
            del self.proxy_table[srv_id]

-    async def init(self, callback = None):
-        self.init_updater(callback)
+    async def init(self):
        await self.reload()

    async def reload(self):
@@ -43,7 +33,6 @@ class FirewallManager:
            srv = Service.from_dict(srv)
            if srv.id in self.proxy_table:
                continue

            self.proxy_table[srv.id] = ServiceManager(srv, self.db)
            await self.proxy_table[srv.id].next(srv.status)
@@ -71,42 +60,6 @@ class FirewallManager:

class ServiceNotFoundException(Exception): pass

-class RegexFilter:
-    def __init__(
-        self, regex,
-        is_case_sensitive=True,
-        is_blacklist=True,
-        input_mode=False,
-        output_mode=False,
-        blocked_packets=0,
-        id=None
-    ):
-        self.regex = regex
-        self.is_case_sensitive = is_case_sensitive
-        self.is_blacklist = is_blacklist
-        if input_mode == output_mode: input_mode = output_mode = True # (False, False) == (True, True)
-        self.input_mode = input_mode
-        self.output_mode = output_mode
-        self.blocked = blocked_packets
-        self.id = id
-        self.compiled_regex = self.compile()

-    @classmethod
-    def from_regex(cls, regex:Regex):
-        return cls(
-            id=regex.id, regex=regex.regex, is_case_sensitive=regex.is_case_sensitive,
-            is_blacklist=regex.is_blacklist, blocked_packets=regex.blocked_packets,
-            input_mode = regex.mode in ["C","B"], output_mode = regex.mode in ["S","B"]
-        )

-    def compile(self):
-        if isinstance(self.regex, str): self.regex = self.regex.encode()
-        if not isinstance(self.regex, bytes): raise Exception("Invalid Regex Paramether")
-        return pcre.compile(self.regex if self.is_case_sensitive else b"(?i)"+self.regex)

-    def check(self, data):
-        return True if self.compiled_regex.search(data) else False

class ServiceManager:
    def __init__(self, srv: Service, db):
        self.srv = srv
@@ -114,12 +67,10 @@ class ServiceManager:
        self.firegextable = FiregexTables(self.srv.ipv6)
        self.status = STATUS.STOP
        self.filters: Dict[int, FiregexFilter] = {}
-        self._update_filters_from_db()
        self.lock = asyncio.Lock()
        self.interceptor = None

    # TODO: I don't like this method very much
-    def _update_filters_from_db(self):
+    async def _update_filters_from_db(self):
        regexes = [
            Regex.from_dict(ele) for ele in
            self.db.query("SELECT * FROM regexes WHERE service_id = ? AND active=1;", self.srv.id)
@@ -127,17 +78,16 @@ class ServiceManager:
        #Filter check
        old_filters = set(self.filters.keys())
        new_filters = set([f.id for f in regexes])

        #remove old filters
        for f in old_filters:
            if not f in new_filters:
                del self.filters[f]

        #add new filters
        for f in new_filters:
            if not f in old_filters:
                filter = [ele for ele in regexes if ele.id == f][0]
-                self.filters[f] = RegexFilter.from_regex(filter)
+                self.filters[f] = RegexFilter.from_regex(filter, self._stats_updater)
+        if self.interceptor: await self.interceptor.reload(self.filters.values())

    def __update_status_db(self, status):
        self.db.query("UPDATE services SET status = ? WHERE service_id = ?;", status, self.srv.id)
@@ -145,49 +95,36 @@ class ServiceManager:
    async def next(self,to):
        async with self.lock:
            if (self.status, to) == (STATUS.ACTIVE, STATUS.STOP):
-                self.stop()
+                await self.stop()
                self._set_status(to)
            # PAUSE -> ACTIVE
            elif (self.status, to) == (STATUS.STOP, STATUS.ACTIVE):
-                self.restart()
+                await self.restart()

    def _stats_updater(self,filter:RegexFilter):
        self.db.query("UPDATE regexes SET blocked_packets = ? WHERE regex_id = ?;", filter.blocked, filter.id)

    def update_stats(self):
        for ele in self.filters.values():
            self._stats_updater(ele)

    def _set_status(self,status):
        self.status = status
        self.__update_status_db(status)

-    def start(self):
+    async def start(self):
        if not self.interceptor:
            self.firegextable.delete_by_srv(self.srv)
-            def regex_filter(pkt, by_client):
-                try:
-                    for filter in self.filters.values():
-                        if (by_client and filter.input_mode) or (not by_client and filter.output_mode):
-                            match = filter.check(pkt)
-                            if (filter.is_blacklist and match) or (not filter.is_blacklist and not match):
-                                filter.blocked+=1
-                                return False
-                except IndexError: pass
-                return True
-            self.interceptor = self.firegextable.add(FiregexFilter(self.srv.proto,self.srv.port, self.srv.ip_int, func=regex_filter))
+            self.interceptor = await self.firegextable.add(FiregexFilter(self.srv.proto,self.srv.port, self.srv.ip_int))
+            await self._update_filters_from_db()
            self._set_status(STATUS.ACTIVE)

-    def stop(self):
+    async def stop(self):
        self.firegextable.delete_by_srv(self.srv)
        if self.interceptor:
-            self.interceptor.stop()
+            await self.interceptor.stop()
            self.interceptor = None

-    def restart(self):
-        self.stop()
-        self.start()
+    async def restart(self):
+        await self.stop()
+        await self.start()

    async def update_filters(self):
        async with self.lock:
-            self._update_filters_from_db()
+            await self._update_filters_from_db()
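A rough end-to-end sketch of how these pieces are meant to be driven (srv and db stand in for a Service row and the SQLite wrapper):

    async def run_service(srv, db):
        manager = ServiceManager(srv, db)
        await manager.next(STATUS.ACTIVE)   # add iptables rules and spawn the cppqueue process
        await manager.update_filters()      # re-read regexes from the DB and reload the binary
        await manager.next(STATUS.STOP)     # tear everything back down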