Merge branch 'master' into debug

debug
Simon 3 years ago
commit 62277951a9

@ -22,9 +22,11 @@ build-dev: # This job runs in the build stage, which runs first.
- buildx - buildx
script: script:
- echo "Compiling the code..." - echo "Compiling the code..."
- cd docker-configs #- cd docker-configs
- docker buildx build --no-cache -f Dockerfile-proxy -t hacknix/freedmr:development-latest -t gitlab.hacknix.net:5050/hacknix/freedmr:development-latest --platform linux/arm/v7,linux/amd64,linux/i386,linux/arm64 --push . # - docker buildx build --no-cache -f Dockerfile-proxy -t hacknix/freedmr:development-latest -t gitlab.hacknix.net:5050/hacknix/freedmr:development-latest --platform linux/arm/v7,linux/amd64,linux/i386,linux/arm64 --push .
#- docker login -u $CI_DEPLOY_USER -p $CI_DEPLOY_PASSWORD $CI_REGISTRY
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker buildx build --no-cache -f docker-configs/Dockerfile-ci -t hacknix/freedmr:development-latest -t $CI_REGISTRY/hacknix/freedmr:development-latest --platform linux/arm/v7,linux/amd64,linux/i386,linux/arm64 --push .
- echo "Compile complete." - echo "Compile complete."
@ -37,14 +39,11 @@ build-extrastats: # This job runs in the build stage, which runs first.
- buildx - buildx
script: script:
- echo "Compiling the code..." - echo "Compiling the code..."
- cd docker-configs - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker buildx build --no-cache -f Dockerfile-proxy -t hacknix/freedmr:extrastats-development-latest -t gitlab.hacknix.net:5050/hacknix/freedmr:extrastats-development-latest --platform linux/arm64 --push . - docker buildx build --no-cache -f Dockerfile-ci -t hacknix/freedmr:extrastats-development-latest -t gitlab.hacknix.net:5050/hacknix/freedmr:extrastats-development-latest --platform linux/amd64 --push .
- echo "Compile complete." - echo "Compile complete."
only: only:
- extrastats - extrastats2
build-testing: # This job runs in the build stage, which runs first. build-testing: # This job runs in the build stage, which runs first.
stage: build stage: build
@ -52,8 +51,8 @@ build-testing: # This job runs in the build stage, which runs first.
- buildx - buildx
script: script:
- echo "Compiling the code..." - echo "Compiling the code..."
- cd docker-configs - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker buildx build --no-cache -f Dockerfile-proxy -t gitlab.hacknix.net:5050/hacknix/freedmr:testing --platform linux/amd64 --push . - docker buildx build --no-cache -f docker-configs/Dockerfile-ci -t gitlab.hacknix.net:5050/hacknix/freedmr:testing --platform linux/amd64 --push .
only: only:
- testing - testing
@ -63,8 +62,8 @@ build-debug: # This job runs in the build stage, which runs first.
- buildx - buildx
script: script:
- echo "Compiling the code..." - echo "Compiling the code..."
- cd docker-configs - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker buildx build --no-cache -f Dockerfile-proxy -t gitlab.hacknix.net:5050/hacknix/freedmr:debug --platform linux/amd64 --push . - docker buildx build --no-cache -f Dockerfile-ci -t gitlab.hacknix.net:5050/hacknix/freedmr:debug --platform linux/amd64 --push .
only: only:
- debug - debug
@ -74,8 +73,8 @@ build-release: # This job runs in the build stage, which runs first.
- buildx - buildx
script: script:
- echo "Compiling the code..." - echo "Compiling the code..."
- cd docker-configs - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
- docker buildx build --no-cache -f Dockerfile-proxy -t hacknix/freedmr:latest -t gitlab.hacknix.net:5050/hacknix/freedmr:latest -t hacknix/$CI_COMMIT_TAG-with-proxy -t gitlab.hacknix.net:5050/hacknix/freedmr:$CI_COMMIT_TAG-with-proxy -t hacknix/freedmr:development-latest -t gitlab.hacknix.net:5050/hacknix/freedmr:development-latest --platform linux/arm/v7,linux/amd64,linux/i386,linux/arm64 --push . - docker buildx build --no-cache -f docker-configs/Dockerfile-ci -t hacknix/freedmr:latest -t gitlab.hacknix.net:5050/hacknix/freedmr:latest -t hacknix/$CI_COMMIT_TAG-with-proxy -t gitlab.hacknix.net:5050/hacknix/freedmr:$CI_COMMIT_TAG-with-proxy -t hacknix/freedmr:development-latest -t gitlab.hacknix.net:5050/hacknix/freedmr:development-latest --platform linux/arm/v7,linux/amd64,linux/i386,linux/arm64 --push .

@ -0,0 +1,42 @@
#This empty config file will use defaults for everything apart from OBP and HBP config
#This is usually a sensible choice.
[GLOBAL]
SERVER_ID: 0000
[REPORTS]
[LOGGER]
[ALIASES]
[ALLSTAR]
[SYSTEM]
MODE: MASTER
ENABLED: True
REPEAT: True
MAX_PEERS: 1
EXPORT_AMBE: False
IP: 127.0.0.1
PORT: 54000
PASSPHRASE:
GROUP_HANGTIME: 5
USE_ACL: True
REG_ACL: DENY:1
SUB_ACL: DENY:1
TGID_TS1_ACL: PERMIT:ALL
TGID_TS2_ACL: PERMIT:ALL
DEFAULT_UA_TIMER: 60
SINGLE_MODE: True
VOICE_IDENT: True
TS1_STATIC:
TS2_STATIC:
DEFAULT_REFLECTOR: 0
ANNOUNCEMENT_LANGUAGE: en_GB
GENERATOR: 100
ALLOW_UNREG_ID: False
PROXY_CONTROL: True
OVERRIDE_IDENT_TG:

@ -9,9 +9,10 @@ TGID_TS1_ACL: PERMIT:ALL
TGID_TS2_ACL: PERMIT:ALL TGID_TS2_ACL: PERMIT:ALL
GEN_STAT_BRIDGES: True GEN_STAT_BRIDGES: True
ALLOW_NULL_PASSPHRASE: True ALLOW_NULL_PASSPHRASE: True
ANNOUNCEMENT_LANGUAGES: en_GB,en_US,es_ES,fr_FR,de_DE,dk_DK,it_IT,no_NO,pl_PL,se_SE,pt_PT,cy_GB,el_GR,CW ANNOUNCEMENT_LANGUAGES:
SERVER_ID: 0000 SERVER_ID: 0000
DATA_GATEWAY: False DATA_GATEWAY: False
VALIDATE_SERVER_IDS: True
[REPORTS] [REPORTS]
@ -22,7 +23,7 @@ REPORT_CLIENTS: 127.0.0.1
[LOGGER] [LOGGER]
LOG_FILE: freedmr.log LOG_FILE: freedmr.log
LOG_HANDLERS: file-timed LOG_HANDLERS: console-timed
LOG_LEVEL: INFO LOG_LEVEL: INFO
LOG_NAME: FreeDMR LOG_NAME: FreeDMR
@ -32,14 +33,16 @@ PATH: ./
PEER_FILE: peer_ids.json PEER_FILE: peer_ids.json
SUBSCRIBER_FILE: subscriber_ids.json SUBSCRIBER_FILE: subscriber_ids.json
TGID_FILE: talkgroup_ids.json TGID_FILE: talkgroup_ids.json
PEER_URL: https://www.radioid.net/static/rptrs.json PEER_URL: https://freedmr-lh.gb7fr.org.uk/json/peer_ids.json
SUBSCRIBER_URL: https://www.radioid.net/static/users.json SUBSCRIBER_URL: https://freedmr-lh.gb7fr.org.uk/json/subscriber_ids.json
TGID_URL: http://downloads.freedmr.uk/downloads/talkgroup_ids.json TGID_URL: https://freedmr-lh.gb7fr.org.uk/json/talkgroup_ids.json
LOCAL_SUBSCRIBER_FILE: local_subcriber_ids.json LOCAL_SUBSCRIBER_FILE: local_subcriber_ids.json
STALE_DAYS: 1 STALE_DAYS: 1
SUB_MAP_FILE: SUB_MAP_FILE:
SERVER_ID_URL: http://downloads.freedmr.uk/downloads/FreeDMR_Hosts.csv SERVER_ID_URL: https://freedmr-lh.gb7fr.org.uk/json/server_ids.tsv
SERVER_ID_FILE: server_ids.tsv SERVER_ID_FILE: server_ids.tsv
CHECKSUM_URL: https://freedmr-lh.gb7fr.org.uk/file_checksums.json
CHECKSUM_FILE: file_checksums.json
#Control server shared allstar instance via dial / AMI #Control server shared allstar instance via dial / AMI
@ -51,15 +54,6 @@ SERVER: my.asl.server
PORT: 5038 PORT: 5038
NODE: 0000 NODE: 0000
[MYSQL]
USE_MYSQL: False
USER: hblink
PASS: mypassword
DB: hblink
SERVER: 127.0.0.1
PORT: 3306
TABLE: repeaters
[OBP-TEST] [OBP-TEST]
MODE: OPENBRIDGE MODE: OPENBRIDGE
ENABLED: False ENABLED: False
@ -101,3 +95,4 @@ ANNOUNCEMENT_LANGUAGE: en_GB
GENERATOR: 100 GENERATOR: 100
ALLOW_UNREG_ID: False ALLOW_UNREG_ID: False
PROXY_CONTROL: True PROXY_CONTROL: True
OVERRIDE_IDENT_TG:

@ -941,7 +941,7 @@ if __name__ == '__main__':
signal.signal(sig, sig_handler) signal.signal(sig, sig_handler)
# Create the name-number mapping dictionaries # Create the name-number mapping dictionaries
peer_ids, subscriber_ids, talkgroup_ids, local_subscriber_ids,server_ids = mk_aliases(CONFIG) peer_ids, subscriber_ids, talkgroup_ids, local_subscriber_ids,server_ids,checksums = mk_aliases(CONFIG)
#Add special IDs to DB #Add special IDs to DB
subscriber_ids[900999] = 'D-APRS' subscriber_ids[900999] = 'D-APRS'
@ -951,6 +951,7 @@ if __name__ == '__main__':
CONFIG['_PEER_IDS'] = peer_ids CONFIG['_PEER_IDS'] = peer_ids
CONFIG['_LOCAL_SUBSCRIBER_IDS'] = local_subscriber_ids CONFIG['_LOCAL_SUBSCRIBER_IDS'] = local_subscriber_ids
CONFIG['_SERVER_IDS'] = server_ids CONFIG['_SERVER_IDS'] = server_ids
CONFIG['_CHECKSUMS'] = checksums
# Import the ruiles file as a module, and create BRIDGES from it # Import the ruiles file as a module, and create BRIDGES from it
spec = importlib.util.spec_from_file_location("module.name", cli_args.RULES_FILE) spec = importlib.util.spec_from_file_location("module.name", cli_args.RULES_FILE)

@ -313,6 +313,11 @@ def remove_bridge_system(system):
if _bridge not in _bridgestemp: if _bridge not in _bridgestemp:
_bridgestemp[_bridge] = [] _bridgestemp[_bridge] = []
_bridgestemp[_bridge].append(_bridgesystem) _bridgestemp[_bridge].append(_bridgesystem)
else:
if _bridge not in _bridgestemp:
_bridgestemp[_bridge] = []
_bridgestemp[_bridge].append({'SYSTEM': system, 'TS': _bridgesystem['TS'], 'TGID': _bridgesystem['TGID'],'ACTIVE': False,'TIMEOUT': _bridgesystem['TIMEOUT'],'TO_TYPE': 'ON','OFF': [],'ON': [_bridgesystem['TGID'],],'RESET': [], 'TIMER': time() + _bridgesystem['TIMEOUT']})
BRIDGES.update(_bridgestemp) BRIDGES.update(_bridgestemp)
@ -337,7 +342,7 @@ def rule_timer_loop():
_bridge_used = True _bridge_used = True
logger.info('(ROUTER) Conference Bridge ACTIVE (ON timer running): System: %s Bridge: %s, TS: %s, TGID: %s, Timeout in: %.2fs,', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID']), timeout_in) logger.info('(ROUTER) Conference Bridge ACTIVE (ON timer running): System: %s Bridge: %s, TS: %s, TGID: %s, Timeout in: %.2fs,', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID']), timeout_in)
elif _system['ACTIVE'] == False: elif _system['ACTIVE'] == False:
logger.trace('(ROUTER) Conference Bridge INACTIVE (no change): System: %s Bridge: %s, TS: %s, TGID: %s', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID'])) logger.debug('(ROUTER) Conference Bridge INACTIVE (no change): System: %s Bridge: %s, TS: %s, TGID: %s', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID']))
elif _system['TO_TYPE'] == 'OFF': elif _system['TO_TYPE'] == 'OFF':
if _system['ACTIVE'] == False: if _system['ACTIVE'] == False:
if _system['TIMER'] < _now: if _system['TIMER'] < _now:
@ -350,13 +355,13 @@ def rule_timer_loop():
logger.info('(ROUTER) Conference Bridge INACTIVE (OFF timer running): System: %s Bridge: %s, TS: %s, TGID: %s, Timeout in: %.2fs,', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID']), timeout_in) logger.info('(ROUTER) Conference Bridge INACTIVE (OFF timer running): System: %s Bridge: %s, TS: %s, TGID: %s, Timeout in: %.2fs,', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID']), timeout_in)
elif _system['ACTIVE'] == True: elif _system['ACTIVE'] == True:
_bridge_used = True _bridge_used = True
logger.trace('(ROUTER) Conference Bridge ACTIVE (no change): System: %s Bridge: %s, TS: %s, TGID: %s', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID'])) logger.debug('(ROUTER) Conference Bridge ACTIVE (no change): System: %s Bridge: %s, TS: %s, TGID: %s', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID']))
else: else:
if _system['SYSTEM'][0:3] != 'OBP': if _system['SYSTEM'][0:3] != 'OBP':
_bridge_used = True _bridge_used = True
elif _system['SYSTEM'][0:3] == 'OBP' and _system['TO_TYPE'] == 'STAT': elif _system['SYSTEM'][0:3] == 'OBP' and _system['TO_TYPE'] == 'STAT':
_bridge_used = True _bridge_used = True
logger.trace('(ROUTER) Conference Bridge NO ACTION: System: %s, Bridge: %s, TS: %s, TGID: %s', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID'])) logger.debug('(ROUTER) Conference Bridge NO ACTION: System: %s, Bridge: %s, TS: %s, TGID: %s', _system['SYSTEM'], _bridge, _system['TS'], int_id(_system['TGID']))
if _bridge_used == False: if _bridge_used == False:
_remove_bridges.append(_bridge) _remove_bridges.append(_bridge)
@ -649,12 +654,12 @@ def threadAlias():
logger.debug('(ALIAS) starting alias thread') logger.debug('(ALIAS) starting alias thread')
reactor.callInThread(aliasb) reactor.callInThread(aliasb)
def setAlias(_peer_ids,_subscriber_ids, _talkgroup_ids, _local_subscriber_ids, _server_ids): def setAlias(_peer_ids,_subscriber_ids, _talkgroup_ids, _local_subscriber_ids, _server_ids, _checksums):
peer_ids, subscriber_ids, talkgroup_ids,local_subscriber_ids,server_ids = _peer_ids, _subscriber_ids, _talkgroup_ids, _local_subscriber_ids,_server_ids peer_ids, subscriber_ids, talkgroup_ids,local_subscriber_ids,server_ids,checksums = _peer_ids, _subscriber_ids, _talkgroup_ids, _local_subscriber_ids,_server_ids,_checksums
def aliasb(): def aliasb():
_peer_ids, _subscriber_ids, _talkgroup_ids, _local_subscriber_ids, _server_ids = mk_aliases(CONFIG) _peer_ids, _subscriber_ids, _talkgroup_ids, _local_subscriber_ids, _server_ids, _checksums = mk_aliases(CONFIG)
reactor.callFromThread(setAlias,_peer_ids, _subscriber_ids, _talkgroup_ids, _local_subscriber_ids, _server_ids) reactor.callFromThread(setAlias,_peer_ids, _subscriber_ids, _talkgroup_ids, _local_subscriber_ids, _server_ids, _checksums)
def ident(): def ident():
for system in systems: for system in systems:
@ -738,6 +743,7 @@ def options_config():
logger.debug('(OPTIONS) Bridge reset for %s - no peers',_system) logger.debug('(OPTIONS) Bridge reset for %s - no peers',_system)
remove_bridge_system(_system) remove_bridge_system(_system)
CONFIG['SYSTEMS'][_system]['_reset'] = False CONFIG['SYSTEMS'][_system]['_reset'] = False
continue
try: try:
if CONFIG['SYSTEMS'][_system]['MODE'] != 'MASTER': if CONFIG['SYSTEMS'][_system]['MODE'] != 'MASTER':
continue continue
@ -929,8 +935,7 @@ def options_config():
if _options['TS1_STATIC']: if _options['TS1_STATIC']:
ts1 = _options['TS1_STATIC'].split(',') ts1 = _options['TS1_STATIC'].split(',')
for tg in ts1: for tg in ts1:
if not tg or int(tg) == 0 or int(tg) >= 16777215 or tg == _options['DEFAULT_REFLECTOR']: if not tg:
logger.debug('(OPTIONS) %s not setting TS1 Static %s. Bad TG or conflict with DIAL',_system,tg)
continue continue
tg = int(tg) tg = int(tg)
make_static_tg(tg,1,_tmout,_system) make_static_tg(tg,1,_tmout,_system)
@ -941,8 +946,7 @@ def options_config():
if CONFIG['SYSTEMS'][_system]['TS2_STATIC']: if CONFIG['SYSTEMS'][_system]['TS2_STATIC']:
ts2 = CONFIG['SYSTEMS'][_system]['TS2_STATIC'].split(',') ts2 = CONFIG['SYSTEMS'][_system]['TS2_STATIC'].split(',')
for tg in ts2: for tg in ts2:
if not tg or int(tg) == 0 or int(tg) >= 16777215 or tg == _options['DEFAULT_REFLECTOR'] or (tg and ts1 and tg in ts1): if not tg or int(tg) == 0 or int(tg) >= 16777215:
logger.debug('(OPTIONS) %s not setting TS2 Static %s. Bad TG or conflict with DIAL or TS1',_system,tg)
continue continue
tg = int(tg) tg = int(tg)
reset_static_tg(tg,2,_tmout,_system) reset_static_tg(tg,2,_tmout,_system)
@ -2538,7 +2542,7 @@ if __name__ == '__main__':
if cli_args.LOG_LEVEL: if cli_args.LOG_LEVEL:
CONFIG['LOGGER']['LOG_LEVEL'] = cli_args.LOG_LEVEL CONFIG['LOGGER']['LOG_LEVEL'] = cli_args.LOG_LEVEL
logger = log.config_logging(CONFIG['LOGGER']) logger = log.config_logging(CONFIG['LOGGER'])
logger.info('\n\nCopyright (c) 2020, 2021, 2022 Simon G7RZU simon@gb7fr.org.uk') logger.info('\n\nCopyright (c) 2020, 2021, 2022, 2023 Simon G7RZU simon@gb7fr.org.uk')
logger.info('Copyright (c) 2013, 2014, 2015, 2016, 2018, 2019\n\tThe Regents of the K0USY Group. All rights reserved.\n') logger.info('Copyright (c) 2013, 2014, 2015, 2016, 2018, 2019\n\tThe Regents of the K0USY Group. All rights reserved.\n')
logger.debug('(GLOBAL) Logging system started, anything from here on gets logged') logger.debug('(GLOBAL) Logging system started, anything from here on gets logged')
@ -2563,7 +2567,7 @@ if __name__ == '__main__':
signal.signal(sig, sig_handler) signal.signal(sig, sig_handler)
# Create the name-number mapping dictionaries # Create the name-number mapping dictionaries
peer_ids, subscriber_ids, talkgroup_ids, local_subscriber_ids, server_ids = mk_aliases(CONFIG) peer_ids, subscriber_ids, talkgroup_ids, local_subscriber_ids, server_ids, checksums = mk_aliases(CONFIG)
#Add special IDs to DB #Add special IDs to DB
subscriber_ids[900999] = 'D-APRS' subscriber_ids[900999] = 'D-APRS'
@ -2573,7 +2577,8 @@ if __name__ == '__main__':
CONFIG['_PEER_IDS'] = peer_ids CONFIG['_PEER_IDS'] = peer_ids
CONFIG['_LOCAL_SUBSCRIBER_IDS'] = local_subscriber_ids CONFIG['_LOCAL_SUBSCRIBER_IDS'] = local_subscriber_ids
CONFIG['_SERVER_IDS'] = server_ids CONFIG['_SERVER_IDS'] = server_ids
CONFIG['CHECKSUMS'] = checksums
# Import the ruiles file as a module, and create BRIDGES from it # Import the ruiles file as a module, and create BRIDGES from it
@ -2748,10 +2753,10 @@ if __name__ == '__main__':
options = options_task.start(26) options = options_task.start(26)
options.addErrback(loopingErrHandle) options.addErrback(loopingErrHandle)
#STAT trimmer - once every hour (roughly - shifted so all timed tasks don't run at once #STAT trimmer - once every 10 mins (roughly - shifted so all timed tasks don't run at once
if CONFIG['GLOBAL']['GEN_STAT_BRIDGES']: if CONFIG['GLOBAL']['GEN_STAT_BRIDGES']:
stat_trimmer_task = task.LoopingCall(statTrimmer) stat_trimmer_task = task.LoopingCall(statTrimmer)
stat_trimmer = stat_trimmer_task.start(3700)#3600 stat_trimmer = stat_trimmer_task.start(523)#3600
stat_trimmer.addErrback(loopingErrHandle) stat_trimmer.addErrback(loopingErrHandle)
#KA Reporting #KA Reporting

@ -38,7 +38,7 @@ from languages import languages
# Does anybody read this stuff? There's a PEP somewhere that says I should do this. # Does anybody read this stuff? There's a PEP somewhere that says I should do this.
__author__ = 'Cortney T. Buffington, N0MJS' __author__ = 'Cortney T. Buffington, N0MJS'
__copyright__ = '(c) Simon Adlem, G7RZU 2020-2021, Copyright (c) 2016-2018 Cortney T. Buffington, N0MJS and the K0USY Group' __copyright__ = '(c) Simon Adlem, G7RZU 2020-2023, Copyright (c) 2016-2018 Cortney T. Buffington, N0MJS and the K0USY Group'
__credits__ = 'Colin Durbridge, G4EML, Steve Zingman, N4IRS; Mike Zingman, N4IRR; Jonathan Naylor, G4KLX; Hans Barthen, DL5DI; Torsten Shultze, DG1HT' __credits__ = 'Colin Durbridge, G4EML, Steve Zingman, N4IRS; Mike Zingman, N4IRR; Jonathan Naylor, G4KLX; Hans Barthen, DL5DI; Torsten Shultze, DG1HT'
__license__ = 'GNU GPLv3' __license__ = 'GNU GPLv3'
__maintainer__ = 'Simon Adlem, G7RZU' __maintainer__ = 'Simon Adlem, G7RZU'
@ -134,20 +134,20 @@ def build_config(_config_file):
for section in config.sections(): for section in config.sections():
if section == 'GLOBAL': if section == 'GLOBAL':
CONFIG['GLOBAL'].update({ CONFIG['GLOBAL'].update({
'PATH': config.get(section, 'PATH'), 'PATH': config.get(section, 'PATH',fallback='./'),
'PING_TIME': config.getint(section, 'PING_TIME'), 'PING_TIME': config.getint(section, 'PING_TIME', fallback=10),
'MAX_MISSED': config.getint(section, 'MAX_MISSED'), 'MAX_MISSED': config.getint(section, 'MAX_MISSED', fallback=3),
'USE_ACL': config.get(section, 'USE_ACL'), 'USE_ACL': config.get(section, 'USE_ACL', fallback=True),
'REG_ACL': config.get(section, 'REG_ACL'), 'REG_ACL': config.get(section, 'REG_ACL', fallback='PERMIT:ALL'),
'SUB_ACL': config.get(section, 'SUB_ACL'), 'SUB_ACL': config.get(section, 'SUB_ACL', fallback='DENY:1'),
'TG1_ACL': config.get(section, 'TGID_TS1_ACL'), 'TG1_ACL': config.get(section, 'TGID_TS1_ACL', fallback='PERMIT:ALL'),
'TG2_ACL': config.get(section, 'TGID_TS2_ACL'), 'TG2_ACL': config.get(section, 'TGID_TS2_ACL', fallback='PERMIT:ALL'),
'GEN_STAT_BRIDGES': config.getboolean(section, 'GEN_STAT_BRIDGES'), 'GEN_STAT_BRIDGES': config.getboolean(section, 'GEN_STAT_BRIDGES', fallback=True),
'ALLOW_NULL_PASSPHRASE': config.getboolean(section, 'ALLOW_NULL_PASSPHRASE'), 'ALLOW_NULL_PASSPHRASE': config.getboolean(section, 'ALLOW_NULL_PASSPHRASE', fallback=True),
'ANNOUNCEMENT_LANGUAGES': config.get(section, 'ANNOUNCEMENT_LANGUAGES'), 'ANNOUNCEMENT_LANGUAGES': config.get(section, 'ANNOUNCEMENT_LANGUAGES', fallback=''),
'SERVER_ID': config.getint(section, 'SERVER_ID').to_bytes(4, 'big'), 'SERVER_ID': config.getint(section, 'SERVER_ID', fallback=0).to_bytes(4, 'big'),
'DATA_GATEWAY': config.getboolean(section, 'DATA_GATEWAY'), 'DATA_GATEWAY': config.getboolean(section, 'DATA_GATEWAY', fallback=False),
'VALIDATE_SERVER_IDS': config.getboolean(section, 'VALIDATE_SERVER_IDS') 'VALIDATE_SERVER_IDS': config.getboolean(section, 'VALIDATE_SERVER_IDS', fallback=True)
}) })
if not CONFIG['GLOBAL']['ANNOUNCEMENT_LANGUAGES']: if not CONFIG['GLOBAL']['ANNOUNCEMENT_LANGUAGES']:
@ -155,49 +155,51 @@ def build_config(_config_file):
elif section == 'REPORTS': elif section == 'REPORTS':
CONFIG['REPORTS'].update({ CONFIG['REPORTS'].update({
'REPORT': config.getboolean(section, 'REPORT'), 'REPORT': config.getboolean(section, 'REPORT', fallback=True),
'REPORT_INTERVAL': config.getint(section, 'REPORT_INTERVAL'), 'REPORT_INTERVAL': config.getint(section, 'REPORT_INTERVAL', fallback=60),
'REPORT_PORT': config.getint(section, 'REPORT_PORT'), 'REPORT_PORT': config.getint(section, 'REPORT_PORT', fallback=4321),
'REPORT_CLIENTS': config.get(section, 'REPORT_CLIENTS').split(',') 'REPORT_CLIENTS': config.get(section, 'REPORT_CLIENTS',fallback='127.0.0.1').split(',')
}) })
elif section == 'LOGGER': elif section == 'LOGGER':
CONFIG['LOGGER'].update({ CONFIG['LOGGER'].update({
'LOG_FILE': config.get(section, 'LOG_FILE'), 'LOG_FILE': config.get(section, 'LOG_FILE', fallback='/dev/null'),
'LOG_HANDLERS': config.get(section, 'LOG_HANDLERS'), 'LOG_HANDLERS': config.get(section, 'LOG_HANDLERS', fallback='console-timed'),
'LOG_LEVEL': config.get(section, 'LOG_LEVEL'), 'LOG_LEVEL': config.get(section, 'LOG_LEVEL', fallback='INFO'),
'LOG_NAME': config.get(section, 'LOG_NAME') 'LOG_NAME': config.get(section, 'LOG_NAME', fallback='FreeDMR')
}) })
if not CONFIG['LOGGER']['LOG_FILE']:
CONFIG['LOGGER']['LOG_FILE'] = '/dev/null'
elif section == 'ALIASES': elif section == 'ALIASES':
CONFIG['ALIASES'].update({ CONFIG['ALIASES'].update({
'TRY_DOWNLOAD': config.getboolean(section, 'TRY_DOWNLOAD'), 'TRY_DOWNLOAD': config.getboolean(section, 'TRY_DOWNLOAD', fallback=True),
'PATH': config.get(section, 'PATH'), 'PATH': config.get(section, 'PATH', fallback='./json/'),
'PEER_FILE': config.get(section, 'PEER_FILE'), 'PEER_FILE': config.get(section, 'PEER_FILE', fallback='peer_ids.json'),
'SUBSCRIBER_FILE': config.get(section, 'SUBSCRIBER_FILE'), 'SUBSCRIBER_FILE': config.get(section, 'SUBSCRIBER_FILE', fallback='subscriber_ids.json'),
'TGID_FILE': config.get(section, 'TGID_FILE'), 'TGID_FILE': config.get(section, 'TGID_FILE', fallback='talkgroup_ids.json'),
'PEER_URL': config.get(section, 'PEER_URL'), 'PEER_URL': config.get(section, 'PEER_URL', fallback='https://freedmr-lh.gb7fr.org.uk/json/peer_ids.json'),
'SUBSCRIBER_URL': config.get(section, 'SUBSCRIBER_URL'), 'SUBSCRIBER_URL': config.get(section, 'SUBSCRIBER_URL', fallback='https://freedmr-lh.gb7fr.org.uk/json/subscriber_ids.json'),
'TGID_URL': config.get(section, 'TGID_URL'), 'TGID_URL': config.get(section, 'TGID_URL', fallback='https://freedmr-lh.gb7fr.org.uk/json/talkgroup_ids.json'),
'STALE_TIME': config.getint(section, 'STALE_DAYS') * 86400, 'STALE_TIME': config.getint(section, 'STALE_DAYS', fallback=1) * 86400,
'SUB_MAP_FILE': config.get(section, 'SUB_MAP_FILE'), 'SUB_MAP_FILE': config.get(section, 'SUB_MAP_FILE', fallback='sub_map.pkl'),
'LOCAL_SUBSCRIBER_FILE': config.get(section, 'LOCAL_SUBSCRIBER_FILE'), 'LOCAL_SUBSCRIBER_FILE': config.get(section, 'LOCAL_SUBSCRIBER_FILE', fallback='local_subscribers.json'),
'SERVER_ID_URL': config.get(section, 'SERVER_ID_URL'), 'SERVER_ID_URL': config.get(section, 'SERVER_ID_URL', fallback='https://freedmr-lh.gb7fr.org.uk/json/server_ids.tsv'),
'SERVER_ID_FILE': config.get(section, 'SERVER_ID_FILE') 'SERVER_ID_FILE': config.get(section, 'SERVER_ID_FILE', fallback='server_ids.tsv'),
'CHECKSUM_URL': config.get(section, 'CHECKSUM_URL', fallback='https://freedmr-lh.gb7fr.org.uk/file_checksums.json'),
'CHECKSUM_FILE': config.get(section, 'CHECKSUM_FILE', fallback='file_checksums.json')
}) })
elif section == 'ALLSTAR': elif section == 'ALLSTAR':
CONFIG['ALLSTAR'].update({ CONFIG['ALLSTAR'].update({
'ENABLED': config.getboolean(section, 'ENABLED'), 'ENABLED': config.getboolean(section, 'ENABLED', fallback=False),
'USER': config.get(section, 'USER'), 'USER': config.get(section, 'USER', fallback='llcgi'),
'PASS': config.get(section, 'PASS'), 'PASS': config.get(section, 'PASS', fallback='mypass'),
'SERVER': config.get(section, 'SERVER'), 'SERVER': config.get(section, 'SERVER', fallback='my.asl.server'),
'PORT': config.getint(section,'PORT'), 'PORT': config.getint(section,'PORT', fallback=5038),
'NODE' : config.getint(section,'NODE') 'NODE' : config.getint(section,'NODE', fallback=0)
}) })
elif section == 'PROXY': elif section == 'PROXY':
@ -302,52 +304,55 @@ def build_config(_config_file):
elif config.get(section, 'MODE') == 'MASTER': elif config.get(section, 'MODE') == 'MASTER':
CONFIG['SYSTEMS'].update({section: { CONFIG['SYSTEMS'].update({section: {
'MODE': config.get(section, 'MODE'), 'MODE': config.get(section, 'MODE'),
'ENABLED': config.getboolean(section, 'ENABLED'), 'ENABLED': config.getboolean(section, 'ENABLED', fallback=True ),
'REPEAT': config.getboolean(section, 'REPEAT'), 'REPEAT': config.getboolean(section, 'REPEAT', fallback=True),
'MAX_PEERS': config.getint(section, 'MAX_PEERS'), 'MAX_PEERS': config.getint(section, 'MAX_PEERS', fallback=1),
'IP': config.get(section, 'IP'), 'IP': config.get(section, 'IP', fallback='127.0.0.1'),
'PORT': config.getint(section, 'PORT'), 'PORT': config.getint(section, 'PORT', fallback=54000),
'PASSPHRASE': bytes(config.get(section, 'PASSPHRASE'), 'utf-8'), 'PASSPHRASE': bytes(config.get(section, 'PASSPHRASE', fallback=''), 'utf-8'),
'GROUP_HANGTIME': config.getint(section, 'GROUP_HANGTIME'), 'GROUP_HANGTIME': config.getint(section, 'GROUP_HANGTIME',fallback=5),
'USE_ACL': config.getboolean(section, 'USE_ACL'), 'USE_ACL': config.getboolean(section, 'USE_ACL', fallback=False),
'REG_ACL': config.get(section, 'REG_ACL'), 'REG_ACL': config.get(section, 'REG_ACL', fallback=''),
'SUB_ACL': config.get(section, 'SUB_ACL'), 'SUB_ACL': config.get(section, 'SUB_ACL', fallback=''),
'TG1_ACL': config.get(section, 'TGID_TS1_ACL'), 'TG1_ACL': config.get(section, 'TGID_TS1_ACL', fallback=''),
'TG2_ACL': config.get(section, 'TGID_TS2_ACL'), 'TG2_ACL': config.get(section, 'TGID_TS2_ACL', fallback=''),
'DEFAULT_UA_TIMER': config.getint(section, 'DEFAULT_UA_TIMER'), 'DEFAULT_UA_TIMER': config.getint(section, 'DEFAULT_UA_TIMER', fallback=10),
'SINGLE_MODE': config.getboolean(section, 'SINGLE_MODE'), 'SINGLE_MODE': config.getboolean(section, 'SINGLE_MODE', fallback=True),
'VOICE_IDENT': config.getboolean(section, 'VOICE_IDENT'), 'VOICE_IDENT': config.getboolean(section, 'VOICE_IDENT', fallback=True),
'TS1_STATIC': config.get(section,'TS1_STATIC'), 'TS1_STATIC': config.get(section,'TS1_STATIC', fallback=''),
'TS2_STATIC': config.get(section,'TS2_STATIC'), 'TS2_STATIC': config.get(section,'TS2_STATIC', fallback=''),
'DEFAULT_REFLECTOR': config.getint(section, 'DEFAULT_REFLECTOR'), 'DEFAULT_REFLECTOR': config.getint(section, 'DEFAULT_REFLECTOR'),
'GENERATOR': config.getint(section, 'GENERATOR'), 'GENERATOR': config.getint(section, 'GENERATOR', fallback=100),
'ANNOUNCEMENT_LANGUAGE': config.get(section, 'ANNOUNCEMENT_LANGUAGE'), 'ANNOUNCEMENT_LANGUAGE': config.get(section, 'ANNOUNCEMENT_LANGUAGE', fallback='en_GB'),
'ALLOW_UNREG_ID': config.getboolean(section,'ALLOW_UNREG_ID'), 'ALLOW_UNREG_ID': config.getboolean(section,'ALLOW_UNREG_ID', fallback=False),
'PROXY_CONTROL' : config.getboolean(section,'PROXY_CONTROL'), 'PROXY_CONTROL' : config.getboolean(section,'PROXY_CONTROL', fallback=True),
'OVERRIDE_IDENT_TG': config.get(section, 'OVERRIDE_IDENT_TG') 'OVERRIDE_IDENT_TG': config.get(section, 'OVERRIDE_IDENT_TG', fallback=False)
}}) }})
CONFIG['SYSTEMS'][section].update({'PEERS': {}}) CONFIG['SYSTEMS'][section].update({'PEERS': {}})
elif config.get(section, 'MODE') == 'OPENBRIDGE': elif config.get(section, 'MODE') == 'OPENBRIDGE':
CONFIG['SYSTEMS'].update({section: { CONFIG['SYSTEMS'].update({section: {
'MODE': config.get(section, 'MODE'), 'MODE': config.get(section, 'MODE'),
'ENABLED': config.getboolean(section, 'ENABLED'), 'ENABLED': config.getboolean(section, 'ENABLED', fallback=True),
'NETWORK_ID': config.getint(section, 'NETWORK_ID').to_bytes(4, 'big'), 'NETWORK_ID': config.getint(section, 'NETWORK_ID').to_bytes(4, 'big'),
#'OVERRIDE_SERVER_ID': config.getint(section, 'OVERRIDE_SERVER_ID').to_bytes(4, 'big'), #'OVERRIDE_SERVER_ID': config.getint(section, 'OVERRIDE_SERVER_ID').to_bytes(4, 'big'),
'IP': config.get(section, 'IP'), 'IP': config.get(section, 'IP', fallback=''),
'PORT': config.getint(section, 'PORT'), 'PORT': config.getint(section, 'PORT'),
'PASSPHRASE': bytes(config.get(section, 'PASSPHRASE').ljust(20,'\x00')[:20], 'utf-8'), 'PASSPHRASE': bytes(config.get(section, 'PASSPHRASE').ljust(20,'\x00')[:20], 'utf-8'),
#'TARGET_SOCK': (gethostbyname(config.get(section, 'TARGET_IP')), config.getint(section, 'TARGET_PORT')), #'TARGET_SOCK': (gethostbyname(config.get(section, 'TARGET_IP')), config.getint(section, 'TARGET_PORT')),
'TARGET_IP': config.get(section, 'TARGET_IP'), 'TARGET_IP': config.get(section, 'TARGET_IP'),
'TARGET_PORT': config.getint(section, 'TARGET_PORT'), 'TARGET_PORT': config.getint(section, 'TARGET_PORT'),
'USE_ACL': config.getboolean(section, 'USE_ACL'), 'USE_ACL': config.getboolean(section, 'USE_ACL', fallback=False),
'SUB_ACL': config.get(section, 'SUB_ACL'), 'SUB_ACL': config.get(section, 'SUB_ACL', fallback=''),
'TG1_ACL': config.get(section, 'TGID_ACL'), 'TG1_ACL': config.get(section, 'TGID_ACL', fallback=''),
'TG2_ACL': 'PERMIT:ALL', 'TG2_ACL': 'PERMIT:ALL',
'RELAX_CHECKS': config.getboolean(section, 'RELAX_CHECKS'), 'RELAX_CHECKS': config.getboolean(section, 'RELAX_CHECKS', fallback=True),
'ENHANCED_OBP': config.getboolean(section, 'ENHANCED_OBP'), 'ENHANCED_OBP': config.getboolean(section, 'ENHANCED_OBP',fallback=True),
'VER' : config.getint(section, 'PROTO_VER') 'VER' : config.getint(section, 'PROTO_VER', fallback=5)
}}) }})
if CONFIG['SYSTEMS'][section]['VER'] in (0,2,3) or CONFIG['SYSTEMS'][section]['VER'] > 5:
sys.exit('(%s) PROTO_VER not valid',section)
try: try:
@ -399,13 +404,13 @@ if __name__ == '__main__':
# CLI argument parser - handles picking up the config file from the command line, and sending a "help" message # CLI argument parser - handles picking up the config file from the command line, and sending a "help" message
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', action='store', dest='CONFIG_FILE', help='/full/path/to/config.file (usually hblink.cfg)') parser.add_argument('-c', '--config', action='store', dest='CONFIG_FILE', help='/full/path/to/config.file (usually freedmr.cfg)')
cli_args = parser.parse_args() cli_args = parser.parse_args()
# Ensure we have a path for the config file, if one wasn't specified, then use the execution directory # Ensure we have a path for the config file, if one wasn't specified, then use the execution directory
if not cli_args.CONFIG_FILE: if not cli_args.CONFIG_FILE:
cli_args.CONFIG_FILE = os.path.dirname(os.path.abspath(__file__))+'/hblink.cfg' cli_args.CONFIG_FILE = os.path.dirname(os.path.abspath(__file__))+'/freedmr.cfg'
CONFIG = build_config(cli_args.CONFIG_FILE) CONFIG = build_config(cli_args.CONFIG_FILE)
pprint(CONFIG) pprint(CONFIG)

@ -0,0 +1,36 @@
###############################################################################
# Copyright (C) 2020 Simon Adlem, G7RZU <g7rzu@gb7fr.org.uk>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
###############################################################################
# Container image for FreeDMR: installs Python deps, then runs as the
# unprivileged "radio" user via the entrypoint-proxy script.
FROM python:3.10-alpine
ENTRYPOINT [ "/entrypoint" ]
COPY . /opt/freedmr
# Build-time-only toolchain (git/gcc/musl-dev) is installed as a virtual
# package group and removed in the same layer so it never bloats the image.
# --no-cache avoids leaving the apk index cache in the layer (replaces the
# old apk update / apk del pattern). The redundant `cd /opt` and the
# leftover `ls -lah` debug line from the original have been dropped.
RUN adduser -D -u 54000 radio && \
    apk add --no-cache --virtual .build-deps git gcc musl-dev && \
    cd /opt/freedmr && \
    pip install --no-cache-dir -r requirements.txt && \
    apk del .build-deps && \
    chown -R radio: /opt/freedmr
# Entrypoint script copied last so dependency layers cache across edits to it.
COPY docker-configs/entrypoint-proxy /entrypoint
USER radio

@ -33,7 +33,7 @@ services:
- '62031:62031/udp' - '62031:62031/udp'
    #Change the below to include ports used for your OBP(s)     #Change the below to include ports used for your OBP(s)
- '62041:62041/udp' - '62041:62041/udp'
image: 'hacknix/freedmr:latest' image: 'gitlab.hacknix.net:5050/hacknix/freedmr:latest'
restart: "unless-stopped" restart: "unless-stopped"
networks: networks:
app_net: app_net:
@ -52,45 +52,56 @@ services:
#- FDPROXY_LISTENPORT=62031 #- FDPROXY_LISTENPORT=62031
read_only: "true" read_only: "true"
freedmrmon: freedmrmonitor2:
container_name: freedmrmon container_name: freedmrmonitor2
cpu_shares: 512 cpu_shares: 512
depends_on: depends_on:
- freedmr - freedmr
volumes: image: 'gitlab.hacknix.net:5050/freedmr/freedmrmonitor2/freedmrmonitor2:monitor-latest'
#This should be kept to a manageable size from
#cron or logrotate outisde of the container.
- '/var/log/FreeDMRmonitor/:/opt/FreeDMRmonitor/log/'
#Write JSON files outside of container
- '/etc/freedmr/json/:/opt/FreeDMRmonitor/json/'
#Override config file
# - '/etc/freedmr/config.py:/opt/FreeDMRmonitor/config.py'
ports:
- '9000:9000/tcp'
image: 'hacknix/freedmrmonitor:latest'
restart: "unless-stopped" restart: "unless-stopped"
networks: networks:
app_net: app_net:
ipv4_address: 172.16.238.20 ipv4_address: 172.16.238.20
read_only: "true"
logging:
driver: json-file
freedmrmonpache: freedmrmonpache:
container_name: freedmrmonapache container_name: freedmrmonapache
cpu_shares: 512 cpu_shares: 512
depends_on: depends_on:
- freedmrmon - freedmrmonitor2
#Use to override html files #where to store TLS certificates
#And images #and acme.sh files
#volumes: volumes:
# - '/var/www/html/:/var/www/html/' - '/etc/freedmr/certs/:/opt/apachecerts/'
# - '/var/www/html/images/:/var/www/html/images/' - '/etc/freedmr/acme.sh:/root/.acme.sh/'
ports: ports:
- '80:80/tcp' - '80:80/tcp'
image: hacknix/freedmrmonitor-apache:latest - '443:443/tcp'
image: 'gitlab.hacknix.net:5050/freedmr/freedmrmonitor2/freedmrmonitor2:apache-latest'
restart: "unless-stopped" restart: "unless-stopped"
environment:
#Set to 1 to enable TLS support
    #you'll need to actually generate the certificates too
#using these commands when the container is running:
#docker exec -it freedmrmonapache gencert.sh <admin email> <server FQDN>
#docker-compose restart freedmrmonapache
#This only needs to be done once - unless the files in the volumes above are deleted.
#The container will handle renewing the certificates every 60 days.
#Note -the gencert.sh script only works when the webserver is available on the default port 80
#If it's on non-standard ports, you'll need to request the certificates manually.
- 'USE_TLS=1'
networks: networks:
app_net: app_net:
ipv4_address: 172.16.238.30 ipv4_address: 172.16.238.30
logging:
driver: json-file
networks: networks:
app_net: app_net:

@ -21,19 +21,48 @@
echo FreeDMR Docker installer... echo FreeDMR Docker installer...
echo Installing required packages... echo Installing required packages...
apt-get -y install docker.io && echo Install Docker Community Edition...
apt-get -y remove docker docker-engine docker.io &&
apt-get -y update &&
apt-get -y install apt-transport-https ca-certificates curl gnupg2 software-properties-common &&
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - &&
ARCH=`/usr/bin/arch`
echo "System architecture is $ARCH"
if [ "$ARCH" == "x86_64" ]
then
ARCH="amd64"
fi
add-apt-repository \
"deb [arch=$ARCH] https://download.docker.com/linux/debian \
$(lsb_release -cs) \
stable" &&
apt-get -y update &&
apt-get -y install docker-ce &&
echo Install Docker Compose...
apt-get -y install docker-compose && apt-get -y install docker-compose &&
apt-get -y install conntrack &&
echo Set userland-proxy to false... echo Set userland-proxy to false...
echo '{ "userland-proxy": false}' > /etc/docker/daemon.json && cat <<EOF > /etc/docker/daemon.json &&
{
"userland-proxy": false,
"experimental": true,
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "3"
}
}
EOF
echo Restart docker... echo Restart docker...
systemctl restart docker && systemctl restart docker &&
echo Make config directory... echo Make config directory...
mkdir /etc/freedmr && mkdir /etc/freedmr &&
chmod 755 /etc/freedmr && mkdir -p /etc/freedmr/acme.sh &&
mkdir -p /etc/freedmr/certs &&
chmod -R 755 /etc/freedmr &&
echo make json directory... echo make json directory...
mkdir -p /etc/freedmr/json && mkdir -p /etc/freedmr/json &&
@ -41,60 +70,32 @@ chown 54000:54000 /etc/freedmr/json &&
echo Install /etc/freedmr/freedmr.cfg ... echo Install /etc/freedmr/freedmr.cfg ...
cat << EOF > /etc/freedmr/freedmr.cfg cat << EOF > /etc/freedmr/freedmr.cfg
#This empty config file will use defaults for everything apart from OBP and HBP config
#This is usually a sensible choice.
#I have moved to a config like this to encourage servers to use the accepted defaults
#unless you really know what you are doing.
[GLOBAL] [GLOBAL]
PATH: ./ #If you join the FreeDMR network, you need to add your ServerID Here.
PING_TIME: 10
MAX_MISSED: 3
USE_ACL: True
REG_ACL: DENY:0-100000
SUB_ACL: DENY:0-100000
TGID_TS1_ACL: PERMIT:ALL
TGID_TS2_ACL: PERMIT:ALL
GEN_STAT_BRIDGES: True
ALLOW_NULL_PASSPHRASE: True
ANNOUNCEMENT_LANGUAGES:
SERVER_ID: 0 SERVER_ID: 0
[REPORTS] [REPORTS]
REPORT: True
REPORT_INTERVAL: 60
REPORT_PORT: 4321
REPORT_CLIENTS: *
[LOGGER] [LOGGER]
LOG_FILE: log/freedmr.log
LOG_HANDLERS: file-timed
LOG_LEVEL: INFO
LOG_NAME: FreeDMR
[ALIASES] [ALIASES]
TRY_DOWNLOAD: True
PATH: ./json/
PEER_FILE: peer_ids.json
SUBSCRIBER_FILE: subscriber_ids.json
TGID_FILE: talkgroup_ids.json
PEER_URL: https://www.radioid.net/static/rptrs.json
SUBSCRIBER_URL: http://downloads.freedmr.uk/downloads/local_subscriber_ids.json
TGID_URL: TGID_URL: https://freedmr.cymru/talkgroups/talkgroup_ids_json.php
STALE_DAYS: 1
LOCAL_SUBSCRIBER_FILE: local_subcriber_ids.json
SUB_MAP_FILE: sub_map.pkl
[MYSQL]
USE_MYSQL: False
USER: hblink
PASS: mypassword
DB: hblink
SERVER: 127.0.0.1
PORT: 3306
TABLE: repeaters
[ALLSTAR]
#This is an example OpenBridgeProtocol (OBP) or FreeBridgeProtocol (FBP) configuration
#If you join FreeDMR, you will be given a config like this to paste in
[OBP-TEST] [OBP-TEST]
MODE: OPENBRIDGE MODE: OPENBRIDGE
ENABLED: False ENABLED: False
IP: IP:
PORT: 62044 PORT: 62044
#The ID which you expect to see sent from the other end of the link.
NETWORK_ID: 1 NETWORK_ID: 1
PASSPHRASE: mypass PASSPHRASE: mypass
TARGET_IP: TARGET_IP:
@ -102,11 +103,17 @@ TARGET_PORT: 62044
USE_ACL: True USE_ACL: True
SUB_ACL: DENY:1 SUB_ACL: DENY:1
TGID_ACL: PERMIT:ALL TGID_ACL: PERMIT:ALL
#Should always be true if using docker.
RELAX_CHECKS: True RELAX_CHECKS: True
#True for FBP, False for OBP
ENHANCED_OBP: True ENHANCED_OBP: True
PROTO_VER: 2 #PROTO_VER should be 5 for FreeDMR servers using FBP
#1 for other servers using OBP
PROTO_VER: 5
#This defines parameters for repeater/hotspot connections
#via HomeBrewProtocol (HBP)
#I don't recommend changing most of this unless you know what you are doing
[SYSTEM] [SYSTEM]
MODE: MASTER MODE: MASTER
ENABLED: True ENABLED: True
@ -132,7 +139,9 @@ ANNOUNCEMENT_LANGUAGE: en_GB
GENERATOR: 100 GENERATOR: 100
ALLOW_UNREG_ID: False ALLOW_UNREG_ID: False
PROXY_CONTROL: True PROXY_CONTROL: True
OVERRIDE_IDENT_TG:
#Echo (Loro / Parrot) server
[ECHO] [ECHO]
MODE: PEER MODE: PEER
ENABLED: True ENABLED: True
@ -173,31 +182,27 @@ echo "BRIDGES = {'9990': [{'SYSTEM': 'ECHO', 'TS': 2, 'TGID': 9990, 'ACTIVE': Tr
echo Set perms on config directory... echo Set perms on config directory...
chown -R 54000 /etc/freedmr && chown -R 54000 /etc/freedmr &&
echo Setup logging...
mkdir -p /var/log/freedmr &&
touch /var/log/freedmr/freedmr.log &&
chown -R 54000 /var/log/freedmr &&
mkdir -p /var/log/FreeDMRmonitor &&
touch /var/log/FreeDMRmonitor/lastheard.log &&
touch /var/log/FreeDMRmonitor/hbmon.log &&
chown -R 54001 /var/log/FreeDMRmonitor &&
echo Get docker-compose.yml... echo Get docker-compose.yml...
cd /etc/freedmr && cd /etc/freedmr &&
curl https://gitlab.hacknix.net/hacknix/FreeDMR/-/raw/master/docker-configs/docker-compose.yml -o docker-compose.yml && curl https://gitlab.hacknix.net/hacknix/FreeDMR/-/raw/master/docker-configs/docker-compose.yml -o docker-compose.yml &&
echo Install crontab...
cat << EOF > /etc/cron.daily/lastheard
#!/bin/bash
mv /var/log/FreeDMRmonitor/lastheard.log /var/log/FreeDMRmonitor/lastheard.log.save
/usr/bin/tail -150 /var/log/FreeDMRmonitor/lastheard.log.save > /var/log/FreeDMRmonitor/lastheard.log
mv /var/log/FreeDMRmonitor/lastheard.log /var/log/FreeDMRmonitor/lastheard.log.save
/usr/bin/tail -150 /var/log/FreeDMRmonitor/lastheard.log.save > /var/log/FreeDMRmonitor/lastheard.log
EOF
chmod 755 /etc/cron.daily/lastheard chmod 755 /etc/cron.daily/lastheard
echo Tune network stack...
cat << EOF > /etc/sysctl.conf &&
net.core.rmem_default=134217728
net.core.rmem_max=134217728
net.core.wmem_max=134217728
net.core.rmem_default=134217728
net.core.netdev_max_backlog=250000
net.netfilter.nf_conntrack_udp_timeout=15
net.netfilter.nf_conntrack_udp_timeout_stream=35
EOF
/usr/sbin/sysctl -p &&
echo Run FreeDMR container... echo Run FreeDMR container...
docker-compose up -d docker-compose up -d
echo Read notes in /etc/freedmr/docker-compose.yml to understand how to implement extra functionality.
echo FreeDMR setup complete! echo FreeDMR setup complete!

@ -45,7 +45,8 @@ from twisted.internet import reactor, task
import log import log
import config import config
from const import * from const import *
from dmr_utils3.utils import int_id, bytes_4, mk_id_dict from utils import mk_id_dict, try_download,load_json,blake2bsum
from dmr_utils3.utils import int_id, bytes_4
# Imports for the reporting server # Imports for the reporting server
import pickle import pickle
@ -59,9 +60,10 @@ from functools import partial, partialmethod
import ssl import ssl
from os.path import isfile, getmtime from os.path import isfile, getmtime, exists, getsize
from urllib.request import urlopen from urllib.request import urlopen
import shutil
import csv import csv
@ -199,20 +201,10 @@ class OPENBRIDGE(DatagramProtocol):
self.transport.write(_packet, (self._config['TARGET_IP'], self._config['TARGET_PORT'])) self.transport.write(_packet, (self._config['TARGET_IP'], self._config['TARGET_PORT']))
elif 'VER' in self._config and self._config['VER'] == 3: elif 'VER' in self._config and self._config['VER'] == 3:
_packet = b''.join([DMRF,_packet[4:11], self._CONFIG['GLOBAL']['SERVER_ID'],_packet[15:]]) logger.error('(%s) protocol version 3 no longer supported',self._system)
_h = blake2b(key=self._config['PASSPHRASE'], digest_size=16)
_h.update(_packet)
_hash = _h.digest()
_packet = b''.join([_packet,time_ns().to_bytes(8,'big'), _hops, _hash])
self.transport.write(_packet, (self._config['TARGET_IP'], self._config['TARGET_PORT']))
elif 'VER' in self._config and self._config['VER'] == 2: elif 'VER' in self._config and self._config['VER'] == 2:
_packet = b''.join([DMRF,_packet[4:11], self._CONFIG['GLOBAL']['SERVER_ID'],_packet[15:], time_ns().to_bytes(8,'big')]) logger.error('(%s) protocol version 2 no longer supported',self._system)
_h = blake2b(key=self._config['PASSPHRASE'], digest_size=16)
_h.update(_packet)
_hash = _h.digest()
_packet = b''.join([_packet,_hops, _hash])
self.transport.write(_packet, (self._config['TARGET_IP'], self._config['TARGET_PORT']))
# KEEP THE FOLLOWING COMMENTED OUT UNLESS YOU'RE DEBUGGING DEEPLY!!!! # KEEP THE FOLLOWING COMMENTED OUT UNLESS YOU'RE DEBUGGING DEEPLY!!!!
#logger.debug('(%s) TX Packet to OpenBridge %s:%s -- %s %s', self._system, self._config['TARGET_IP'], self._config['TARGET_PORT'], _packet, _hash) #logger.debug('(%s) TX Packet to OpenBridge %s:%s -- %s %s', self._system, self._config['TARGET_IP'], self._config['TARGET_PORT'], _packet, _hash)
else: else:
@ -287,7 +279,9 @@ class OPENBRIDGE(DatagramProtocol):
if compare_digest(_hash, _ckhs) and (_sockaddr == self._config['TARGET_SOCK'] or self._config['RELAX_CHECKS']): if compare_digest(_hash, _ckhs) and (_sockaddr == self._config['TARGET_SOCK'] or self._config['RELAX_CHECKS']):
_peer_id = _data[11:15] _peer_id = _data[11:15]
if self._config['NETWORK_ID'] != _peer_id: if self._config['NETWORK_ID'] != _peer_id:
logger.error('(%s) OpenBridge packet discarded because NETWORK_ID: %s Does not match sent Peer ID: %s', self._system, int_id(self._config['NETWORK_ID']), int_id(_peer_id)) if _stream_id not in self._laststrid:
logger.error('(%s) OpenBridge packet discarded because NETWORK_ID: %s Does not match sent Peer ID: %s', self._system, int_id(self._config['NETWORK_ID']), int_id(_peer_id))
self._laststrid.append(_stream_id)
return return
#This is a v1 packet, so all the extended stuff we can set to default #This is a v1 packet, so all the extended stuff we can set to default
@ -412,11 +406,15 @@ class OPENBRIDGE(DatagramProtocol):
_h.update(_packet[:69]) _h.update(_packet[:69])
_ckhs = _h.digest() _ckhs = _h.digest()
_stream_id = _data[16:20]
if compare_digest(_hash, _ckhs) and (_sockaddr == self._config['TARGET_SOCK'] or self._config['RELAX_CHECKS']): if compare_digest(_hash, _ckhs) and (_sockaddr == self._config['TARGET_SOCK'] or self._config['RELAX_CHECKS']):
_peer_id = _data[11:15] _peer_id = _data[11:15]
if self._config['NETWORK_ID'] != _peer_id: if self._config['NETWORK_ID'] != _peer_id:
logger.error('(%s) OpenBridge packet discarded because NETWORK_ID: %s Does not match sent Peer ID: %s', self._system, int_id(self._config['NETWORK_ID']), int_id(_peer_id)) if _stream_id not in self._laststrid:
logger.error('(%s) OpenBridge packet discarded because NETWORK_ID: %s Does not match sent Peer ID: %s', self._system, int_id(self._config['NETWORK_ID']), int_id(_peer_id))
self._laststrid.append(_stream_id)
return return
_seq = _data[4] _seq = _data[4]
_rf_src = _data[5:8] _rf_src = _data[5:8]
@ -433,7 +431,6 @@ class OPENBRIDGE(DatagramProtocol):
_call_type = 'group' _call_type = 'group'
_frame_type = (_bits & 0x30) >> 4 _frame_type = (_bits & 0x30) >> 4
_dtype_vseq = (_bits & 0xF) # data, 1=voice header, 2=voice terminator; voice, 0=burst A ... 5=burst F _dtype_vseq = (_bits & 0xF) # data, 1=voice header, 2=voice terminator; voice, 0=burst A ... 5=burst F
_stream_id = _data[16:20]
#logger.debug('(%s) DMRD - Seqence: %s, RF Source: %s, Destination ID: %s', self._system, int_id(_seq), int_id(_rf_src), int_id(_dst_id)) #logger.debug('(%s) DMRD - Seqence: %s, RF Source: %s, Destination ID: %s', self._system, int_id(_seq), int_id(_rf_src), int_id(_dst_id))
#Don't do anything if we are STUNned #Don't do anything if we are STUNned
@ -479,120 +476,39 @@ class OPENBRIDGE(DatagramProtocol):
self.send_bcsq(_dst_id,_stream_id) self.send_bcsq(_dst_id,_stream_id)
return return
#Low-level TG filtering #Low-level TG filtering
if _call_type != 'unit': if _call_type != 'unit':
_int_dst_id = int_id(_dst_id) _int_dst_id = int_id(_dst_id)
if _int_dst_id <= 79 or (_int_dst_id >= 9990 and _int_dst_id <= 9999) or _int_dst_id == 900999:
if _stream_id not in self._laststrid: if _int_dst_id <= 79:
logger.info('(%s) CALL DROPPED WITH STREAM ID %s FROM SUBSCRIBER %s BY GLOBAL TG FILTER', self._system, int_id(_stream_id), _int_dst_id)
self.send_bcsq(_dst_id,_stream_id)
self._laststrid.append(_stream_id)
return
# ACL Processing
if self._CONFIG['GLOBAL']['USE_ACL']:
if not acl_check(_rf_src, self._CONFIG['GLOBAL']['SUB_ACL']):
if _stream_id not in self._laststrid: if _stream_id not in self._laststrid:
logger.info('(%s) CALL DROPPED WITH STREAM ID %s ON TGID %s BY GLOBAL TS1 ACL', self._system, int_id(_stream_id), int_id(_rf_src)) logger.info('(%s) CALL DROPPED WITH STREAM ID %s FROM SUBSCRIBER %s BY GLOBAL TG FILTER (local to repeater)', self._system, int_id(_stream_id), _int_dst_id)
self.send_bcsq(_dst_id,_stream_id) self.send_bcsq(_dst_id,_stream_id)
self._laststrid.append(_stream_id) self._laststrid.append(_stream_id)
return return
if _slot == 1 and not acl_check(_dst_id, self._CONFIG['GLOBAL']['TG1_ACL']):
if (_int_dst_id >= 9990 and _int_dst_id <= 9999) or _int_dst_id == 900999:
if _stream_id not in self._laststrid: if _stream_id not in self._laststrid:
logger.info('(%s) CALL DROPPED WITH STREAM ID %s ON TGID %s BY GLOBAL TS1 ACL', self._system, int_id(_stream_id), int_id(_dst_id)) logger.info('(%s) CALL DROPPED WITH STREAM ID %s FROM SUBSCRIBER %s BY GLOBAL TG FILTER (local to server)', self._system, int_id(_stream_id), _int_dst_id)
self.send_bcsq(_dst_id,_stream_id) self.send_bcsq(_dst_id,_stream_id)
self._laststrid.append(_stream_id) self._laststrid.append(_stream_id)
return return
if self._config['USE_ACL']:
if not acl_check(_rf_src, self._config['SUB_ACL']): if (_int_dst_id >= 92 and _int_dst_id <= 199) and int(str(int.from_bytes(_source_server,'big'))[:4]) != int(str(int.from_bytes(self._CONFIG['GLOBAL']['SERVER_ID'],'big'))[:4]):
if _stream_id not in self._laststrid: if _stream_id not in self._laststrid:
logger.info('(%s) CALL DROPPED WITH STREAM ID %s FROM SUBSCRIBER %s BY SYSTEM ACL', self._system, int_id(_stream_id), int_id(_rf_src)) logger.info('(%s) CALL DROPPED WITH STREAM ID %s FROM SUBSCRIBER %s BY GLOBAL TG FILTER (local to server main ID)', self._system, int_id(_stream_id), _int_dst_id)
self.send_bcsq(_dst_id,_stream_id) self.send_bcsq(_dst_id,_stream_id)
self._laststrid.append(_stream_id) self._laststrid.append(_stream_id)
return return
if not acl_check(_dst_id, self._config['TG1_ACL']):
if _stream_id not in self._laststrid:
logger.info('(%s) CALL DROPPED WITH STREAM ID %s ON TGID %s BY SYSTEM ACL', self._system, int_id(_stream_id), int_id(_dst_id))
self.send_bcsq(_dst_id,_stream_id)
self._laststrid.append(_stream_id)
return
_data = b''.join([DMRD,_data[4:]])
_hops = _inthops.to_bytes(1,'big')
# Userland actions -- typically this is the function you subclass for an application
self.dmrd_received(_peer_id, _rf_src, _dst_id, _seq, _slot, _call_type, _frame_type, _dtype_vseq, _stream_id, _data,_hash,_hops,_source_server,_ber,_rssi,_source_rptr)
#Silently treat a DMRD packet like a keepalive - this is because it's traffic and the
#Other end may not have enabled ENAHNCED_OBP
self._config['_bcka'] = time()
else:
h,p = _sockaddr
logger.warning('(%s) FreeBridge HMAC failed, packet discarded - OPCODE: %s DATA: %s HMAC LENGTH: %s HMAC: %s SRC IP: %s SRC PORT: %s', self._system, _packet[:4], repr(_packet[:69]), len(_packet[69:]), repr(_packet[61:]),h,p)
elif _packet[:4] == DMRF:
_data = _packet[:53]
_timestamp = _packet[53:60]
_hops = _packet[61]
_hash = _packet[62:]
#_ckhs = hmac_new(self._config['PASSPHRASE'],_data,sha1).digest()
_h = blake2b(key=self._config['PASSPHRASE'], digest_size=16)
if 'VER' in self._config and self._config['VER'] > 2:
_h.update(_packet[:53])
elif 'VER' in self._config and self._config['VER'] == 2:
_h.update(_packet[:61])
_ckhs = _h.digest()
if compare_digest(_hash, _ckhs) and (_sockaddr == self._config['TARGET_SOCK'] or self._config['RELAX_CHECKS']): if ((_int_dst_id >= 80 and _int_dst_id <= 89) or (_int_dst_id >= 800 and _int_dst_id <= 899)) and int(str(int.from_bytes(_source_server,'big'))[:3]) != int(str(int.from_bytes(self._CONFIG['GLOBAL']['SERVER_ID'],'big'))[:3]):
_peer_id = _data[11:15]
if self._config['NETWORK_ID'] != _peer_id:
logger.error('(%s) OpenBridge packet discarded because NETWORK_ID: %s Does not match sent Peer ID: %s', self._system, int_id(self._config['NETWORK_ID']), int_id(_peer_id))
return
_seq = _data[4]
_rf_src = _data[5:8]
_dst_id = _data[8:11]
_int_dst_id = int_id(_dst_id)
_bits = _data[15]
_slot = 2 if (_bits & 0x80) else 1
#_call_type = 'unit' if (_bits & 0x40) else 'group'
if _bits & 0x40:
_call_type = 'unit'
elif (_bits & 0x23) == 0x23:
_call_type = 'vcsbk'
else:
_call_type = 'group'
_frame_type = (_bits & 0x30) >> 4
_dtype_vseq = (_bits & 0xF) # data, 1=voice header, 2=voice terminator; voice, 0=burst A ... 5=burst F
_stream_id = _data[16:20]
#logger.debug('(%s) DMRD - Seqence: %s, RF Source: %s, Destination ID: %s', self._system, int_id(_seq), int_id(_rf_src), int_id(_dst_id))
#Don't do anything if we are STUNned
if 'STUN' in self._CONFIG:
if _stream_id not in self._laststrid: if _stream_id not in self._laststrid:
logger.warning('(%s) Bridge STUNned, discarding', self._system) logger.info('(%s) CALL DROPPED WITH STREAM ID %s FROM SUBSCRIBER %s BY GLOBAL TG FILTER (local to MCC)', self._system, int_id(_stream_id), _int_dst_id)
self._laststrid.append(_stream_id)
return
#Increment max hops
_inthops = _hops +1
if _inthops > 10:
logger.warning('(%s) MAX HOPS exceed, dropping. Hops: %s, DST: %s', self._system, _inthops, _int_dst_id)
self.send_bcsq(_dst_id,_stream_id)
return
#Low-level TG filtering
if _call_type != 'unit':
_int_dst_id = int_id(_dst_id)
if _int_dst_id <= 79 or (_int_dst_id >= 9990 and _int_dst_id <= 9999) or _int_dst_id == 900999:
if _stream_id not in self._laststrid:
logger.info('(%s) CALL DROPPED WITH STREAM ID %s FROM SUBSCRIBER %s BY GLOBAL TG FILTER', self._system, int_id(_stream_id), _int_dst_id)
self.send_bcsq(_dst_id,_stream_id) self.send_bcsq(_dst_id,_stream_id)
self._laststrid.append(_stream_id) self._laststrid.append(_stream_id)
return return
# ACL Processing # ACL Processing
if self._CONFIG['GLOBAL']['USE_ACL']: if self._CONFIG['GLOBAL']['USE_ACL']:
@ -621,23 +537,22 @@ class OPENBRIDGE(DatagramProtocol):
self.send_bcsq(_dst_id,_stream_id) self.send_bcsq(_dst_id,_stream_id)
self._laststrid.append(_stream_id) self._laststrid.append(_stream_id)
return return
#Remove timestamp from data. For now dmrd_received does not expect it
#Leaving it in screws up the AMBE data
#_data = b''.join([_data[:5],_data[12:]])
_data = b''.join([DMRD,_data[4:]]) _data = b''.join([DMRD,_data[4:]])
_hops = _inthops.to_bytes(1,'big') _hops = _inthops.to_bytes(1,'big')
# Userland actions -- typically this is the function you subclass for an application # Userland actions -- typically this is the function you subclass for an application
self.dmrd_received(_peer_id, _rf_src, _dst_id, _seq, _slot, _call_type, _frame_type, _dtype_vseq, _stream_id, _data,_hash,_hops) self.dmrd_received(_peer_id, _rf_src, _dst_id, _seq, _slot, _call_type, _frame_type, _dtype_vseq, _stream_id, _data,_hash,_hops,_source_server,_ber,_rssi,_source_rptr)
#Silently treat a DMRD packet like a keepalive - this is because it's traffic and the #Silently treat a DMRD packet like a keepalive - this is because it's traffic and the
#Other end may not have enabled ENAHNCED_OBP #Other end may not have enabled ENAHNCED_OBP
self._config['_bcka'] = time() self._config['_bcka'] = time()
else: else:
h,p = _sockaddr h,p = _sockaddr
logger.warning('(%s) FreeBridge HMAC failed, packet discarded - OPCODE: %s DATA: %s HMAC LENGTH: %s HMAC: %s SRC IP: %s SRC PORT: %s', self._system, _packet[:4], repr(_packet[:61]), len(_packet[61:]), repr(_packet[61:]),h,p) logger.warning('(%s) FreeBridge HMAC failed, packet discarded - OPCODE: %s DATA: %s HMAC LENGTH: %s HMAC: %s SRC IP: %s SRC PORT: %s', self._system, _packet[:4], repr(_packet[:69]), len(_packet[69:]), repr(_packet[61:]),h,p)
elif _packet[:4] == DMRF:
logger.error('(%s) Protocol versions 2 and 3 no longer supported',self._system)
if self._config['ENHANCED_OBP']: if self._config['ENHANCED_OBP']:
if _packet[:2] == BC: # Bridge Control packet (Extended OBP) if _packet[:2] == BC: # Bridge Control packet (Extended OBP)
@ -694,7 +609,9 @@ class OPENBRIDGE(DatagramProtocol):
if compare_digest(_hash, _ckhs): if compare_digest(_hash, _ckhs):
logger.trace('(%s) *ProtoControl* BCVE Version received, Ver: %s',self._system,_ver) logger.trace('(%s) *ProtoControl* BCVE Version received, Ver: %s',self._system,_ver)
if _ver > self._config['VER']: if _ver == 2 or _ver == 3 or _ver > 5:
logger.info('(%s) *ProtoControl* BCVE Version not supported, Ver: %s',self._system,_ver)
elif _ver > self._config['VER']:
logger.info('(%s) *ProtoControl* BCVE Version upgrade, Ver: %s',self._system,_ver) logger.info('(%s) *ProtoControl* BCVE Version upgrade, Ver: %s',self._system,_ver)
self._config['VER'] = _ver self._config['VER'] = _ver
elif _ver == self._config['VER']: elif _ver == self._config['VER']:
@ -761,7 +678,7 @@ class HBSYSTEM(DatagramProtocol):
# Aliased in __init__ to maintenance_loop if system is a master # Aliased in __init__ to maintenance_loop if system is a master
def master_maintenance_loop(self): def master_maintenance_loop(self):
logger.debug('(%s) Master maintenance loop started', self._system) logger.trace('(%s) Master maintenance loop started', self._system)
remove_list = deque() remove_list = deque()
for peer in self._peers: for peer in self._peers:
_this_peer = self._peers[peer] _this_peer = self._peers[peer]
@ -918,7 +835,6 @@ class HBSYSTEM(DatagramProtocol):
# Extract the command, which is various length, all but one 4 significant characters -- RPTCL # Extract the command, which is various length, all but one 4 significant characters -- RPTCL
_command = _data[:4] _command = _data[:4]
if _command == DMRD: # DMRData -- encapsulated DMR data frame if _command == DMRD: # DMRData -- encapsulated DMR data frame
_peer_id = _data[11:15] _peer_id = _data[11:15]
if _peer_id in self._peers \ if _peer_id in self._peers \
@ -1372,37 +1288,6 @@ class reportFactory(Factory):
logger.debug('(REPORT) Send config') logger.debug('(REPORT) Send config')
self.send_clients(b''.join([REPORT_OPCODES['CONFIG_SND'], serialized])) self.send_clients(b''.join([REPORT_OPCODES['CONFIG_SND'], serialized]))
#Use this try_download instead of that from dmr_utils3
def try_download(_path, _file, _url, _stale,):
no_verify = ssl._create_unverified_context()
now = time()
file_exists = isfile(''.join([_path,_file])) == True
if file_exists:
file_old = (getmtime(''.join([_path,_file])) + _stale) < now
if not file_exists or (file_exists and file_old):
try:
with urlopen(_url, context=no_verify) as response:
data = response.read()
#outfile.write(data)
response.close()
result = 'ID ALIAS MAPPER: \'{}\' successfully downloaded'.format(_file)
except IOError:
result = 'ID ALIAS MAPPER: \'{}\' could not be downloaded due to an IOError'.format(_file)
else:
if data and (data != b'{}'):
try:
with open(''.join([_path,_file]), 'wb') as outfile:
outfile.write(data)
outfile.close()
except IOError:
result = 'ID ALIAS mapper \'{}\' file could not be written due to an IOError'.format(_file)
else:
result = 'ID ALIAS mapper \'{}\' file not written because downloaded data is empty for some reason'.format(_file)
else:
result = 'ID ALIAS MAPPER: \'{}\' is current, not downloaded'.format(_file)
return result
#Read list of listed servers from CSV (actually TSV) file #Read list of listed servers from CSV (actually TSV) file
def mk_server_dict(path,filename): def mk_server_dict(path,filename):
@ -1413,9 +1298,9 @@ def mk_server_dict(path,filename):
for _row in reader: for _row in reader:
server_ids[_row['OPB Net ID']] = _row['Country'] server_ids[_row['OPB Net ID']] = _row['Country']
return(server_ids) return(server_ids)
except IOError as err: except Exception as err:
logger.warning('ID ALIAS MAPPER: %s could not be read due to IOError: %s',filename,err) logger.warning('ID ALIAS MAPPER: %s could not be read: %s',filename,err)
return(False) raise(err)
# ID ALIAS CREATION # ID ALIAS CREATION
@ -1426,7 +1311,21 @@ def mk_aliases(_config):
local_subscriber_ids = {} local_subscriber_ids = {}
talkgroup_ids = {} talkgroup_ids = {}
server_ids = {} server_ids = {}
checksums = {}
if _config['ALIASES']['TRY_DOWNLOAD'] == True: if _config['ALIASES']['TRY_DOWNLOAD'] == True:
#Try updating checksum file
if _config['ALIASES']['CHECKSUM_FILE'] and _config['ALIASES']['CHECKSUM_URL']:
result = try_download(_config['ALIASES']['PATH'], _config['ALIASES']['CHECKSUM_FILE'], _config['ALIASES']['CHECKSUM_URL'], _config['ALIASES']['STALE_TIME'])
logger.info('(ALIAS) %s', result)
try:
checksums = load_json(''.join([_config['ALIASES']['PATH'], _config['ALIASES']['CHECKSUM_FILE']]))
except Exception as e:
logger.error('(ALIAS) ID ALIAS MAPPER: Cannot load checksums: %s',e)
else:
logger.warning('(ALIAS) ID ALIAS MAPPER: CHECKSUM_FILE or CHECKSUM_URL is empty. Not downloading checksums!')
# Try updating peer aliases file # Try updating peer aliases file
result = try_download(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE'], _config['ALIASES']['PEER_URL'], _config['ALIASES']['STALE_TIME']) result = try_download(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE'], _config['ALIASES']['PEER_URL'], _config['ALIASES']['STALE_TIME'])
logger.info('(ALIAS) %s', result) logger.info('(ALIAS) %s', result)
@ -1439,50 +1338,141 @@ def mk_aliases(_config):
#Try updating server ids file #Try updating server ids file
result = try_download(_config['ALIASES']['PATH'], _config['ALIASES']['SERVER_ID_FILE'], _config['ALIASES']['SERVER_ID_URL'], _config['ALIASES']['STALE_TIME']) result = try_download(_config['ALIASES']['PATH'], _config['ALIASES']['SERVER_ID_FILE'], _config['ALIASES']['SERVER_ID_URL'], _config['ALIASES']['STALE_TIME'])
logger.info('(ALIAS) %s', result) logger.info('(ALIAS) %s', result)
# Make Dictionaries # Make Dictionaries
#Peer IDs
try: try:
peer_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE']) if exists(_config['ALIASES']['PATH'] + _config['ALIASES']['PEER_FILE'] + '.bak') and (getsize(_config['ALIASES']['PATH'] + _config['ALIASES']['PEER_FILE'] + '.bak') > getsize(_config['ALIASES']['PATH'] + _config['ALIASES']['PEER_FILE'])):
raise Exception('backup peer_ids file is larger than new file')
try:
if blake2bsum(''.join([_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE']])) != checksums['peer_ids']:
raise(Exception('bad checksum'))
except Exception as e:
logger.error('(ALIAS) ID ALIAS MAPPER: problem with blake2bsum of peer_ids file. not updating.: %s',e)
else:
peer_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE'])
except Exception as e: except Exception as e:
logger.error('(ALIAS) ID ALIAS MAPPER: problem with data in peer_ids dictionary, not updating: %s',e) logger.error('(ALIAS) ID ALIAS MAPPER: problem with data in peer_ids dictionary, not updating: %s',e)
try:
peer_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE'] + '.bak')
except Exception as f:
logger.error('(ALIAS) ID ALIAS MAPPER: Tried backup peer_ids file, but couldn\'t load that either: %s',f)
else: else:
if peer_ids: if peer_ids:
logger.info('(ALIAS) ID ALIAS MAPPER: peer_ids dictionary is available') logger.info('(ALIAS) ID ALIAS MAPPER: peer_ids dictionary is available')
try:
shutil.copy(_config['ALIASES']['PATH'] + _config['ALIASES']['PEER_FILE'],_config['ALIASES']['PATH'] + _config['ALIASES']['PEER_FILE'] + '.bak')
except IOError as g:
logger.info('(ALIAS) ID ALIAS MAPPER: couldn\'t make backup copy of peer_ids file %s',g)
#Subscriber IDs
try: try:
subscriber_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['SUBSCRIBER_FILE']) if exists(_config['ALIASES']['PATH'] + _config['ALIASES']['SUBSCRIBER_FILE'] + '.bak') and (getsize(_config['ALIASES']['PATH'] + _config['ALIASES']['SUBSCRIBER_FILE'] + '.bak') > getsize(_config['ALIASES']['PATH'] + _config['ALIASES']['SUBSCRIBER_FILE'])):
raise Exception('backup subscriber_ids file is larger than new file')
try:
if blake2bsum(''.join([_config['ALIASES']['PATH'], _config['ALIASES']['SUBSCRIBER_FILE']])) != checksums['subscriber_ids']:
raise(Exception('bad checksum'))
except Exception as e:
logger.error('(ALIAS) ID ALIAS MAPPER: problem with blake2bsum of subscriber_ids file. not updating.: %s',e)
else:
subscriber_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['SUBSCRIBER_FILE'])
except Exception as e: except Exception as e:
logger.info('(ALIAS) ID ALIAS MAPPER: problem with data in subscriber_ids dictionary, not updating: %s',e) logger.error('(ALIAS) ID ALIAS MAPPER: problem with data in subscriber_ids dictionary, not updating: %s',e)
try:
subscriber_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['SUBSCRIBER_FILE'] + '.bak')
except Exception as f:
logger.error('(ALIAS) ID ALIAS MAPPER: Tried backup subscriber_ids file, but couldn\'t load that either: %s',f)
else: else:
if subscriber_ids:
logger.info('(ALIAS) ID ALIAS MAPPER: subscriber_ids dictionary is available')
#Add special IDs to DB #Add special IDs to DB
subscriber_ids[900999] = 'D-APRS' subscriber_ids[900999] = 'D-APRS'
subscriber_ids[4294967295] = 'SC' subscriber_ids[4294967295] = 'SC'
if subscriber_ids: try:
logger.info('(ALIAS) ID ALIAS MAPPER: subscriber_ids dictionary is available') shutil.copy(_config['ALIASES']['PATH'] + _config['ALIASES']['SUBSCRIBER_FILE'],_config['ALIASES']['PATH'] + _config['ALIASES']['SUBSCRIBER_FILE'] + '.bak')
except IOError as g:
logger.info('(ALIAS) ID ALIAS MAPPER: couldn\'t make backup copy of subscriber_ids file %s',g)
#Talkgroup IDs
try: try:
talkgroup_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['TGID_FILE']) if exists(_config['ALIASES']['PATH'] + _config['ALIASES']['TGID_FILE'] + '.bak') and (getsize(_config['ALIASES']['PATH'] + _config['ALIASES']['TGID_FILE'] + '.bak') > getsize(_config['ALIASES']['PATH'] + _config['ALIASES']['TGID_FILE'])):
raise Exception('backup talkgroup_ids file is larger than new file')
try:
if blake2bsum(''.join([_config['ALIASES']['PATH'], _config['ALIASES']['TGID_FILE']])) != checksums['talkgroup_ids']:
raise(Exception('bad checksum'))
except Exception as e:
logger.error('(ALIAS) ID ALIAS MAPPER: problem with blake2bsum of talkgroup_ids file. not updating.: %s',e)
else:
talkgroup_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['TGID_FILE'])
except Exception as e: except Exception as e:
logger.info('(ALIAS) ID ALIAS MAPPER: problem with data in talkgroup_ids dictionary, not updating: %s',e) logger.error('(ALIAS) ID ALIAS MAPPER: problem with data in talkgroup_ids dictionary, not updating: %s',e)
try:
talkgroup_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['TGID_FILE'] + '.bak')
except Exception as f:
logger.error('(ALIAS) ID ALIAS MAPPER: Tried backup talkgroup_ids file, but couldn\'t load that either: %s',f)
else: else:
if talkgroup_ids: if talkgroup_ids:
logger.info('(ALIAS) ID ALIAS MAPPER: talkgroup_ids dictionary is available') logger.info('(ALIAS) ID ALIAS MAPPER: talkgroup_ids dictionary is available')
try:
local_subscriber_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['LOCAL_SUBSCRIBER_FILE']) try:
except Exception as e: shutil.copy(_config['ALIASES']['PATH'] + _config['ALIASES']['TGID_FILE'],_config['ALIASES']['PATH'] + _config['ALIASES']['TGID_FILE'] + '.bak')
logger.info('(ALIAS) ID ALIAS MAPPER: problem with data in local_subscriber_ids dictionary, not updating: %s',e) except IOError as g:
logger.info('(ALIAS) ID ALIAS MAPPER: couldn\'t make backup copy of talkgroup_ids file %s',g)
#Local subscriber override
if exists(_config['ALIASES']['PATH'] + _config['ALIASES']['LOCAL_SUBSCRIBER_FILE']):
try:
local_subscriber_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['LOCAL_SUBSCRIBER_FILE'])
except Exception as e:
logger.error('(ALIAS) ID ALIAS MAPPER: problem with data in local_subscriber_ids dictionary, not updating: %s',e)
try:
local_subscriber_ids = mk_id_dict(_config['ALIASES']['PATH'], _config['ALIASES']['PEER_FILE'] + '.bak')
except Exception as f:
logger.error('(ALIAS) ID ALIAS MAPPER: Tried backup local_subscriber_ids file, but couldn\'t load that either: %s',f)
else:
if local_subscriber_ids:
logger.info('(ALIAS) ID ALIAS MAPPER: local_subscriber_ids dictionary is available')
try:
shutil.copy(_config['ALIASES']['PATH'] + _config['ALIASES']['LOCAL_SUBSCRIBER_FILE'],_config['ALIASES']['PATH'] + _config['ALIASES']['LOCAL_SUBSCRIBER_FILE'] + '.bak')
except IOError as g:
logger.info('(ALIAS) ID ALIAS MAPPER: couldn\'t make backup copy of local_subscriber_ids file %s',g)
else: else:
if subscriber_ids: logger.info('(ALIAS) ID ALIAS MAPPER: local subscriber file does not exist and is optional, skipping. ')
logger.info('(ALIAS) ID ALIAS MAPPER: local_subscriber_ids dictionary is available')
try: #Server IDs
server_ids = mk_server_dict(_config['ALIASES']['PATH'], _config['ALIASES']['SERVER_ID_FILE']) try:
try:
if blake2bsum(''.join([_config['ALIASES']['PATH'], _config['ALIASES']['SERVER_ID_FILE']])) != checksums['server_ids']:
raise(Exception('bad checksum'))
except Exception as e:
logger.error('(ALIAS) ID ALIAS MAPPER: problem with blake2bsum of server_ids file: %s',e)
raise(e)
else:
server_ids = mk_server_dict(_config['ALIASES']['PATH'], _config['ALIASES']['SERVER_ID_FILE'])
except Exception as e: except Exception as e:
logger.info('(ALIAS) ID ALIAS MAPPER: problem with data in server_ids dictionary, not updating: %s',e) logger.error('(ALIAS) ID ALIAS MAPPER: problem with data in server_ids dictionary, not updating: %s',e)
if server_ids: try:
logger.info('(ALIAS) ID ALIAS MAPPER: server_ids dictionary is available') server_ids = mk_server_dict(_config['ALIASES']['PATH'], _config['ALIASES']['SERVER_ID_FILE'] + '.bak')
except Exception as f:
logger.error('(ALIAS) ID ALIAS MAPPER: Tried backup server_ids file, but couldn\'t load that either: %s',f)
else:
if server_ids:
logger.info('(ALIAS) ID ALIAS MAPPER: server_ids dictionary is available')
try:
shutil.copy(_config['ALIASES']['PATH'] + _config['ALIASES']['SERVER_ID_FILE'],_config['ALIASES']['PATH'] + _config['ALIASES']['SERVER_ID_FILE'] + '.bak')
except IOError as g:
logger.info('(ALIAS) ID ALIAS MAPPER: couldn\'t make backup copy of server_ids file %s',g)
return peer_ids, subscriber_ids, talkgroup_ids, local_subscriber_ids, server_ids return peer_ids, subscriber_ids, talkgroup_ids, local_subscriber_ids, server_ids, checksums
#************************************************ #************************************************

@ -312,7 +312,7 @@ if __name__ == '__main__':
if 'FDPROXY_CLIENTINFO' in os.environ: if 'FDPROXY_CLIENTINFO' in os.environ:
ClientInfo = bool(os.environ['FDPROXY_CLIENTINFO']) ClientInfo = bool(os.environ['FDPROXY_CLIENTINFO'])
if 'FDPROXY_LISTENPORT' in os.environ: if 'FDPROXY_LISTENPORT' in os.environ:
ListenPort = os.environ['FDPROXY_LISTENPORT'] ListenPort = int(os.environ['FDPROXY_LISTENPORT'])
for port in range(DestportStart,DestPortEnd+1,1): for port in range(DestportStart,DestPortEnd+1,1):
CONNTRACK[port] = False CONNTRACK[port] = False

@ -0,0 +1,105 @@
#
###############################################################################
# Copyright (C) 2020 Simon Adlem, G7RZU <g7rzu@gb7fr.org.uk>
# Copyright (C) 2016-2019 Cortney T. Buffington, N0MJS <n0mjs@me.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
###############################################################################
#Some utilty functions from dmr_utils3 have been modified. These live here.
# Also new FreeDMR specific functions.
import ssl
from time import time
from os.path import isfile, getmtime
from urllib.request import urlopen
from json import load as jload
import hashlib
#Use this try_download instead of that from dmr_utils3
def try_download(_path, _file, _url, _stale,):
    """Fetch _url into _path/_file if the local copy is missing or stale.

    Replacement for dmr_utils3's try_download. The file is re-downloaded
    only when it does not exist or its mtime is more than _stale seconds
    old. Returns a human-readable status string for logging; it never
    raises on download/write failure.

    NOTE(review): ssl._create_unverified_context() is a private API and
    disables certificate verification — presumably deliberate for
    self-signed ID servers; confirm intent.
    """
    insecure_ctx = ssl._create_unverified_context()
    target = ''.join([_path, _file])
    now = time()
    have_file = isfile(target) == True
    # Stale check only applies when a local copy exists.
    is_stale = have_file and (getmtime(target) + _stale) < now
    if have_file and not is_stale:
        return 'ID ALIAS MAPPER: \'{}\' is current, not downloaded'.format(_file)
    try:
        with urlopen(_url, context=insecure_ctx) as response:
            payload = response.read()
            response.close()
        result = 'ID ALIAS MAPPER: \'{}\' successfully downloaded'.format(_file)
    except IOError:
        result = 'ID ALIAS MAPPER: \'{}\' could not be downloaded due to an IOError'.format(_file)
    else:
        # Refuse to overwrite the local file with an empty payload.
        if payload and (payload != b'{}'):
            try:
                with open(target, 'wb') as outfile:
                    outfile.write(payload)
                    outfile.close()
            except IOError:
                result = 'ID ALIAS mapper \'{}\' file could not be written due to an IOError'.format(_file)
        else:
            result = 'ID ALIAS mapper \'{}\' file not written because downloaded data is empty for some reason'.format(_file)
    return result
# SHORT VERSION - MAKES A SIMPLE {INTEGER ID: 'CALLSIGN'} DICTIONARY
def mk_id_dict(_path, _file):
    """Build a simple {int id: 'CALLSIGN'} dictionary from a JSON ID file.

    The JSON document is expected to hold a single top-level key
    (optionally alongside a 'count' key, which is discarded) whose value
    is a list of records each carrying 'id' and 'callsign' fields.
    Records with a missing or non-numeric 'id' are skipped on purpose
    (best-effort load). Open/parse errors propagate to the caller.
    """
    _dict = {}
    # Note: original code had a bare `_handle.close` (missing parentheses,
    # a no-op) and a redundant `try/except: raise` wrapper — both removed;
    # the `with` block already closes the handle and errors still propagate.
    with open(_path + _file, 'r', encoding='latin1') as _handle:
        records = jload(_handle)
    if 'count' in [*records]:
        records.pop('count')
    # The remaining single top-level key holds the record list.
    records = records[[*records][0]]
    for record in records:
        try:
            _dict[int(record['id'])] = record['callsign']
        except (KeyError, TypeError, ValueError):
            # Skip malformed records rather than aborting the whole load.
            pass
    return _dict
#Read JSON from file
def load_json(filename):
    """Read and return the JSON document stored at filename.

    Any open or parse error is propagated unchanged to the caller.
    """
    with open(filename) as handle:
        parsed = jload(handle)
    return parsed
#Calculate blake2b checksum of file
def blake2bsum(filename):
    """Return the blake2b hex digest of the file's contents.

    Reads in 4 KiB chunks so large ID files are never held in memory
    all at once; open/read errors propagate to the caller.
    """
    digest = hashlib.blake2b()
    with open(filename, "rb") as handle:
        for chunk in iter(lambda: handle.read(4096), b""):
            digest.update(chunk)
    return digest.hexdigest()
Loading…
Cancel
Save

Powered by TurnKey Linux.