From 12124176a491a873eb41423200c7f7add9ea99d4 Mon Sep 17 00:00:00 2001 From: Alex Stancu Date: Fri, 3 Oct 2025 18:18:24 +0300 Subject: [PATCH] Fix for TLS issues. Issue-ID: SIM-126 Change-Id: I3c8576ca7ac17357d142fd61ae1c7895e3fbacad Signed-off-by: Alex Stancu --- .env | 2 +- README.md | 2 + base/Dockerfile | 2 + base/docker/scripts/common.sh | 2 +- base/src/application.py | 1 - base/src/core/config.py | 193 ++++++++++++++++++++- base/src/core/netconf.py | 42 +++-- base/src/core/netconf_server.py | 2 +- base/src/feature/ietf_keystore_truststore.py | 6 +- base/src/requirements.txt | 1 + base/src/util/crypto.py | 6 +- doc/environment-variables.md | 8 +- docker-compose-o-du-o1.yaml | 19 +- docker-compose-o-ru-mplane.yaml | 20 ++- docker-compose.yaml | 24 ++- .../data/_3gpp-common-managed-element-running.json | 1 + o-du-o1/data/performance-management/index.json | 42 ++--- .../data/performance-management/template_5G_NR.xml | 14 +- o-du-o1/src/main.py | 19 +- o-ru-mplane/Dockerfile | 4 + .../data/ietf-netconf-server-ssh-callhome.json | 2 +- o-ru-mplane/data/ietf-netconf-server-template.json | 16 +- .../data/ietf-netconf-server-tls-callhome.json | 9 +- .../data/ietf-netconf-server-tls-listen.json | 7 +- o-ru-mplane/data/o-ran-operations-operational.json | 9 + o-ru-mplane/entrypoint.sh | 99 +++++++++++ o-ru-mplane/src/main.py | 84 ++++++++- 27 files changed, 532 insertions(+), 104 deletions(-) create mode 100644 o-ru-mplane/data/o-ran-operations-operational.json create mode 100755 o-ru-mplane/entrypoint.sh diff --git a/.env b/.env index 1cd4ea3..4eac5eb 100644 --- a/.env +++ b/.env @@ -15,7 +15,7 @@ # * limitations under the License. # ***************************************************************************/ -NTS_VERSION=0.9.1 +NTS_VERSION=0.9.5 # Network settings HOST_IP=192.168.10.253 diff --git a/README.md b/README.md index 8058f3c..4169e5d 100644 --- a/README.md +++ b/README.md @@ -286,6 +286,8 @@ Please note that `remote-address` and `remote-port` identify the Call-Home endpo Data can be loaded in the NETCONF datastores at boot-time. By creating files having the name "[yang-module-name]-[datastore].[xml|json]", and placing them in /data folder (/data can be mounted in the docker container and all files present there will be considered for loading). The files can be in either `xml` or `json` format. The accepted datastores are `running` or `operational`. +Another file name format can be used when the files must be loaded in a specific order: the name can carry a number prefix followed by a dash, "[number-prefix]-[yang-module-name]-[datastore].[xml|json]". The "[number-prefix]-" part is optional, but files that have it are loaded before files that don't. The order is Python's plain (lexicographic) string sort; for example, ['1', '2', '10', '3', '002'] is sorted to ['002', '1', '10', '2', '3'], so we recommend zero-padding the prefix (e.g. 01, 02, ... 10) when more than 9 files need to be ordered (see the short sketch below). + ## Starting the simulator There are example docker-compose files for starting a simulated O-RU (actually 2 of them, one in hybrid mode, one in hierarchical mode) and another one for starting an O-DU. They can be started by simply doing `docker compose -f docker-compose-o-du-o1.yaml up -d` or `docker compose -f docker-compose-o-ru-mplane.yaml up -d`.
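As a quick illustration of the ordering rule described in the README addition above: the loader does a plain string sort of the file names and extracts the module name and datastore with the pattern introduced in `base/src/core/netconf.py` (`Netconf.get_datastore_files`) further down in this patch. The file names below are invented for the example.

```python
import re

# Pattern from Netconf.get_datastore_files(): optional numeric prefix,
# module name, datastore, and extension.
pattern = re.compile(r'^(?:(\d+)-)?(.+)-(running|operational)\.(xml|json)$')

files = [
    "10-ietf-interfaces-running.xml",
    "002-ietf-hardware-operational.json",
    "o-ran-operations-operational.json",
    "1-ietf-netconf-server-running.json",
]

# Plain lexicographic sort: '002' < '1' < '10' < 'o-ran...', hence the
# advice to zero-pad the prefix when more than 9 files must be ordered.
for name in sorted(files):
    match = pattern.match(name)
    if match:
        prefix, module_name, datastore, extension = match.groups()
        print(f"{name}: module={module_name}, datastore={datastore}, prefix={prefix}")
```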
diff --git a/base/Dockerfile b/base/Dockerfile index 4e625f8..a0a5fde 100644 --- a/base/Dockerfile +++ b/base/Dockerfile @@ -35,6 +35,8 @@ RUN apt-get update && DEBIAN_FRONTEND="noninteractive" apt-get install -y \ # ftpd related openssl openssh-client openssh-server \ vsftpd \ + # DHCP related + libpcap-dev tcpdump iproute2 \ && \ apt-get clean && \ rm -rf /var/cache/apt/archives/* /var/lib/apt/lists/* diff --git a/base/docker/scripts/common.sh b/base/docker/scripts/common.sh index 70f2429..a6b91bf 100644 --- a/base/docker/scripts/common.sh +++ b/base/docker/scripts/common.sh @@ -20,7 +20,7 @@ # arrays of modules to (un)install NP2_MODULES=( -"ietf-interfaces@2018-02-20.yang" +"ietf-interfaces@2018-02-20.yang -e if-mib -e arbitrary-names -e pre-provisioning" "ietf-ip@2018-02-22.yang" "ietf-netconf@2013-09-29.yang -e writable-running -e candidate -e rollback-on-error -e validate -e startup -e url -e xpath -e confirmed-commit" "ietf-netconf-nmda@2019-01-07.yang -e origin -e with-defaults" diff --git a/base/src/application.py b/base/src/application.py index 34a8c2c..497a37a 100755 --- a/base/src/application.py +++ b/base/src/application.py @@ -44,7 +44,6 @@ class Application(): # setup logging set_pynts_log_level(args.verbose.upper()) - logger.info("bbbbbbb") # get available extensions self.loaded_extensions = [Core()] diff --git a/base/src/core/config.py b/base/src/core/config.py index b56bac2..fd37eab 100644 --- a/base/src/core/config.py +++ b/base/src/core/config.py @@ -15,16 +15,28 @@ # * limitations under the License. # ***************************************************************************/ +from scapy.all import Ether, IP, UDP, BOOTP, DHCP, sendp, sniff, AsyncSniffer, get_if_list from typing import Optional from util.logging import get_pynts_logger import os import sys +import time from strenum import StrEnum -from util.docker import get_hostname, get_ip_from_env +from util.docker import get_hostname, get_container_mac_address logger = get_pynts_logger("config") +# Mapping of Option 43 sub-option types +OPTION_43_TYPES = { + 0x81: "Controller IP Address", + 0x82: "Controller FQDN", + 0x83: "Event Collector IP Address", + 0x84: "Event Collector FQDN", + 0x85: "PNF Registration Format", + 0x86: "NETCONF Call Home" +} + """ Configuration class Singleton @@ -61,6 +73,15 @@ class Config: ves_url: str ves_username: str ves_password: str + + dhcp_sdnr_controller_ip: str + dhcp_sdnr_fqdn: str + dhcp_sdnr_callhome_tls: bool + + dhcp_ves_ip: str + dhcp_ves_fqdn: str + + sdnr_certificate_markers: bool = False # json variables @@ -75,6 +96,13 @@ class Config: def reload(self) -> None: logger.info("reloading config") + + self.dhcp_sdnr_controller_ip: str = None + self.dhcp_sdnr_fqdn: str = None + self.dhcp_sdnr_callhome_tls: bool = False + + self.dhcp_ves_ip: str = None + self.dhcp_ves_fqdn: str = None self.netconf_function_type: str = os.environ.get("NETWORK_FUNCTION_TYPE", "undefined") @@ -82,6 +110,8 @@ class Config: self.tls_listen_endpoint: bool = self.get_envvar_bool("TLS_LISTEN_ENDPOINT", "False") self.ssh_callhome_endpoint: bool = self.get_envvar_bool("SSH_CALLHOME_ENDPOINT", "False") self.tls_callhome_endpoint: bool = self.get_envvar_bool("TLS_CALLHOME_ENDPOINT", "False") + + self.dhcp_get_config() endpoints = os.environ.get("ENDPOINT_COUNT", 1) try: @@ -97,9 +127,16 @@ class Config: self.sdnr_username: str = os.environ.get("SDNR_USERNAME", "admin") self.sdnr_password: str = os.environ.get("SDNR_PASSWORD", "admin") - self.ves_url = os.environ.get("VES_URL", "") + if self.dhcp_ves_fqdn 
is not None: + self.ves_url = f"https://{self.dhcp_ves_fqdn}/eventListener/v7" + elif self.dhcp_ves_ip is not None: + self.ves_url = f"https://{self.dhcp_ves_ip}/eventListener/v7" + else: + self.ves_url = os.environ.get("VES_URL", "") self.ves_username = os.environ.get("VES_USERNAME", "sample1") self.ves_password = os.environ.get("VES_PASSWORD", "sample1") + + self.sdnr_certificate_markers: bool = self.get_envvar_bool("SDNR_CERTIFICATE_MARKERS", "False") @staticmethod def get_envvar_bool(varname: str, default_value: str) -> bool: @@ -118,7 +155,7 @@ class Config: # if only_one_listen_true or only_one_callhome_true: # logger.error(f"Expecting only one type of connection (either SSH or TLS, but not both) at the same time! Got SSH_ENDPOINT={self.ssh_listen_endpoint}, TLS_ENDPOINT={self.tls_listen_endpoint}, SSH_CALLHOME={self.ssh_callhome_endpoint} and TLS_CALLHOME={self.tls_callhome_endpoint}. Please re-check the config!") # sys.exit("Invalid configuration for the SSH and TLS endpoints.") - + def to_dict(self) -> dict: return { "ssh_listen_endpoint": self.ssh_listen_endpoint, @@ -134,5 +171,153 @@ class Config: "sdnr_restconf_url": self.sdnr_restconf_url, "sdnr_username": self.sdnr_username, - "sdnr_password": self.sdnr_password + "sdnr_password": self.sdnr_password, + + "ves_url": self.ves_url, + "ves_username": self.ves_username, + "ves_password": self.ves_password } + + def parse_option_43(self, raw_bytes): + """Parses vendor-encapsulated Option 43 data.""" + index = 0 + + while index < len(raw_bytes): + if index + 2 > len(raw_bytes): + print("[!] Malformed Option 43 data (truncated)") + break + + opt_type = raw_bytes[index] # First byte is the sub-option type + opt_len = raw_bytes[index + 1] # Second byte is the length + + if index + 2 + opt_len > len(raw_bytes): + print(f"[!] Skipping invalid Option 43 sub-option {opt_type:#04x} (bad length)") + break + + opt_value = raw_bytes[index + 2: index + 2 + opt_len] # Value field + + if opt_type == 0x81: # IP Address (4 bytes) + if opt_len == 4: + parsed_value = ".".join(str(b) for b in opt_value) + self.dhcp_sdnr_controller_ip = parsed_value + logger.debug(f"Received Controller IP {self.dhcp_sdnr_controller_ip} via DHCP.") + else: + logger.error(f"Length of IP address is not 4, but {opt_len}. Failed to extract Controller IP address from DHCP.") + self.dhcp_sdnr_controller_ip = None + + elif opt_type == 0x83: # IP Address (4 bytes) + if opt_len == 4: + parsed_value = ".".join(str(b) for b in opt_value) + self.dhcp_ves_ip = parsed_value + logger.debug(f"Received VES Collector IP {self.dhcp_ves_ip} via DHCP.") + else: + logger.error(f"Length of IP address is not 4, but {opt_len}. Failed to extract VES Collector IP address from DHCP.") + self.dhcp_ves_ip = None + + elif opt_type == 0x82: # FQDN (ASCII string) + try: + self.dhcp_sdnr_fqdn = opt_value.decode("ascii") + logger.debug(f"Received Controller FQDN {self.dhcp_sdnr_fqdn} via DHCP.") + except UnicodeDecodeError: + logger.error(f"Could not decode Controller FQDN in ASCII. Received hex value: {opt_value.hex()}") + self.dhcp_sdnr_fqdn = None + + elif opt_type == 0x84: # FQDN (ASCII string) + try: + self.dhcp_ves_fqdn = opt_value.decode("ascii") + logger.debug(f"Received VES Collector FQDN {self.dhcp_ves_fqdn} via DHCP.") + except UnicodeDecodeError: + logger.error(f"Could not decode Controller FQDN in ASCII. 
Received hex value: {opt_value.hex()}") + self.dhcp_ves_fqdn = None + + elif opt_type == 0x86: # Single-byte flag + if opt_len == 1: + parsed_value = int(opt_value[0]) + if parsed_value == 0: + self.dhcp_sdnr_callhome_tls = False + logger.debug(f"Received CallHome over SSH via DHCP.") + elif parsed_value == 1: + logger.debug(f"Received CallHome over TLS via DHCP.") + self.dhcp_sdnr_callhome_tls = True + else: + logger.error(f"Could not get correct NETCONF Call Home information. Received hex value: {opt_value.hex()}") + index += 2 + opt_len # Move to the next sub-option + + def print_dhcp_options(self, dhcp_options): + """Print all DHCP options from a list of (option, value) tuples.""" + for opt in dhcp_options: + if isinstance(opt, tuple): + if opt[0] == "vendor_specific": # Fix: Use the correct Scapy name for Option 43 + logger.debug(" Option 43 (Vendor-Specific Information):") + self.parse_option_43(opt[1]) # Convert raw bytes + else: + logger.debug(f" Option {opt[0]}: {opt[1]}") + + def handle_packet(self, pkt): + """Callback to process incoming DHCP packets and detect DHCPOFFER.""" + if DHCP in pkt: + dhcp_opts = pkt[DHCP].options + for opt in dhcp_opts: + if isinstance(opt, tuple) and opt[0] == "message-type": + if opt[1] in [2, "offer"]: # DHCPOFFER detected + server_ip = pkt[IP].src + offered_ip = pkt[BOOTP].yiaddr + logger.debug(f"[+] Received DHCPOFFER from {server_ip}") + logger.debug(f" Offered IP: {offered_ip}") + logger.debug(" Full DHCP options:") + self.print_dhcp_options(dhcp_opts) + return True + return False + + def dhcp_get_config(self): + # Start sniffing in *async* mode so we don't block. + for iface in get_if_list(): + if iface == 'lo': + logger.debug(f"Skipping sending DHCPDISCOVER on {iface}...") + continue + logger.debug(f"Sending DHCPDISCOVER on {iface}") + # sendp(discover, iface=iface, verbose=False) + sniff_thread = AsyncSniffer( + iface=iface, + filter="udp and (port 67 or port 68)", + prn=self.handle_packet, + store=True # Ensure packets are stored in results + ) + sniff_thread.start() + + time.sleep(1) # Give it a moment to get ready + + mac_addr = get_container_mac_address() + logger.debug(f"Got back MAC address {mac_addr}") + + # Now send DHCPDISCOVER + discover = ( + Ether(src=mac_addr, dst="ff:ff:ff:ff:ff:ff") + / IP(src="0.0.0.0", dst="255.255.255.255") + / UDP(sport=68, dport=67) + / BOOTP(chaddr=b'\x02\x50\x02\x99\x00\x01', xid=0x99999999, flags=0x8000) + / DHCP(options=[ + ("message-type", "discover"), + ("parameter-request-list", [43]), # Explicitly request Option 43 + ("vendor_class_id", "o-ran-ru2/pynts"), # Option 60 + "end" + ]) + ) + logger.debug("[*] Sending DHCPDISCOVER...") + sendp(discover, iface=iface, verbose=False) + + logger.debug("[*] Sniffing for 5 seconds...") + time.sleep(5) + + # sniff_thread.running = False + sniff_thread.stop() + results = sniff_thread.results + logger.debug(f"[+] Captured {len(results)} packets in total") + + # for pkt in results: + # if self.handle_packet(pkt): + # logger.debug("[+] Test SUCCESS - Received a valid DHCPOFFER.") + # return True + + # logger.debug("[-] Test FAILED - No DHCPOFFER received.") + # return False diff --git a/base/src/core/netconf.py b/base/src/core/netconf.py index 7c65fe0..5e68ee0 100644 --- a/base/src/core/netconf.py +++ b/base/src/core/netconf.py @@ -127,20 +127,28 @@ class Netconf: sess.apply_changes() elif datastore == Datastore.RUNNING: with self.connection.start_session("running") as sess: - try: - data = sess.get_data(f"/{module_name}:*") - except sysrepo.SysrepoNotFoundError: 
- logger.debug(f"Did not find data for /{module_name}:*") - data = None - if not data: - with self.connection.get_ly_ctx() as ctx: - data = ctx.parse_data_file(file, format, parse_only=True) - # start with a fresh datastore, erase anything that was before - # sess.copy_config("startup", module_name) - sess.edit_batch_ly(data) - sess.apply_changes() - else: - logger.debug(f"Skipping loading data from file {file_path} into module {module_name}. Data already present...") + with self.connection.get_ly_ctx() as ctx: + data = ctx.parse_data_file(file, format, parse_only=True) + # start with a fresh datastore, erase anything that was before + # sess.copy_config("startup", module_name) + sess.edit_batch_ly(data) + sess.apply_changes() + + # try: + # data = sess.get_data(f"/{module_name}:*") + # except sysrepo.SysrepoNotFoundError: + # logger.debug(f"Did not find data for /{module_name}:*") + # data = None + + # if not data: + # with self.connection.get_ly_ctx() as ctx: + # data = ctx.parse_data_file(file, format, parse_only=True) + # # start with a fresh datastore, erase anything that was before + # # sess.copy_config("startup", module_name) + # sess.edit_batch_ly(data) + # sess.apply_changes() + # else: + # logger.debug(f"Skipping loading data from file {file_path} into module {module_name}. Data already present...") @staticmethod def get_datastore_files(directory: str, filter=None) -> list: @@ -149,19 +157,19 @@ class Netconf: extensions = filter.split("|") extensions_pattern = '|'.join(extensions) - pattern_string = r'^(.+)-(running|operational)\.(' + extensions_pattern + ')$' + pattern_string = r'^(?:(\d+)-)?(.+)-(running|operational)\.(' + extensions_pattern + ')$' pattern = re.compile(pattern_string) # Dictionary to store results results = [] # Iterate over files in the specified directory - for filename in os.listdir(directory): + for filename in sorted(os.listdir(directory)): # Check if the filename matches the expected pattern match = pattern.match(filename) if match: # Extract parts of the filename - module_name, datastore, extension = match.groups() + _, module_name, datastore, extension = match.groups() # Append the results results.append({ 'filename': f"{directory}/{filename}", diff --git a/base/src/core/netconf_server.py b/base/src/core/netconf_server.py index ca2087f..6aa12f0 100644 --- a/base/src/core/netconf_server.py +++ b/base/src/core/netconf_server.py @@ -173,7 +173,7 @@ class NetconfServer: odl_trusted_cert_template = DictFactory.get_template("odl-netconf-callhome-trusted-cert") odl_trusted_cert_template.update_key(["input", "trusted-certificate", 0, "name"], self.config.hostname) - odl_trusted_cert_template.update_key(["input", "trusted-certificate", 0, "certificate"], self.crypto.get_certificate_base64_encoding_no_markers()) + odl_trusted_cert_template.update_key(["input", "trusted-certificate", 0, "certificate"], self.crypto.get_certificate_base64_encoding(with_markers=False)) url = self.config.sdnr_restconf_url + ODL_ADD_TRUSTED_KEY_URL logger.debug(f"sending HTTP POST to {url} with payload {odl_trusted_cert_template.data}") diff --git a/base/src/feature/ietf_keystore_truststore.py b/base/src/feature/ietf_keystore_truststore.py index 8133654..98fa06a 100644 --- a/base/src/feature/ietf_keystore_truststore.py +++ b/base/src/feature/ietf_keystore_truststore.py @@ -34,12 +34,12 @@ class IetfKeystoreTruststoreFeature: ietf_keystore_template = DictFactory.get_template("ietf-keystore") ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", 
"asymmetric-key", 0, "public-key"], self.crypto.get_public_key_ssh_format()) ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 0, "cleartext-private-key"], self.crypto.get_private_key_base64_encoding_no_markers()) - ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 0, "certificates", "certificate", 0, "cert-data"], self.crypto.get_certificate_base64_encoding_no_markers(is_smo=False)) + ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 0, "certificates", "certificate", 0, "cert-data"], self.crypto.get_certificate_base64_encoding(is_smo=False, with_markers=False)) ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 1, "public-key"], self.crypto.get_public_key_base64_encoding_no_markers()) ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 1, "cleartext-private-key"], self.crypto.get_private_key_base64_encoding_no_markers()) - ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 1, "certificates", "certificate", 0, "cert-data"], self.crypto.get_certificate_base64_encoding_no_markers(is_smo=False)) - ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 1, "certificates", "certificate", 1, "cert-data"], self.crypto.get_certificate_base64_encoding_no_markers(is_smo=True)) + ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 1, "certificates", "certificate", 0, "cert-data"], self.crypto.get_certificate_base64_encoding(is_smo=False, with_markers=False)) + ietf_keystore_template.update_key(["ietf-keystore:keystore", "asymmetric-keys", "asymmetric-key", 1, "certificates", "certificate", 1, "cert-data"], self.crypto.get_certificate_base64_encoding(is_smo=True, with_markers=False)) self.netconf.set_data(Datastore.RUNNING, "ietf-keystore", ietf_keystore_template.data, "merge") self.netconf.set_data(Datastore.OPERATIONAL, "ietf-keystore", ietf_keystore_template.data) diff --git a/base/src/requirements.txt b/base/src/requirements.txt index d800ef9..e23ddd1 100644 --- a/base/src/requirements.txt +++ b/base/src/requirements.txt @@ -24,3 +24,4 @@ schedule dateutils falcon uvicorn +scapy diff --git a/base/src/util/crypto.py b/base/src/util/crypto.py index bd3ce7a..50d87ad 100644 --- a/base/src/util/crypto.py +++ b/base/src/util/crypto.py @@ -270,11 +270,11 @@ class CryptoUtils(): crypto_string = self.public_key_pem.decode("utf-8") return "\n".join(crypto_string.split("\n")[1:-2]) - def get_certificate_base64_encoding_no_markers(self, is_smo = False) -> str: - ''' Method for getting just the base64 encoding of the private key, removing the ---- BEGIN... and ---- END lines. + def get_certificate_base64_encoding(self, is_smo = False, with_markers = False) -> str: + ''' Method for getting just the base64 encoding of the private key, removing or keeping the ---- BEGIN... and ---- END lines. ''' crypto_string = self.smo_certificate.decode("utf-8") if is_smo else self.odu_certificate.decode("utf-8") - return "\n".join(crypto_string.split("\n")[1:-2]) + return "\n".join(crypto_string.split("\n")[1:-2]) if with_markers is False else crypto_string # def get_ca_certificate_base64_encoding_no_markers(self) -> str: # ''' Method for getting just the base64 encoding of the private key, removing the ---- BEGIN... and ---- END lines. 
diff --git a/doc/environment-variables.md b/doc/environment-variables.md index 06b9614..8c56a19 100644 --- a/doc/environment-variables.md +++ b/doc/environment-variables.md @@ -44,4 +44,10 @@ Below all the available environment variables will be described ## O_DU_CALLHOME_PORT - type string -- the port number where a simulated O-DU listens for call-home connections. Is only relevant when docker image is ran in network_mode="host". Default port is 4335 \ No newline at end of file +- the port number where a simulated O-DU listens for call-home connections. Is only relevant when docker image is ran in network_mode="host" +- default value is **4335** + +## SDNR_CERTIFICATE_MARKERS +- type bool +- if **True**, the *add-trusted-certificate* operation from the simulated O-RU going towards the SDN Controller will contain the *"--- BEGIN ---"* and *"--- END ---"* markers of a certificate, when sending it to ODL. If **False**, the markers will not be part of the certificate. The markers are needed starting with ODL Scandium version. Only relevant for NETCONF Call Home (implemented in O-RU currently). +- default value is **False** diff --git a/docker-compose-o-du-o1.yaml b/docker-compose-o-du-o1.yaml index 77c0bb2..43f4b92 100644 --- a/docker-compose-o-du-o1.yaml +++ b/docker-compose-o-du-o1.yaml @@ -17,10 +17,13 @@ services: pynts-o-du-o1: - image: pynts-o-du-o1:${NTS_VERSION} + image: pynts-o-du-o1:latest container_name: pynts-o-du-o1 hostname: pynts-o-du-o1 privileged: true + cap_add: + - NET_RAW + - NET_ADMIN environment: - ENDPOINT_COUNT=1 @@ -33,7 +36,7 @@ services: - SDNR_USERNAME=admin - SDNR_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U - - VES_URL=http://ves-collector.dcn.smo.o-ran-sc.org/eventListener/v7 + - VES_URL=https://ves-collector.dcn.smo.o-ran-sc.org/eventListener/v7 - VES_USERNAME=sample1 - VES_PASSWORD=sample1 # ports: @@ -45,13 +48,11 @@ services: volumes: - ./o-du-o1/data:/data networks: - oam: - smo: - + # - dhcp + - dcn + networks: - dmz: - external: true - smo: + dcn: external: true - oam: + dhcp: external: true diff --git a/docker-compose-o-ru-mplane.yaml b/docker-compose-o-ru-mplane.yaml index f44d485..bd7c883 100644 --- a/docker-compose-o-ru-mplane.yaml +++ b/docker-compose-o-ru-mplane.yaml @@ -21,6 +21,9 @@ services: container_name: pynts-o-ru-hybrid hostname: pynts-o-ru-hybrid privileged: true + cap_add: + - NET_RAW + - NET_ADMIN environment: - ENDPOINT_COUNT=1 @@ -33,8 +36,11 @@ services: volumes: - ./o-ru-mplane/data:/data - ./o-ru-mplane/data/ietf-netconf-server-running-hybrid.json:/data/ietf-netconf-server-running.json +# - ./o-ru-mplane/data/ietf-netconf-server-ssh-callhome.json:/data/ietf-netconf-server-running.json + - ./o-ru-mplane/src:/app/extensions/o-ru-mplane networks: - oam: + dhcp: + dcn: smo: pynts-o-ru-hierarchical: @@ -42,6 +48,9 @@ services: container_name: pynts-o-ru-hierarchical hostname: pynts-o-ru-hierarchical privileged: true + cap_add: + - NET_RAW + - NET_ADMIN environment: - ENDPOINT_COUNT=1 @@ -55,13 +64,14 @@ services: - ./o-ru-mplane/data:/data - ./o-ru-mplane/data/ietf-netconf-server-running-hierarchical.json:/data/ietf-netconf-server-running.json networks: - oam: + dhcp: + dcn: smo: networks: - dmz: + dhcp: external: true - smo: + dcn: external: true - oam: + smo: external: true diff --git a/docker-compose.yaml b/docker-compose.yaml index be45a86..e4f80b6 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -21,6 +21,9 @@ services: container_name: pynts-dev-du hostname: pynts-dev-du privileged: true + cap_add: + - NET_RAW + 
- NET_ADMIN environment: - ENDPOINT_COUNT=1 @@ -45,14 +48,18 @@ services: # - ./ietf-microwave/data:/data networks: - oam: - smo: + smo: + # dhcp: + dcn: pynts-dev-ru: image: pynts-o-ru-mplane:${NTS_VERSION} container_name: pynts-dev-ru hostname: pynts-dev-ru privileged: true + cap_add: + - NET_RAW + - NET_ADMIN environment: - ENDPOINT_COUNT=1 @@ -66,6 +73,8 @@ services: - VES_URL=https://ves-collector.dcn.smo.o-ran-sc.org/eventListener/v7 - VES_USERNAME=sample1 - VES_PASSWORD=sample1 + + - SDNR_CERTIFICATE_MARKERS=False # ports: # - "830:830" # - "6513:6513" @@ -73,15 +82,18 @@ services: - ./base/src:/app - ./o-ru-mplane/data:/data - ./o-ru-mplane/data/ietf-netconf-server-running-hybrid.json:/data/ietf-netconf-server-running.json + - ./o-ru-mplane/src:/app/extensions/o-ru-mplane networks: - oam: + # dhcp: + dcn: smo: networks: - dmz: + # dhcp: + # external: true + dcn: external: true smo: external: true - oam: - external: true + \ No newline at end of file diff --git a/o-du-o1/data/_3gpp-common-managed-element-running.json b/o-du-o1/data/_3gpp-common-managed-element-running.json index 8ddc6bc..7684edb 100644 --- a/o-du-o1/data/_3gpp-common-managed-element-running.json +++ b/o-du-o1/data/_3gpp-common-managed-element-running.json @@ -21,6 +21,7 @@ "attributes": { "priorityLabel": 1, "cellLocalId": 1, + "administrativeState": "LOCKED", "pLMNInfoList": [ { "mcc": "310", diff --git a/o-du-o1/data/performance-management/index.json b/o-du-o1/data/performance-management/index.json index 1a8a5d4..88d2b2f 100644 --- a/o-du-o1/data/performance-management/index.json +++ b/o-du-o1/data/performance-management/index.json @@ -1,40 +1,40 @@ { "config": { "log-period": 60, - "repetition-period": 180, + "repetition-period": 240, "points": [ - "DRB.MeanActiveUeDl", - "DRB.MeanActiveUeUl", - "DRB.UEThpDl", - "DRB.UEThpUl" + "RRC.ConnMean", + "RRC.ConnMean.226F04", + "RRC.ConnMax", + "RRC.ConnMax.226F04" ] }, "values": [ { - "DRB.MeanActiveUeDl": 22, - "DRB.MeanActiveUeUl": 3, - "DRB.UEThpDl": 17, - "DRB.UEThpUl": 5 + "RRC.ConnMean": 22, + "RRC.ConnMean.226F04": 3, + "RRC.ConnMax": 17, + "RRC.ConnMax.226F04": 5 }, { - "DRB.MeanActiveUeDl": 27, - "DRB.MeanActiveUeUl": 4, - "DRB.UEThpDl": 19, - "DRB.UEThpUl": 7 + "RRC.ConnMean": 27, + "RRC.ConnMean.226F04": 4, + "RRC.ConnMax": 19, + "RRC.ConnMax.226F04": 7 }, { - "DRB.MeanActiveUeDl": 30, - "DRB.MeanActiveUeUl": 9, - "DRB.UEThpDl": 19, - "DRB.UEThpUl": 3 + "RRC.ConnMean": 30, + "RRC.ConnMean.226F04": 9, + "RRC.ConnMax": 19, + "RRC.ConnMax.226F04": 3 }, { - "DRB.MeanActiveUeDl": 5, - "DRB.MeanActiveUeUl": 4, - "DRB.UEThpDl": 3, - "DRB.UEThpUl": 3 + "RRC.ConnMean": 5, + "RRC.ConnMean.226F04": 4, + "RRC.ConnMax": 3, + "RRC.ConnMax.226F04": 3 } ] } diff --git a/o-du-o1/data/performance-management/template_5G_NR.xml b/o-du-o1/data/performance-management/template_5G_NR.xml index dce9318..8eac48c 100644 --- a/o-du-o1/data/performance-management/template_5G_NR.xml +++ b/o-du-o1/data/performance-management/template_5G_NR.xml @@ -7,12 +7,22 @@ - + @point-start@@point-name@@point-end@ - + + @value-start@@value@@value-end@ + @suspect@ + + + + + + + @point-start@@point-name@@point-end@ + @value-start@@value@@value-end@ @suspect@ diff --git a/o-du-o1/src/main.py b/o-du-o1/src/main.py index 1a7f3f8..cda9efb 100644 --- a/o-du-o1/src/main.py +++ b/o-du-o1/src/main.py @@ -115,6 +115,9 @@ class Main(Extension): session = connect_tls(sock=conn, keyfile=KEY_FILE, certfile=CERT_FILE, ca_certs=CA_CERT_FILE) + # Retrieve and log TLS details + logger.info(f"TLS handshake completed 
(Session ID: {session_id}) with TLS version {session.sock.version()}") + mgr = Manager(session, timeout=3) hostname_xml_data_str = mgr.get_config(source="running", filter="").data_xml @@ -172,8 +175,8 @@ class Main(Extension): server_socket.listen(max_connections) logger.info(f"Listening for CallHome TLS connections on {host}:{port}") - # Create an SSL context for mutual TLS authentication - context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) + context = ssl.SSLContext(ssl.PROTOCOL_TLS) # Automatically enables the best available TLS protocols + context.load_cert_chain(certfile=CERT_FILE, keyfile=KEY_FILE) context.load_verify_locations(cafile=CA_CERT_FILE) context.verify_mode = ssl.CERT_REQUIRED @@ -182,10 +185,14 @@ class Main(Extension): with ThreadPoolExecutor(max_workers=max_connections) as executor: while not stop_event.is_set(): conn, addr = server_socket.accept() - # tls_conn = context.wrap_socket(conn) - logger.info(f"Accepted TLS connection from {addr}") - executor.submit(self.handle_callhome_session, conn, addr, session_id) - session_id += 1 + try: + # tls_conn = context.wrap_socket(conn, server_side=True) + logger.info(f"Accepted TLS connection from {addr}") + executor.submit(self.handle_callhome_session, conn, addr, session_id) + session_id += 1 + except ssl.SSLError as e: + logger.error(f"SSL error occurred while accepting connection from {addr}: {e}") + conn.close() def handle_notification(self, notification_xml, session_id) -> None: logger.debug(f"Handling NETCONF notification: {notification_xml}") diff --git a/o-ru-mplane/Dockerfile b/o-ru-mplane/Dockerfile index 0ecb523..b7faceb 100644 --- a/o-ru-mplane/Dockerfile +++ b/o-ru-mplane/Dockerfile @@ -31,5 +31,9 @@ WORKDIR /app ENV NETWORK_FUNCTION_TYPE="o-ru-ofmp" ENV HYBDIR_MPLANE=false +COPY ./o-ru-mplane/entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] + EXPOSE 830 CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"] diff --git a/o-ru-mplane/data/ietf-netconf-server-ssh-callhome.json b/o-ru-mplane/data/ietf-netconf-server-ssh-callhome.json index 018afd5..b97a410 100644 --- a/o-ru-mplane/data/ietf-netconf-server-ssh-callhome.json +++ b/o-ru-mplane/data/ietf-netconf-server-ssh-callhome.json @@ -10,7 +10,7 @@ "name": "default-ssh-callhome", "ssh": { "tcp-client-parameters": { - "remote-address": "192.168.10.253", + "remote-address": "controller.dcn.smo.o-ran-sc.org", "remote-port": 4334 }, "ssh-server-parameters": { diff --git a/o-ru-mplane/data/ietf-netconf-server-template.json b/o-ru-mplane/data/ietf-netconf-server-template.json index f12cf39..10b00a7 100644 --- a/o-ru-mplane/data/ietf-netconf-server-template.json +++ b/o-ru-mplane/data/ietf-netconf-server-template.json @@ -47,7 +47,7 @@ "certificate": { "central-keystore-reference": { "asymmetric-key": "serverkey-tls", - "certificate": "servercert" + "certificate": "servercert-smo" } } }, @@ -62,9 +62,8 @@ "cert-to-name": [ { "id": 1, - "fingerprint": "02:DC:0A:65:17:7F:E7:6D:2C:9A:8B:F1:AD:64:F9:EC:56:D7:36:F4:70", - "map-type": "ietf-x509-cert-to-name:specified", - "name": "netconf" + "fingerprint": "02:DC:CB:E3:29:E2:65:04:A8:DF:B3:63:E7:E4:1A:06:81:64:C6:DA:37", + "map-type": "ietf-x509-cert-to-name:san-rfc822-name" } ] } @@ -84,7 +83,7 @@ "name": "default-ssh-callhome", "ssh": { "tcp-client-parameters": { - "remote-address": "192.168.10.253", + "remote-address": "controller.dcn.smo.o-ran-sc.org", "remote-port": 4334 }, "ssh-server-parameters": { @@ -130,7 +129,7 @@ "name": "tls-auth-endpt", "tls": { 
"tcp-client-parameters": { - "remote-address": "192.168.10.253", + "remote-address": "controller.dcn.smo.o-ran-sc.org", "remote-port": 4335 }, "tls-server-parameters": { @@ -153,9 +152,8 @@ "cert-to-name": [ { "id": 1, - "fingerprint": "02:DC:0A:65:17:7F:E7:6D:2C:9A:8B:F1:AD:64:F9:EC:56:D7:36:F4:70", - "map-type": "ietf-x509-cert-to-name:specified", - "name": "netconf" + "fingerprint": "02:DC:CB:E3:29:E2:65:04:A8:DF:B3:63:E7:E4:1A:06:81:64:C6:DA:37", + "map-type": "ietf-x509-cert-to-name:san-rfc822-name" } ] } diff --git a/o-ru-mplane/data/ietf-netconf-server-tls-callhome.json b/o-ru-mplane/data/ietf-netconf-server-tls-callhome.json index c9b286c..9b0accf 100644 --- a/o-ru-mplane/data/ietf-netconf-server-tls-callhome.json +++ b/o-ru-mplane/data/ietf-netconf-server-tls-callhome.json @@ -10,7 +10,7 @@ "name": "tls-auth-endpt", "tls": { "tcp-client-parameters": { - "remote-address": "172.60.0.71", + "remote-address": "controller.dcn.smo.o-ran-sc.org", "remote-port": 4335 }, "tls-server-parameters": { @@ -18,7 +18,7 @@ "certificate": { "central-keystore-reference": { "asymmetric-key": "serverkey-tls", - "certificate": "servercert" + "certificate": "servercert-smo" } } }, @@ -33,9 +33,8 @@ "cert-to-name": [ { "id": 1, - "fingerprint": "02:e9:38:1f:f6:8b:62:de:0a:0b:c5:03:81:a8:03:49:a0:00:7f:8b:f3", - "map-type": "ietf-x509-cert-to-name:specified", - "name": "netconf" + "fingerprint": "02:DC:CB:E3:29:E2:65:04:A8:DF:B3:63:E7:E4:1A:06:81:64:C6:DA:37", + "map-type": "ietf-x509-cert-to-name:san-rfc822-name" } ] } diff --git a/o-ru-mplane/data/ietf-netconf-server-tls-listen.json b/o-ru-mplane/data/ietf-netconf-server-tls-listen.json index 65b8528..b51c035 100644 --- a/o-ru-mplane/data/ietf-netconf-server-tls-listen.json +++ b/o-ru-mplane/data/ietf-netconf-server-tls-listen.json @@ -15,7 +15,7 @@ "certificate": { "central-keystore-reference": { "asymmetric-key": "serverkey-tls", - "certificate": "servercert" + "certificate": "servercert-smo" } } }, @@ -30,9 +30,8 @@ "cert-to-name": [ { "id": 1, - "fingerprint": "02:02:00:6E:31:7C:65:CB:E0:72:37:5E:32:B2:AF:86:53:48:82:EC:98:3F", - "map-type": "ietf-x509-cert-to-name:specified", - "name": "netconf" + "fingerprint": "02:DC:CB:E3:29:E2:65:04:A8:DF:B3:63:E7:E4:1A:06:81:64:C6:DA:37", + "map-type": "ietf-x509-cert-to-name:san-rfc822-name" } ] } diff --git a/o-ru-mplane/data/o-ran-operations-operational.json b/o-ru-mplane/data/o-ran-operations-operational.json new file mode 100644 index 0000000..4e2f436 --- /dev/null +++ b/o-ru-mplane/data/o-ran-operations-operational.json @@ -0,0 +1,9 @@ +{ + "o-ran-operations:operational-info":{ + "declarations": + { + "ru-instance-id":"pynts-o-ru-hybrid", + "supported-mplane-version": "16.0.0" + } + } +} diff --git a/o-ru-mplane/entrypoint.sh b/o-ru-mplane/entrypoint.sh new file mode 100755 index 0000000..e310816 --- /dev/null +++ b/o-ru-mplane/entrypoint.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +echo "Running network setup..." + +# Function to clean interface names (remove @ifX suffix) +clean_iface_name() { + echo "$1" | cut -d'@' -f1 +} + +# Check if CMD is empty and set a fallback +if [[ -z "$1" ]]; then + echo "CMD is missing! Restoring default from the image." 
+ set -- /usr/bin/supervisord -c /etc/supervisor/conf.d/supervisord.conf +fi + +# Identify interfaces based on their IP ranges +MACVLAN_IF="" +BRIDGE_IF="" + +for RAW_IFACE in $(ip -o link show | awk -F': ' '!/lo/ {print $2}'); do + IFACE=$(clean_iface_name "$RAW_IFACE") # Remove @ifX suffix + IP=$(ip -4 -o addr show dev "$IFACE" | awk '{print $4}' | cut -d'/' -f1) + + if [[ "$IP" == 172.99.* ]]; then + MACVLAN_IF="$IFACE" + else + BRIDGE_IF="$IFACE" + fi +done + +# Ensure both interfaces were correctly identified +if [[ -z "$BRIDGE_IF" || -z "$MACVLAN_IF" ]]; then + echo "Could not determine macvlan or bridge network interfaces. Exiting." + exec "$@" + exit 0 +fi + +echo "Macvlan interface: $MACVLAN_IF (IP range 172.99.x.x)" +echo "Bridge interface: $BRIDGE_IF (Internet access expected)" + +# Rename interfaces properly (only if needed) +if [[ "$MACVLAN_IF" == "eth0" ]]; then + echo "Renaming interfaces to ensure bridge is eth0..." + + # Bring interfaces down before renaming + ip link set eth0 down + ip link set eth1 down + + # Rename eth0 -> temp, eth1 -> eth0, temp -> eth1 + ip link set eth0 name tempeth + ip link set eth1 name eth0 + ip link set tempeth name eth1 + + # Bring interfaces back up + ip link set eth0 up + ip link set eth1 up + + # Update variable names (since we renamed them) + TMP="$MACVLAN_IF" + MACVLAN_IF="$BRIDGE_IF" + BRIDGE_IF="$TMP" +fi + +# Find the gateway of the bridge network +echo "Modifying default route to use $BRIDGE_IF for internet access..." +BRIDGE_GATEWAY=$(ip route | grep "default via" | grep "$BRIDGE_IF" | awk '{print $3}') + +if [[ -z "$BRIDGE_GATEWAY" ]]; then + # Fallback: Manually infer from subnet (assume .1 as gateway) + BRIDGE_SUBNET=$(ip route | grep "dev $BRIDGE_IF" | awk '{print $1}') + BRIDGE_GATEWAY=$(echo "$BRIDGE_SUBNET" | sed 's|0/.*|1|') +fi + +# Ensure the gateway was found before modifying routes +if [[ -z "$BRIDGE_GATEWAY" ]]; then + echo "Could not determine bridge network gateway. Exiting." + exec "$@" + exit 0 +fi + +echo "Bridge network gateway inferred as: $BRIDGE_GATEWAY" + +# **Fix: Ensure default route exists before deleting** +if ip route | grep -q "default via"; then + ip route del default +fi + +# **Fix: Only add route if it does not already exist** +if ! ip route | grep -q "default via $BRIDGE_GATEWAY"; then + ip route add default via "$BRIDGE_GATEWAY" dev "$BRIDGE_IF" +fi + +echo "Network setup complete. Starting application..." 
+ +# Debug: Print CMD before executing +echo "Executing: $@" + +# Execute CMD +exec "$@" diff --git a/o-ru-mplane/src/main.py b/o-ru-mplane/src/main.py index 754dd17..4c23bac 100644 --- a/o-ru-mplane/src/main.py +++ b/o-ru-mplane/src/main.py @@ -25,6 +25,7 @@ from core.netconf import Netconf, Datastore from util.crypto import CryptoUtils from util.threading import sa_sleep from util.logging import get_pynts_logger +from sysrepo.errors import SysrepoNotFoundError logger = get_pynts_logger("o-ru-mplane") @@ -55,7 +56,8 @@ class Main(Extension): def startup(self) -> None: self.update_o_ran_certificates() - self.start_odl_allow_thread() + is_tls = self.replace_callhome_settings() + self.start_odl_allow_thread(is_tls) logger.info("o-ru-mplane extension loaded") def update_o_ran_certificates(self) -> None: @@ -66,15 +68,18 @@ class Main(Extension): self.netconf.set_data(Datastore.OPERATIONAL, "", o_ran_certificates_template.data) - def start_odl_allow_thread(self): - request_thread = threading.Thread(target=self.send_odl_callhome_allow_tls) + def start_odl_allow_thread(self, is_tls: bool): + if is_tls is True: + request_thread = threading.Thread(target=self.send_odl_callhome_allow_tls) + elif is_tls is False: + request_thread = threading.Thread(target=self.send_odl_callhome_allow_ssh) request_thread.daemon = True # Set as daemon so it exits when the main program exits request_thread.start() def send_odl_callhome_allow_tls(self) -> None: odl_trusted_cert_template = DictFactory.get_template("odl-netconf-callhome-trusted-cert") odl_trusted_cert_template.update_key(["input", "trusted-certificate", 0, "name"], self.config.hostname) - odl_trusted_cert_template.update_key(["input", "trusted-certificate", 0, "certificate"], self.crypto_util.get_certificate_base64_encoding_no_markers(is_smo=True)) + odl_trusted_cert_template.update_key(["input", "trusted-certificate", 0, "certificate"], self.crypto_util.get_certificate_base64_encoding(is_smo=True, with_markers=self.config.sdnr_certificate_markers)) odl_trusted_cert_template_remove = DictFactory.get_template("odl-netconf-callhome-trusted-cert-remove") odl_trusted_cert_template_remove.update_key(["input","name", 0], self.config.hostname) @@ -131,6 +136,77 @@ class Main(Extension): if not (success1 and success2): sa_sleep(10) + def send_odl_callhome_allow_ssh(self) -> None: + allow_ssh_template = DictFactory.get_template("odl-netconf-callhome-server-ssh") + + allow_ssh_template.update_key(["odl-netconf-callhome-server:device", "unique-id"], self.config.hostname) + allow_ssh_template.update_key(["odl-netconf-callhome-server:device", "ssh-client-params", "credentials", "username"], self.config.netconf_username) + allow_ssh_template.update_key(["odl-netconf-callhome-server:device", "ssh-client-params", "credentials", "passwords"], self.config.netconf_password, append_to_list=True) + allow_ssh_template.update_key(["odl-netconf-callhome-server:device", "ssh-client-params", "host-key"], self.crypto_util.get_public_key_ssh_format()) + + url = self.config.sdnr_restconf_url + ODL_CALLHOME_ALLOW_DEVICES_URL + self.config.hostname + + success1 = False # Flag to track the success of the request + while not success1: + logger.debug(f"sending HTTP PUT to {url} with payload {allow_ssh_template.data}") + response = requests.put(url, auth=(self.config.sdnr_username, self.config.sdnr_password), json=allow_ssh_template.data, headers=HTTP_YANG_JSON_HEADERS, verify=False) + if response.status_code >= 200 and response.status_code < 300: + logger.debug(f"HTTP response to {url} 
succeded with code {response.status_code}") + success1 = True + else: + logger.error(f"HTTP PUT request failed to {url} with payload {allow_ssh_template.data} with status_code={response.status_code}") + + # Wait 10 seconds before retrying + if not (success1): + sa_sleep(10) + + def replace_callhome_settings(self) -> bool: + is_tls = False + try: + client_parameters = self.netconf.running.get_data("/ietf-netconf-server:netconf-server/call-home/netconf-client/endpoints/endpoint/tls/tcp-client-parameters") + is_tls = True + except SysrepoNotFoundError as e: + try: + client_parameters = self.netconf.running.get_data("/ietf-netconf-server:netconf-server/call-home/netconf-client/endpoints/endpoint/ssh/tcp-client-parameters") + is_tls = False + except SysrepoNotFoundError as e: + try: + client_parameters = self.netconf.running.get_data( + "/ietf-netconf-server:netconf-server/listen/endpoints/endpoint/tls/tcp-client-parameters") + is_tls = True + except SysrepoNotFoundError as e: + return is_tls + return is_tls + + if self.config.dhcp_sdnr_fqdn is not None: + update_remote_address(client_parameters, "smo", self.config.dhcp_sdnr_fqdn) + elif self.config.dhcp_sdnr_controller_ip is not None: + update_remote_address(client_parameters, "smo", self.config.dhcp_sdnr_controller_ip) + else: + # we don't change anything if we got nothing via DHCP + return is_tls + + self.netconf.running.edit_batch(client_parameters, "ietf-netconf-server", default_operation="merge") + self.netconf.running.apply_changes() + + return is_tls + + +def update_remote_address(config, target_endpoint_name, new_address): + """Recursively update the 'remote-address' for a given endpoint name (just a substring comparison).""" + if isinstance(config, dict): + for key, value in config.items(): + if key == "endpoint" and isinstance(value, list): + for endpoint in value: + if target_endpoint_name in endpoint.get("name"): + # Update the remote-address if the structure matches + if "tls" in endpoint and "tcp-client-parameters" in endpoint["tls"]: + endpoint["tls"]["tcp-client-parameters"]["remote-address"] = new_address + else: + update_remote_address(value, target_endpoint_name, new_address) + elif isinstance(config, list): + for item in config: + update_remote_address(item, target_endpoint_name, new_address) class OranCertificatesTemplate(BaseTemplate): """A dictionary template for netconf-server-parameters objects.""" -- 2.16.6
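The DHCP handling added in `base/src/core/config.py` relies on Option 43 carrying vendor sub-options as type/length/value triplets, which `Config.parse_option_43()` walks byte by byte. Below is a minimal, self-contained sketch of that layout and of the same walk; the controller address, FQDN, and Call Home flag are illustrative values only.

```python
import socket

def sub_option(opt_type: int, value: bytes) -> bytes:
    # Each vendor sub-option is encoded as <type><length><value>.
    return bytes([opt_type, len(value)]) + value

# Example Option 43 payload: Controller IP (0x81), Controller FQDN (0x82),
# NETCONF Call Home over TLS (0x86 == 1). Values are made up.
option_43 = (
    sub_option(0x81, socket.inet_aton("192.168.10.253"))
    + sub_option(0x82, b"controller.dcn.smo.o-ran-sc.org")
    + sub_option(0x86, bytes([1]))
)

# Same TLV walk as Config.parse_option_43(): read type and length, slice the
# value, then jump to the next sub-option.
index = 0
while index + 2 <= len(option_43):
    opt_type = option_43[index]
    opt_len = option_43[index + 1]
    opt_value = option_43[index + 2: index + 2 + opt_len]
    print(f"sub-option {opt_type:#04x}, length {opt_len}: {opt_value!r}")
    index += 2 + opt_len
```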