# args: <log-dir> <file-prefix>
__CHARTMUS_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=CHARTMUS" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_chartmuseum.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=CHARTMUS" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_chartmuseum.log 2>&1
else
docker logs $CHART_MUS_APP_NAME > $1$2_chartmuseum.log 2>&1
fi
EYELLOW="\033[0m"
SAMELINE="\033[0K\r"
+KUBECONF=""
+
__kube_scale_all_resources() {
echo " Scaling down in namespace $1 ..."
namespace=$1
resources="deployment replicaset statefulset"
for restype in $resources; do
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
- count=$(kubectl get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+ count=$(kubectl $KUBECONF get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
if [ $? -eq 0 ] && [ ! -z "$count" ]; then
if [ $count -ne 0 ]; then
echo " Scaling $restype $resid in namespace $namespace with label autotest to 0, current count=$count."
- kubectl scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> /dev/null
+ kubectl $KUBECONF scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> /dev/null
fi
fi
done
namespace=$1
resources="deployment replicaset statefulset"
for restype in $resources; do
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
T_START=$SECONDS
count=1
scaled=0
while [ $count -gt 0 ]; do
- count=$(kubectl get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+ count=$(kubectl $KUBECONF get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
if [ $? -eq 0 ] && [ ! -z "$count" ]; then
if [ $count -ne 0 ]; then
echo -ne " Scaling $restype $resid in namespace $namespace with label autotest to 0, current count=$count....$(($SECONDS-$T_START)) seconds"$SAMELINE
namespace=$1
resources="deployments replicaset statefulset services pods configmaps pvc serviceaccounts"
for restype in $resources; do
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
echo " Deleting $restype $resid in namespace $namespace with label autotest "
- kubectl delete --grace-period=1 $restype $resid -n $namespace 1> /dev/null 2> /dev/null
+ kubectl $KUBECONF delete --grace-period=1 $restype $resid -n $namespace 1> /dev/null 2> /dev/null
done
fi
done
echo " Delete all non-namespaced resources ..."
resources="pv clusterrolebindings"
for restype in $resources; do
- result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
echo " Deleting $restype $resid with label autotest "
- kubectl delete --grace-period=1 $restype $resid 1> /dev/null 2> /dev/null
+ kubectl $KUBECONF delete --grace-period=1 $restype $resid 1> /dev/null 2> /dev/null
done
fi
done
namespace=$1
resources="deployments replicaset statefulset services pods configmaps pvc "
for restype in $resources; do
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
echo " Deleting $restype $resid in namespace $namespace with label autotest "
- kubectl delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
+ kubectl $KUBECONF delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
echo -ne " Waiting for $restype $resid in namespace $namespace with label autotest to be deleted..."$SAMELINE
T_START=$SECONDS
result="dummy"
while [ ! -z "$result" ]; do
sleep 0.5
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
echo -ne " Waiting for $restype $resid in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
if [ -z "$result" ]; then
echo -e " Waiting for $restype $resid in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
echo " Wait for delete pv ..."
resources="pv "
for restype in $resources; do
- result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
echo " Deleting $restype $resid with label autotest "
- kubectl delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
+ kubectl $KUBECONF delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
echo -ne " Waiting for $restype $resid with label autotest to be deleted..."$SAMELINE
T_START=$SECONDS
result="dummy"
while [ ! -z "$result" ]; do
sleep 0.5
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
echo -ne " Waiting for $restype $resid with label autotest to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
if [ -z "$result" ]; then
echo -e " Waiting for $restype $resid with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
echo "Will remove all kube resources marked with label 'autotest'"
+# Print command usage for this script
+print_usage() {
+    echo "Usage: clean_kube.sh [--kubeconfig <kube-config-file>]"
+}
+
+# Parse cmd line args: optional '--kubeconfig <file>' sets KUBECONF.
+# No args keeps the default (empty) KUBECONF so the user's current
+# kubectl context is used. Any other arg combination is an error.
+if [ $# -eq 2 ]; then
+    if [ "$1" == "--kubeconfig" ]; then
+        if [ ! -f "$2" ]; then
+            echo "File $2 for --kubeconfig is not found"
+            print_usage
+            exit
+        fi
+        KUBECONF="--kubeconfig $2"
+    else
+        print_usage
+        exit
+    fi
+elif [ $# -ne 0 ]; then
+    # exactly one arg, or more than two args - not a valid call
+    print_usage
+    exit
+fi
+
# List all namespace and scale/delete per namespace
-nss=$(kubectl get ns -o jsonpath='{.items[*].metadata.name}')
+nss=$(kubectl $KUBECONF get ns -o jsonpath='{.items[*].metadata.name}')
if [ ! -z "$nss" ]; then
for ns in $nss; do
__kube_scale_all_resources $ns
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+
+# Script to clean all namespaces from kubernetes having the label 'autotest', i.e. started by autotest
+
+# Terminal color/control escape codes used for formatted output
+BOLD="\033[1m"
+EBOLD="\033[0m"
+RED="\033[31m\033[1m"
+ERED="\033[0m"
+GREEN="\033[32m\033[1m"
+EGREEN="\033[0m"
+YELLOW="\033[33m\033[1m"
+EYELLOW="\033[0m"
+SAMELINE="\033[0K\r"
+
+# Extra kubectl option ('--kubeconfig <file>') - empty by default,
+# meaning the user's current kubectl context is used
+KUBECONF=""
+
+echo "Will remove all kube namespaces marked with label 'autotest'"
+
+# Print command usage for this script
+print_usage() {
+    echo "Usage: clean_kube_ns.sh [--kubeconfig <kube-config-file>]"
+}
+
+# Parse cmd line args: optional '--kubeconfig <file>' sets KUBECONF.
+# No args keeps the default (empty) KUBECONF. Any other arg
+# combination is an error.
+if [ $# -eq 2 ]; then
+    if [ "$1" == "--kubeconfig" ]; then
+        if [ ! -f "$2" ]; then
+            echo "File $2 for --kubeconfig is not found"
+            print_usage
+            exit
+        fi
+        KUBECONF="--kubeconfig $2"
+    else
+        print_usage
+        exit
+    fi
+elif [ $# -ne 0 ]; then
+    # exactly one arg, or more than two args - not a valid call
+    print_usage
+    exit
+fi
+
+# Indent stdin one step (used to format kubectl output)
+indent1() { sed 's/^/ /'; }
+
+# List all namespaces carrying the 'autotest' label and delete each one.
+# Note: $KUBECONF is intentionally unquoted - it expands to zero or two words.
+nss=$(kubectl $KUBECONF get ns -o 'jsonpath={.items[?(@.metadata.labels.autotest)].metadata.name}')
+if [ ! -z "$nss" ]; then
+    for ns in $nss; do
+        echo "Deleting namespace: "$ns
+        kubectl $KUBECONF delete ns $ns | indent1
+    done
+fi
+echo "Done"
\ No newline at end of file
# args: <log-dir> <file-prefix>
__CP_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=CP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_control-panel.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=CP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_control-panel.log 2>&1
else
docker logs $CONTROL_PANEL_APP_NAME > $1$2_control-panel.log 2>&1
fi
# args: <log-dir> <file-prefix>
__CR_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- for podname in $(kubectl get pods -n $KUBE_SIM_NAMESPACE -l "autotest=CR" -o custom-columns=":metadata.name"); do
- kubectl logs -n $KUBE_SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+ for podname in $(kubectl $KUBECONF get pods -n $KUBE_SIM_NAMESPACE -l "autotest=CR" -o custom-columns=":metadata.name"); do
+ kubectl $KUBECONF logs -n $KUBE_SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
done
else
crs=$(docker ps --filter "name=$CR_APP_NAME" --filter "network=$DOCKER_SIM_NWNAME" --filter "status=running" --format {{.Names}})
# args: <log-dir> <file-prefix>
__DMAAPADP_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=DMAAPADP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapadapter.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=DMAAPADP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapadapter.log 2>&1
else
docker logs $DMAAP_ADP_APP_NAME > $1$2_dmaapadapter.log 2>&1
fi
# args: <log-dir> <file-prefix>
__DMAAPMED_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=DMAAPMED" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapmediator.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=DMAAPMED" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapmediator.log 2>&1
else
docker logs $DMAAP_MED_APP_NAME > $1$2_dmaapmediator.log 2>&1
fi
# args: <log-dir> <file-prefix>
__HELMMANAGER_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=HELMMANAGER" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_helmmanager.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=HELMMANAGER" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_helmmanager.log 2>&1
else
docker logs $HELM_MANAGER_APP_NAME > $1$2_helmmanager.log 2>&1
fi
return 1
fi
else
- retmsg=$(kubectl exec -it $HELM_MANAGER_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE -- helm repo add $1 $2)
+ retmsg=$(kubectl $KUBECONF exec -it $HELM_MANAGER_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE -- helm repo add $1 $2)
retcode=$?
if [ $retcode -ne 0 ]; then
__log_conf_fail_general " Cannot add repo to helm, return code: $retcode, msg: $retmsg"
# args: <log-dir> <file-prefix>
__HTTPPROXY_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=HTTPPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_httpproxy.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=HTTPPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_httpproxy.log 2>&1
else
docker logs $HTTP_PROXY_APP_NAME > $1$2_httpproxy.log 2>&1
fi
# args: <log-dir> <file-prefix>
__ICS_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_ics.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_ics.log 2>&1
else
docker logs $ICS_APP_NAME > $1$2_ics.log 2>&1
fi
__kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest ICS
echo " Deleting the replica set - a new will be started when the app is started"
- tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ICS")
+ tmp=$(kubectl $KUBECONF delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ICS")
if [ $? -ne 0 ]; then
echo -e $RED" Could not delete replica set "$RED
((RES_CONF_FAIL++))
else
echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__ICS_WORKER_NODE to deployment for $ICS_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$BOLD
echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$BOLD
- tmp=$(kubectl patch deployment $ICS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ICS_WORKER_NODE'"}}}}}')
+ tmp=$(kubectl $KUBECONF patch deployment $ICS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ICS_WORKER_NODE'"}}}}}')
if [ $? -ne 0 ]; then
echo -e $YELLOW" Cannot set nodeSelector to deployment for $ICS_APP_NAME, persistency may not work"$EYELLOW
fi
ics_kube_pvc_reset() {
__log_test_start $@
- pvc_name=$(kubectl get pvc -n $KUBE_NONRTRIC_NAMESPACE --no-headers -o custom-columns=":metadata.name" | grep information)
+ pvc_name=$(kubectl $KUBECONF get pvc -n $KUBE_NONRTRIC_NAMESPACE --no-headers -o custom-columns=":metadata.name" | grep information)
if [ -z "$pvc_name" ]; then
pvc_name=informationservice-pvc
fi
# args: <log-dir> <file-prefix>
__KAFKAPC_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=KAFKAPC" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kafkapc.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=KAFKAPC" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kafkapc.log 2>&1
else
docker logs $KAFKAPC_APP_NAME > $1$2_kafkapc.log 2>&1
fi
# args: <log-dir> <file-prefix>
__KUBEPROXY_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=KUBEPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kubeproxy.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=KUBEPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kubeproxy.log 2>&1
else
docker logs $KUBE_PROXY_APP_NAME > $1$2_kubeproxy.log 2>&1
fi
#Finding host of the proxy
echo " Trying to find svc hostname..."
- CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE -o jsonpath={.status.loadBalancer.ingress[0].hostname}")
+ CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl $KUBECONF get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE -o jsonpath={.status.loadBalancer.ingress[0].hostname}")
if [ "$CLUSTER_KUBE_PROXY_HOST" == "localhost" ]; then
if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
#Host of proxy not found, trying to find the ip....
echo " Trying to find svc ip..."
- CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE -o jsonpath={.status.loadBalancer.ingress[0].ip}")
+ CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl $KUBECONF get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE -o jsonpath={.status.loadBalancer.ingress[0].ip}")
if [ ! -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
#Host ip found
echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
fi
if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
#Host/ip of proxy not found, try to use the cluster and the nodeports of the proxy
- CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+ CLUSTER_KUBE_PROXY_HOST=$(kubectl $KUBECONF config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
echo -e $YELLOW" The test environment cluster ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http$PORT_KEY_PREFIX") # port for proxy access
KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web$PORT_KEY_PREFIX") # web port, only for alive test
# args: <log-dir> <file-prefix>
__MR_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=MR" -n $KUBE_ONAP_NAMESPACE --tail=-1 > $1$2_mr_stub.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=MR" -n $KUBE_ONAP_NAMESPACE --tail=-1 > $1$2_mr_stub.log 2>&1
else
docker logs $MR_STUB_APP_NAME > $1$2_mr_stub.log 2>&1
fi
# args: <log-dir> <file-prefix>
__DMAAPMR_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- for podname in $(kubectl get pods -n $KUBE_ONAP_NAMESPACE -l "autotest=DMAAPMR" -o custom-columns=":metadata.name"); do
- kubectl logs -n $KUBE_ONAP_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+ for podname in $(kubectl $KUBECONF get pods -n $KUBE_ONAP_NAMESPACE -l "autotest=DMAAPMR" -o custom-columns=":metadata.name"); do
+ kubectl $KUBECONF logs -n $KUBE_ONAP_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
done
else
docker logs $MR_DMAAP_APP_NAME > $1$2_mr.log 2>&1
# args: <log-dir> <file-prefix>
__NGW_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=NGW" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_gateway.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=NGW" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_gateway.log 2>&1
else
docker logs $NRT_GATEWAY_APP_NAME > $1$2_gateway.log 2>&1
fi
# args: <log-dir> <file-prefix>
__PA_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_policy-agent.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_policy-agent.log 2>&1
else
docker logs $POLICY_AGENT_APP_NAME > $1$2_policy-agent.log 2>&1
fi
# Keep the initial worker node in case the pod need to be "restarted" - must be made to the same node due to a volume mounted on the host
if [ $retcode_i -eq 0 ]; then
- __PA_WORKER_NODE=$(kubectl get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+ __PA_WORKER_NODE=$(kubectl $KUBECONF get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
if [ -z "$__PA_WORKER_NODE" ]; then
echo -e $YELLOW" Cannot find worker node for pod for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
fi
fi
__kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA
echo " Deleting the replica set - a new will be started when the app is started"
- tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
+ tmp=$(kubectl $KUBECONF delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
if [ $? -ne 0 ]; then
echo -e $RED" Could not delete replica set "$RED
((RES_CONF_FAIL++))
else
echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__PA_WORKER_NODE to deployment for $POLICY_AGENT_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$BOLD
echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$BOLD
- tmp=$(kubectl patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
+ tmp=$(kubectl $KUBECONF patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
if [ $? -ne 0 ]; then
echo -e $YELLOW" Cannot set nodeSelector to deployment for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
fi
config_json=$config_json"\n \"ric\": ["
if [ $RUNMODE == "KUBE" ]; then
- result=$(kubectl get pods -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.items[?(@.metadata.labels.autotest=="RICSIM")].metadata.name}')
+ result=$(kubectl $KUBECONF get pods -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.items[?(@.metadata.labels.autotest=="RICSIM")].metadata.name}')
rics=""
ric_cntr=0
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for im in $result; do
if [[ $im != *"-0" ]]; then
- ric_subdomain=$(kubectl get pod $im -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.spec.subdomain}')
+ ric_subdomain=$(kubectl $KUBECONF get pod $im -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.spec.subdomain}')
rics=$rics" "$im"."$ric_subdomain"."$KUBE_A1SIM_NAMESPACE
let ric_cntr=ric_cntr+1
fi
pms_kube_pvc_reset() {
__log_test_start $@
- pvc_name=$(kubectl get pvc -n $KUBE_NONRTRIC_NAMESPACE --no-headers -o custom-columns=":metadata.name" | grep policy)
+ pvc_name=$(kubectl $KUBECONF get pvc -n $KUBE_NONRTRIC_NAMESPACE --no-headers -o custom-columns=":metadata.name" | grep policy)
if [ -z "$pvc_name" ]; then
pvc_name=policymanagementservice-vardata-pvc
fi
# args: <log-dir> <file-prefix>
__PRODSTUB_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=PRODSTUB" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_prodstub.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=PRODSTUB" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_prodstub.log 2>&1
else
docker logs $PROD_STUB_APP_NAME > $1$2_prodstub.log 2>&1
fi
# args: <log-dir> <file-prefix>
__PVCCLEANER_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=PRODSTUB" -A --tail=-1 > $1$2_pvs_cleaner.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=PRODSTUB" -A --tail=-1 > $1$2_pvs_cleaner.log 2>&1
fi
}
# args: <log-dir> <file-prefix>
__RC_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=RC" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_rc.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=RC" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_rc.log 2>&1
else
docker logs $RAPP_CAT_APP_NAME > $1$2_rc.log 2>&1
fi
# args: <log-dir> <file-prefix>
__RICSIM_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- for podname in $(kubectl get pods -n $KUBE_A1SIM_NAMESPACE -l "autotest=RICSIM" -o custom-columns=":metadata.name"); do
- kubectl logs -n $KUBE_A1SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+ for podname in $(kubectl $KUBECONF get pods -n $KUBE_A1SIM_NAMESPACE -l "autotest=RICSIM" -o custom-columns=":metadata.name"); do
+ kubectl $KUBECONF logs -n $KUBE_A1SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
done
else
# args: <log-dir> <file-prefix>
__SDNC_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
- podname=$(kubectl get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
- kubectl exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
+ podname=$(kubectl $KUBECONF get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
+ kubectl $KUBECONF exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
else
docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
fi
__print_args() {
echo "Args: remote|remote-remove docker|kube --env-file <environment-filename> [release] [auto-clean] [--stop-at-error] "
echo " [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+] [--use-snapshot-image <app-nam>+]"
- echo " [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
+ echo " [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address>]"
echo " [--repo-policy local|remote] [--cluster-timeout <timeout-in seconds>] [--print-stats]"
echo " [--override <override-environment-filename>] [--pre-clean] [--gen-stats] [--delete-namespaces]"
- echo " [--delete-containers] [--endpoint-stats]"
+ echo " [--delete-containers] [--endpoint-stats] [--kubeconfig <config-file>]"
}
if [ $# -eq 1 ] && [ "$1" == "help" ]; then
echo "--delete-namespaces - Delete kubernetes namespaces before starting tests - but only those created by the test scripts. Kube mode only. Ignored if running with prestarted apps."
echo "--delete-containers - Delete docker containers before starting tests - but only those created by the test scripts. Docker mode only."
echo "--endpoint-stats - Collect endpoint statistics"
+ echo "--kubeconfig - Configure kubectl to use cluster specific cluster config file"
echo ""
echo "List of app short names supported: "$APP_SHORT_NAMES
exit 0
#Var to control if containers shall be delete before test setup
DELETE_CONTAINERS=0
+#Var to configure kubectl from a config file.
+KUBECONF=""
+
#File to keep deviation messages
DEVIATION_FILE=".tmp_deviations"
rm $DEVIATION_FILE &> /dev/null
foundparm=0
fi
fi
-
+ if [ $paramerror -eq 0 ]; then
+ if [ "$1" == "--kubeconfig" ]; then
+ shift;
+ if [ -z "$1" ]; then
+ paramerror=1
+ if [ -z "$paramerror_str" ]; then
+ paramerror_str="No path found for : '--kubeconfig'"
+ fi
+ else
+ if [ -f $1 ]; then
+ KUBECONF="--kubeconfig $1"
+ echo "Option set - Kubeconfig path: "$1
+ shift;
+ foundparm=0
+ else
+ paramerror=1
+ if [ -z "$paramerror_str" ]; then
+ paramerror_str="File $1 for --kubeconfig not found"
+ fi
+ fi
+ fi
+ fi
+ fi
done
echo ""
if [ $RUNMODE == "KUBE" ]; then
echo " kubectl is installed and using versions:"
echo $(kubectl version --short=true) | indent2
- res=$(kubectl cluster-info 2>&1)
+ res=$(kubectl $KUBECONF cluster-info 2>&1)
if [ $? -ne 0 ]; then
echo -e "$BOLD$RED############################################# $ERED$EBOLD"
- echo -e $BOLD$RED"Command 'kubectl cluster-info' returned error $ERED$EBOLD"
+ echo -e $BOLD$RED"Command 'kubectl '$KUBECONF' cluster-info' returned error $ERED$EBOLD"
echo -e "$BOLD$RED############################################# $ERED$EBOLD"
echo " "
echo "kubectl response:"
echo $res
echo " "
echo "This script may have been started with user with no permission to run kubectl"
- echo "Try running with 'sudo' or set 'KUBECONFIG'"
+ echo "Try running with 'sudo', set env KUBECONFIG or set '--kubeconfig' parameter"
echo "Do either 1, 2 or 3 "
echo " "
echo "1"
echo -e $BOLD"sudo -E <test-script-and-parameters>"$EBOLD
echo " "
echo "3"
- echo "Set KUBECONFIG inline (replace user)"
- echo -e $BOLD"sudo KUBECONFIG='/home/<user>/.kube/config' <test-script-and-parameters>"$EBOLD
+ echo "Set KUBECONFIG via script parameter"
+ echo -e $BOLD"sudo ... --kubeconfig /home/<user>/.kube/<config-file> ...."$EBOLD
+ echo "The config file need to downloaded from the cluster"
exit 1
fi
echo " Node(s) and container runtime config"
- kubectl get nodes -o wide | indent2
+ kubectl $KUBECONF get nodes -o wide | indent2
fi
fi
if [ "$DELETE_KUBE_NAMESPACES" -eq 1 ]; then
- test_env_namespaces=$(kubectl get ns --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env
+ test_env_namespaces=$(kubectl $KUBECONF get ns --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env
if [ $? -ne 0 ]; then
echo " Cannot get list of namespaces...ignoring delete"
else
done
fi
else
- echo " Namespace delete option not set"
+ echo " Namespace delete option not set or ignored"
fi
echo ""
echo " Removing stopped containers..."
docker rm $(docker ps -qa --filter "label=nrttest_app") 2> /dev/null
else
- echo " Contatiner delete option not set"
+ echo " Contatiner delete option not set or ignored"
fi
echo ""
echo -e " Pulling remote snapshot or staging images my in some case result in pulling newer image versions outside the control of the test engine"
export KUBE_IMAGE_PULL_POLICY="Always"
fi
- CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+ CLUSTER_IP=$(kubectl $KUBECONF config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW
echo "================================================================================="
# Get resource type for scaling
# args: <resource-name> <namespace>
__kube_get_resource_type() {
- kubectl get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
if [ $? -eq 0 ]; then
echo "deployment"
return 0
fi
- kubectl get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
if [ $? -eq 0 ]; then
echo "sts"
return 0
# (Not for test scripts)
__kube_scale() {
echo -ne " Setting $1 $2 replicas=$4 in namespace $3"$SAMELINE
- kubectl scale $1 $2 -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF scale $1 $2 -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr
if [ $? -ne 0 ]; then
echo -e " Setting $1 $2 replicas=$4 in namespace $3 $RED Failed $ERED"
((RES_CONF_FAIL++))
TSTART=$SECONDS
for i in {1..500}; do
- count=$(kubectl get $1/$2 -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null)
+ count=$(kubectl $KUBECONF get $1/$2 -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null)
retcode=$?
if [ -z "$count" ]; then
#No value is sometimes returned for some reason, in case the resource has replica 0
labelid=$3
resources="deployment replicaset statefulset"
for restype in $resources; do
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
echo -ne " Ordered caling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"$SAMELINE
- kubectl scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
echo -e " Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0 $GREEN OK $EGREEN"
done
fi
scaled_all=0
for restype in $resources; do
if [ -z "$3" ]; then
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}')
else
- result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
fi
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
for resid in $result; do
echo -e " Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"
- kubectl scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
count=1
T_START=$SECONDS
while [ $count -ne 0 ]; do
- count=$(kubectl get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+ count=$(kubectl $KUBECONF get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
echo -ne " Scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0, current count=$count"$SAMELINE
if [ $? -eq 0 ] && [ ! -z "$count" ]; then
sleep 0.5
ns_flag=""
ns_text=""
fi
- result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
deleted_resourcetypes=$deleted_resourcetypes" "$restype
for resid in $result; do
if [ $restype == "replicaset" ] || [ $restype == "statefulset" ]; then
count=1
while [ $count -ne 0 ]; do
- count=$(kubectl get $restype $resid $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null)
+ count=$(kubectl $KUBECONF get $restype $resid $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null)
echo -ne " Scaling $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count"$SAMELINE
if [ $? -eq 0 ] && [ ! -z "$count" ]; then
sleep 0.5
echo -e " Scaled $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count $GREEN OK $EGREEN"
fi
echo -ne " Deleting $restype $resid $ns_text with label $labelname=$labelid "$SAMELINE
- kubectl delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr
if [ $? -eq 0 ]; then
echo -e " Deleted $restype $resid $ns_text with label $labelname=$labelid $GREEN OK $EGREEN"
else
result="dummy"
while [ ! -z "$result" ]; do
sleep 0.5
- result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+ result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
echo -ne " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
if [ -z "$result" ]; then
echo -e " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
__kube_create_namespace() {
#Check if test namespace exists, if not create it
- kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr
if [ $? -ne 0 ]; then
echo -ne " Creating namespace "$1 $SAMELINE
- kubectl create namespace $1 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF create namespace $1 1> /dev/null 2> ./tmp/kubeerr
if [ $? -ne 0 ]; then
echo -e " Creating namespace $1 $RED$BOLD FAILED $EBOLD$ERED"
((RES_CONF_FAIL++))
echo " Message: $(<./tmp/kubeerr)"
return 1
else
- kubectl label ns $1 autotest=engine
+ kubectl $KUBECONF label ns $1 autotest=engine
echo -e " Creating namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN"
fi
else
__kube_delete_namespace() {
#Check if test namespace exists, if so remove it
- kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr
if [ $? -eq 0 ]; then
echo -ne " Removing namespace "$1 $SAMELINE
- kubectl delete namespace $1 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF delete namespace $1 1> /dev/null 2> ./tmp/kubeerr
if [ $? -ne 0 ]; then
echo -e " Removing namespace $1 $RED$BOLD FAILED $EBOLD$ERED"
((RES_CONF_FAIL++))
exit 1
fi
for timeout in {1..60}; do
- host=$(kubectl get svc $1 -n $2 -o jsonpath='{.spec.clusterIP}')
+ host=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{.spec.clusterIP}')
if [ $? -eq 0 ]; then
if [ ! -z "$host" ]; then
echo $host
fi
for timeout in {1..60}; do
- port=$(kubectl get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}')
+ port=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}')
if [ $? -eq 0 ]; then
if [ ! -z "$port" ]; then
echo $port
fi
for timeout in {1..60}; do
- port=$(kubectl get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}')
+ port=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}')
if [ $? -eq 0 ]; then
if [ ! -z "$port" ]; then
echo $port
__kube_create_instance() {
echo -ne " Creating $1 $2"$SAMELINE
envsubst < $3 > $4
- kubectl apply -f $4 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF apply -f $4 1> /dev/null 2> ./tmp/kubeerr
if [ $? -ne 0 ]; then
((RES_CONF_FAIL++))
echo -e " Creating $1 $2 $RED Failed $ERED"
echo -ne " Creating configmap $1 "$SAMELINE
envsubst < $5 > $5"_tmp"
cp $5"_tmp" $5 #Need to copy back to orig file name since create configmap neeed the original file name
- kubectl create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6
+ kubectl $KUBECONF create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6
if [ $? -ne 0 ]; then
echo -e " Creating configmap $1 $RED Failed $ERED"
((RES_CONF_FAIL++))
return 1
fi
- kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF apply -f $6 1> /dev/null 2> ./tmp/kubeerr
if [ $? -ne 0 ]; then
echo -e " Creating configmap $1 $RED Apply failed $ERED"
echo " Message: $(<./tmp/kubeerr)"
((RES_CONF_FAIL++))
return 1
fi
- kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
+ kubectl $KUBECONF label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
if [ $? -ne 0 ]; then
echo -e " Creating configmap $1 $RED Labeling failed $ERED"
echo " Message: $(<./tmp/kubeerr)"
return 1
fi
# Log the resulting map
- kubectl get configmap $1 -n $2 -o yaml > $6
+ kubectl $KUBECONF get configmap $1 -n $2 -o yaml > $6
echo -e " Creating configmap $1 $GREEN OK $EGREEN"
return 0
envsubst < $input_yaml > $output_yaml
- kubectl delete -f $output_yaml 1> /dev/null 2> /dev/null # Delete the previous terminated pod - if existing
+ kubectl $KUBECONF delete -f $output_yaml 1> /dev/null 2> /dev/null # Delete the previous terminated pod - if existing
__kube_create_instance pod $PVC_CLEANER_APP_NAME $input_yaml $output_yaml
if [ $? -ne 0 ]; then
term_ts=$(($SECONDS+30))
while [ $term_ts -gt $SECONDS ]; do
- pod_status=$(kubectl get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase")
+ pod_status=$(kubectl $KUBECONF get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase")
if [ "$pod_status" == "Succeeded" ]; then
return 0
fi
__clean_containers
if [ $PRE_CLEAN -eq 1 ]; then
echo " Cleaning kubernetes resouces to free up resources, may take time..."
- ../common/clean_kube.sh 2>&1 /dev/null
+        ../common/clean_kube.sh $KUBECONF > /dev/null 2>&1
echo ""
fi
fi
done
fi
if [ $RUNMODE == "KUBE" ]; then
- namespaces=$(kubectl get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}')
+ namespaces=$(kubectl $KUBECONF get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}')
for nsid in $namespaces; do
- pods=$(kubectl get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+ pods=$(kubectl $KUBECONF get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
for podid in $pods; do
- kubectl logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log
+ kubectl $KUBECONF logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log
done
done
fi
- odu-app
ports:
- 8086:8086
+ - 40936:40936
environment:
- MR_HOST=http://sdnr-mr-sim
- MR_PORT=3905
"fmt"
"io"
"net/http"
+ "net/http/httputil"
log "github.com/sirupsen/logrus"
)
+type RequestError struct {
+ StatusCode int
+ Body []byte
+}
+
+func (e RequestError) Error() string {
+ return fmt.Sprintf("error response with status: %v and body: %v", e.StatusCode, string(e.Body))
+}
+
type Client struct {
httpClient *http.Client
+ verbose bool
}
-func New(httpClient *http.Client) *Client {
+func New(httpClient *http.Client, verbose bool) *Client {
return &Client{
httpClient: httpClient,
+ verbose: verbose,
}
}
-func (c *Client) Get(path string, v interface{}) error {
- req, err := c.newRequest(http.MethodGet, path, nil)
+func (c *Client) Get(path string, v interface{}, userInfo ...string) error {
+ var req *http.Request
+ var err error
+
+ if len(userInfo) > 1 {
+ req, err = c.newRequest(http.MethodGet, path, nil, userInfo[0], userInfo[1])
+ } else {
+ req, err = c.newRequest(http.MethodGet, path, nil)
+ }
+
if err != nil {
return fmt.Errorf("failed to create GET request: %w", err)
}
return nil
}
-func (c *Client) Post(path string, payload interface{}, v interface{}) error {
+func (c *Client) Post(path string, payload interface{}, v interface{}, userInfo ...string) error {
+ var req *http.Request
+ var err error
- s, _ := json.MarshalIndent(payload, "", "\t")
- log.Debugf("Post request payload: " + string(s))
+ if len(userInfo) > 1 {
+ req, err = c.newRequest(http.MethodPost, path, payload, userInfo[0], userInfo[1])
+ } else {
+ req, err = c.newRequest(http.MethodPost, path, payload)
+ }
- req, err := c.newRequest(http.MethodPost, path, payload)
if err != nil {
return fmt.Errorf("failed to create POST request: %w", err)
}
return nil
}
-func (c *Client) newRequest(method, path string, payload interface{}) (*http.Request, error) {
+func (c *Client) Put(path string, payload interface{}, v interface{}, userName string, password string) error {
+ req, err := c.newRequest(http.MethodPut, path, payload, userName, password)
+ if err != nil {
+ return fmt.Errorf("failed to create PUT request: %w", err)
+ }
+
+ if err := c.doRequest(req, v); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Client) newRequest(method, path string, payload interface{}, userInfo ...string) (*http.Request, error) {
var reqBody io.Reader
+
if payload != nil {
bodyBytes, err := json.Marshal(payload)
if err != nil {
}
req, err := http.NewRequest(method, path, reqBody)
+
if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %w", err)
}
+ if len(userInfo) > 0 {
+ req.SetBasicAuth(userInfo[0], userInfo[1])
+ }
+
if reqBody != nil {
- req.Header.Set("Content-Type", "application/json; charset=utf-8")
+ req.Header.Set("Content-Type", "application/json")
}
- log.Debugf("Http Client Request: [%s:%s]\n", req.Method, req.URL)
+
+ if c.verbose {
+		if reqDump, dumpErr := httputil.DumpRequest(req, true); dumpErr != nil {
+			fmt.Println(dumpErr)
+ } else {
+ fmt.Println(string(reqDump))
+ }
+ }
+
return req, nil
}
}
dec := json.NewDecoder(resp.Body)
- if err := dec.Decode(v); err != nil {
+ if err := dec.Decode(&v); err != nil {
return fmt.Errorf("could not parse response body: %w [%s:%s]", err, r.Method, r.URL.String())
}
log.Debugf("Http Client Response: %v\n", v)
return nil, fmt.Errorf("failed to make request [%s:%s]: %w", r.Method, r.URL.String(), err)
}
+ if c.verbose {
+		if responseDump, dumpErr := httputil.DumpResponse(resp, true); dumpErr != nil {
+			fmt.Println(dumpErr)
+ } else {
+ fmt.Println(string(responseDump))
+ }
+ }
+
if resp.StatusCode >= http.StatusOK && resp.StatusCode <= 299 {
return resp, nil
}
defer resp.Body.Close()
+ responseData, _ := io.ReadAll(resp.Body)
+
+ putError := RequestError{
+ StatusCode: resp.StatusCode,
+ Body: responseData,
+ }
- return resp, fmt.Errorf("failed to do request, %d status code received", resp.StatusCode)
+ return resp, putError
}
"net/http/httptest"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- client := New(&http.Client{})
+ client := New(&http.Client{}, false)
req, err := client.newRequest(tt.args.method, tt.args.path, tt.args.payload)
if tt.wantErr != nil {
assertions.EqualError(tt.wantErr, err.Error())
} else {
assertions.Equal("url", req.URL.Path)
- assertions.Equal("application/json; charset=utf-8", req.Header.Get("Content-Type"))
+ assertions.Equal("application/json", req.Header.Get("Content-Type"))
assertions.Empty(req.Header.Get("Authorization"))
assertions.Nil(err)
}
respCode: http.StatusBadRequest,
resp: nil,
},
- wantErr: "failed to do request, 400 status code received",
+ wantErr: "error response with status: 400 and body:",
},
}
t.Run(tt.name, func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ assertions.Equal(http.MethodGet, r.Method)
response, _ := json.Marshal(tt.args.resp)
w.Header().Set("Content-Type", tt.args.header)
w.WriteHeader(tt.args.respCode)
}))
defer srv.Close()
- client := New(&http.Client{})
+ client := New(&http.Client{}, false)
var res interface{}
err := client.Get(srv.URL, &res)
if err != nil {
- assertions.Equal(tt.wantErr, err.Error())
+ assertions.Contains(err.Error(), tt.wantErr)
}
assertions.Equal(tt.args.resp, res)
})
}
func TestPost(t *testing.T) {
- assertions := require.New(t)
- type args struct {
- header string
- respCode int
- resp interface{}
- }
- tests := []struct {
- name string
- args args
- wantErr string
- }{
- {
- name: "successful Post request",
- args: args{
- header: "application/json",
- respCode: http.StatusOK,
- resp: "Success!",
- },
- wantErr: "",
- },
+ header := "application/json"
+ respCode := http.StatusOK
+ resp := "Success!"
+
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ assert.Equal(t, http.MethodPost, r.Method)
+ assert.Contains(t, r.Header.Get("Content-Type"), "application/json")
+
+ var reqBody string
+ decoder := json.NewDecoder(r.Body)
+ decoder.Decode(&reqBody)
+ assert.Equal(t, reqBody, `json:"example"`)
+
+ response, _ := json.Marshal(resp)
+ w.Header().Set("Content-Type", header)
+ w.WriteHeader(respCode)
+ w.Write(response)
+ }))
+ defer srv.Close()
+
+ client := New(&http.Client{}, false)
+ payload := `json:"example"`
+ err := client.Post(srv.URL, payload, nil, "admin", "pass")
+
+ if err != nil {
+ assert.Equal(t, "", err.Error())
}
+}
- for _, tt := range tests {
+func TestPut(t *testing.T) {
+ header := "application/json"
+ respCode := http.StatusOK
+ resp := "Success!"
- t.Run(tt.name, func(t *testing.T) {
- srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- assertions.Equal(http.MethodPost, r.Method)
- assertions.Contains(r.Header.Get("Content-Type"), "application/json")
+ assert.Equal(t, http.MethodPut, r.Method)
+ assert.Contains(t, r.Header.Get("Content-Type"), "application/json")
- var reqBody interface{}
- decoder := json.NewDecoder(r.Body)
- decoder.Decode(&reqBody)
- assertions.Equal(reqBody, `json:"example"`)
+ var reqBody string
+ decoder := json.NewDecoder(r.Body)
+ decoder.Decode(&reqBody)
+ assert.Equal(t, reqBody, `json:"example"`)
- response, _ := json.Marshal(tt.args.resp)
- w.Header().Set("Content-Type", tt.args.header)
- w.WriteHeader(tt.args.respCode)
- w.Write(response)
- }))
- defer srv.Close()
+ response, _ := json.Marshal(resp)
+ w.Header().Set("Content-Type", header)
+ w.WriteHeader(respCode)
+ w.Write(response)
+ }))
+ defer srv.Close()
- client := New(&http.Client{})
- payload := `json:"example"`
- err := client.Post(srv.URL, payload, nil)
+ client := New(&http.Client{}, false)
+ payload := `json:"example"`
+ err := client.Put(srv.URL, payload, nil, "admin", "pass")
- if err != nil {
- assertions.Equal(tt.wantErr, err.Error())
- }
- })
+ if err != nil {
+ assert.Equal(t, "", err.Error())
}
}
package sliceassurance
import (
+ "encoding/json"
+ "fmt"
"net/http"
"time"
+ "oransc.org/usecase/oduclosedloop/internal/config"
"oransc.org/usecase/oduclosedloop/internal/restclient"
"oransc.org/usecase/oduclosedloop/internal/structures"
"oransc.org/usecase/oduclosedloop/messages"
)
const (
- THRESHOLD_TPUT = 700
- DEFAULT_DEDICATED_RATIO = 40
- NEW_DEDICATED_RATIO = 50
- NODE_ID = "O-DU-1211"
+ THRESHOLD_TPUT = 7000
+ DEFAULT_DEDICATED_RATIO = 15
+ NEW_DEDICATED_RATIO = 25
+ NODE_ID = "O-DU-1122"
)
type App struct {
var dmaapMRUrl string
var sDNRUrl string
+var sDNRUsername string
+var sDNRPassword string
-func (a *App) Initialize(dmaapUrl string, sdnrUrl string) {
- dmaapMRUrl = dmaapUrl
- sDNRUrl = sdnrUrl
+func (a *App) Initialize(config *config.Config) {
+ dmaapMRUrl = config.MRHost + ":" + config.MRPort
+ sDNRUrl = config.SDNRAddress
+ sDNRUsername = config.SDNRUser
+ sDNRPassword = config.SDNPassword
- a.client = restclient.New(&http.Client{})
+ a.client = restclient.New(&http.Client{}, false)
a.metricsPolicies = structures.NewSliceAssuranceMeas()
}
}
}
-func (a *App) getMessagesFromDmaap(url string) {
- var stdMessage messages.StdDefinedMessage
- a.client.Get(url, &stdMessage)
- log.Infof("Polling new messages from DmaapMR")
- for _, meas := range stdMessage.GetMeasurements() {
- //Create sliceMetric and check if metric exist and update existing one or create new one
- if _, err := a.metricsPolicies.AddOrUpdateMetric(meas); err != nil {
- log.Error("Metric could not be added ", err)
+func (a *App) getMessagesFromDmaap(path string) {
+ log.Infof("Polling new messages from DmaapMR %v", path)
+
+ //Added to work with onap-Dmaap
+ var messageStrings []string
+ if error := a.client.Get(path, &messageStrings); error != nil {
+ log.Warn("Send of Get messages from DmaapMR failed! ", error)
+ }
+
+ for _, msgString := range messageStrings {
+ var message messages.StdDefinedMessage
+ if err := json.Unmarshal([]byte(msgString), &message); err == nil {
+ for _, meas := range message.GetMeasurements() {
+ log.Infof("Create sliceMetric and check if metric exist and update existing one or create new one measurement: %+v\n", meas)
+ //Create sliceMetric and check if metric exist and update existing one or create new one
+ if _, err := a.metricsPolicies.AddOrUpdateMetric(meas); err != nil {
+ log.Error("Metric could not be added ", err)
+ }
+ }
+ } else {
+ log.Warn(err)
}
}
}
func (a *App) getRRMInformation(duid string) {
var duRRMPolicyRatio messages.ORanDuRestConf
- a.client.Get(getUrlForDistributedUnitFunctions(sDNRUrl, duid), &duRRMPolicyRatio)
- policies := duRRMPolicyRatio.DistributedUnitFunction.RRMPolicyRatio
- for _, policy := range policies {
- a.metricsPolicies.AddNewPolicy(duid, policy)
+ log.Infof("Get RRM Information from SDNR url: %v", sDNRUrl)
+ if error := a.client.Get(getUrlForDistributedUnitFunctions(sDNRUrl, duid), &duRRMPolicyRatio, sDNRUsername, sDNRPassword); error == nil {
+ prettyPrint(duRRMPolicyRatio.DistributedUnitFunction)
+ } else {
+ log.Warn("Send of Get RRM Information failed! ", error)
+ }
+
+ for _, odu := range duRRMPolicyRatio.DistributedUnitFunction {
+ for _, policy := range odu.RRMPolicyRatio {
+ log.Infof("Add or Update policy: %+v from DU id: %v", policy.Id, duid)
+ a.metricsPolicies.AddNewPolicy(duid, policy)
+ }
}
}
func (a *App) updateDedicatedRatio() {
-
for _, metric := range a.metricsPolicies.Metrics {
policy, check := a.metricsPolicies.Policies[metric.RRMPolicyRatioId]
//TODO What happened if dedicated ratio is already higher that default and threshold is exceed?
if check && policy.PolicyDedicatedRatio <= DEFAULT_DEDICATED_RATIO {
- log.Infof("Send Post Request to update DedicatedRatio for DU id: %v Policy id: %v", metric.DUId, policy.PolicyRatioId)
- url := getUrlUpdatePolicyDedicatedRatio(sDNRUrl, metric.DUId, policy.PolicyRatioId)
- a.client.Post(url, policy.GetUpdateDedicatedRatioMessage(metric.SliceDiff, metric.SliceServiceType, NEW_DEDICATED_RATIO), nil)
+ log.Infof("Send Request to update DedicatedRatio for DU id: %v Policy id: %v", metric.DUId, policy.PolicyRatioId)
+ path := getUrlUpdatePolicyDedicatedRatio(sDNRUrl, metric.DUId, policy.PolicyRatioId)
+ updatePolicyMessage := policy.GetUpdateDedicatedRatioMessage(metric.SliceDiff, metric.SliceServiceType, NEW_DEDICATED_RATIO)
+ prettyPrint(updatePolicyMessage)
+ if error := a.client.Put(path, updatePolicyMessage, nil, sDNRUsername, sDNRPassword); error == nil {
+ log.Infof("Policy Dedicated Ratio for PolicyId: %v was updated to %v", policy.PolicyRatioId, NEW_DEDICATED_RATIO)
+ } else {
+ log.Warn("Send of Put Request to update DedicatedRatio failed! ", error)
+ }
}
}
}
func getUrlUpdatePolicyDedicatedRatio(host string, duid string, policyid string) string {
return host + "/rests/data/network-topology:network-topology/topology=topology-netconf/node=" + NODE_ID + "/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions=" + duid + "/radio-resource-management-policy-ratio=" + policyid
}
+
+func prettyPrint(jsonStruct interface{}) {
+ b, err := json.MarshalIndent(jsonStruct, "", " ")
+ if err != nil {
+ fmt.Println("error:", err)
+ }
+ fmt.Print(string(b))
+}
return &pr
}
-func (pr *PolicyRatio) GetUpdateDedicatedRatioMessage(sd int, sst int, dedicatedRatio int) []messages.RRMPolicyRatio {
+func (pr *PolicyRatio) GetUpdateDedicatedRatioMessage(sd int, sst int, dedicatedRatio int) interface{} {
message := messages.RRMPolicyRatio{
Id: pr.PolicyRatioId,
- AdmState: "Locked",
- UserLabel: "Some user label",
+ AdmState: "unlocked",
+ UserLabel: pr.PolicyRatioId,
RRMPolicyMaxRatio: pr.PolicyMaxRatio,
RRMPolicyMinRatio: pr.PolicyMinRatio,
RRMPolicyDedicatedRatio: dedicatedRatio,
},
},
}
- return []messages.RRMPolicyRatio{message}
+ rrmPolicies := []messages.RRMPolicyRatio{message}
+
+ return struct {
+ RRMPolicies []messages.RRMPolicyRatio `json:"radio-resource-management-policy-ratio"`
+ }{
+ RRMPolicies: rrmPolicies,
+ }
+
}
var duid string
var sd, sst int
- regex := *regexp.MustCompile(`\/(.*)network-function\/distributed-unit-functions\[id=\'(.*)\'\]\/cell\[id=\'(.*)\'\]\/supported-measurements\/performance-measurement-type=\'(.*)\'\]\/supported-snssai-subcounter-instances\[slice-differentiator=\'(\d+)\'\]\[slice-service-type=\'(\d+)\'\]`)
+ regex := *regexp.MustCompile(`\/(.*)network-function\/distributed-unit-functions\[id=\'(.*)\'\]\/cell\[id=\'(.*)\'\]\/supported-measurements\[performance-measurement-type=\'(.*)\'\]\/supported-snssai-subcounter-instances\[slice-differentiator=\'(\d+)\'\]\[slice-service-type=\'(\d+)\'\]`)
res := regex.FindAllStringSubmatch(meas.MeasurementTypeInstanceReference, -1)
if res != nil && len(res[0]) == 7 {
name: "Test adding new metric",
args: args{
meas: messages.Measurement{
- MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements/performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
Value: 51232,
Unit: "kbit/s",
},
name: "Test with invalid input",
args: args{
meas: messages.Measurement{
- MeasurementTypeInstanceReference: "/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements/performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ MeasurementTypeInstanceReference: "/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
Value: 51232,
Unit: "kbit/s",
},
func TestUpdateExistingMetric(t *testing.T) {
assertions := require.New(t)
meas := messages.Measurement{
- MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements/performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
Value: 51232,
Unit: "kbit/s",
}
updateMeas := messages.Measurement{
- MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements/performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
Value: 897,
Unit: "kbit/s",
}
func TestDeleteMetricWhenValueLessThanThreshold(t *testing.T) {
meas := messages.Measurement{
- MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements/performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
Value: 51232,
Unit: "kbit/s",
}
newMeas := messages.Measurement{
- MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements/performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
Value: 50,
Unit: "kbit/s",
}
func TestAddPolicy(t *testing.T) {
meas := messages.Measurement{
- MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements/performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
Value: 51232,
Unit: "kbit/s",
}
log.Debug("Using configuration: ", configuration)
- dmaapUrl := configuration.MRHost + ":" + configuration.MRPort
-
if err := validateConfiguration(configuration); err != nil {
log.Fatalf("Unable to start consumer due to configuration error: %v", err)
}
a := sliceassurance.App{}
- a.Initialize(dmaapUrl, configuration.SDNRAddress)
+ a.Initialize(configuration)
go a.Run(TOPIC, configuration.Polltime)
http.HandleFunc("/status", statusHandler)
package messages
type ORanDuRestConf struct {
- DistributedUnitFunction DistributedUnitFunction `json:"distributed-unit-functions"`
+ DistributedUnitFunction []DistributedUnitFunction `json:"o-ran-sc-du-hello-world:distributed-unit-functions"`
}
type DistributedUnitFunction struct {
- Id string `json:"id"`
- RRMPolicyRatio []RRMPolicyRatio `json:"radio-resource-management-policy-ratio"`
+ Id string `json:"id"`
+ OperationalState string `json:"operational-state"`
+ AdmState string `json:"administrative-state"`
+ UserLabel string `json:"user-label"`
+ RRMPolicyRatio []RRMPolicyRatio `json:"radio-resource-management-policy-ratio"`
+ Cell []Cell `json:"cell"`
}
type RRMPolicyRatio struct {
SliceDifferentiator int `json:"slice-differentiator"`
SliceServiceType int `json:"slice-service-type"`
}
+
+type Cell struct {
+ Id string `json:"id"`
+ LocalId int `json:"local-id"`
+ PhysicalCellId int `json:"physical-cell-id"`
+ BaseStationChannelBandwidth BaseStationChannelBandwidth `json:"base-station-channel-bandwidth"`
+ OperationalState string `json:"operational-state"`
+ TrackingAreaCode int `json:"tracking-area-code"`
+ AdmState string `json:"administrative-state"`
+ PublicLandMobileNetworks []PublicLandMobileNetworks `json:"public-land-mobile-networks"`
+ SupportedMeasurements []SupportedMeasurements `json:"supported-measurements"`
+ TrafficState string `json:"traffic-state"`
+ AbsoluteRadioFrequencyChannelNumber AbsoluteRadioFrequencyChannelNumber `json:"absolute-radio-frequency-channel-number"`
+ UserLabel string `json:"user-label"`
+ SynchronizationSignalBlock SynchronizationSignalBlock `json:"synchronization-signal-block"`
+}
+
+type BaseStationChannelBandwidth struct {
+ Uplink int `json:"uplink"`
+ Downlink int `json:"downlink"`
+ SupplementaryUplink int `json:"supplementary-uplink"`
+}
+
+type PublicLandMobileNetworks struct {
+ SliceDifferentiator int `json:"slice-differentiator"`
+ SliceServiceType int `json:"slice-service-type"`
+ MobileCountryCode string `json:"mobile-country-code"`
+ MobileNetworkCode string `json:"mobile-network-code"`
+}
+
+type SupportedMeasurements struct {
+ PerformanceMeasurementType string `json:"performance-measurement-type"`
+ SupportedSnssaiSubcounterInstances []SupportedSnssaiSubcounterInstances `json:"supported-snssai-subcounter-instances"`
+}
+
+type SupportedSnssaiSubcounterInstances struct {
+ SliceDifferentiator int `json:"slice-differentiator"`
+ SliceServiceType int `json:"slice-service-type"`
+}
+
+type AbsoluteRadioFrequencyChannelNumber struct {
+ Uplink int `json:"uplink"`
+ Downlink int `json:"downlink"`
+ SupplementaryUplink int `json:"supplementary-uplink"`
+}
+
+type SynchronizationSignalBlock struct {
+ Duration int `json:"duration"`
+ FrequencyChannelNumber int `json:"frequency-channel-number"`
+ Periodicity int `json:"periodicity"`
+ SubcarrierSpacing int `json:"subcarrier-spacing"`
+ Offset int `json:"offset"`
+}
import (
"testing"
+
+ "github.com/stretchr/testify/require"
)
func TestGetMeasurements(t *testing.T) {
+ assertions := require.New(t)
type fields struct {
Event Event
}
message := StdDefinedMessage{
Event: tt.fields.Event,
}
- if got := message.GetMeasurements(); len(got) != len(tt.want) {
+ var got []Measurement
+ if got = message.GetMeasurements(); len(got) != len(tt.want) {
t.Errorf("Message.GetMeasurements() = %v, want %v", got, tt.want)
}
+
+ for _, meas := range got {
+ assertions.Equal(51232, meas.Value)
+ assertions.Contains(meas.MeasurementTypeInstanceReference, "user-equipment-average-throughput-uplink")
+ }
+
})
}
}
+++ /dev/null
-// -
-// ========================LICENSE_START=================================
-// O-RAN-SC
-// %%
-// Copyright (C) 2021: Nordix Foundation
-// %%
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// ========================LICENSE_END===================================
-//
-
-package main
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "math/rand"
- "net/http"
- "time"
-
- "github.com/gorilla/mux"
- "oransc.org/usecase/oduclosedloop/messages"
-)
-
-func main() {
- rand.Seed(time.Now().UnixNano())
- port := flag.Int("port", 3905, "The port this message router will listen on")
- flag.Parse()
-
- r := mux.NewRouter()
- r.HandleFunc("/events/unauthenticated.PERFORMANCE_MEASUREMENTS", sendStdMessage).Methods(http.MethodGet)
-
- fmt.Println("Starting mr on port: ", *port)
-
- http.ListenAndServe(fmt.Sprintf(":%v", *port), r)
-
-}
-
-// Variables ::
-// DU-ID: ERICSSON-O-DU-11220
-// Cell-ID: cell1
-// Slice-Diff: 2
-// Value: 300
-func sendStdMessage(w http.ResponseWriter, r *http.Request) {
- message := fetchMessage()
- fmt.Println("-----------------------------------------------------------------------------")
- fmt.Println("Sending message: ", message)
- fmt.Println("-----------------------------------------------------------------------------")
- response, _ := json.Marshal(message)
- time.Sleep(time.Duration(rand.Intn(3)) * time.Second)
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusOK)
- w.Write(response)
-}
-
-func fetchMessage() messages.StdDefinedMessage {
-
- index := rand.Intn(5)
- fmt.Println(index)
-
- measurements := [5][]messages.Measurement{meas1, meas2, meas3, meas4, meas5}
-
- message := messages.StdDefinedMessage{
- Event: messages.Event{
- CommonEventHeader: messages.CommonEventHeader{
- Domain: "stndDefined",
- StndDefinedNamespace: "o-ran-sc-du-hello-world-pm-streaming-oas3",
- },
- StndDefinedFields: messages.StndDefinedFields{
- StndDefinedFieldsVersion: "1.0",
- SchemaReference: "https://gerrit.o-ran-sc.org/r/gitweb?p=scp/oam/modeling.git;a=blob_plain;f=data-model/oas3/experimental/o-ran-sc-du-hello-world-oas3.json;hb=refs/heads/master",
- Data: messages.Data{
- DataId: "id",
- Measurements: measurements[index],
- },
- },
- },
- }
- return message
-}
-
-var meas1 = []messages.Measurement{
- {
- MeasurementTypeInstanceReference: "/network-function/distributed-unit-functions[id='ERICSSON-O-DU-11220']/cell[id='cell1']/supported-measurements/performance-measurement-type[.='user-equipment-average-throughput-downlink']/supported-snssai-subcounter-instances/slice-differentiator[.=2][slice-service-type=1]",
- Value: 300,
- Unit: "kbit/s",
- },
-}
-
-var meas2 = []messages.Measurement{
- {
- MeasurementTypeInstanceReference: "/network-function/distributed-unit-functions[id='ERICSSON-O-DU-11220']/cell[id='cell1']/supported-measurements/performance-measurement-type[.='user-equipment-average-throughput-downlink']/supported-snssai-subcounter-instances/slice-differentiator[.=1]",
- Value: 400,
- Unit: "kbit/s",
- },
-}
-
-var meas3 = []messages.Measurement{
- {
- MeasurementTypeInstanceReference: "/network-function/distributed-unit-functions[id='ERICSSON-O-DU-11220']/cell[id='cell1']/supported-measurements/performance-measurement-type[.='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances/slice-differentiator[.=2][slice-service-type=2]",
- Value: 800,
- Unit: "kbit/s",
- },
-}
-
-var meas4 = []messages.Measurement{
- {
- MeasurementTypeInstanceReference: "/network-function/distributed-unit-functions[id='ERICSSON-O-DU-11220']/cell[id='cell1']/supported-measurements/performance-measurement-type[.='user-equipment-average-throughput-downlink']/supported-snssai-subcounter-instances/slice-differentiator[.=1]",
- Value: 750,
- Unit: "kbit/s",
- },
-}
-
-var meas5 = []messages.Measurement{
- {
- MeasurementTypeInstanceReference: "/network-function/distributed-unit-functions[id='ERICSSON-O-DU-11220']/cell[id='cell1']/supported-measurements/performance-measurement-type[.='user-equipment-average-throughput-downlink']/supported-snssai-subcounter-instances/[slice-differentiator[.=2]][slice-service-type=1]",
- Value: 900,
- Unit: "kbit/s",
- },
-}
+++ /dev/null
-// -
-// ========================LICENSE_START=================================
-// O-RAN-SC
-// %%
-// Copyright (C) 2021: Nordix Foundation
-// %%
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// ========================LICENSE_END===================================
-//
-
-package main
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "net/http"
-
- "github.com/gorilla/mux"
- "oransc.org/usecase/oduclosedloop/messages"
-)
-
-func main() {
- port := flag.Int("port", 3904, "The port this SDNR stub will listen on")
- flag.Parse()
-
- r := mux.NewRouter()
- r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={NODE-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={O-DU-ID}", getDistributedUnitFunctions).Methods(http.MethodGet)
- r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={NODE-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={O-DU-ID}/radio-resource-management-policy-ratio={POLICY-ID}", updateRRMPolicyDedicatedRatio).Methods(http.MethodPost)
-
- fmt.Println("Starting SDNR on port: ", *port)
- http.ListenAndServe(fmt.Sprintf(":%v", *port), r)
-
-}
-
-func getDistributedUnitFunctions(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
-
- message := messages.ORanDuRestConf{
- DistributedUnitFunction: messages.DistributedUnitFunction{
- Id: vars["O-DU-ID"],
- RRMPolicyRatio: []messages.RRMPolicyRatio{
- {
- Id: "rrm-pol-1",
- AdmState: "locked",
- UserLabel: "rrm-pol-1",
- RRMPolicyMaxRatio: 100,
- RRMPolicyMinRatio: 0,
- RRMPolicyDedicatedRatio: 0,
- ResourceType: "prb",
- RRMPolicyMembers: []messages.RRMPolicyMember{
- {
- MobileCountryCode: "046",
- MobileNetworkCode: "651",
- SliceDifferentiator: 1,
- SliceServiceType: 0,
- },
- },
- },
- {
- Id: "rrm-pol-2",
- AdmState: "unlocked",
- UserLabel: "rrm-pol-2",
- RRMPolicyMaxRatio: 20,
- RRMPolicyMinRatio: 10,
- RRMPolicyDedicatedRatio: 15,
- ResourceType: "prb",
- RRMPolicyMembers: []messages.RRMPolicyMember{
- {
- MobileCountryCode: "046",
- MobileNetworkCode: "651",
- SliceDifferentiator: 2,
- SliceServiceType: 1,
- },
- },
- },
- {
- Id: "rrm-pol-3",
- AdmState: "unlocked",
- UserLabel: "rrm-pol-3",
- RRMPolicyMaxRatio: 30,
- RRMPolicyMinRatio: 10,
- RRMPolicyDedicatedRatio: 5,
- ResourceType: "prb",
- RRMPolicyMembers: []messages.RRMPolicyMember{
- {
- MobileCountryCode: "310",
- MobileNetworkCode: "150",
- SliceDifferentiator: 2,
- SliceServiceType: 2,
- },
- },
- },
- },
- },
- }
-
- respondWithJSON(w, http.StatusOK, message)
-}
-
-func updateRRMPolicyDedicatedRatio(w http.ResponseWriter, r *http.Request) {
- var prMessage messages.DistributedUnitFunction
- decoder := json.NewDecoder(r.Body)
-
- if err := decoder.Decode(&prMessage); err != nil {
- respondWithError(w, http.StatusBadRequest, "Invalid request payload")
- return
- }
- defer r.Body.Close()
-
- fmt.Println("prMessage: ", prMessage)
-
- respondWithJSON(w, http.StatusOK, map[string]string{"status": "200"})
-}
-
-func respondWithError(w http.ResponseWriter, code int, message string) {
- fmt.Println("-----------------------------------------------------------------------------")
- fmt.Println("Sending error message: ", message)
- fmt.Println("-----------------------------------------------------------------------------")
- respondWithJSON(w, code, map[string]string{"error": message})
-}
-
-func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) {
- fmt.Println("-----------------------------------------------------------------------------")
- fmt.Println("Sending message: ", payload)
- fmt.Println("-----------------------------------------------------------------------------")
- response, _ := json.Marshal(payload)
-
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(code)
- w.Write(response)
-}
log "github.com/sirupsen/logrus"
)
-const THRESHOLD_TPUT int = 700
+const THRESHOLD_TPUT int = 7000
type SliceAssuranceInformation struct {
duId string
r := mux.NewRouter()
r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={NODE-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={O-DU-ID}", getSdnrResponseMessage).Methods(http.MethodGet)
- r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={NODE-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={O-DU-ID}/radio-resource-management-policy-ratio={POLICY-ID}", updateRRMPolicyDedicatedRatio).Methods(http.MethodPost)
+ r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={NODE-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={O-DU-ID}/radio-resource-management-policy-ratio={POLICY-ID}", updateRRMPolicyDedicatedRatio).Methods(http.MethodPut)
fmt.Println("Starting SDNR stub on port: ", *portSdnr)
vars := mux.Vars(r)
log.Info("Get messages for RRM Policy Ratio information for O-Du ID ", vars["O-DU-ID"])
- message := messages.ORanDuRestConf{
- DistributedUnitFunction: messages.DistributedUnitFunction{
- Id: vars["O-DU-ID"],
- RRMPolicyRatio: getPolicyRatioMessage(),
- },
- }
- respondWithJSON(w, http.StatusOK, message)
+ distUnitFunctions := getDistributedUnitFunctionMessage(vars["O-DU-ID"])
+
+ respondWithJSON(w, http.StatusOK, distUnitFunctions)
}
-func getPolicyRatioMessage() []messages.RRMPolicyRatio {
+func getDistributedUnitFunctionMessage(oduId string) messages.ORanDuRestConf {
+
var policies []messages.RRMPolicyRatio
+ keys := make(map[string]bool)
+ for _, entry := range data {
+ if _, value := keys[entry.policyRatioId]; !value {
+ keys[entry.policyRatioId] = true
+ message := messages.RRMPolicyRatio{
+
+ Id: entry.policyRatioId,
+ AdmState: "locked",
+ UserLabel: entry.policyRatioId,
+ RRMPolicyMaxRatio: entry.policyMaxRatio,
+ RRMPolicyMinRatio: entry.policyMinRatio,
+ RRMPolicyDedicatedRatio: entry.policyDedicatedRatio,
+ ResourceType: "prb",
+ RRMPolicyMembers: []messages.RRMPolicyMember{
+ {
+ MobileCountryCode: "310",
+ MobileNetworkCode: "150",
+ SliceDifferentiator: entry.sd,
+ SliceServiceType: entry.sst,
+ },
+ },
+ }
+ policies = append(policies, message)
+ }
+ }
+ var publicLandMobileNetworks []messages.PublicLandMobileNetworks
for _, entry := range data {
+ publicLandMobileNetwork := messages.PublicLandMobileNetworks{
+ MobileCountryCode: "310",
+ MobileNetworkCode: "150",
+ SliceDifferentiator: entry.sd,
+ SliceServiceType: entry.sst,
+ }
+ publicLandMobileNetworks = append(publicLandMobileNetworks, publicLandMobileNetwork)
+ }
- message := messages.RRMPolicyRatio{
- Id: entry.policyRatioId,
- AdmState: "locked",
- UserLabel: entry.policyRatioId,
- RRMPolicyMaxRatio: entry.policyMaxRatio,
- RRMPolicyMinRatio: entry.policyMinRatio,
- RRMPolicyDedicatedRatio: entry.policyDedicatedRatio,
- ResourceType: "prb",
- RRMPolicyMembers: []messages.RRMPolicyMember{
- {
- MobileCountryCode: "046",
- MobileNetworkCode: "651",
- SliceDifferentiator: entry.sd,
- SliceServiceType: entry.sst,
- },
- },
+ var supportedSnssaiSubcounterInstances []messages.SupportedSnssaiSubcounterInstances
+ for _, entry := range data {
+ supportedSnssaiSubcounterInstance := messages.SupportedSnssaiSubcounterInstances{
+ SliceDifferentiator: entry.sd,
+ SliceServiceType: entry.sst,
}
- policies = append(policies, message)
+ supportedSnssaiSubcounterInstances = append(supportedSnssaiSubcounterInstances, supportedSnssaiSubcounterInstance)
+ }
+
+ cell := messages.Cell{
+ Id: "cell-1",
+ LocalId: 1,
+ PhysicalCellId: 1,
+ BaseStationChannelBandwidth: messages.BaseStationChannelBandwidth{
+ Uplink: 83000,
+ Downlink: 80000,
+ SupplementaryUplink: 84000,
+ },
+ OperationalState: "enabled",
+ TrackingAreaCode: 10,
+ AdmState: "unlocked",
+ PublicLandMobileNetworks: publicLandMobileNetworks,
+ SupportedMeasurements: []messages.SupportedMeasurements{
+ {
+ PerformanceMeasurementType: "o-ran-sc-du-hello-world:user-equipment-average-throughput-uplink",
+ SupportedSnssaiSubcounterInstances: supportedSnssaiSubcounterInstances,
+ },
+ {
+ PerformanceMeasurementType: "o-ran-sc-du-hello-world:user-equipment-average-throughput-downlink",
+ SupportedSnssaiSubcounterInstances: supportedSnssaiSubcounterInstances,
+ },
+ },
+ TrafficState: "active",
+ AbsoluteRadioFrequencyChannelNumber: messages.AbsoluteRadioFrequencyChannelNumber{
+ Uplink: 14000,
+ Downlink: 15000,
+ SupplementaryUplink: 14500,
+ },
+ UserLabel: "cell-1",
+ SynchronizationSignalBlock: messages.SynchronizationSignalBlock{
+ Duration: 2,
+ FrequencyChannelNumber: 12,
+ Periodicity: 10,
+ SubcarrierSpacing: 30,
+ Offset: 3,
+ },
}
- return policies
+
+ distUnitFunction := messages.DistributedUnitFunction{
+ Id: oduId,
+ OperationalState: "enabled",
+ AdmState: "unlocked",
+ UserLabel: oduId,
+ Cell: []messages.Cell{
+ cell,
+ },
+ RRMPolicyRatio: policies,
+ }
+
+ duRRMPolicyRatio := messages.ORanDuRestConf{
+ DistributedUnitFunction: []messages.DistributedUnitFunction{
+ distUnitFunction,
+ },
+ }
+
+ return duRRMPolicyRatio
}
func updateRRMPolicyDedicatedRatio(w http.ResponseWriter, r *http.Request) {
-
- var prMessages []messages.RRMPolicyRatio
+ var policies struct {
+ RRMPolicies []messages.RRMPolicyRatio `json:"radio-resource-management-policy-ratio"`
+ }
decoder := json.NewDecoder(r.Body)
- if err := decoder.Decode(&prMessages); err != nil {
+ if err := decoder.Decode(&policies); err != nil {
respondWithError(w, http.StatusBadRequest, "Invalid request payload")
return
}
defer r.Body.Close()
+ prMessages := policies.RRMPolicies
log.Infof("Post request to update RRMPolicyDedicatedRatio %+v", prMessages)
-
findAndUpdatePolicy(prMessages)
respondWithJSON(w, http.StatusOK, map[string]string{"status": "200"})
}
log.Info("Using tput value higher than THRESHOLD_TPUT ", randomTput)
entry.metricValue = randomTput
}
-
+ randomEventId := rand.Intn(10000)
messagesToSend = append(messagesToSend, generateMeasurementEntry(entry))
message := messages.StdDefinedMessage{
Event: messages.Event{
CommonEventHeader: messages.CommonEventHeader{
Domain: "stndDefined",
- EventId: "pm-1_1644252450",
+ EventId: "pm-1_16442" + strconv.Itoa(randomEventId),
EventName: "stndDefined_performanceMeasurementStreaming",
EventType: "performanceMeasurementStreaming",
Sequence: 825,
},
}
- fmt.Printf("Send Dmaap messages\n %+v\n", message)
+ fmt.Printf("Sending Dmaap message:\n %+v\n", message)
+
+ messageAsByteArray, _ := json.Marshal(message)
+ response := [1]string{string(messageAsByteArray)}
time.Sleep(time.Duration(rand.Intn(3)) * time.Second)
- respondWithJSON(w, http.StatusOK, message)
+ respondWithJSON(w, http.StatusOK, response)
messagesToSend = nil
}
func generateMeasurementEntry(entry *SliceAssuranceInformation) messages.Measurement {
- measurementTypeInstanceReference := "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='" + entry.duId + "']/cell[id='" + entry.cellId + "']/supported-measurements/performance-measurement-type='(urn:o-ran-sc:yang:o-ran-sc-du-hello-world?revision=2021-11-23)" + entry.metricName + "']/supported-snssai-subcounter-instances[slice-differentiator='" + strconv.Itoa(entry.sd) + "'][slice-service-type='" + strconv.Itoa(entry.sst) + "']"
+ measurementTypeInstanceReference := "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='" + entry.duId + "']/cell[id='" + entry.cellId + "']/supported-measurements[performance-measurement-type='(urn:o-ran-sc:yang:o-ran-sc-du-hello-world?revision=2021-11-23)" + entry.metricName + "']/supported-snssai-subcounter-instances[slice-differentiator='" + strconv.Itoa(entry.sd) + "'][slice-service-type='" + strconv.Itoa(entry.sst) + "']"
meas := messages.Measurement{
MeasurementTypeInstanceReference: measurementTypeInstanceReference,
-O-DU-1211,cell-1,1,1,user-equipment-average-throughput-downlink,300,rrm-pol-1,20,10,15
-O-DU-1211,cell-1,1,1,user-equipment-average-throughput-uplink,500,rrm-pol-1,20,10,15
-O-DU-1211,cell-1,1,2,user-equipment-average-throughput-downlink,700,rrm-pol-2,20,10,15
-O-DU-1211,cell-1,1,2,user-equipment-average-throughput-uplink,400,rrm-pol-2,20,10,15
-O-DU-1211,cell-1,2,1,user-equipment-average-throughput-downlink,800,rrm-pol-3,20,10,15
-O-DU-1211,cell-1,2,1,user-equipment-average-throughput-uplink,100,rrm-pol-3,20,10,15
-O-DU-1211,cell-1,2,2,user-equipment-average-throughput-downlink,900,rrm-pol-4,20,10,15
-O-DU-1211,cell-1,2,2,user-equipment-average-throughput-uplink,500,rrm-pol-4,20,10,15
-O-DU-1211,cell-1,3,1,user-equipment-average-throughput-downlink,800,rrm-pol-5,20,10,15
-O-DU-1211,cell-1,3,1,user-equipment-average-throughput-uplink,100,rrm-pol-5,20,10,15
\ No newline at end of file
+O-DU-1122,cell-1,1,1,user-equipment-average-throughput-downlink,3761,rrm-pol-1,20,10,15
+O-DU-1122,cell-1,1,1,user-equipment-average-throughput-uplink,5861,rrm-pol-1,20,10,15
+O-DU-1122,cell-1,1,2,user-equipment-average-throughput-downlink,7791,rrm-pol-2,20,10,15
+O-DU-1122,cell-1,1,2,user-equipment-average-throughput-uplink,4539,rrm-pol-2,20,10,15
+O-DU-1122,cell-1,2,1,user-equipment-average-throughput-downlink,8987,rrm-pol-3,20,10,15
+O-DU-1122,cell-1,2,1,user-equipment-average-throughput-uplink,1134,rrm-pol-3,20,10,15
+O-DU-1122,cell-1,2,2,user-equipment-average-throughput-downlink,9123,rrm-pol-4,20,10,15
+O-DU-1122,cell-1,2,2,user-equipment-average-throughput-uplink,5368,rrm-pol-4,20,10,15
+O-DU-1122,cell-1,3,1,user-equipment-average-throughput-downlink,8764,rrm-pol-5,20,10,15
+O-DU-1122,cell-1,3,1,user-equipment-average-throughput-uplink,1367,rrm-pol-5,20,10,15
\ No newline at end of file