Add initial meta-stx to support StarlingX build 20/3420/1
author: Jackie Huang <jackie.huang@windriver.com>
Thu, 23 Apr 2020 03:29:15 +0000 (11:29 +0800)
committer: Jackie Huang <jackie.huang@windriver.com>
Thu, 23 Apr 2020 05:42:57 +0000 (13:42 +0800)
meta-stx is a Yocto-compatible layer that includes
the recipes required to build an image for StarlingX
on top of Yocto 2.7.3 (warrior).

The following components are included:
- Fault management
- Configuration management
- Software management
- Host management
- Service management
- Ansible and puppet for provisioning

Issue-ID: INF-8
Issue-ID: INF-9
Issue-ID: INF-10
Issue-ID: INF-11
Issue-ID: INF-12
Issue-ID: INF-13
Issue-ID: INF-19
Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
Change-Id: I4e85c8232df3bf390aa247c75061a54b914bd28a

900 files changed:
meta-stx/.gitignore [new file with mode: 0644]
meta-stx/LICENSE [new file with mode: 0644]
meta-stx/README.md [new file with mode: 0644]
meta-stx/classes/extrausers-config.bbclass [new file with mode: 0644]
meta-stx/classes/openssl10.bbclass [new file with mode: 0644]
meta-stx/classes/python-backports-init.bbclass [new file with mode: 0644]
meta-stx/classes/stx-anaconda-image.bbclass [new file with mode: 0644]
meta-stx/classes/stx-postrun.bbclass [new file with mode: 0644]
meta-stx/conf/distro/files/ks/aio_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/aio_lowlatency_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/controller_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/net_controller_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/net_smallsystem_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/net_smallsystem_lowlatency_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/net_storage_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/net_worker_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/net_worker_lowlatency_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/ks/poky_stx_aio_ks.cfg [new file with mode: 0644]
meta-stx/conf/distro/files/syslinux.cfg [new file with mode: 0644]
meta-stx/conf/distro/include/stx-features.inc [new file with mode: 0644]
meta-stx/conf/distro/include/stx-preferred-vers.inc [new file with mode: 0644]
meta-stx/conf/distro/poky-stx.conf [new file with mode: 0644]
meta-stx/conf/layer.conf [new file with mode: 0644]
meta-stx/recipes-bsp/grub/grub-efi_2.02.bbappend [new file with mode: 0644]
meta-stx/recipes-bsp/grub/grub_2.02.bbappend [new file with mode: 0644]
meta-stx/recipes-connectivity/etcd/etcd_git.bb [new file with mode: 0644]
meta-stx/recipes-connectivity/etcd/etcd_git.bbappend [new file with mode: 0644]
meta-stx/recipes-connectivity/etcd/files/etcd.conf [new file with mode: 0644]
meta-stx/recipes-connectivity/etcd/files/etcd.service [new file with mode: 0644]
meta-stx/recipes-connectivity/haproxy/files/haproxy.cfg [new file with mode: 0644]
meta-stx/recipes-connectivity/haproxy/files/haproxy.service [new file with mode: 0644]
meta-stx/recipes-connectivity/haproxy/haproxy-1.7.11/haproxy-1.7.11-tpm-support.patch [new file with mode: 0644]
meta-stx/recipes-connectivity/haproxy/haproxy.inc [new file with mode: 0644]
meta-stx/recipes-connectivity/haproxy/haproxy_1.7.11.bb [new file with mode: 0644]
meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/0001-include-sys-types.h-for-getting-u_-typedefs.patch [new file with mode: 0644]
meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/Set_nobody_user_group.patch [new file with mode: 0644]
meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/fix-ac-prereq.patch [new file with mode: 0644]
meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap_0.25.bb [new file with mode: 0644]
meta-stx/recipes-connectivity/nfs-utils/nfs-utils_%.bbappend [new file with mode: 0644]
meta-stx/recipes-connectivity/openssl/openssl10_1.0.%.bbappend [new file with mode: 0644]
meta-stx/recipes-connectivity/qpid/files/fix-compile-through-disable-cflag-werror.patch [new file with mode: 0644]
meta-stx/recipes-connectivity/qpid/qpid_0.20.bbappend [new file with mode: 0644]
meta-stx/recipes-containers/docker-distribution/docker-distribution_git.bb [new file with mode: 0644]
meta-stx/recipes-containers/docker-distribution/docker-distribution_git.bbappend [new file with mode: 0644]
meta-stx/recipes-containers/docker-distribution/files/config.yml [new file with mode: 0644]
meta-stx/recipes-containers/docker-distribution/files/docker-distribution.service [new file with mode: 0644]
meta-stx/recipes-containers/docker-forward-journald/docker-forward-journald_git.bb [new file with mode: 0644]
meta-stx/recipes-containers/docker/docker-ce_git.bbappend [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/README [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/apiserver [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/config [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/controller-manager [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/kubelet [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/kubelet.kubeconfig [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/proxy [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/scheduler [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-apiserver.service [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-controller-manager.service [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-proxy.service [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-scheduler.service [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kubelet.service [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/tmpfiles.d/kubernetes.conf [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/helm-upload [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/helm.sudo [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/kubeadm.conf [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/kubelet-cgroup-setup.sh [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/files/kubernetes-accounting.conf [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/helm_2.13.1.bb [new file with mode: 0644]
meta-stx/recipes-containers/kubernetes/kubernetes_git.bbappend [new file with mode: 0644]
meta-stx/recipes-containers/registry-token-server/files/registry-token-server-1.0.0.tar.gz [new file with mode: 0644]
meta-stx/recipes-containers/registry-token-server/files/registry-token-server.service [new file with mode: 0644]
meta-stx/recipes-containers/registry-token-server/files/token-server-certificate.pem [new file with mode: 0644]
meta-stx/recipes-containers/registry-token-server/files/token-server-private-key.pem [new file with mode: 0644]
meta-stx/recipes-containers/registry-token-server/files/token_server.conf [new file with mode: 0644]
meta-stx/recipes-containers/registry-token-server/registry-token-server_1.0.0.bb [new file with mode: 0644]
meta-stx/recipes-core/distributedcloud/distributedcloud-client-dcmanager_git.bb [new file with mode: 0644]
meta-stx/recipes-core/distributedcloud/distributedcloud_git.bb [new file with mode: 0644]
meta-stx/recipes-core/images/stx-image-aio-installer.bb [new file with mode: 0644]
meta-stx/recipes-core/images/stx-image-aio.bb [new file with mode: 0644]
meta-stx/recipes-core/initrdscripts/files/init-install.sh [new file with mode: 0755]
meta-stx/recipes-core/initrdscripts/initramfs-module-install_1.0.bbappend [new file with mode: 0644]
meta-stx/recipes-core/initrdscripts/initramfs-module-setup-live/setup-live [new file with mode: 0644]
meta-stx/recipes-core/initrdscripts/initramfs-module-setup-live_1.0.bbappend [new file with mode: 0644]
meta-stx/recipes-core/packagegroups/packagegroup-stak-base.bb [new file with mode: 0644]
meta-stx/recipes-core/packagegroups/packagegroup-stx.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-ansible-playbooks/files/0001-stx.3.0-rebase-adjust-path.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ansible-playbooks/files/0002-update_sysinv_database-do-not-fail-if-ceph-monitor-a.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ansible-playbooks/files/0003-update_sysinv_database-wait-after-provision.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ansible-playbooks/files/0004-bringup_flock_services-use-systmd-for-fminit-and-add.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ansible-playbooks/files/0005-persist-config-add-retry-for-etcd.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ansible-playbooks/files/0006-bringup_helm-wait-after-initialize-helm-to-avoid-tim.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ansible-playbooks/playbookconfig.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-config-files/config-files_1.0.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-config-files/files/openssh-config-rm-hmac-ripemd160.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config-files/files/syslog-ng-conf-fix-the-source.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config-files/files/syslog-ng-conf-replace-match-with-message.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config-files/files/syslog-ng-config-parse-err.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config-files/files/syslog-ng-config-systemd-service.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config-files/files/util-linux-pam-postlogin.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config/cgts-client.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-config/config-gate.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-config/controllerconfig.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-config/files/0001-puppet-manifests-adjust-path-variable.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config/files/0001-puppet-manifests-integ-set-correct-ldap-module-path.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config/files/0001-stx-config-puppet-manifests-cast-to-Integer.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config/files/0001-stx-config-remove-argparse-requirement-from-sysinv.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-config/storageconfig.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-config/stx-config.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-config/sysinv-agent.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-config/sysinv.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-config/tsconfig.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-config/workerconfig.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/files/0001-Honor-the-build-system-LDFLAGS.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/files/0001-Use-build-systems-LDFLAGS.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/files/0001-snmp-ext-use-build-systems-LDFLAGS.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/fm-api.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/fm-common.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/fm-doc.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/fm-mgr.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/fm-rest-api.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/python-fmclient.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/snmp-audittrail.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/snmp-ext.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-fault/stx-fault.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-gui/stx-gui.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/files/0001-Allow-user-to-define-destination-libdir.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/files/0002-Install-sm-eru-sm-eru-dump-and-sm-eru-watchdog.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/files/0003-pragma-ignore-Wunused-result-errors-with-gcc-8.3.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/files/0004-Cast-size_t-to-int-to-silence-gcc-8.3.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/sm-api.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/sm-client.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/sm-common.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/sm-db.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/sm-tools.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/sm.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/stx-ha.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-ha/stx-ocf-scripts.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ-kubernetes/cloud-provider-openstack.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/base/0001-cgcs-users-with-patch-ibsh-patches.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/base/0002-Add-DESTDIR-CFLAGS-and-LDFLAGS.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/LICENSE [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/admin.cmds [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/admin.xtns [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e-cgcs-copyright.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e-cgcs.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/operator.cmds [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/operator.xtns [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/secadmin.cmds [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/files/ibsh/secadmin.xtns [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/filesystem/filesystem-scripts.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/filesystem/iscsi-initiator-utils-config.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/filesystem/nfs-utils-config.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/filesystem/nfscheck.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/ibsh_0.3e.bbappend [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/logging/logmgmt.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/logging/logrotate-config.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/spectre-meltdown-checker_git.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-collector.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-config-files.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-filesystem.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-integ.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-ldap.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-logging.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-networking.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/stx-utilities.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/utilities/buildinfo.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/utilities/namespace-utils.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-integ/utilities/platform-util.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/files/0001-Use-LDFLAGS-when-linking-and-pass-flags-down-to-subm.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/files/0001-Use-snprintf-to-avoid-overflowing-amon.tx_buf.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/files/0001-mtce-compute-dont-install-empty-directory-unless-nee.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/files/0001-mtce-control-dont-install-empty-directory-unless-nee.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/files/0001-mtce-storage-dont-install-empty-directory-unless-nee.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/files/0001-stx-metal-remove-argparse-requirement-from-inventory.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/files/stx-warrior-adjust-paths.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/inventory.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/mtce-common.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/mtce-compute.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/mtce-control.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/mtce-storage.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/mtce.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/pxe-network-installer.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/python-inventoryclient.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-metal/stx-metal.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-monitor-armada-app/monitor-helm-elastic_1.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-monitor-armada-app/monitor-helm_1.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-monitor-armada-app/stx-monitor-helm_1.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-monitoring/collectd-extensions.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-monitoring/influxdb-extensions.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-monitoring/monitor-tools.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-monitoring/stx-monitoring.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-monitoring/vm-topology.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/files/use-ldflags-mtce-guest.patch [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/mtce-guest.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/nfv-client.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/nfv-common.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/nfv-plugins.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/nfv-tools.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/nfv-vim.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/nova-api-proxy.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-nfv/stx-nfv.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-openstack-armada-app/openstack-helm-infra_1.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-openstack-armada-app/openstack-helm_1.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-openstack-armada-app/stx-openstack-helm_1.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-platform-armada-app/stx-platform-helm_1.0.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-update/cgcs-patch.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-update/enable-dev-patch.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-update/patch-alarm.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-update/stx-update.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-update/tsconfig.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-upstream/openstack-ras_git.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/ceph/ceph-manager.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/ceph/python-cephclient.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/files/build.info [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/security/stx-ssl.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/stx-utilities_git.bb [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/tools/collect-engtools.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/tools/collector.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/build-info.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/collect-engtools.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/logmgmt.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/namespace-utils.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/nfscheck.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/pci-irq-affinity.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/platform-util.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/stx-extensions.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/update-motd.inc [new file with mode: 0644]
meta-stx/recipes-core/stx-utilities/utilities/worker-utils.inc [new file with mode: 0644]
meta-stx/recipes-core/systemd/files/0900-inject-milisec-in-syslog-date.patch [new file with mode: 0644]
meta-stx/recipes-core/systemd/systemd_241.bbappend [new file with mode: 0644]
meta-stx/recipes-core/util-linux/util-linux_%.bbappend [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/0001-lldpd-client-add-show-interfaces-cmd-from-upstream.patch [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/0002-Clear-station-bit-if-any-other-capability-is-enabled.patch [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/i40e-lldp-configure.sh [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/lldpd-clear-station.patch [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/lldpd-create-run-dir.patch [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/lldpd-i40e-disable.patch [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/lldpd-interface-show.patch [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/lldpd.default [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/files/lldpd.init [new file with mode: 0644]
meta-stx/recipes-daemons/lldpd/lldpd_%.bbappend [new file with mode: 0644]
meta-stx/recipes-dbs/mysql/mysql-python/0001-_mysql.c-fix-compilation-with-MariaDB-with-10.3.13.patch [new file with mode: 0644]
meta-stx/recipes-dbs/mysql/mysql-python_1.2.5.bb [new file with mode: 0644]
meta-stx/recipes-devtools/erlang/erlang-native_R16B03-1.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/erlang/erlang_R16B03-1.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/erlang/files/erts-configure.in-avoid-RPATH-warning.patch [new file with mode: 0644]
meta-stx/recipes-devtools/erlang/files/fix-install-ownership.patch [new file with mode: 0644]
meta-stx/recipes-devtools/go/go-phercloud_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/grubby/files/1000-Generic-name-for-Titanium.patch [new file with mode: 0644]
meta-stx/recipes-devtools/grubby/files/1001-Add-support-for-updating-grub-cfg-with-multiboot-2.patch [new file with mode: 0644]
meta-stx/recipes-devtools/grubby/files/1002-Install-into-libdir-instead-of-hard-coding.patch [new file with mode: 0644]
meta-stx/recipes-devtools/grubby/grubby_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/perl/filter-perl_1.59.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libhtml-tagset-perl_3.20.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libmailtools-perl_2.18.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libmailtools-perl_2.20.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libsocket6-perl/0001-socket6-perl-fix-configure-error.patch [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libsocket6-perl_0.23.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libsocket6-perl_0.28.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libtest-pod-perl_1.51.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libtest-pod-perl_1.52.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libwww-perl_6.05.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/libwww-perl_6.35.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/pathtools-perl_3.75.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/podlators-perl_4.12.bb [new file with mode: 0644]
meta-stx/recipes-devtools/perl/scalar-list-utils-perl_1.50.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/eventlet/0001-CGTS-2869-close-connection-on-HTTP-413-Request-Entit.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-barbican/barbican-fix-path-to-find-configuration-files.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-barbican/gunicorn-config.py [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-api.service [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-keystone-listener.service [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-worker.service [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/guni_config.py [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-assets-compress [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-clearsessions [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-patching-restart [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/horizon.init [new file with mode: 0755]
meta-stx/recipes-devtools/python/files/python-django-horizon/openstack-dashboard-httpd-2.4.conf [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/openstack-dashboard-httpd-logging.conf [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/python-django-horizon-logrotate.conf [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-django-horizon/python-django-horizon-systemd.conf [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/chmod_keyringlock2.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/chown_keyringlock_file.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/fix_keyring_lockfile_location.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/keyring_path_change.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/lock_keyring_file.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/lock_keyring_file2.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/no_keyring_password.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/remove-reader-lock.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/remove_others_perms_on_keyringcfg_file.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/use_new_lock.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keyring/use_temporary_file.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/admin-openrc [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/convert_keystone_backend.py [new file with mode: 0755]
meta-stx/recipes-devtools/python/files/python-keystone/hybrid-backend-setup [new file with mode: 0755]
meta-stx/recipes-devtools/python/files/python-keystone/identity.sh [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/keystone-explicitly-import-localcontext-from-oslo.me.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/keystone-fix-location-of-files-for-tests.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/keystone-init [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/keystone-init.service [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/keystone-remove-git-commands-in-tests.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/keystone-search-in-etc-directory-for-config-files.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/keystone.conf [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/stx-files/keystone-all [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/stx-files/keystone-fernet-keys-rotate-active [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/stx-files/openstack-keystone.service [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/stx-files/password-rules.conf [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/stx-files/public.py [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-keystone/wsgi-keystone.conf [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-pynacl/0001-Enable-cross-compile.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-redfishtool/0001-Adapt-redfishtool-to-python2.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/python-unittest2/0001-port-unittest2-argparse-is-part-of-stdlib.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/files/requests/0001-close-connection-on-HTTP-413-Request-Entit.patch [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-3parclient_4.2.3.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-adal_1.0.2.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-alabaster_0.7.12.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-amqp_2.5.2.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-aniso8601_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ansible_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-aodhclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-backports-functools-lru-cache_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-backports-init/backports/__init__.py [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-backports-init_1.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-backports-ssl_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-barbican_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-beaker_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-castellan_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-cffi_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-cherrypy_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-cinderclient/cinder-api-check.sh [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-cinderclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-cliff_2.14.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-configobj_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-configshell_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-construct_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-d2to1_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-dateutil.inc [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-dateutil_2.8.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-defusedxml_0.6.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-django-babel_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-django-debreach_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-django-horizon_15.1.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-django-horizon_15.1.0.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-django-openstack-auth_git.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-django_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-django_git.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-docker_3.3.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ethtool_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-eventlet_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-firewall_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-futurist_1.8.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-glanceclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-gnocchiclient_7.0.4.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-google-auth_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-gunicorn_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-hgtools_6.3.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-horizon_git.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-imagesize_1.2.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-importlib-metadata_0.23.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-influxdb_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-iniparse_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ironicclient_2.7.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-jmespath_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-jwcrypto_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-jwt_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-keyring_5.3.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-keystone_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-keystoneauth1.inc [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-keystoneauth1_3.17.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-keystoneclient_git.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-keystonemiddleware_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-kombu_4.6.7.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-kubernetes_8.0.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ldap3_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ldap_3.2.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ldappool_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-lefthandclient_2.1.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-lefthandclient_2.1.0.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-linecache2_1.0.0.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-magnumclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-migrate_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-munch_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-murano-pkg-check_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-muranoclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-neutronclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-novaclient/nova-api-check.sh [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-novaclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-openstackclient_git.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-openstacksdk_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-openstacksdk_git.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-osc-lib_1.12.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.cache_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.concurrency_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.config_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.context_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.db_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.log_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.middleware_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.policy_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.serialization_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.upgradecheck_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.utils_git.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-oslo.versionedobjects_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-osprofiler_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-packaging_20.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pankoclient_0.5.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-paramiko_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pecan_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pika_1.1.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pycadf.bbappend_ [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pycurl_7.43.0.3.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pyelftools_0.25.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pyghmi_1.5.7.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pymysql_0.9.3.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pynacl_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pyngus_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pyperf_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-pysaml2_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-redfishtool_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-requests-oauthlib_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-requests-oauthlib_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-requests-toolbelt_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-requests_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ruamel.ordereddict_0.4.9.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ruamel.yaml_0.15.9.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-ryu_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-saharaclient_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-scss_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-snowballstemmer_2.0.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-sphinx_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-sphinxcontrib-websupport_1.1.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-testtools_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-traceback2_1.4.0.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-unittest2_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-urlgrabber_4.0.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-urllib3_1.23.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-urwid_2.0.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-versiontools.inc [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-versiontools_1.9.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-websocket-client_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-websockify_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-webtest_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-wsme_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-yaql_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python-zipp_0.6.0.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-cheroot_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-cherrypy_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-lang_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-linux-procfs_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-logutils_0.3.5.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-mako_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-pecan_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-prettytable_0.7.2.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-pymysql_0.9.3.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-redfishtool_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-versiontools_1.9.1.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-webtest_2.0.33.bb [new file with mode: 0644]
meta-stx/recipes-devtools/python/python3-wsme_git.bb [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-Add-a-color-setting-for-mips64_n32-binaries.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-Do-not-add-an-unsatisfiable-dependency-when-building.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-Do-not-read-config-files-from-HOME.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-Do-not-reset-the-PATH-environment-variable-before-ru.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-Fix-build-with-musl-C-library.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-Split-binary-package-building-into-a-separate-functi.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-perl-disable-auto-reqs.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0002-Add-support-for-prefixing-etc-from-RPM_ETCCONFIGDIR-.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0002-Run-binary-package-creation-via-thread-pools.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0003-rpmstrpool.c-make-operations-over-string-pools-threa.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0004-build-pack.c-remove-static-local-variables-from-buil.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/files/0011-Do-not-require-that-ELF-binaries-are-executable-to-b.patch [new file with mode: 0644]
meta-stx/recipes-devtools/rpm/rpm2_4.14.2.bb [new file with mode: 0644]
meta-stx/recipes-devtools/rsync/rsync_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/ruby-shadow/ruby-shadow_%.bbappend [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby.inc [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby/0001-openembedded-socket-extconf-hardcode-wide-getaddr-in.patch [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby/0002-Obey-LDFLAGS-for-the-link-of-libruby.patch [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby/ext.socket.extmk.patch [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby/extmk.patch [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby/ruby-CVE-2017-9226.patch [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby/ruby-CVE-2017-9228.patch [new file with mode: 0644]
meta-stx/recipes-devtools/ruby/ruby_2.0.0-p648.bb [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph-13.2.2/0001-Correct-the-path-to-find-version.h-in-rocksdb.patch [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph-13.2.2/0002-zstd-fix-error-for-cross-compile.patch [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph-13.2.2/0003-ceph-add-pybind-support-in-OE.patch [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph-13.2.2/0004-ceph-detect-init-correct-the-installation-for-OE.patch [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph-13.2.2/0005-Add-hooks-for-orderly-shutdown-on-controller.patch [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph-14.1.0/0001-ceph-rebase-on-stx.3.0-and-warrior.patch [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph-14.1.0/rados.runtime.decode.error.patch [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph_13.2.2.bb [new file with mode: 0644]
meta-stx/recipes-extended/ceph/ceph_14.1.0.bbappend [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/ceph-init-wrapper.sh [new file with mode: 0755]
meta-stx/recipes-extended/ceph/files/ceph-manage-journal.py [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/ceph-preshutdown.sh [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/ceph-radosgw.service [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/ceph.conf [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/ceph.conf.pmon [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/ceph.service [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/ceph.sh [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/mgr-restful-plugin.py [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/mgr-restful-plugin.service [new file with mode: 0644]
meta-stx/recipes-extended/ceph/files/starlingx-docker-override.conf [new file with mode: 0644]
meta-stx/recipes-extended/cloud-init/cloud-init_0.7.6.bbappend [new file with mode: 0644]
meta-stx/recipes-extended/cloud-init/files/cloud-init-interactive-parted.patch [new file with mode: 0644]
meta-stx/recipes-extended/cloud-init/files/first_boot.patch [new file with mode: 0644]
meta-stx/recipes-extended/collectd/collectd_%.bbappend [new file with mode: 0644]
meta-stx/recipes-extended/collectd/files/collectd-fix-for-LIBPYTHON_LDFLAGS.patch [new file with mode: 0644]
meta-stx/recipes-extended/ibsh/ibsh_0.3e.bb [new file with mode: 0644]
meta-stx/recipes-extended/libpwquality/libpwquality_1.4.0.bbappend [new file with mode: 0644]
meta-stx/recipes-extended/lsb/files/service-redirect-to-restart-for-reload.patch [new file with mode: 0644]
meta-stx/recipes-extended/lsb/lsbinitscripts_9.79.bbappend [new file with mode: 0644]
meta-stx/recipes-extended/postgresql/postgresql_%.bbappend [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-common-0001-Avoid-RPC-roundtrips-while-listing-items.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-common-0002-Use-proto_dist-from-command-line.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-script-wrapper [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0001-Remove-excessive-sd_notify-code.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0002-Add-systemd-notification-support.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0003-Revert-Distinct-exit-codes-for-CLI-utilities.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0004-Allow-guest-login-from-non-loopback-connections.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0005-Avoid-RPC-roundtrips-in-list-commands.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0006-rabbit_prelaunch-must-use-RABBITMQ_SERVER_ERL_ARGS.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-fails-with-home-not-set.patch [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.logrotate [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.service [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.tmpfiles [new file with mode: 0644]
meta-stx/recipes-extended/rabbitmq/rabbitmq-server_3.6.5.bb [new file with mode: 0644]
meta-stx/recipes-extended/registry-token-server/files/registry-token-server-1.0.0.tar.gz [new file with mode: 0644]
meta-stx/recipes-extended/registry-token-server/files/registry-token-server.service [new file with mode: 0644]
meta-stx/recipes-extended/registry-token-server/files/token-server-certificate.pem [new file with mode: 0644]
meta-stx/recipes-extended/registry-token-server/files/token-server-private-key.pem [new file with mode: 0644]
meta-stx/recipes-extended/registry-token-server/files/token_server.conf [new file with mode: 0644]
meta-stx/recipes-extended/registry-token-server/registry-token-server_git.bb [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.6.7p5-strip.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.7.2p1-envdebug.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.8.23-fix-double-quote-parsing-for-Defaults-values.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.8.23-ldapsearchuidfix.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.8.23-legacy-group-processing.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.8.23-nowaitopt.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.8.23-sudoldapconfman.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/files/sudo-1.8.6p7-logsudouser.patch [new file with mode: 0644]
meta-stx/recipes-extended/sudo/sudo_%.bbappend [new file with mode: 0644]
meta-stx/recipes-extended/uswgi/files/0001-pragma-ignore-cast-type-errors-with-gcc-8.3.patch [new file with mode: 0644]
meta-stx/recipes-extended/uswgi/uwsgi_git.bbappend [new file with mode: 0644]
meta-stx/recipes-graphics/mesa/mesa_%.bbappend [new file with mode: 0644]
meta-stx/recipes-httpd/apache2/apache2_%.bbappend [new file with mode: 0644]
meta-stx/recipes-httpd/lighttpd/lighttpd_%.bbappend [new file with mode: 0644]
meta-stx/recipes-kernel/linux/linux-yocto-rt_%.bbappend [new file with mode: 0644]
meta-stx/recipes-kernel/linux/linux-yocto-stx.inc [new file with mode: 0644]
meta-stx/recipes-kernel/linux/linux-yocto_%.bbappend [new file with mode: 0644]
meta-stx/recipes-kernel/linux/linux/stx-kconfig.cfg [new file with mode: 0644]
meta-stx/recipes-networking/ipset/ipset_6.38.bb [new file with mode: 0644]
meta-stx/recipes-networking/openvswitch/openvswitch_%.bbappend [new file with mode: 0644]
meta-stx/recipes-networking/vlan/vlan_2.0.5.bb [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Add-Client-ID-to-debug-messages.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Allow-connection-to-self-when-impersonator-set.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Always-choose-highest-requested-debug-level.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Always-use-the-encype-we-selected.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Appease-gcc-7-s-fallthrough-detection.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Change-impersonator-check-code.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Clarify-debug-and-debug_level-in-man-pages.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Conditionally-reload-kernel-interface-on-SIGHUP.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Do-not-call-gpm_grab_sock-twice.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Don-t-leak-mech_type-when-CONTINUE_NEEDED-from-init_.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Emit-debug-on-queue-errors.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-error-checking-on-get_impersonator_fallback.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-error-handling-in-gp_config_from_dir.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-error-handling-in-gpm_send_buffer-gpm_recv_buffe.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-handling-of-non-EPOLLIN-EPOLLOUT-events.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-memory-leak.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-mismatched-sign-comparisons.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-most-memory-leaks.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-potential-free-of-non-heap-address.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-segfault-when-no-config-files-are-present.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-silent-crash-with-duplicate-config-sections.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Fix-unused-variables.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Handle-outdated-encrypted-ccaches.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Include-header-for-writev.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Make-proc-file-failure-loud-but-nonfatal.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Only-empty-FILE-ccaches-when-storing-remote-creds.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Prevent-uninitialized-read-in-error-path-of-XDR-cont.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Properly-initialize-ccaches-before-storing-into-them.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Properly-locate-credentials-in-collection-caches-in-.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Properly-renew-expired-credentials.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Remove-gpm_release_ctx-to-fix-double-unlock.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Separate-cred-and-ccache-manipulation-in-gpp_store_r.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Simplify-setting-NONBLOCK-on-socket.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Tolerate-NULL-pointers-in-gp_same.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Turn-on-Wextra.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/Update-systemd-file.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/client-Switch-to-non-blocking-sockets.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/files/server-Add-detailed-request-logging.patch [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/gssproxy_0.7.0.bb [new file with mode: 0644]
meta-stx/recipes-security/gssproxy/gssproxy_0.7.0.bbappend [new file with mode: 0644]
meta-stx/recipes-security/krb5/files/soname_majversion.diff [new file with mode: 0644]
meta-stx/recipes-security/krb5/krb5_%.bbappend [new file with mode: 0644]
meta-stx/recipes-security/libtomcrypt/libtomcrypt_1.18.2.bb [new file with mode: 0644]
meta-stx/recipes-support/boost/boost_1.69.0.bbappend [new file with mode: 0644]
meta-stx/recipes-support/cluster-glue/cluster-glue/0001-don-t-compile-doc-and-Error-Fix.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-glue/cluster-glue/0001-ribcl.py.in-Warning-Fix.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-glue/cluster-glue/kill-stack-protector.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-glue/cluster-glue/tmpfiles [new file with mode: 0644]
meta-stx/recipes-support/cluster-glue/cluster-glue/volatiles [new file with mode: 0644]
meta-stx/recipes-support/cluster-glue/cluster-glue_1.0.12.bb [new file with mode: 0644]
meta-stx/recipes-support/cluster-resource-agents/resource-agents/01-disable-doc-build.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-resource-agents/resource-agents/02-set-OCF_ROOT_DIR-to-libdir-ocf.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-resource-agents/resource-agents/03-fix-header-defs-lookup.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-resource-agents/resource-agents/fix-install-sh-not-found.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-resource-agents/resource-agents/fs.sh-fix-builds-when-srcdir-and-builddir-are-sepera.patch [new file with mode: 0644]
meta-stx/recipes-support/cluster-resource-agents/resource-agents_4.0.1.bb [new file with mode: 0644]
meta-stx/recipes-support/cluster-resource-agents/resource-agents_4.0.1.bbappend [new file with mode: 0644]
meta-stx/recipes-support/deltarpm/deltarpm_git.bb [new file with mode: 0644]
meta-stx/recipes-support/deltarpm/files/0001-Makefile-patch-fix-build-errors.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0001-skip_wait_con_int_on_simplex.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0002-drbd-conditional-crm-dependency.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0003-drbd_report_condition.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0004-drbdadm-ipaddr-change.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0005-drbd_reconnect_standby_standalone.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0006-avoid-kernel-userspace-version-check.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0007-Update-OCF-to-attempt-connect-in-certain-states.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0008-Increase-short-cmd-timeout-to-15-secs.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0010-Include-sysmacros-for-major-minor-macros.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/0011-Disable-documentation.patch [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils/drbd.service [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils_8.4.3.bb [new file with mode: 0644]
meta-stx/recipes-support/drbd/drbd-utils_8.4.3.bbappend [new file with mode: 0644]
meta-stx/recipes-support/eventlog/eventlog_git.bb [new file with mode: 0644]
meta-stx/recipes-support/facter/facter_2.5.0.bbappend [new file with mode: 0644]
meta-stx/recipes-support/facter/files/0001-ps.patch [new file with mode: 0644]
meta-stx/recipes-support/facter/files/0002-personality.patch [new file with mode: 0644]
meta-stx/recipes-support/facter/files/0003-centos_remove-net-commands-that-can-timeout.patch [new file with mode: 0644]
meta-stx/recipes-support/facter/files/0004-centos_fix-ipv6-regex.patch [new file with mode: 0644]
meta-stx/recipes-support/facter/files/0004-centos_fix-ipv6-regex.patch.bak [new file with mode: 0644]
meta-stx/recipes-support/facter/files/0005-Hardcode-ipaddress-fact-to-localhost.patch [new file with mode: 0644]
meta-stx/recipes-support/facter/files/0006-facter-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/hardlink/hardlink_0.3.0.bb [new file with mode: 0644]
meta-stx/recipes-support/hiera/hiera_%.bbappend [new file with mode: 0644]
meta-stx/recipes-support/ldapscripts/files/allow-anonymous-bind-for-ldap-search.patch [new file with mode: 0644]
meta-stx/recipes-support/ldapscripts/files/ldap-user-setup-support.patch [new file with mode: 0644]
meta-stx/recipes-support/ldapscripts/files/ldapaddgroup.template.cgcs [new file with mode: 0755]
meta-stx/recipes-support/ldapscripts/files/ldapaddsudo.template.cgcs [new file with mode: 0755]
meta-stx/recipes-support/ldapscripts/files/ldapadduser.template.cgcs [new file with mode: 0755]
meta-stx/recipes-support/ldapscripts/files/ldapmodsudo.template.cgcs [new file with mode: 0755]
meta-stx/recipes-support/ldapscripts/files/ldapmoduser.template.cgcs [new file with mode: 0755]
meta-stx/recipes-support/ldapscripts/files/ldapscripts.conf.cgcs [new file with mode: 0755]
meta-stx/recipes-support/ldapscripts/files/ldapscripts.passwd [new file with mode: 0644]
meta-stx/recipes-support/ldapscripts/files/log_timestamp.patch [new file with mode: 0644]
meta-stx/recipes-support/ldapscripts/files/sudo-delete-support.patch [new file with mode: 0644]
meta-stx/recipes-support/ldapscripts/files/sudo-support.patch [new file with mode: 0644]
meta-stx/recipes-support/ldapscripts/ldapscripts_2.0.8.bb [new file with mode: 0644]
meta-stx/recipes-support/libtommath/libtommath_1.1.0.bb [new file with mode: 0644]
meta-stx/recipes-support/libtpms/libtpms_git.bb [new file with mode: 0644]
meta-stx/recipes-support/libverto/libverto_0.2.5.bb [new file with mode: 0644]
meta-stx/recipes-support/memcached/files/memcached.sysconfig [new file with mode: 0644]
meta-stx/recipes-support/memcached/memcached_%.bbappend [new file with mode: 0644]
meta-stx/recipes-support/mod-wsgi/mod-wsgi_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/nss-pam-ldapd/nss-pam-ldapd_%.bbappend [new file with mode: 0644]
meta-stx/recipes-support/openldap/files/0021-openldap-and-stx-source-and-config-files.patch [new file with mode: 0644]
meta-stx/recipes-support/openldap/files/rootdn-should-not-bypass-ppolicy.patch [new file with mode: 0644]
meta-stx/recipes-support/openldap/files/stx-slapd.service [new file with mode: 0644]
meta-stx/recipes-support/openldap/openldap_%.bbappend [new file with mode: 0644]
meta-stx/recipes-support/openstack-barbican-api/files/LICENSE [new file with mode: 0644]
meta-stx/recipes-support/openstack-barbican-api/files/barbican-api-paste.ini [new file with mode: 0644]
meta-stx/recipes-support/openstack-barbican-api/files/barbican.conf [new file with mode: 0644]
meta-stx/recipes-support/openstack-barbican-api/files/gunicorn-config.py [new file with mode: 0644]
meta-stx/recipes-support/openstack-barbican-api/files/openstack-barbican-api.service [new file with mode: 0644]
meta-stx/recipes-support/openstack-barbican-api/openstack-barbican-api.bb [new file with mode: 0644]
meta-stx/recipes-support/os-service-types/python2-os-service-types_1.3.0.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/network/Don-t-write-absent-to-redhat-route-files-and-test-fo.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/network/fix-absent-options.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/network/ipv6-static-route-support.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/network/permit-inservice-update-of-static-routes.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/network/puppet-network-Kilo-quilt-changes.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/network/puppet-network-support-ipv6.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/network/route-options-support.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-barbican/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-barbican/puppet-barbican-do-not-fail-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-barbican/puppet-barbican-fix-the-pkg-and-service-names-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-boolean/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0001-Roll-up-TIS-patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0002-Newton-rebase-fixes.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0003-Ceph-Jewel-rebase.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0004-US92424-Add-OSD-support-for-persistent-naming.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0005-Remove-puppetlabs-apt-as-ceph-requirement.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0006-ceph-disk-prepare-invalid-data-disk-value.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0007-Add-StarlingX-specific-restart-command-for-Ceph-moni.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0008-ceph-mimic-prepare-activate-osd.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0009-fix-ceph-osd-disk-partition-for-nvme-disks.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0010-wipe-unprepared-disks.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/0011-puppet-ceph-changes-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ceph/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-certmonger/0001-puppet-certmonger-adjust-path-to-poky-rootfs.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-certmonger/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-collectd/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0001-puppet-dnsmasq-Kilo-quilt-patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0002-Fixing-mismatched-permission-on-dnsmasq-conf.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0003-Support-management-of-tftp_max-option.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0004-Enable-clear-DNS-cache-on-reload.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0005-puppet-dnsmasq-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-dnsmasq/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-dnsmasq/metadata.json.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0001-TIS-Patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0002-Disable-timeout-for-mkfs-command.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0003-drbd-parallel-to-serial-synchronization.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0004-US-96914-reuse-existing-drbd-cinder-resource.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0005-Add-PausedSync-states-to-acceptable-cstate.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0006-CGTS-7164-Add-resource-options-cpu-mask-to-affine-drbd-kernel-threads.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0007-Add-disk-by-path-test.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0008-CGTS-7953-support-for-new-drbd-resources.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/0009-drbd-slow-before-swact.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-drbd/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-etcd/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-etcd/puppet-etcd-changes-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-filemapper/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-filemapper/metadata.json.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-horizon/0001-Update-memcached-dependency.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-horizon/0002-puppet-horizon-changes-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-horizon/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0001-pike-rebase-squash-titanium-patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0002-remove-the-Keystone-admin-app.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0003-remove-eventlet_bindhost-from-Keystoneconf.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0004-escape-special-characters-in-bootstrap.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0005-Add-support-for-fernet-receipts.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0006-workaround-Adjust-keystone-naming-to-poky.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0007-puppet-keystone-specify-full-path-to-openrc.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/0008-params.pp-fix-the-service-name-of-openstack-keystone.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-keystone/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-kmod/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ldap/0001-puppet-ldap-add-os-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ldap/0002-puppet-ldap-poky-stx-fix-pkg-name.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-ldap/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-memcached/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/0001-Stx-uses-puppet-boolean-instead-of-adrien-boolean.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/Don-t-write-absent-to-redhat-route-files-and-test-fo.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/fix-absent-options.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/ipv6-static-route-support.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/metadata.json.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/permit-inservice-update-of-static-routes.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-Kilo-quilt-changes.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-config-poky-provider.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-support-ipv6.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-network/route-options-support.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-nslcd/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-nslcd/metadata.json.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-nslcd/puppet-nslcd-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-nssdb/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-nssdb/metadata.json.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-openstacklib/0001-Roll-up-TIS-patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-openstacklib/0002-puppet-openstacklib-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-openstacklib/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-oslo/0001-Remove-log_dir-from-conf-files.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-oslo/0002-add-psycopg2-drivername-to-postgresql-settings.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-oslo/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-puppi/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-puppi/puppet-puppi-adjust-path.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-puppi/puppet-puppi-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-staging/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-sysctl/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-vlan/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet-vlan/metadata.json.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/4.8.2/add_puppet_gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet-poky-dnf.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.conf [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.init [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.service [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/puppet-poky-yum.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppet/puppet-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-apache/0001-maint-Fix-conditional-in-vhost-ssl-template.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-apache/0002-maint-Fix-the-vhost-ssl-template-correctly-this-time.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-apache/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-apache/puppetlabs-apache-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-concat/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-create-resources/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-create-resources/metadata.json.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-firewall/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-firewall/poky-firewall-updates.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-poky.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-random-fully-support.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0001-Roll-up-TIS-patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0002-disable-config-validation-prechecks.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0003-Fix-global_options-log-default-value.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0004-Stop-invalid-warning-message [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-inifile/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0001-puppet-lvm-kilo-quilt-changes.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0002-UEFI-pvcreate-fix.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0003-US94222-Persistent-Dev-Naming.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0004-extendind-nuke_fs_on_resize_failure-functionality.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-lvm/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-lvm/Fix-the-logical-statement-for-nuke_fs_on_resize.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0001-Fix-ruby-path.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0001-Stx-uses-nanliu-staging-module.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0002-puppet-mysql-changes-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0003-puppet-mysqltuner-adjust-path.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-mysql/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0001-Roll-up-TIS-patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0002-remove-puppetlabs-apt-as-a-requirement.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0003-puppetlabs-postgresql-account-for-naming-diffs.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0004-poky-postgresql-updates.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0005-puppetlabs-postgresql-poky.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0006-adjust_path-remove-refs-to-local-bin.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/postgresql.service [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0001-Roll-up-TIS-patches.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0002-Changed-cipher-specification-to-openssl-format.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0004-Partially-revert-upstream-commit-f7c3a4a637d59f3065d.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0005-Remove-the-rabbitmq_nodename-fact.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0007-init.pp-do-not-check-the-apt-resource.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0008-puppet-rabbitmq-poky.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0009-remove-apt-requirement.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-stdlib/0001-Filter-password-in-logs.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/puppetlabs-stdlib/Add-gemspec.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0001-puppet-manifest-apply-rebase-adjust-path.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0002-puppet-manifests-port-Adjust-path-default-bindir.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0003-puppet-dcmanager-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0004-puppet-dcorch-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0005-puppet-sysinv-updates-for-poky-stx.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0006-puppet-manifest-apply-do-not-treat-warnings-as-block.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0007-puppet-manifests-etcd-override-typo-and-journalctl.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0008-puppet-manifests-keystone-include-platform-client.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0009-puppet-manifests-lvm-remove-lvmetad.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/0010-puppet-manifest-apply-workaround-to-ignore-known-err.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/apply_network_config_poky.sh [new file with mode: 0755]
meta-stx/recipes-support/puppet/files/stx-puppet/get-boot-device-from-cmdline.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/files/stx-puppet/poky-specific-apply-network-config-script.patch [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-barbican_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-barbican_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-boolean_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-ceph_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-ceph_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-certmonger_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-certmonger_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-collectd_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-dnsmasq_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-dnsmasq_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-drbd_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-drbd_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-etcd_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-etcd_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-filemapper_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-horizon_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-horizon_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-keystone_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-keystone_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-kmod_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-ldap_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-ldap_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-memcached_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-network_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-network_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-nslcd_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-nslcd_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-nssdb_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-openstacklib_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-openstacklib_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-oslo_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-oslo_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-puppi_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-puppi_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-staging_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-sysctl_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-vlan_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet-vswitch_%.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet_4.8.2.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppet_5.4.0.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-apache_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-apache_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-concat_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-create-resources_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-firewall_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-firewall_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-haproxy_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-haproxy_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-inifile.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-lvm_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-lvm_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-mysql_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-mysql_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-postgresql_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-postgresql_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-rabbitmq_git.bb [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-rabbitmq_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/puppetlabs-stdlib_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/puppet/stx-puppet_git.bb [new file with mode: 0644]
meta-stx/recipes-support/qpid-proton/qpid-proton/fix-missing-libary-for-cpp-binding.patch [new file with mode: 0644]
meta-stx/recipes-support/qpid-proton/qpid-proton_0.28.0.bb [new file with mode: 0644]
meta-stx/recipes-support/ruby-shadow/ruby-shadow_git.bbappend [new file with mode: 0644]
meta-stx/recipes-support/sshpass/sshpass.inc [new file with mode: 0644]
meta-stx/recipes-support/sshpass/sshpass_1.06.bb [new file with mode: 0644]
meta-stx/recipes-support/syslog-ng/syslog-ng_%.bbappend [new file with mode: 0644]
meta-stx/recipes-upstream/python/python-boto3.inc [new file with mode: 0644]
meta-stx/recipes-upstream/python/python-boto3_1.10.25.bb [new file with mode: 0644]

diff --git a/meta-stx/.gitignore b/meta-stx/.gitignore
new file mode 100644 (file)
index 0000000..1377554
--- /dev/null
@@ -0,0 +1 @@
+*.swp
diff --git a/meta-stx/LICENSE b/meta-stx/LICENSE
new file mode 100644 (file)
index 0000000..752c239
--- /dev/null
@@ -0,0 +1,209 @@
+Copyright (C) 2019 Wind River Systems, Inc.
+
+Source code included in the tree for individual recipes is under the LICENSE
+stated in the associated recipe (.bb file) unless otherwise stated.
+
+The metadata is under the following license unless otherwise stated.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/meta-stx/README.md b/meta-stx/README.md
new file mode 100644 (file)
index 0000000..98dffe5
--- /dev/null
@@ -0,0 +1,148 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+#
+meta-stx
+=========
+
+Build
+------------------------
+```
+git clone --branch r3_rebase git@github.com:zbsarashki/staging-stx.git
+cd staging-stx
+./setup.sh
+```
+
+Introduction
+------------------------
+
+This layer enables StarlingX on top of the Poky reference distribution.
+
+
+Dependencies
+-------------------------
+
+This layer depends on:
+
+```
+       URI: git://git.openembedded.org/meta-openembedded/
+       revision: HEAD
+
+       layers: meta-oe
+       meta-python
+       meta-networking
+```
+You are solely responsible for determining the appropriateness of using or redistributing the above dependencies and assume any risks associated with your exercise of permissions under the license.
+
+Maintenance
+-------------------------
+
+Maintainer:
+
+
+
+Building the meta-stx layer
+---------------------------
+
+
+Setup workspace
+```
+mkdir -p $P/workspace/{layers,build}
+cd $P/workspace/layers
+
+git clone --branch thud git://git.yoctoproject.org/poky.git
+git clone --branch thud git://git.openembedded.org/meta-openembedded
+git clone --branch thud git://git.yoctoproject.org/meta-virtualization
+git clone --branch thud git://git.yoctoproject.org/meta-cloud-services
+git clone --branch thud git://git.yoctoproject.org/meta-intel
+git clone --branch thud git://git.yoctoproject.org/meta-intel-qat
+git clone --branch thud git://git.yoctoproject.org/meta-selinux
+git clone --branch thud git://git.yoctoproject.org/meta-security
+git clone --branch thud https://github.com/jiazhang0/meta-secure-core.git
+git clone --branch thud https://github.com/intel-iot-devkit/meta-iot-cloud.git 
+git clone --branch thud https://github.com/rauc/meta-rauc.git
+git clone --branch thud git://git.yoctoproject.org/meta-intel
+git clone --branch thud git://git.yoctoproject.org/meta-intel-qat
+git clone --branch thud https://github.com/intel-iot-devkit/meta-iot-cloud.git
+git clone https://github.com/zbsarashki/meta-stx.git
+
+```
+Add the following layers to conf/bblayers.conf
+
+```
+
+P=Path to workspace 
+
+cd $P/workspace/layers/poky
+source oe-init-build-env $P/workspace/build
+
+cat > conf/bblayers.conf << EOF
+# POKY_BBLAYERS_CONF_VERSION is increased each time build/conf/bblayers.conf
+# changes incompatibly
+POKY_BBLAYERS_CONF_VERSION = "2"
+BBPATH = "\${TOPDIR}"
+BBFILES ?= ""
+
+BBLAYERS ?= " \\
+       $P/workspace/layers/poky/meta \\
+       $P/workspace/layers/poky/meta-poky \\
+       $P/workspace/layers/poky/meta-yocto-bsp \\
+       $P/workspace/layers/meta-openembedded/meta-oe \\
+       $P/workspace/layers/meta-openembedded/meta-networking \\
+       $P/workspace/layers/meta-openembedded/meta-filesystems \\
+       $P/workspace/layers/meta-openembedded/meta-perl \\
+       $P/workspace/layers/meta-openembedded/meta-python \\
+       $P/workspace/layers/meta-openembedded/meta-webserver \\
+       $P/workspace/layers/meta-openembedded/meta-initramfs \\
+       $P/workspace/layers/meta-openembedded/meta-gnome \\
+       $P/workspace/layers/meta-virtualization \\
+       $P/workspace/layers/meta-cloud-services \\
+       $P/workspace/layers/meta-cloud-services/meta-openstack \\
+       $P/workspace/layers/meta-cloud-services/meta-openstack-aio-deploy \\
+       $P/workspace/layers/meta-cloud-services/meta-openstack-compute-deploy \\
+       $P/workspace/layers/meta-cloud-services/meta-openstack-controller-deploy \\
+       $P/workspace/layers/meta-cloud-services/meta-openstack-qemu \\
+       $P/workspace/layers/meta-cloud-services/meta-openstack-swift-deploy \\
+       $P/workspace/layers/meta-secure-core/meta-signing-key \\
+       $P/workspace/layers/meta-secure-core/meta-efi-secure-boot \\
+       $P/workspace/layers/meta-secure-core/meta-encrypted-storage \\
+       $P/workspace/layers/meta-secure-core/meta-integrity \\
+       $P/workspace/layers/meta-secure-core/meta-tpm2 \\
+       $P/workspace/layers/meta-secure-core/meta \\
+       $P/workspace/layers/meta-security \\
+       $P/workspace/layers/meta-security/meta-security-compliance \\
+       $P/workspace/layers/meta-selinux \\
+       $P/workspace/layers/meta-intel \\
+       $P/workspace/layers/meta-intel-qat \\
+       $P/workspace/layers/meta-rauc \\
+       $P/workspace/layers/meta-iot-cloud \\
+       $P/workspace/layers/meta-stx \\
+       "
+EOF
+       sed -i -e 's/^\(#MACHINE.*\"qemuarm\"\)/MACHINE \?= \"intel-corei7-64\"\n\1/g' conf/local.conf
+       echo 'PREFERRED_PROVIDER_virtual/kernel = "linux-yocto"' >> conf/local.conf
+
+```
+
+Use Case:
+---------------------------
+
+
+# Legal Notices
+
+All product names, logos, and brands are property of their respective owners. All company, product and service names used in this software are for identification purposes only. Wind River is a registered trademark of Wind River Systems, Inc. Linux is a registered trademark of Linus Torvalds.
+
+Disclaimer of Warranty / No Support: Wind River does not provide support and maintenance services for this software, under Wind River’s standard Software Support and Maintenance Agreement or otherwise. Unless required by applicable law, Wind River provides the software (and each contributor provides its contribution) on an “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, either express or implied, including, without limitation, any warranties of TITLE, NONINFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the software and assume any risks associated with your exercise of permissions under the license.
diff --git a/meta-stx/classes/extrausers-config.bbclass b/meta-stx/classes/extrausers-config.bbclass
new file mode 100644 (file)
index 0000000..b4ffe9c
--- /dev/null
@@ -0,0 +1,37 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# This bbclass creates users based on EXTRA_USERS_PARAMS_CONFIG through
+# the extrausers bbclass and is intended to take config options for
+# configuring each user's unique environment.
+
+inherit extrausers
+
+
+PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS_CONFIG'))]}"
+
+ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group_config;"
+
+set_user_group_config () {
+
+# FIXME: parse EXTRA_USERS_PARAMS_CONFIG for options
+       
+       EXTRA_USERS_PARAMS=" ${EXTRA_USERS_PARAMS_CONFIG}"
+       set_user_group
+
+       #Extend path variable for sysadmin
+       echo 'PATH=/sbin:/usr/sbin:$PATH' >> ${IMAGE_ROOTFS}/home/sysadmin/.bashrc
+       chown sysadmin:sys_protected ${IMAGE_ROOTFS}/home/sysadmin/.bashrc
+}
diff --git a/meta-stx/classes/openssl10.bbclass b/meta-stx/classes/openssl10.bbclass
new file mode 100644 (file)
index 0000000..1e92746
--- /dev/null
@@ -0,0 +1,45 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+python __anonymous() {
+   if bb.utils.contains('DEPENDS', 'openssl', True, False, d) or \
+      bb.utils.contains('DEPENDS', 'openssl-native', True, False, d):
+          d.setVar('DEPENDS', d.getVar('DEPENDS').replace('openssl', 'openssl10'))
+          d.setVar('DEPENDS', d.getVar('DEPENDS').replace('openssl-native', 'openssl10-native'))
+}
+
+
+python do_ssl10_mk_symlink() {
+
+    import shutil
+    l = d.getVar("STAGING_INCDIR") + "/openssl"
+
+    if os.path.islink(l):
+        os.unlink(l)
+    elif os.path.isdir(l):
+        shutil.rmtree(l)
+
+    os.symlink("openssl10/openssl",l)
+
+    l = d.getVar("STAGING_LIBDIR")
+    if os.path.islink(l + "/libssl.so"):
+        os.unlink(l + "/libssl.so")
+        os.unlink(l + "/libcrypto.so")
+
+    os.symlink("libssl.so.1.0.2", l + "/libssl.so")
+    os.symlink("libcrypto.so.1.0.2", l + "/libcrypto.so")
+}
+
+addtask ssl10_mk_symlink before do_configure after do_prepare_recipe_sysroot
diff --git a/meta-stx/classes/python-backports-init.bbclass b/meta-stx/classes/python-backports-init.bbclass
new file mode 100644 (file)
index 0000000..02fb480
--- /dev/null
@@ -0,0 +1,22 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit python-dir
+
+RDEPENDS_${PN} += "python-backports-init"
+
+do_install_prepend() {
+    rm -rf $(find . -path "*/backports/__init__.py" -type f)
+}
diff --git a/meta-stx/classes/stx-anaconda-image.bbclass b/meta-stx/classes/stx-anaconda-image.bbclass
new file mode 100644 (file)
index 0000000..7232c0c
--- /dev/null
@@ -0,0 +1,369 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RPM_POSTPROCESS_COMMANDS_append = "wrl_installer;"
+do_rootfs[vardeps] += "INSTALLER_TARGET_BUILD INSTALLER_TARGET_IMAGE"
+
+# Fix system-shutdown hang caused by kernel log rate limiting
+APPEND_append = " printk.devkmsg=on"
+
+INSTPRODUCT ?= "${DISTRO_NAME}"
+INSTVER     ?= "${DISTRO_VERSION}"
+INSTBUGURL  ?= "http://www.windriver.com/"
+
+# NOTE: Please update anaconda-init when you change INSTALLER_CONFDIR, use "="
+#       but not "?=" since this is not configurable.
+INSTALLER_CONFDIR = "${IMAGE_ROOTFS}/installer-config"
+KICKSTART_FILE ??= ""
+KICKSTART_FILE_EXTRA ??= ""
+WRL_INSTALLER_CONF ?= ""
+REPO_EXCLUDE_PKG ?= ""
+
+build_iso_prepend() {
+       install -d ${ISODIR}
+       ln -snf /.discinfo ${ISODIR}/.discinfo
+       ln -snf /.buildstamp ${ISODIR}/.buildstamp
+       ln -snf /Packages ${ISODIR}/Packages
+}
+
+build_iso_append() {
+       implantisomd5 ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
+}
+
+# Check WRL_INSTALLER_CONF and copy it to
+# ${IMAGE_ROOTFS}/.buildstamp.$prj_name when exists
+wrl_installer_copy_buildstamp() {
+    prj_name=$1
+    buildstamp=$2
+    if [ -f $buildstamp ]; then
+        bbnote "Using $buildstamp as the buildstamp"
+        cp $buildstamp ${IMAGE_ROOTFS}/.buildstamp.$prj_name
+    else
+        bbfatal "Can't find WRL_INSTALLER_CONF: $buildstamp"
+    fi
+}
+
+# Hardlink when possible, otherwise copy.
+# $1: src
+# $2: target
+wrl_installer_hardlinktree() {
+    src_dev="`stat -c %d $1`"
+    if [ -e "$2" ]; then
+        tgt_dev="`stat -c %d $2`"
+    else
+        tgt_dev="`stat -c %d $(dirname $2)`"
+    fi
+    hdlink=""
+    if [ "$src_dev" = "$tgt_dev" ]; then
+        hdlink="--link"
+    fi
+    cp -rvf $hdlink $1 $2
+}
+
+wrl_installer_copy_local_repos() {
+    deploy_dir_rpm=$1
+
+    if [ -d "$deploy_dir_rpm" ]; then
+        echo "Copy rpms from target build to installer image."
+        mkdir -p ${IMAGE_ROOTFS}/Packages.$prj_name
+
+        : > ${IMAGE_ROOTFS}/Packages.$prj_name/.treeinfo
+        echo "[general]" >> ${IMAGE_ROOTFS}/Packages.$prj_name/.treeinfo
+        echo "version = ${DISTRO_VERSION}" >> ${IMAGE_ROOTFS}/Packages.$prj_name/.treeinfo
+
+        # Determine the max channel priority
+        channel_priority=5
+        for pt in $installer_target_archs ; do
+            channel_priority=$(expr $channel_priority + 5)
+        done
+
+        : > ${IMAGE_ROOTFS}/Packages.$prj_name/.feedpriority
+        for arch in $installer_target_archs; do
+            if [ -d "$deploy_dir_rpm/"$arch -a ! -d "${IMAGE_ROOTFS}/Packages.$prj_name/"$arch ]; then
+                channel_priority=$(expr $channel_priority - 5)
+                echo "$channel_priority $arch" >> ${IMAGE_ROOTFS}/Packages.$prj_name/.feedpriority
+                wrl_installer_hardlinktree "$deploy_dir_rpm/"$arch "${IMAGE_ROOTFS}/Packages.$prj_name/."
+            fi
+        done
+
+        for pkg in ${REPO_EXCLUDE_PKG}; do
+            rm -rf ${IMAGE_ROOTFS}/Packages.$prj_name/${pkg}
+        done
+
+        createrepo_c --update -q ${IMAGE_ROOTFS}/Packages.$prj_name/
+    fi
+}
+
+# Update .buildstamp and copy rpm packages to IMAGE_ROOTFS
+wrl_installer_copy_pkgs() {
+
+    target_build="$1"
+    target_image="$2"
+    prj_name="$3"
+    if [ -n "$4" ]; then
+        installer_conf="$4"
+    else
+        installer_conf=""
+    fi
+
+    common_grep="-e '^ALL_MULTILIB_PACKAGE_ARCHS=.*' \
+            -e '^MULTILIB_VARIANTS=.*' -e '^PACKAGE_ARCHS=.*'\
+            -e '^PACKAGE_ARCH=.*' -e '^PACKAGE_INSTALL_ATTEMPTONLY=.*' \
+            -e '^DISTRO=.*' -e '^DISTRO_NAME=.*' -e '^DISTRO_VERSION=.*' \
+            "
+
+    if [ -f "$installer_conf" ]; then
+        eval "grep -e \"^PACKAGE_INSTALL=.*\" $common_grep $installer_conf \
+            | sed -e 's/=/=\"/' -e 's/$/\"/' > ${BB_LOGFILE}.distro_vals"
+
+        eval "cat $target_build/installersupport_$target_image | \
+            grep -e '^WORKDIR=.*' >> ${BB_LOGFILE}.distro_vals"
+
+        eval `cat ${BB_LOGFILE}.distro_vals`
+        if [ $? -ne 0 ]; then
+            bbfatal "Something is wrong in $installer_conf, please correct it"
+        fi
+        if [ -z "$PACKAGE_ARCHS" -o -z "$PACKAGE_INSTALL" ]; then
+            bbfatal "PACKAGE_ARCHS or PACKAGE_INSTALL is null, please check $installer_conf"
+        fi
+    else
+        eval "cat $target_build/installersupport_$target_image | \
+            grep $common_grep -e '^PN=.*' -e '^SUMMARY=.*' -e '^WORKDIR=.*'\
+            -e '^DESCRIPTION=.*' -e '^export PACKAGE_INSTALL=.*' > ${BB_LOGFILE}.distro_vals"
+
+        eval `cat ${BB_LOGFILE}.distro_vals`
+    fi
+
+    export installer_default_arch="$PACKAGE_ARCH"
+    # Reverse it for priority
+    export installer_default_archs="`for arch in $PACKAGE_ARCHS; do echo $arch; done | tac | tr - _`"
+    installer_target_archs="$installer_default_archs"
+    if [ -n "$MULTILIB_VARIANTS" ]; then
+        export MULTILIB_VARIANTS
+        mlarchs_reversed="`for mlarch in $ALL_MULTILIB_PACKAGE_ARCHS; do echo $mlarch; \
+            done | tac | tr - _`"
+        for arch in $mlarchs_reversed; do
+            if [ "$arch" != "noarch" -a "$arch" != "all" -a "$arch" != "any" ]; then
+                installer_target_archs="$installer_target_archs $arch"
+            fi
+        done
+    fi
+    export installer_target_archs
+
+    # Save the vars to .buildstamp when no installer_conf
+    if [ ! -f "$installer_conf" ]; then
+        cat >> ${IMAGE_ROOTFS}/.buildstamp.$prj_name <<_EOF
+DISTRO=$DISTRO
+DISTRO_NAME=$DISTRO_NAME
+DISTRO_VERSION=$DISTRO_VERSION
+
+[Rootfs]
+LIST=$PN
+
+[$PN]
+SUMMARY=$SUMMARY
+DESCRIPTION=$DESCRIPTION
+
+PACKAGE_INSTALL=$PACKAGE_INSTALL
+PACKAGE_INSTALL_ATTEMPTONLY=$PACKAGE_INSTALL_ATTEMPTONLY
+ALL_MULTILIB_PACKAGE_ARCHS=$ALL_MULTILIB_PACKAGE_ARCHS
+MULTILIB_VARIANTS=$MULTILIB_VARIANTS
+PACKAGE_ARCHS=$PACKAGE_ARCHS
+PACKAGE_ARCH=$PACKAGE_ARCH
+IMAGE_LINGUAS=${IMAGE_LINGUAS}
+_EOF
+    fi
+
+    if [ -d "$WORKDIR/oe-rootfs-repo/rpm" ]; then
+        # Copy local repos while the image is not initramfs
+        bpn=${BPN}
+        if [ "${bpn##*initramfs}" = "${bpn%%initramfs*}" ]; then
+            wrl_installer_copy_local_repos $WORKDIR/oe-rootfs-repo/rpm
+        fi
+        echo "$DISTRO::$prj_name::$DISTRO_NAME::$DISTRO_VERSION" >> ${IMAGE_ROOTFS}/.target_build_list
+    fi
+}
+
+wrl_installer_get_count() {
+    sum=0
+    for i in $*; do
+        sum=$(expr $sum + 1)
+    done
+    echo $sum
+}
+
+wrl_installer[vardepsexclude] = "DATETIME"
+wrl_installer() {
+    cat >${IMAGE_ROOTFS}/.discinfo <<_EOF
+${DATETIME}
+${DISTRO_NAME} ${DISTRO_VERSION}
+${TARGET_ARCH}
+_EOF
+
+    : > ${IMAGE_ROOTFS}/.target_build_list
+    counter=0
+    targetimage_counter=0
+    for target_build in ${INSTALLER_TARGET_BUILD}; do
+        target_build="`readlink -f $target_build`"
+        echo "Installer Target Build: $target_build"
+        counter=$(expr $counter + 1)
+        prj_name="`echo $target_build | sed -e 's#/ *$##g' -e 's#.*/##'`"
+        prj_name="$prj_name-$counter"
+
+           # Generate .buildstamp
+           if [ -n "${WRL_INSTALLER_CONF}" ]; then
+               installer_conf="`echo ${WRL_INSTALLER_CONF} | awk '{print $'"$counter"'}'`"
+               wrl_installer_copy_buildstamp $prj_name $installer_conf
+           else
+               cat >${IMAGE_ROOTFS}/.buildstamp.$prj_name <<_EOF
+[Main]
+Product=${INSTPRODUCT}
+Version=${INSTVER}
+BugURL=${INSTBUGURL}
+IsFinal=True
+UUID=${DATETIME}.${TARGET_ARCH}
+_EOF
+           fi
+
+           if [ -f "$target_build" ]; then
+               filename=$(basename "$target_build")
+               extension="${filename##*.}"
+               bpn=${BPN}
+               # Do not copy image for initramfs
+               if [ "${bpn##*initramfs}" != "${bpn%%initramfs*}" ]; then
+                   continue
+               elif [ "x$extension" = "xext2" -o "x$extension" = "xext3" -o "x$extension" = "xext4" ]; then
+                   echo "Image based target install selected."
+                   mkdir -p "${IMAGE_ROOTFS}/LiveOS.$prj_name"
+                   wrl_installer_hardlinktree "$target_build" "${IMAGE_ROOTFS}/LiveOS.$prj_name/rootfs.img"
+                   echo "::$prj_name::" >> ${IMAGE_ROOTFS}/.target_build_list
+               else
+                   bberror "Unsupported image: $target_build."
+                   bberror "The image must be ext2, ext3 or ext4"
+                   exit 1
+               fi
+           elif [ -d "$target_build" ]; then
+               targetimage_counter=$(expr $targetimage_counter + 1)
+               target_image="`echo ${INSTALLER_TARGET_IMAGE} | awk '{print $'"$targetimage_counter"'}'`"
+               echo "Target Image: $target_image"
+               wrl_installer_copy_pkgs $target_build $target_image $prj_name $installer_conf
+           else
+               bberror "Invalid configuration of INSTALLER_TARGET_BUILD: $target_build."
+               bberror "It must either point to an image (ext2, ext3 or ext4) or to the root of another build directory"
+               exit 1
+           fi
+
+           ks_cfg="${INSTALLER_CONFDIR}/ks.cfg.$prj_name"
+           if [ -n "${KICKSTART_FILE}" ]; then
+               ks_file="`echo ${KICKSTART_FILE} | awk '{print $'"$counter"'}'`"
+               bbnote "Copying kickstart file $ks_file to $ks_cfg ..."
+               mkdir -p ${INSTALLER_CONFDIR}
+               cp $ks_file $ks_cfg
+           fi
+           if [ -n "${KICKSTART_FILE_EXTRA}" ]; then
+               for ks_file in ${KICKSTART_FILE_EXTRA}; do
+                   if [ -f $ks_file ]; then
+                       cp $ks_file ${INSTALLER_CONFDIR}
+                   else
+                       bbwarn "The kickstart file $ks_file in KICKSTART_FILE_EXTRA doesn't exist!"
+                   fi
+               done
+           fi
+    done
+
+    # Setup the symlink if only one target build dir.
+    if [ "$counter" = "1" ]; then
+        prj_name="`awk -F:: '{print $2}' ${IMAGE_ROOTFS}/.target_build_list`"
+        entries=".buildstamp LiveOS Packages installer-config/ks.cfg"
+        for i in $entries; do
+            if [ -e ${IMAGE_ROOTFS}/$i.$prj_name ]; then
+                ln -sf `basename $i.$prj_name` ${IMAGE_ROOTFS}/$i
+            fi
+        done
+    fi
+}
+
+python __anonymous() {
+    if "selinux" in d.getVar("DISTRO_FEATURES", True).split():
+        raise bb.parse.SkipPackage("Unable to build the installer when selinux is enabled.")
+
+    if bb.data.inherits_class('image', d):
+        if d.getVar("DISTRO", True) != "anaconda":
+            raise bb.parse.SkipPackage("Set DISTRO = 'anaconda' in local.conf")
+
+        target_builds = d.getVar('INSTALLER_TARGET_BUILD', True)
+        if not target_builds:
+            errmsg = "No INSTALLER_TARGET_BUILD is found,\n"
+            errmsg += "set INSTALLER_TARGET_BUILD = '<target-build-topdir>' and\n"
+            errmsg += "INSTALLER_TARGET_IMAGE = '<target-image-pn>' to do RPMs\n"
+            errmsg += "install, or\n"
+            errmsg += "set INSTALLER_TARGET_BUILD = '<target-build-image>' to do\n"
+            errmsg += "image copy install"
+            raise bb.parse.SkipPackage(errmsg)
+
+        count = 0
+        for target_build in target_builds.split():
+            if not os.path.exists(target_build):
+                raise bb.parse.SkipPackage("The %s of INSTALLER_TARGET_BUILD does not exist" % target_build)
+
+            if os.path.isdir(target_build):
+                count += 1
+
+        # While do package management install
+        if count > 0:
+            target_images = d.getVar('INSTALLER_TARGET_IMAGE', True)
+            if not target_images:
+                errmsg = "The INSTALLER_TARGET_BUILD is a dir, but not found INSTALLER_TARGET_IMAGE,\n"
+                errmsg += "set INSTALLER_TARGET_IMAGE = '<target-image-pn>' to do RPMs install"
+                raise bb.parse.SkipPackage(errmsg)
+
+            elif count != len(target_images.split()):
+                errmsg = "The INSTALLER_TARGET_BUILD has %s build dirs: %s\n" % (count, target_builds)
+                errmsg += "But INSTALLER_TARGET_IMAGE has %s build images: %s\n" % (len(target_images.split()), target_images)
+                raise bb.parse.SkipPackage(errmsg)
+
+        # The count of INSTALLER_TARGET_BUILD and WRL_INSTALLER_CONF must match when set.
+        wrlinstaller_confs = d.getVar('WRL_INSTALLER_CONF', True)
+        if wrlinstaller_confs:
+            if len(wrlinstaller_confs.split()) != len(target_builds.split()):
+                raise bb.parse.SkipPackage("The count of INSTALLER_TARGET_BUILD and WRL_INSTALLER_CONF not match!")
+            for wrlinstaller_conf in wrlinstaller_confs.split():
+                if not os.path.exists(wrlinstaller_conf):
+                    raise bb.parse.SkipPackage("The installer conf %s in WRL_INSTALLER_CONF doesn't exist!" % wrlinstaller_conf)
+
+        # The count of INSTALLER_TARGET_BUILD and KICKSTART_FILE must match when set.
+        kickstart_files = d.getVar('KICKSTART_FILE', True)
+        if kickstart_files:
+            if len(kickstart_files.split()) != len(target_builds.split()):
+                raise bb.parse.SkipPackage("The count of INSTALLER_TARGET_BUILD and KICKSTART_FILE not match!")
+            for kickstart_file in kickstart_files.split():
+                if not os.path.exists(kickstart_file):
+                    raise bb.parse.SkipPackage("The kickstart file %s in KICKSTART_FILE doesn't exist!" % kickstart_file)
+
+}
+
+python build_syslinux_cfg () {
+    import copy
+    import sys
+
+    workdir = d.getVar('WORKDIR')
+    if not workdir:
+        bb.error("WORKDIR not defined, unable to package")
+        return
+
+    cfile = d.getVar('SYSLINUX_CFG')
+    if not cfile:
+        bb.fatal('Unable to read SYSLINUX_CFG')
+}
diff --git a/meta-stx/classes/stx-postrun.bbclass b/meta-stx/classes/stx-postrun.bbclass
new file mode 100644 (file)
index 0000000..8590c46
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# This class is intended to include workarounds and fine-tuning of the rootfs.
+# Most of the stuff in here should eventually move elsewhere.
+
+ROOTFS_POSTPROCESS_COMMAND_append = " stx_postprocess_rootfs;"
+# ETHDEV = "enp0s5"
+
+stx_postprocess_rootfs() {
+
+       # Issue: #83 /dev/root does not exist
+       # This workaround is to be removed once initramfs is added
+
+       cat > ${IMAGE_ROOTFS}/etc/udev/rules.d/99-dev-root-symlink.rules << \EOF
+KERNEL=="sda3", SYMLINK+="root"
+EOF
+}
diff --git a/meta-stx/conf/distro/files/ks/aio_ks.cfg b/meta-stx/conf/distro/files/ks/aio_ks.cfg
new file mode 100644 (file)
index 0000000..c1edfc9
--- /dev/null
@@ -0,0 +1,1019 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_pkglist.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio-rt
+-kernel-module-wrs-avp-rt
+-kernel-rt
+-kernel-rt-kvm
+-kernel-rt-tools
+-kernel-rt-tools-libs
+-kmod-drbd-rt
+-kmod-e1000e-rt
+-kmod-i40e-rt
+-kmod-ixgbe-rt
+-kmod-tpm-rt
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-rt-modules
+-qat16-rt
+@platform-controller-worker
+@updates-controller-worker
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a unique globally identifier
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_aio.cfg
+
+## NOTE: updates to partition sizes need to be also reflected in
+##  - stx-config/.../sysinv/conductor/manager.py:create_controller_filesystems()
+##  - stx-config/.../sysinv/common/constants.py
+##
+## NOTE: When adding partitions, we currently have a max of 4 primary partitions.
+##       If more than 4 partitions are required, we can use a max of 3 --asprimary,
+##       to allow 1 primary logical partition with extended partitions
+##
+## NOTE: Max default PV size must align with the default controllerfs sizes
+##
+## BACKUP_OVERHEAD = 20
+##
+## Physical install (for disks over 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_IMAGE_STOR_SIZE = 10
+## DEFAULT_DATABASE_STOR_SIZE = 20
+## DEFAULT_IMG_CONVERSION_STOR_SIZE = 20
+## BACKUP = DEFAULT_DATABASE_STOR_SIZE + DEFAULT_IMAGE_STOR_SIZE
+##                                     + BACKUP_OVERHEAD = 50
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+## CGCS_PV_SIZE = 10240 + 2*20480 + 20480 + 51200 + 8196 + 8196 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 233496
+##
+## small install - (for disks below 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_SMALL_IMAGE_STOR_SIZE = 10
+## DEFAULT_SMALL_DATABASE_STOR_SIZE = 10
+## DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = 10
+## DEFAULT_SMALL_BACKUP_STOR_SIZE = 40
+##
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+##
+## CGCS_PV_SIZE = 10240 + 2*10240 + 10240 + 40960 + 8192 + 8192 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 192528
+##
+## NOTE: To maintain upgrade compatibility within the volume group, keep the
+## undersized LOG_VOL_SIZE and SCRATCH_VOL_SIZE, but size the minimally
+## sized physical volume correctly.
+##
+##  R4 AIO installations:
+##  - R4 (case #1): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), /local_pv (239G)
+##  - R4 (case #2): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), cgts-vg (239G)
+##
+##  Upgrade migration will start with R5 install and create a partition to align
+##  above so filesystems within the volume group will be able to maintain their
+##  sizes in R5
+##    - R5 install  : /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), un-partitioned (336G)
+##    - R5 (case #1): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (97G), unpartitioned (239G)
+##    - R5 (case #2): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (336G)
+##
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((240*$gb)) ] ; then
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 192528/1024=188.01. CGCS_PV_SIZE=189*1024=193536. Using a disk with a
+    # size under 189GiB will fail.
+    CGCS_PV_SIZE=193536
+else
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 233496/1024=228.02. CGCS_PV_SIZE=229*1024=234496.
+    CGCS_PV_SIZE=234496
+fi
+
+ROOTFS_SIZE=20000
+LOG_VOL_SIZE=8000
+SCRATCH_VOL_SIZE=8000
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --size=500 --maxsize=$CGCS_PV_SIZE --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=controller
+subfunction=controller,worker
+system_type=All-in-one
+security_profile=$secprofile
+EOF
+
+%end
+
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_aio_and_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+# Update grub with custom kernel bootargs
+source /etc/init.d/cpumap_functions.sh
+n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
+  awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
+KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
+
+KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
+
+# If this is an all-in-one system, we need at least 4 CPUs
+if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
+    report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
+fi
+
+# Add kernel options for cpu isolation / affinity
+if [ ${n_cpus} -gt 1 ]
+then
+  base_cpulist=$(platform_expanded_cpu_list)
+  base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
+  avp_cpulist=$(vswitch_expanded_cpu_list)
+  norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
+  norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
+
+  if [[ "$subfunction" =~ lowlatency ]]; then
+    KERN_OPTS="${KERN_OPTS} isolcpus=${norcu_cpulist}"
+    KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
+  else
+    KERN_OPTS="${KERN_OPTS} isolcpus=${avp_cpulist}"
+  fi
+  KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
+  KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
+  KERN_OPTS="${KERN_OPTS} irqaffinity=${base_cpulist}"
+  # Update vswitch.conf
+  sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
+fi
+
+# Add kernel options to ensure selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to set NMI watchdog
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
+else
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+fi
+
+if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
+else
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on"
+fi
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+  KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# Add kernel options to disable kvm-intel.eptad on Broadwell
+# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
+then
+  KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# Add kernel option to avoid jiffies_lock contention on real-time kernel
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} skew_tick=1"
+fi
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_system_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+## Reserve more memory for base processes since the controller has higher
+## memory requirements but cap it to better handle systems with large
+## amounts of memory
+TOTALMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024)}')
+
+if [ -e /sys/devices/system/node/node0 ]; then
+  RESERVEDMEM=$(grep MemTotal /sys/devices/system/node/node0/meminfo | awk '{printf "%d\n", $4/1024}')
+else
+  RESERVEDMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024/4)}')
+fi
+
+if [ ${RESERVEDMEM} -lt 6144 ]; then
+    RESERVEDMEM=6144
+elif [ ${RESERVEDMEM} -gt 14500 ]; then
+    RESERVEDMEM=14500
+elif [ ${RESERVEDMEM} -gt 8192 ]; then
+    RESERVEDMEM=8192
+fi
+
+sed -i -e "s#\(WORKER_BASE_RESERVED\)=.*#\1=(\"node0:${RESERVEDMEM}MB:1\" \"node1:2000MB:0\" \"node2:2000MB:0\" \"node3:2000MB:0\")#g" /etc/platform/worker_reserved.conf
+
+# Update WORKER_CPU_LIST
+N_CPUS=$(cat /proc/cpuinfo 2>/dev/null | awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+sed -i "s/^WORKER_CPU_LIST=.*/WORKER_CPU_LIST=\"0-$((N_CPUS-1))\"/" /etc/platform/worker_reserved.conf
+
+%end
+
+
+# Template from: post_usb_controller.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+touch /tmp/repo-include
+
+if [ -d ${srcdir}/patches ]; then
+    echo "repo --name=updates --baseurl=file://${srcdir}/patches/" > /tmp/repo-include
+fi
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+mgmt_dev=none
+
+# Persist the boot device to the platform configuration. This will get
+# overwritten when config_controller is run.
+echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+# persist the default http port number to platform configuration. This
+# will get overwritten when config_controller is run.
+echo http_port=8080 >> /etc/platform/platform.conf
+
+# Build networking scripts
+cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+%end
+
+
+# Note, this section is different and replaced with a wget
+# if doing the initial install off the network
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -d $srcdir/Packages ] ; then
+    mkdir -p /mnt/sysimage/www/pages/feed/rel-19.12
+    cp -r $srcdir/Packages /mnt/sysimage/www/pages/feed/rel-19.12/Packages
+    cp -r $srcdir/repodata /mnt/sysimage/www/pages/feed/rel-19.12/repodata
+    cp $srcdir/*.cfg /mnt/sysimage/www/pages/feed/rel-19.12
+fi
+
+if [ -d $srcdir/patches ]; then
+    mkdir -p /mnt/sysimage/www/pages/updates/rel-19.12
+    cp -r $srcdir/patches/Packages /mnt/sysimage/www/pages/updates/rel-19.12/Packages
+    cp -r $srcdir/patches/repodata /mnt/sysimage/www/pages/updates/rel-19.12/repodata
+    mkdir -p /mnt/sysimage/opt/patching
+    cp -r $srcdir/patches/metadata /mnt/sysimage/opt/patching/metadata
+    mkdir -p /mnt/sysimage/opt/patching/packages/19.12
+    
+    find /mnt/sysimage/www/pages/updates/rel-19.12/Packages -name '*.rpm' \
+        | xargs --no-run-if-empty -I files cp --preserve=all files /mnt/sysimage/opt/patching/packages/19.12/
+fi
+
+# Create a uuid specific to this installation
+INSTALL_UUID=`uuidgen`
+echo $INSTALL_UUID > /mnt/sysimage/www/pages/feed/rel-19.12/install_uuid
+echo "INSTALL_UUID=$INSTALL_UUID" >> /mnt/sysimage/etc/platform/platform.conf
+%end
+
+%post
+
+# This is a USB install, so set ONBOOT=yes for network devices.
+# Doing this in the %post so we don't unintentionally setup a
+# network device during the installation.
+for f in /etc/sysconfig/network-scripts/ifcfg-*; do
+    if grep -q '^ONBOOT=' ${f}; then
+        sed -i 's/^ONBOOT=.*/ONBOOT=yes/' ${f}
+    else
+        echo "ONBOOT=yes" >> ${f}
+    fi
+    if grep -q '^IPV6_AUTOCONF=' ${f}; then
+        sed -i 's/^IPV6_AUTOCONF=.*/IPV6_AUTOCONF=no/' ${f}
+    else
+        echo "IPV6_AUTOCONF=no" >> ${f}
+    fi
+done
+
+%end
+
+
+# Template from: post_usb_addon.cfg
+%pre --erroronfail
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -f ${srcdir}/ks-addon.cfg ]; then
+    cp ${srcdir}/ks-addon.cfg /tmp/
+else
+    cat <<EOF > /tmp/ks-addon.cfg
+# No custom addon included
+EOF
+fi
+%end
+
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+# Store the ks-addon.cfg for debugging
+mkdir -p /mnt/sysimage/var/log/anaconda
+cp /tmp/ks-addon.cfg /mnt/sysimage/var/log/anaconda/
+%end
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+%include /tmp/ks-addon.cfg
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/aio_lowlatency_ks.cfg b/meta-stx/conf/distro/files/ks/aio_lowlatency_ks.cfg
new file mode 100644 (file)
index 0000000..9b2c0e4
--- /dev/null
@@ -0,0 +1,1018 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_pkglist_lowlatency.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio
+-kernel-module-wrs-avp
+-kernel
+-kernel-tools
+-kernel-tools-libs
+-kmod-drbd
+-kmod-e1000e
+-kmod-i40e
+-kmod-ixgbe
+-kmod-tpm
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-modules
+-qat16
+@platform-controller-worker-lowlatency
+@updates-controller-worker-lowlatency
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a unique globally identifier
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_aio.cfg
+
+## NOTE: updates to partition sizes need to be also reflected in
+##  - stx-config/.../sysinv/conductor/manager.py:create_controller_filesystems()
+##  - stx-config/.../sysinv/common/constants.py
+##
+## NOTE: When adding partitions, we currently have a max of 4 primary partitions.
+##       If more than 4 partitions are required, we can use a max of 3 --asprimary,
+##       to allow 1 primary logical partition with extended partitions
+##
+## NOTE: Max default PV size must align with the default controllerfs sizes
+##
+## BACKUP_OVERHEAD = 20
+##
+## Physical install (for disks over 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_IMAGE_STOR_SIZE = 10
+## DEFAULT_DATABASE_STOR_SIZE = 20
+## DEFAULT_IMG_CONVERSION_STOR_SIZE = 20
+## BACKUP = DEFAULT_DATABASE_STOR_SIZE + DEFAULT_IMAGE_STOR_SIZE
+##                                     + BACKUP_OVERHEAD = 50
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+## CGCS_PV_SIZE = 10240 + 2*20480 + 20480 + 51200 + 8196 + 8196 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 233496
+##
+## small install - (for disks below 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_SMALL_IMAGE_STOR_SIZE = 10
+## DEFAULT_SMALL_DATABASE_STOR_SIZE = 10
+## DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = 10
+## DEFAULT_SMALL_BACKUP_STOR_SIZE = 40
+##
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+##
+## CGCS_PV_SIZE = 10240 + 2*10240 + 10240 + 40960 + 8192 + 8192 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 192528
+##
+## NOTE: To maintain upgrade compatibility within the volume group, keep the
+## undersized LOG_VOL_SIZE and SCRATCH_VOL_SIZE, but size the minimally sized
+## physical volume correctly.
+##
+##  R4 AIO installations:
+##  - R4 (case #1): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), /local_pv (239G)
+##  - R4 (case #2): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), cgts-vg (239G)
+##
+##  Upgrade migration will start with R5 install and create a partition to align
+##  above so filesystems within the volume group will be able to maintain their
+##  sizes in R5
+##    - R5 install  : /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), un-partitioned (336G)
+##    - R5 (case #1): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (97G), unpartitioned (239G)
+##    - R5 (case #2): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (336G)
+##
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((240*$gb)) ] ; then
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 192528/1024=188.01. CGCS_PV_SIZE=189*1024=193536. Using a disk with a
+    # size under 189GiB will fail.
+    CGCS_PV_SIZE=193536
+else
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 233496/1024=228.02. CGCS_PV_SIZE=229*1024=234496.
+    CGCS_PV_SIZE=234496
+fi
+
+ROOTFS_SIZE=20000
+LOG_VOL_SIZE=8000
+SCRATCH_VOL_SIZE=8000
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --size=500 --maxsize=$CGCS_PV_SIZE --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_aio_lowlatency.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=controller
+subfunction=controller,worker,lowlatency
+system_type=All-in-one
+security_profile=$secprofile
+EOF
+
+%end
+
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_aio_and_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+# Update grub with custom kernel bootargs
+source /etc/init.d/cpumap_functions.sh
+n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
+  awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
+KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
+
+KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
+
+# If this is an all-in-one system, we need at least 4 CPUs
+if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
+    report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
+fi
+
+# Add kernel options for cpu isolation / affinity
+if [ ${n_cpus} -gt 1 ]
+then
+  base_cpulist=$(platform_expanded_cpu_list)
+  base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
+  avp_cpulist=$(vswitch_expanded_cpu_list)
+  norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
+  norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
+
+  if [[ "$subfunction" =~ lowlatency ]]; then
+    KERN_OPTS="${KERN_OPTS} isolcpus=${norcu_cpulist}"
+    KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
+  else
+    KERN_OPTS="${KERN_OPTS} isolcpus=${avp_cpulist}"
+  fi
+  KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
+  KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
+  KERN_OPTS="${KERN_OPTS} irqaffinity=${base_cpulist}"
+  # Update vswitch.conf
+  sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
+fi
+
+# Add kernel options to ensure selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to set NMI watchdog
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
+else
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+fi
+
+if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
+else
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on"
+fi
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+  KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# Add kernel options to disable kvm-intel.eptad on Broadwell
+# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
+then
+  KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# Add kernel option to avoid jiffies_lock contention on real-time kernel
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} skew_tick=1"
+fi
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_system_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+## Reserve more memory for base processes since the controller has higher
+## memory requirements but cap it to better handle systems with large
+## amounts of memory
+TOTALMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024)}')
+
+if [ -e /sys/devices/system/node/node0 ]; then
+  RESERVEDMEM=$(grep MemTotal /sys/devices/system/node/node0/meminfo | awk '{printf "%d\n", $4/1024}')
+else
+  RESERVEDMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024/4)}')
+fi
+
+if [ ${RESERVEDMEM} -lt 6144 ]; then
+    RESERVEDMEM=6144
+elif [ ${RESERVEDMEM} -gt 14500 ]; then
+    RESERVEDMEM=14500
+elif [ ${RESERVEDMEM} -gt 8192 ]; then
+    RESERVEDMEM=8192
+fi
+
+sed -i -e "s#\(WORKER_BASE_RESERVED\)=.*#\1=(\"node0:${RESERVEDMEM}MB:1\" \"node1:2000MB:0\" \"node2:2000MB:0\" \"node3:2000MB:0\")#g" /etc/platform/worker_reserved.conf
+
+# Update WORKER_CPU_LIST
+N_CPUS=$(cat /proc/cpuinfo 2>/dev/null | awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+sed -i "s/^WORKER_CPU_LIST=.*/WORKER_CPU_LIST=\"0-$((N_CPUS-1))\"/" /etc/platform/worker_reserved.conf
+
+%end
+
+
+# Template from: post_usb_controller.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+touch /tmp/repo-include
+
+if [ -d ${srcdir}/patches ]; then
+    echo "repo --name=updates --baseurl=file://${srcdir}/patches/" > /tmp/repo-include
+fi
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+mgmt_dev=none
+
+# Persist the boot device to the platform configuration. This will get
+# overwritten when config_controller is run.
+echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+# persist the default http port number to platform configuration. This
+# will get overwritten when config_controller is run.
+echo http_port=8080 >> /etc/platform/platform.conf
+
+# Build networking scripts
+cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+%end
+
+
+# Note, this section is different and replaced with a wget
+# if doing the initial install off the network
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -d $srcdir/Packages ] ; then
+    mkdir -p /mnt/sysimage/www/pages/feed/rel-19.12
+    cp -r $srcdir/Packages /mnt/sysimage/www/pages/feed/rel-19.12/Packages
+    cp -r $srcdir/repodata /mnt/sysimage/www/pages/feed/rel-19.12/repodata
+    cp $srcdir/*.cfg /mnt/sysimage/www/pages/feed/rel-19.12
+fi
+
+if [ -d $srcdir/patches ]; then
+    mkdir -p /mnt/sysimage/www/pages/updates/rel-19.12
+    cp -r $srcdir/patches/Packages /mnt/sysimage/www/pages/updates/rel-19.12/Packages
+    cp -r $srcdir/patches/repodata /mnt/sysimage/www/pages/updates/rel-19.12/repodata
+    mkdir -p /mnt/sysimage/opt/patching
+    cp -r $srcdir/patches/metadata /mnt/sysimage/opt/patching/metadata
+    mkdir -p /mnt/sysimage/opt/patching/packages/19.12
+    
+    find /mnt/sysimage/www/pages/updates/rel-19.12/Packages -name '*.rpm' \
+        | xargs --no-run-if-empty -I files cp --preserve=all files /mnt/sysimage/opt/patching/packages/19.12/
+fi
+
+# Create a uuid specific to this installation
+INSTALL_UUID=`uuidgen`
+echo $INSTALL_UUID > /mnt/sysimage/www/pages/feed/rel-19.12/install_uuid
+echo "INSTALL_UUID=$INSTALL_UUID" >> /mnt/sysimage/etc/platform/platform.conf
+%end
+
+%post
+
+# This is a USB install, so set ONBOOT=yes for network devices.
+# Doing this in the %post so we don't unintentionally set up a
+# network device during the installation.
+for f in /etc/sysconfig/network-scripts/ifcfg-*; do
+    if grep -q '^ONBOOT=' ${f}; then
+        sed -i 's/^ONBOOT=.*/ONBOOT=yes/' ${f}
+    else
+        echo "ONBOOT=yes" >> ${f}
+    fi
+    if grep -q '^IPV6_AUTOCONF=' ${f}; then
+        sed -i 's/^IPV6_AUTOCONF=.*/IPV6_AUTOCONF=no/' ${f}
+    else
+        echo "IPV6_AUTOCONF=no" >> ${f}
+    fi
+done
+
+%end
+
+
+# Template from: post_usb_addon.cfg
+%pre --erroronfail
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -f ${srcdir}/ks-addon.cfg ]; then
+    cp ${srcdir}/ks-addon.cfg /tmp/
+else
+    cat <<EOF > /tmp/ks-addon.cfg
+# No custom addon included
+EOF
+fi
+%end
+
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+# Store the ks-addon.cfg for debugging
+mkdir -p /mnt/sysimage/var/log/anaconda
+cp /tmp/ks-addon.cfg /mnt/sysimage/var/log/anaconda/
+%end
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+%include /tmp/ks-addon.cfg
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/controller_ks.cfg b/meta-stx/conf/distro/files/ks/controller_ks.cfg
new file mode 100644 (file)
index 0000000..6e1c504
--- /dev/null
@@ -0,0 +1,835 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_pkglist.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio-rt
+-kernel-module-wrs-avp-rt
+-kernel-rt
+-kernel-rt-kvm
+-kernel-rt-tools
+-kernel-rt-tools-libs
+-kmod-drbd-rt
+-kmod-e1000e-rt
+-kmod-i40e-rt
+-kmod-ixgbe-rt
+-kmod-tpm-rt
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-rt-modules
+-qat16-rt
+@platform-controller
+@updates-controller
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a globally unique identifier
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_controller.cfg
+
+## NOTE: updates to partition sizes need to be also reflected in
+## _controller_filesystem_limits() in sysinv/api/controllers/v1/istorconfig.py
+
+ROOTFS_SIZE=20000
+LOG_VOL_SIZE=8000
+SCRATCH_VOL_SIZE=8000
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --asprimary --size=500 --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_controller.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=controller
+subfunction=controller
+system_type=Standard
+security_profile=$secprofile
+EOF
+
+%end
+
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_controller.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+## Custom kernel options
+KERN_OPTS=" intel_iommu=off usbcore.autosuspend=-1"
+
+## Setup the loop module to support up to 15 partitions so that we can enable the
+## customer to manually resize images if needed.
+##
+KERN_OPTS="${KERN_OPTS} loop.max_part=15"
+
+## Add kernel options to ensure SELinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to ensure NMI watchdog is enabled, if supported
+KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+    KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# k8s updates
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_usb_controller.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+touch /tmp/repo-include
+
+if [ -d ${srcdir}/patches ]; then
+    echo "repo --name=updates --baseurl=file://${srcdir}/patches/" > /tmp/repo-include
+fi
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+mgmt_dev=none
+
+# Persist the boot device to the platform configuration. This will get
+# overwritten when config_controller is run.
+echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+# persist the default http port number to platform configuration. This
+# will get overwritten when config_controller is run.
+echo http_port=8080 >> /etc/platform/platform.conf
+
+# Build networking scripts
+cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+%end
+
+
+# Note, this section is different and replaced with a wget
+# if doing the initial install off the network
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -d $srcdir/Packages ] ; then
+    mkdir -p /mnt/sysimage/www/pages/feed/rel-19.12
+    cp -r $srcdir/Packages /mnt/sysimage/www/pages/feed/rel-19.12/Packages
+    cp -r $srcdir/repodata /mnt/sysimage/www/pages/feed/rel-19.12/repodata
+    cp $srcdir/*.cfg /mnt/sysimage/www/pages/feed/rel-19.12
+fi
+
+if [ -d $srcdir/patches ]; then
+    mkdir -p /mnt/sysimage/www/pages/updates/rel-19.12
+    cp -r $srcdir/patches/Packages /mnt/sysimage/www/pages/updates/rel-19.12/Packages
+    cp -r $srcdir/patches/repodata /mnt/sysimage/www/pages/updates/rel-19.12/repodata
+    mkdir -p /mnt/sysimage/opt/patching
+    cp -r $srcdir/patches/metadata /mnt/sysimage/opt/patching/metadata
+    mkdir -p /mnt/sysimage/opt/patching/packages/19.12
+    
+    find /mnt/sysimage/www/pages/updates/rel-19.12/Packages -name '*.rpm' \
+        | xargs --no-run-if-empty -I files cp --preserve=all files /mnt/sysimage/opt/patching/packages/19.12/
+fi
+
+# Create a uuid specific to this installation
+INSTALL_UUID=`uuidgen`
+echo $INSTALL_UUID > /mnt/sysimage/www/pages/feed/rel-19.12/install_uuid
+echo "INSTALL_UUID=$INSTALL_UUID" >> /mnt/sysimage/etc/platform/platform.conf
+%end
+
+%post
+
+# This is a USB install, so set ONBOOT=yes for network devices.
+# Doing this in the %post so we don't unintentionally setup a
+# network device during the installation.
+for f in /etc/sysconfig/network-scripts/ifcfg-*; do
+    if grep -q '^ONBOOT=' ${f}; then
+        sed -i 's/^ONBOOT=.*/ONBOOT=yes/' ${f}
+    else
+        echo "ONBOOT=yes" >> ${f}
+    fi
+    if grep -q '^IPV6_AUTOCONF=' ${f}; then
+        sed -i 's/^IPV6_AUTOCONF=.*/IPV6_AUTOCONF=no/' ${f}
+    else
+        echo "IPV6_AUTOCONF=no" >> ${f}
+    fi
+done
+
+%end
+
+
+# Template from: post_usb_addon.cfg
+%pre --erroronfail
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -f ${srcdir}/ks-addon.cfg ]; then
+    cp ${srcdir}/ks-addon.cfg /tmp/
+else
+    cat <<EOF > /tmp/ks-addon.cfg
+# No custom addon included
+EOF
+fi
+%end
+
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+# Store the ks-addon.cfg for debugging
+mkdir -p /mnt/sysimage/var/log/anaconda
+cp /tmp/ks-addon.cfg /mnt/sysimage/var/log/anaconda/
+%end
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+%include /tmp/ks-addon.cfg
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/net_controller_ks.cfg b/meta-stx/conf/distro/files/ks/net_controller_ks.cfg
new file mode 100644 (file)
index 0000000..c51c2b0
--- /dev/null
@@ -0,0 +1,937 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_net_common.cfg
+%pre
+
+# Setup ntp.conf and sync time
+cat <<EOF >/etc/ntp_kickstart.conf
+server pxecontroller
+EOF
+
+/usr/sbin/ntpd -g -q -n -c /etc/ntp_kickstart.conf
+if [ $? -eq 0 ]; then
+    /sbin/hwclock --systohc --utc
+fi
+
+%end
+
+
+# Template from: pre_pkglist.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio-rt
+-kernel-module-wrs-avp-rt
+-kernel-rt
+-kernel-rt-kvm
+-kernel-rt-tools
+-kernel-rt-tools-libs
+-kmod-drbd-rt
+-kmod-e1000e-rt
+-kmod-i40e-rt
+-kmod-ixgbe-rt
+-kmod-tpm-rt
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-rt-modules
+-qat16-rt
+@platform-controller
+@updates-controller
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+#
+# %pre: pick the install (rootfs) and boot disks, refuse USB targets, wipe
+# the selected disks, and generate the Anaconda partitioning directives
+# into /tmp/part-include (consumed below via "%include /tmp/part-include").
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+# (every name=value token becomes a shell variable, e.g. rootfs_device
+# and boot_device as passed by pxelinux/grub)
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+# readlink returns a relative target such as "../../sda1"; dropping the
+# last character gives the whole-disk form ("../../sda").
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+# NOTE(review): the -e test resolves the relative "../../sdX" through
+# /dev/disk/by-label/ back to /dev/sdX. A multi-digit partition suffix
+# (e.g. nvme0n1p12) is only partially stripped — confirm acceptable for
+# the supported boot media.
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+# (by-path names are stable across reboots, unlike sdX enumeration order)
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+# Ask sysinv on the active controller whether this node is being
+# reinstalled as part of an upgrade.
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a unique globally identifier
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        # Only consider whole disks; skip partitions and non-disk devices
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                # flock: serialize sgdisk access to the device node
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    # (34 x 512-byte sectors covers the protective MBR + GPT header + entry array)
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+# NOTE(review): gb is not referenced in this section; presumably kept for
+# templates that size partitions in GB — verify before removing.
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+# Boot partition style depends on firmware: an EFI system partition for
+# UEFI, or a 1 MiB biosboot partition for GPT/BIOS grub.
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_controller.cfg
+
+## NOTE: updates to partition sizes need to be also reflected in
+## _controller_filesystem_limits() in sysinv/api/controllers/v1/istorconfig.py
+
+# Sizes below are in MiB (kickstart part/logvol --size convention)
+ROOTFS_SIZE=20000
+LOG_VOL_SIZE=8000
+SCRATCH_VOL_SIZE=8000
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+# Controller layout: /boot + one growable PV (pv.253004) holding cgts-vg
+# with log/scratch logical volumes, plus the root filesystem.
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --asprimary --size=500 --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_controller.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=controller
+subfunction=controller
+system_type=Standard
+security_profile=$secprofile
+EOF
+
+%end
+
+
+# Template from: post_common.cfg
+# %post: node-generic configuration — locale/getty/audit handling, smart
+# package channels, persistent NIC naming, account lockdown and service
+# enable/disable, and the first-boot flag.
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+# (rebuilt from scratch: system RPM database, controller feed, patch repo;
+# http_port comes from the platform.conf sourced above)
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+# Only physical interfaces have a "device" entry under /sys/class/net
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+# (forces a password change on first login)
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+# NOTE(review): presumably checked by first-boot handling after install —
+# verify the consumer before changing the path.
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_controller.cfg
+# %post: append the platform kernel arguments to GRUB_CMDLINE_LINUX and
+# regenerate the grub configuration for the installed system.
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+## Custom kernel options
+KERN_OPTS=" intel_iommu=off usbcore.autosuspend=-1"
+
+## Setup the loop module to support up to 15 partitions so that we can enable the
+## customer to manually resize images if needed.
+##
+KERN_OPTS="${KERN_OPTS} loop.max_part=15"
+
+## Add kernel options to ensure selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to ensure NMI watchdog is enabled, if supported
+KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+    KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# k8s updates
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+# Splice $KERN_OPTS in just before the closing quote of GRUB_CMDLINE_LINUX
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+# Regenerate grub.cfg in the firmware-appropriate location
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+# %post: restrict LVM device scanning to the root disk that carries the
+# cgts-vg physical volume, so other disks' metadata is ignored at runtime.
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+# (matches the stock commented-out line immediately after its description)
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+# (accept the root disk, reject everything else)
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_net_controller.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+# Retrieve the installation uuid from the controller we booted from
+INSTALL_UUID=`curl -sf http://pxecontroller:${http_port:-8080}/feed/rel-19.12/install_uuid`
+if [ $? -ne 0 ]
+then
+  INSTALL_UUID=unknown
+fi
+
+grep -q INSTALL_UUID /etc/platform/platform.conf
+if [ $? -ne 0 ]; then
+    echo "INSTALL_UUID=$INSTALL_UUID" >> /etc/platform/platform.conf
+fi
+
+cd /www/pages
+# Sync software repository
+feed_url=http://pxecontroller:${http_port:-8080}/feed/
+anaconda_logdir=/var/log/anaconda
+mkdir -p $anaconda_logdir
+
+echo "Mirroring software repository (may take several minutes)..." >/dev/console
+wget --recursive --no-parent --no-host-directories --no-clobber --reject 'index.html*' --reject '*.log' $feed_url/ -o $anaconda_logdir/wget-feed-mirror.log \
+    || report_post_failure_with_logfile $anaconda_logdir/wget-feed-mirror.log
+
+# Sync patching repository
+updates_url=http://pxecontroller:${http_port:-8080}/updates/
+wget --mirror --no-parent --no-host-directories --reject 'index.html*' --reject '*.log' $updates_url/ -o $anaconda_logdir/wget-updates-mirror.log \
+    || report_post_failure_with_logfile $anaconda_logdir/wget-updates-mirror.log
+echo "Done" >/dev/console
+
+shopt -s nullglob
+
+# Check whether a second release is installed
+. /etc/build.info
+CURRENT_REL_DIR=rel-${SW_VERSION}
+OTHER_REL_DIR=
+for REL_DIR in /www/pages/feed/*; do
+    if [[ ! $REL_DIR =~ "${SW_VERSION}" ]]; then
+        OTHER_REL_DIR=`basename $REL_DIR`
+        OTHER_REL_VERSION=${OTHER_REL_DIR:4}
+        break
+    fi
+done
+
+# If second release is installed, find the latest version of the installer
+# RPM and install the pxeboot files we require to boot hosts with that release.
+if [ ! -z "$OTHER_REL_DIR" ]; then
+    PATCH_RPM=`find /www/pages/updates/${OTHER_REL_DIR}/Packages -name 'pxe-network-installer*' | sort -V | tail -1`
+    BASE_RPM=`find /www/pages/feed/${OTHER_REL_DIR}/Packages -name 'pxe-network-installer*' | sort -V | tail -1`
+
+    if [ ! -z "$PATCH_RPM" ]; then
+        INSTALL_RPM=$PATCH_RPM
+    elif [ ! -z "$BASE_RPM" ]; then
+        INSTALL_RPM=$BASE_RPM
+    else
+        report_post_failure_with_msg "ERROR: Unable to find pxe-network-installer RPM for $OTHER_REL_DIR. Aborting installation."
+    fi
+
+    echo "Installing pxeboot files for release $OTHER_REL_DIR from $INSTALL_RPM" >/dev/console
+    TMP_RPM=/tmp/pxe-network-installer
+    mkdir $TMP_RPM
+    pushd $TMP_RPM
+    /usr/bin/rpm2cpio $INSTALL_RPM | cpio -idm \
+        || report_post_failure_with_msg "Failed to extract pxe-network-installer"
+
+    cp -r $TMP_RPM/usr / \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer /usr"
+    cp -r $TMP_RPM/pxeboot/$OTHER_REL_DIR /pxeboot/ \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer /pxeboot/$OTHER_REL_DIR"
+    cp $TMP_RPM/pxeboot/pxelinux.cfg.files/*-$OTHER_REL_VERSION /pxeboot/pxelinux.cfg.files/ \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer pxelinux.cfg files"
+
+    rm -rf $TMP_RPM
+fi
+
+%end
+
+# Template from: post_net_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+echo "repo --name=base --baseurl=http://pxecontroller:${http_port:-8080}/feed/rel-19.12/" > /tmp/repo-include
+echo "repo --name=updates --baseurl=http://pxecontroller:${http_port:-8080}/updates/rel-19.12/" > /tmp/repo-include
+
+%end
+
+# Repository arguments from %pre
+# (pulls in the repo directives generated into /tmp/repo-include above)
+%include /tmp/repo-include
+
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Persist the http port to the platform configuration
+echo http_port=$(get_http_port) >> /etc/platform/platform.conf
+
+# Obtain the boot interface from the PXE boot
+BOOTIF=`cat /proc/cmdline |xargs -n1 echo |grep BOOTIF=`
+if [ -d /sys/firmware/efi ] ; then
+    BOOTIF=${BOOTIF#BOOTIF=}
+else
+    BOOTIF=${BOOTIF#BOOTIF=01-}
+    BOOTIF=${BOOTIF//-/:}
+fi
+
+mgmt_dev=none
+mgmt_vlan=0
+if [ -n "$BOOTIF" ] ; then
+    ndev=`ip link show |grep -B 1 $BOOTIF |head -1 |awk '{print $2}' |sed -e 's/://'`
+    if [ -n "$ndev" ] ; then
+        mgmt_dev=$ndev
+        # Retrieve the management VLAN from sysinv if it exists
+        mgmt_vlan=`curl -sf http://pxecontroller:6385/v1/isystems/mgmtvlan`
+        if [ $? -ne 0 ]
+        then
+          report_post_failure_with_msg "ERROR: Unable to communicate with System Inventory REST API. Aborting installation."
+        fi
+    else
+        report_post_failure_with_msg "ERROR: Unable to determine mgmt interface from BOOTIF=$BOOTIF."
+    fi
+else
+    report_post_failure_with_msg "ERROR: BOOTIF is not set. Unable to determine mgmt interface."
+fi
+
+if [ $mgmt_vlan -eq 0 ] ; then
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=dhcp
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+else
+
+    # Check whether to use inet or inet6
+    ipv6_addr=$(dig +short AAAA controller)
+    if [[ -n "$ipv6_addr" ]]
+    then
+        mgmt_address_family=inet6
+        ipv6init=yes
+        dhcpv6c=yes
+        dhclientargs=-1
+    else
+        mgmt_address_family=inet
+        ipv6init=no
+        dhcpv6c=no
+        dhclientargs=
+    fi
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=vlan$mgmt_vlan >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=none
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-vlan$mgmt_vlan
+DEVICE=vlan$mgmt_vlan
+BOOTPROTO=dhcp
+DHCLIENTARGS=$dhclientargs
+IPV6INIT=$ipv6init
+DHCPV6C=$dhcpv6c
+ONBOOT=yes
+IPV6_AUTOCONF=no
+PHYSDEV=$mgmt_dev
+VLAN=yes
+LINKDELAY=20
+EOF
+
+    # Reject DHCPOFFER from DHCP server that doesn't send
+    # wrs-install-uuid option
+    echo "require wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+    echo "require dhcp6.wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+
+    # Bring up the mgmt vlan so that a dhcp lease is acquired and an address is
+    # setup prior to the post-install reboot.  This is so that the timing of the IP
+    # address allocation is similar to how normal/non-pxe installation works.
+    mgmt_iface=vlan$mgmt_vlan
+    dhclient_family=$([[ $mgmt_address_family == "inet" ]] && echo -4 || echo -6)
+    ip link add link $mgmt_dev name $mgmt_iface type vlan id $mgmt_vlan
+    ip link set up dev $mgmt_iface
+    dhclient $dhclient_family $mgmt_iface || true
+
+fi
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/net_smallsystem_ks.cfg b/meta-stx/conf/distro/files/ks/net_smallsystem_ks.cfg
new file mode 100644 (file)
index 0000000..1b425b0
--- /dev/null
@@ -0,0 +1,1121 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+# %pre: build the bootloader and timezone directives from kernel command
+# line arguments (console, security_profile, tboot, boot_device).
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+# (every name=value token becomes a shell variable)
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Carry the install-time console setting over to the installed system
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+# Consumed below via "%include /tmp/bootloader-include" and
+# "%include /tmp/timezone-include"
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+# Main kickstart command section; the %include files below are generated
+# by the %pre sections of this kickstart.
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+# Eject the install media and reboot once installation completes
+reboot --eject
+
+
+# Template from: pre_net_common.cfg
+%pre
+
+# Setup ntp.conf and sync time
+cat <<EOF >/etc/ntp_kickstart.conf
+server pxecontroller
+EOF
+
+/usr/sbin/ntpd -g -q -n -c /etc/ntp_kickstart.conf
+if [ $? -eq 0 ]; then
+    /sbin/hwclock --systohc --utc
+fi
+
+%end
+
+
+# Template from: pre_pkglist.cfg
+# Package selection: core/base plus the StarlingX controller+worker groups.
+# The realtime (-rt) kernel and driver variants are explicitly excluded —
+# this is the standard (non-lowlatency) small-system kickstart.
+%packages
+@core
+@base
+-kernel-module-igb-uio-rt
+-kernel-module-wrs-avp-rt
+-kernel-rt
+-kernel-rt-kvm
+-kernel-rt-tools
+-kernel-rt-tools-libs
+-kmod-drbd-rt
+-kmod-e1000e-rt
+-kmod-i40e-rt
+-kmod-ixgbe-rt
+-kmod-tpm-rt
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-rt-modules
+-qat16-rt
+@platform-controller-worker
+@updates-controller-worker
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a unique globally identifier
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_aio.cfg
+
+## NOTE: updates to partition sizes need to be also reflected in
+##  - stx-config/.../sysinv/conductor/manager.py:create_controller_filesystems()
+##  - stx-config/.../sysinv/common/constants.py
+##
+## NOTE: When adding partitions, we currently have a max of 4 primary partitions.
+##       If more than 4 partitions are required, we can use a max of 3 --asprimary,
+##       to allow 1 primary logical partition with extended partitions
+##
+## NOTE: Max default PV size must align with the default controllerfs sizes
+##
+## BACKUP_OVERHEAD = 20
+##
+## Physical install (for disks over 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_IMAGE_STOR_SIZE = 10
+## DEFAULT_DATABASE_STOR_SIZE = 20
+## DEFAULT_IMG_CONVERSION_STOR_SIZE = 20
+## BACKUP = DEFAULT_DATABASE_STOR_SIZE + DEFAULT_IMAGE_STOR_SIZE
+##                                     + BACKUP_OVERHEAD = 50
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+## CGCS_PV_SIZE = 10240 + 2*20480 + 20480 + 51200 + 8196 + 8196 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 233496
+##
+## small install - (for disks below 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_SMALL_IMAGE_STOR_SIZE = 10
+## DEFAULT_SMALL_DATABASE_STOR_SIZE = 10
+## DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = 10
+## DEFAULT_SMALL_BACKUP_STOR_SIZE = 40
+##
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+##
+## CGCS_PV_SIZE = 10240 + 2*10240 + 10240 + 40960 + 8192 + 8192 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 192528
+##
+## NOTE: To maintain upgrade compatibility within the volume group, keep the
+## undersized LOG_VOL_SIZE and SCRATCH_VOL_SIZE, but size the minimally sized
+## physical volume correctly.
+##
+##  R4 AIO installations:
+##  - R4 (case #1): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), /local_pv (239G)
+##  - R4 (case #2): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), cgts-vg (239G)
+##
+##  Upgrade migration will start with R5 install and create a partition to align
+##  above so filesystems within the volume group will be able to maintain their
+##  sizes in R5
+##    - R5 install  : /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), un-partitioned (336G)
+##    - R5 (case #1): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (97G), unpartitioned (239G)
+##    - R5 (case #2): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (336G)
+##
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((240*$gb)) ] ; then
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 192528/1024=188.01. CGCS_PV_SIZE=189*1024=193536. Using a disk with a
+    # size under 189GiB will fail.
+    CGCS_PV_SIZE=193536
+else
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 233496/1024=228.02. CGCS_PV_SIZE=229*1024=234496.
+    CGCS_PV_SIZE=234496
+fi
+
+ROOTFS_SIZE=20000
+LOG_VOL_SIZE=8000
+SCRATCH_VOL_SIZE=8000
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --size=500 --maxsize=$CGCS_PV_SIZE --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=controller
+subfunction=controller,worker
+system_type=All-in-one
+security_profile=$secprofile
+EOF
+
+%end
+
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_aio_and_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+# Update grub with custom kernel bootargs
+source /etc/init.d/cpumap_functions.sh
+n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
+  awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
+KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
+
+KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
+
+# If this is an all-in-one system, we need at least 4 CPUs
+if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
+    report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
+fi
+
+# Add kernel options for cpu isolation / affinity
+if [ ${n_cpus} -gt 1 ]
+then
+  base_cpulist=$(platform_expanded_cpu_list)
+  base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
+  avp_cpulist=$(vswitch_expanded_cpu_list)
+  norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
+  norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
+
+  if [[ "$subfunction" =~ lowlatency ]]; then
+    KERN_OPTS="${KERN_OPTS} isolcpus=${norcu_cpulist}"
+    KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
+  else
+    KERN_OPTS="${KERN_OPTS} isolcpus=${avp_cpulist}"
+  fi
+  KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
+  KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
+  KERN_OPTS="${KERN_OPTS} irqaffinity=${base_cpulist}"
+  # Update vswitch.conf
+  sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
+fi
+
+# Add kernel options to ensure selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to set NMI watchdog
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
+else
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+fi
+
+if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
+else
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on"
+fi
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+  KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# Add kernel options to disable kvm-intel.eptad on Broadwell
+# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
+then
+  KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# Add kernel option to avoid jiffies_lock contention on real-time kernel
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} skew_tick=1"
+fi
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_system_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+## Reserve more memory for base processes since the controller has higher
+## memory requirements but cap it to better handle systems with large
+## amounts of memory
+TOTALMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024)}')
+
+if [ -e /sys/devices/system/node/node0 ]; then
+  RESERVEDMEM=$(grep MemTotal /sys/devices/system/node/node0/meminfo | awk '{printf "%d\n", $4/1024}')
+else
+  RESERVEDMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024/4)}')
+fi
+
+if [ ${RESERVEDMEM} -lt 6144 ]; then
+    RESERVEDMEM=6144
+elif [ ${RESERVEDMEM} -gt 14500 ]; then
+    RESERVEDMEM=14500
+elif [ ${RESERVEDMEM} -gt 8192 ]; then
+    RESERVEDMEM=8192
+fi
+
+sed -i -e "s#\(WORKER_BASE_RESERVED\)=.*#\1=(\"node0:${RESERVEDMEM}MB:1\" \"node1:2000MB:0\" \"node2:2000MB:0\" \"node3:2000MB:0\")#g" /etc/platform/worker_reserved.conf
+
+# Update WORKER_CPU_LIST
+N_CPUS=$(cat /proc/cpuinfo 2>/dev/null | awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+sed -i "s/^WORKER_CPU_LIST=.*/WORKER_CPU_LIST=\"0-$((N_CPUS-1))\"/" /etc/platform/worker_reserved.conf
+
+%end
+
+
+# Template from: post_net_controller.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+# Retrieve the installation uuid from the controller we booted from
+INSTALL_UUID=`curl -sf http://pxecontroller:${http_port:-8080}/feed/rel-19.12/install_uuid`
+if [ $? -ne 0 ]
+then
+  INSTALL_UUID=unknown
+fi
+
+grep -q INSTALL_UUID /etc/platform/platform.conf
+if [ $? -ne 0 ]; then
+    echo "INSTALL_UUID=$INSTALL_UUID" >> /etc/platform/platform.conf
+fi
+
+cd /www/pages
+# Sync software repository
+feed_url=http://pxecontroller:${http_port:-8080}/feed/
+anaconda_logdir=/var/log/anaconda
+mkdir -p $anaconda_logdir
+
+echo "Mirroring software repository (may take several minutes)..." >/dev/console
+wget --recursive --no-parent --no-host-directories --no-clobber --reject 'index.html*' --reject '*.log' $feed_url/ -o $anaconda_logdir/wget-feed-mirror.log \
+    || report_post_failure_with_logfile $anaconda_logdir/wget-feed-mirror.log
+
+# Sync patching repository
+updates_url=http://pxecontroller:${http_port:-8080}/updates/
+wget --mirror --no-parent --no-host-directories --reject 'index.html*' --reject '*.log' $updates_url/ -o $anaconda_logdir/wget-updates-mirror.log \
+    || report_post_failure_with_logfile $anaconda_logdir/wget-updates-mirror.log
+echo "Done" >/dev/console
+
+shopt -s nullglob
+
+# Check whether a second release is installed
+. /etc/build.info
+CURRENT_REL_DIR=rel-${SW_VERSION}
+OTHER_REL_DIR=
+for REL_DIR in /www/pages/feed/*; do
+    if [[ ! $REL_DIR =~ "${SW_VERSION}" ]]; then
+        OTHER_REL_DIR=`basename $REL_DIR`
+        OTHER_REL_VERSION=${OTHER_REL_DIR:4}
+        break
+    fi
+done
+
+# If second release is installed, find the latest version of the installer
+# RPM and install the pxeboot files we require to boot hosts with that release.
+if [ ! -z "$OTHER_REL_DIR" ]; then
+    PATCH_RPM=`find /www/pages/updates/${OTHER_REL_DIR}/Packages -name 'pxe-network-installer*' | sort -V | tail -1`
+    BASE_RPM=`find /www/pages/feed/${OTHER_REL_DIR}/Packages -name 'pxe-network-installer*' | sort -V | tail -1`
+
+    if [ ! -z "$PATCH_RPM" ]; then
+        INSTALL_RPM=$PATCH_RPM
+    elif [ ! -z "$BASE_RPM" ]; then
+        INSTALL_RPM=$BASE_RPM
+    else
+        report_post_failure_with_msg "ERROR: Unable to find pxe-network-installer RPM for $OTHER_REL_DIR. Aborting installation."
+    fi
+
+    echo "Installing pxeboot files for release $OTHER_REL_DIR from $INSTALL_RPM" >/dev/console
+    TMP_RPM=/tmp/pxe-network-installer
+    mkdir $TMP_RPM
+    pushd $TMP_RPM
+    /usr/bin/rpm2cpio $INSTALL_RPM | cpio -idm \
+        || report_post_failure_with_msg "Failed to extract pxe-network-installer"
+
+    cp -r $TMP_RPM/usr / \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer /usr"
+    cp -r $TMP_RPM/pxeboot/$OTHER_REL_DIR /pxeboot/ \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer /pxeboot/$OTHER_REL_DIR"
+    cp $TMP_RPM/pxeboot/pxelinux.cfg.files/*-$OTHER_REL_VERSION /pxeboot/pxelinux.cfg.files/ \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer pxelinux.cfg files"
+
+    rm -rf $TMP_RPM
+fi
+
+%end
+
+# Template from: post_net_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+echo "repo --name=base --baseurl=http://pxecontroller:${http_port:-8080}/feed/rel-19.12/" > /tmp/repo-include
+echo "repo --name=updates --baseurl=http://pxecontroller:${http_port:-8080}/updates/rel-19.12/" >> /tmp/repo-include
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Persist the http port to the platform configuration
+echo http_port=$(get_http_port) >> /etc/platform/platform.conf
+
+# Obtain the boot interface from the PXE boot
+BOOTIF=`cat /proc/cmdline |xargs -n1 echo |grep BOOTIF=`
+if [ -d /sys/firmware/efi ] ; then
+    BOOTIF=${BOOTIF#BOOTIF=}
+else
+    BOOTIF=${BOOTIF#BOOTIF=01-}
+    BOOTIF=${BOOTIF//-/:}
+fi
+
+mgmt_dev=none
+mgmt_vlan=0
+if [ -n "$BOOTIF" ] ; then
+    ndev=`ip link show |grep -B 1 $BOOTIF |head -1 |awk '{print $2}' |sed -e 's/://'`
+    if [ -n "$ndev" ] ; then
+        mgmt_dev=$ndev
+        # Retrieve the management VLAN from sysinv if it exists
+        mgmt_vlan=`curl -sf http://pxecontroller:6385/v1/isystems/mgmtvlan`
+        if [ $? -ne 0 ]
+        then
+          report_post_failure_with_msg "ERROR: Unable to communicate with System Inventory REST API. Aborting installation."
+        fi
+    else
+        report_post_failure_with_msg "ERROR: Unable to determine mgmt interface from BOOTIF=$BOOTIF."
+    fi
+else
+    report_post_failure_with_msg "ERROR: BOOTIF is not set. Unable to determine mgmt interface."
+fi
+
+if [ $mgmt_vlan -eq 0 ] ; then
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=dhcp
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+else
+
+    # Check whether to use inet or inet6
+    ipv6_addr=$(dig +short AAAA controller)
+    if [[ -n "$ipv6_addr" ]]
+    then
+        mgmt_address_family=inet6
+        ipv6init=yes
+        dhcpv6c=yes
+        dhclientargs=-1
+    else
+        mgmt_address_family=inet
+        ipv6init=no
+        dhcpv6c=no
+        dhclientargs=
+    fi
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=vlan$mgmt_vlan >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=none
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-vlan$mgmt_vlan
+DEVICE=vlan$mgmt_vlan
+BOOTPROTO=dhcp
+DHCLIENTARGS=$dhclientargs
+IPV6INIT=$ipv6init
+DHCPV6C=$dhcpv6c
+ONBOOT=yes
+IPV6_AUTOCONF=no
+PHYSDEV=$mgmt_dev
+VLAN=yes
+LINKDELAY=20
+EOF
+
+    # Reject DHCPOFFER from DHCP server that doesn't send
+    # wrs-install-uuid option
+    echo "require wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+    echo "require dhcp6.wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+
+    # Bring up the mgmt vlan so that a dhcp lease is acquired and an address is
+    # setup prior to the post-install reboot.  This is so that the timing of the IP
+    # address allocation is similar to how normal/non-pxe installation works.
+    mgmt_iface=vlan$mgmt_vlan
+    dhclient_family=$([[ $mgmt_address_family == "inet" ]] && echo -4 || echo -6)
+    ip link add link $mgmt_dev name $mgmt_iface type vlan id $mgmt_vlan
+    ip link set up dev $mgmt_iface
+    dhclient $dhclient_family $mgmt_iface || true
+
+fi
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/net_smallsystem_lowlatency_ks.cfg b/meta-stx/conf/distro/files/ks/net_smallsystem_lowlatency_ks.cfg
new file mode 100644 (file)
index 0000000..8b136a8
--- /dev/null
@@ -0,0 +1,1120 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_net_common.cfg
+%pre
+
+# Setup ntp.conf and sync time
+cat <<EOF >/etc/ntp_kickstart.conf
+server pxecontroller
+EOF
+
+/usr/sbin/ntpd -g -q -n -c /etc/ntp_kickstart.conf
+if [ $? -eq 0 ]; then
+    /sbin/hwclock --systohc --utc
+fi
+
+%end
+
+
+# Template from: pre_pkglist_lowlatency.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio
+-kernel-module-wrs-avp
+-kernel
+-kernel-tools
+-kernel-tools-libs
+-kmod-drbd
+-kmod-e1000e
+-kmod-i40e
+-kmod-ixgbe
+-kmod-tpm
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-modules
+-qat16
+@platform-controller-worker-lowlatency
+@updates-controller-worker-lowlatency
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Ceph OSD partitions are identified by a unique partition type GUID
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, which save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_aio.cfg
+
+## NOTE: updates to partition sizes need to be also reflected in
+##  - stx-config/.../sysinv/conductor/manager.py:create_controller_filesystems()
+##  - stx-config/.../sysinv/common/constants.py
+##
+## NOTE: When adding partitions, we currently have a max of 4 primary partitions.
+##       If more than 4 partitions are required, we can use a max of 3 --asprimary,
+##       to allow 1 primary logical partition with extended partitions
+##
+## NOTE: Max default PV size must align with the default controllerfs sizes
+##
+## BACKUP_OVERHEAD = 20
+##
+## Physical install (for disks over 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_IMAGE_STOR_SIZE = 10
+## DEFAULT_DATABASE_STOR_SIZE = 20
+## DEFAULT_IMG_CONVERSION_STOR_SIZE = 20
+## BACKUP = DEFAULT_DATABASE_STOR_SIZE + DEFAULT_IMAGE_STOR_SIZE
+##                                     + BACKUP_OVERHEAD = 50
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+## CGCS_PV_SIZE = 10240 + 2*20480 + 20480 + 51200 + 8196 + 8196 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 233496
+##
+## small install - (for disks below 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_SMALL_IMAGE_STOR_SIZE = 10
+## DEFAULT_SMALL_DATABASE_STOR_SIZE = 10
+## DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = 10
+## DEFAULT_SMALL_BACKUP_STOR_SIZE = 40
+##
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+##
+## CGCS_PV_SIZE = 10240 + 2*10240 + 10240 + 40960 + 8192 + 8192 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 192528
+##
+## NOTE: To maintain upgrade compatibility within the volume group, keep the
+## undersized LOG_VOL_SIZE and SCRATCH_VOL_SIZE, but size the minimally sized
+## physical volume correctly.
+##
+##  R4 AIO installations:
+##  - R4 (case #1): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), /local_pv (239G)
+##  - R4 (case #2): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), cgts-vg (239G)
+##
+##  Upgrade migration will start with R5 install and create a partition to align
+##  above so filesystems within the volume group will be able to maintain their
+##  sizes in R5
+##    - R5 install  : /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), un-partitioned (336G)
+##    - R5 (case #1): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (97G), unpartitioned (239G)
+##    - R5 (case #2): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (336G)
+##
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((240*$gb)) ] ; then
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 192528/1024=188.01. CGCS_PV_SIZE=189*1024=193536. Using a disk with a
+    # size under 189GiB will fail.
+    CGCS_PV_SIZE=193536
+else
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 233496/1024=228.02. CGCS_PV_SIZE=229*1024=234496.
+    CGCS_PV_SIZE=234496
+fi
+
+ROOTFS_SIZE=20000
+LOG_VOL_SIZE=8000
+SCRATCH_VOL_SIZE=8000
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --size=500 --maxsize=$CGCS_PV_SIZE --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_aio_lowlatency.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=controller
+subfunction=controller,worker,lowlatency
+system_type=All-in-one
+security_profile=$secprofile
+EOF
+
+%end
+
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_aio_and_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+# Update grub with custom kernel bootargs
+source /etc/init.d/cpumap_functions.sh
+n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
+  awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
+KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
+
+KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
+
+# If this is an all-in-one system, we need at least 4 CPUs
+if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
+    report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
+fi
+
+# Add kernel options for cpu isolation / affinity
+if [ ${n_cpus} -gt 1 ]
+then
+  base_cpulist=$(platform_expanded_cpu_list)
+  base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
+  avp_cpulist=$(vswitch_expanded_cpu_list)
+  norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
+  norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
+
+  if [[ "$subfunction" =~ lowlatency ]]; then
+    KERN_OPTS="${KERN_OPTS} isolcpus=${norcu_cpulist}"
+    KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
+  else
+    KERN_OPTS="${KERN_OPTS} isolcpus=${avp_cpulist}"
+  fi
+  KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
+  KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
+  KERN_OPTS="${KERN_OPTS} irqaffinity=${base_cpulist}"
+  # Update vswitch.conf
+  sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
+fi
+
+# Add kernel options to ensure SELinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to set NMI watchdog
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
+else
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+fi
+
+if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
+else
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on"
+fi
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+  KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# Add kernel options to disable kvm-intel.eptad on Broadwell
+# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
+then
+  KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# Add kernel option to avoid jiffies_lock contention on real-time kernel
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} skew_tick=1"
+fi
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_system_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+## Reserve more memory for base processes since the controller has higher
+## memory requirements but cap it to better handle systems with large
+## amounts of memory
+TOTALMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024)}')
+
+if [ -e /sys/devices/system/node/node0 ]; then
+  RESERVEDMEM=$(grep MemTotal /sys/devices/system/node/node0/meminfo | awk '{printf "%d\n", $4/1024}')
+else
+  RESERVEDMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024/4)}')
+fi
+
+if [ ${RESERVEDMEM} -lt 6144 ]; then
+    RESERVEDMEM=6144
+elif [ ${RESERVEDMEM} -gt 14500 ]; then
+    RESERVEDMEM=14500
+elif [ ${RESERVEDMEM} -gt 8192 ]; then
+    RESERVEDMEM=8192
+fi
+
+sed -i -e "s#\(WORKER_BASE_RESERVED\)=.*#\1=(\"node0:${RESERVEDMEM}MB:1\" \"node1:2000MB:0\" \"node2:2000MB:0\" \"node3:2000MB:0\")#g" /etc/platform/worker_reserved.conf
+
+# Update WORKER_CPU_LIST
+N_CPUS=$(cat /proc/cpuinfo 2>/dev/null | awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+sed -i "s/^WORKER_CPU_LIST=.*/WORKER_CPU_LIST=\"0-$((N_CPUS-1))\"/" /etc/platform/worker_reserved.conf
+
+%end
+
+
+# Template from: post_net_controller.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+# Retrieve the installation uuid from the controller we booted from
+INSTALL_UUID=`curl -sf http://pxecontroller:${http_port:-8080}/feed/rel-19.12/install_uuid`
+if [ $? -ne 0 ]
+then
+  INSTALL_UUID=unknown
+fi
+
+grep -q INSTALL_UUID /etc/platform/platform.conf
+if [ $? -ne 0 ]; then
+    echo "INSTALL_UUID=$INSTALL_UUID" >> /etc/platform/platform.conf
+fi
+
+cd /www/pages
+# Sync software repository
+feed_url=http://pxecontroller:${http_port:-8080}/feed/
+anaconda_logdir=/var/log/anaconda
+mkdir -p $anaconda_logdir
+
+echo "Mirroring software repository (may take several minutes)..." >/dev/console
+wget --recursive --no-parent --no-host-directories --no-clobber --reject 'index.html*' --reject '*.log' $feed_url/ -o $anaconda_logdir/wget-feed-mirror.log \
+    || report_post_failure_with_logfile $anaconda_logdir/wget-feed-mirror.log
+
+# Sync patching repository
+updates_url=http://pxecontroller:${http_port:-8080}/updates/
+wget --mirror --no-parent --no-host-directories --reject 'index.html*' --reject '*.log' $updates_url/ -o $anaconda_logdir/wget-updates-mirror.log \
+    || report_post_failure_with_logfile $anaconda_logdir/wget-updates-mirror.log
+echo "Done" >/dev/console
+
+shopt -s nullglob
+
+# Check whether a second release is installed
+. /etc/build.info
+CURRENT_REL_DIR=rel-${SW_VERSION}
+OTHER_REL_DIR=
+for REL_DIR in /www/pages/feed/*; do
+    if [[ ! $REL_DIR =~ "${SW_VERSION}" ]]; then
+        OTHER_REL_DIR=`basename $REL_DIR`
+        OTHER_REL_VERSION=${OTHER_REL_DIR:4}
+        break
+    fi
+done
+
+# If second release is installed, find the latest version of the installer
+# RPM and install the pxeboot files we require to boot hosts with that release.
+if [ ! -z "$OTHER_REL_DIR" ]; then
+    PATCH_RPM=`find /www/pages/updates/${OTHER_REL_DIR}/Packages -name 'pxe-network-installer*' | sort -V | tail -1`
+    BASE_RPM=`find /www/pages/feed/${OTHER_REL_DIR}/Packages -name 'pxe-network-installer*' | sort -V | tail -1`
+
+    if [ ! -z "$PATCH_RPM" ]; then
+        INSTALL_RPM=$PATCH_RPM
+    elif [ ! -z "$BASE_RPM" ]; then
+        INSTALL_RPM=$BASE_RPM
+    else
+        report_post_failure_with_msg "ERROR: Unable to find pxe-network-installer RPM for $OTHER_REL_DIR. Aborting installation."
+    fi
+
+    echo "Installing pxeboot files for release $OTHER_REL_DIR from $INSTALL_RPM" >/dev/console
+    TMP_RPM=/tmp/pxe-network-installer
+    mkdir $TMP_RPM
+    pushd $TMP_RPM
+    /usr/bin/rpm2cpio $INSTALL_RPM | cpio -idm \
+        || report_post_failure_with_msg "Failed to extract pxe-network-installer"
+
+    cp -r $TMP_RPM/usr / \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer /usr"
+    cp -r $TMP_RPM/pxeboot/$OTHER_REL_DIR /pxeboot/ \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer /pxeboot/$OTHER_REL_DIR"
+    cp $TMP_RPM/pxeboot/pxelinux.cfg.files/*-$OTHER_REL_VERSION /pxeboot/pxelinux.cfg.files/ \
+        || report_post_failure_with_msg "Failed to copy pxe-network-installer pxelinux.cfg files"
+
+    rm -rf $TMP_RPM
+fi
+
+%end
+
+# Template from: post_net_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+echo "repo --name=base --baseurl=http://pxecontroller:${http_port:-8080}/feed/rel-19.12/" > /tmp/repo-include
+echo "repo --name=updates --baseurl=http://pxecontroller:${http_port:-8080}/updates/rel-19.12/" >> /tmp/repo-include
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Persist the http port to the platform configuration
+echo http_port=$(get_http_port) >> /etc/platform/platform.conf
+
+# Obtain the boot interface from the PXE boot
+BOOTIF=`cat /proc/cmdline |xargs -n1 echo |grep BOOTIF=`
+if [ -d /sys/firmware/efi ] ; then
+    BOOTIF=${BOOTIF#BOOTIF=}
+else
+    BOOTIF=${BOOTIF#BOOTIF=01-}
+    BOOTIF=${BOOTIF//-/:}
+fi
+
+mgmt_dev=none
+mgmt_vlan=0
+if [ -n "$BOOTIF" ] ; then
+    ndev=`ip link show |grep -B 1 $BOOTIF |head -1 |awk '{print $2}' |sed -e 's/://'`
+    if [ -n "$ndev" ] ; then
+        mgmt_dev=$ndev
+        # Retrieve the management VLAN from sysinv if it exists
+        mgmt_vlan=`curl -sf http://pxecontroller:6385/v1/isystems/mgmtvlan`
+        if [ $? -ne 0 ]
+        then
+          report_post_failure_with_msg "ERROR: Unable to communicate with System Inventory REST API. Aborting installation."
+        fi
+    else
+        report_post_failure_with_msg "ERROR: Unable to determine mgmt interface from BOOTIF=$BOOTIF."
+    fi
+else
+    report_post_failure_with_msg "ERROR: BOOTIF is not set. Unable to determine mgmt interface."
+fi
+
+if [ $mgmt_vlan -eq 0 ] ; then
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=dhcp
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+else
+
+    # Check whether to use inet or inet6
+    ipv6_addr=$(dig +short AAAA controller)
+    if [[ -n "$ipv6_addr" ]]
+    then
+        mgmt_address_family=inet6
+        ipv6init=yes
+        dhcpv6c=yes
+        dhclientargs=-1
+    else
+        mgmt_address_family=inet
+        ipv6init=no
+        dhcpv6c=no
+        dhclientargs=
+    fi
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=vlan$mgmt_vlan >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=none
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-vlan$mgmt_vlan
+DEVICE=vlan$mgmt_vlan
+BOOTPROTO=dhcp
+DHCLIENTARGS=$dhclientargs
+IPV6INIT=$ipv6init
+DHCPV6C=$dhcpv6c
+ONBOOT=yes
+IPV6_AUTOCONF=no
+PHYSDEV=$mgmt_dev
+VLAN=yes
+LINKDELAY=20
+EOF
+
+    # Reject DHCPOFFER from DHCP server that doesn't send
+    # wrs-install-uuid option
+    echo "require wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+    echo "require dhcp6.wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+
+    # Bring up the mgmt vlan so that a dhcp lease is acquired and an address is
+    # setup prior to the post-install reboot.  This is so that the timing of the IP
+    # address allocation is similar to how normal/non-pxe installation works.
+    mgmt_iface=vlan$mgmt_vlan
+    dhclient_family=$([[ $mgmt_address_family == "inet" ]] && echo -4 || echo -6)
+    ip link add link $mgmt_dev name $mgmt_iface type vlan id $mgmt_vlan
+    ip link set up dev $mgmt_iface
+    dhclient $dhclient_family $mgmt_iface || true
+
+fi
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/net_storage_ks.cfg b/meta-stx/conf/distro/files/ks/net_storage_ks.cfg
new file mode 100644 (file)
index 0000000..dcac1f2
--- /dev/null
@@ -0,0 +1,861 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_net_common.cfg
+%pre
+
+# Setup ntp.conf and sync time
+cat <<EOF >/etc/ntp_kickstart.conf
+server pxecontroller
+EOF
+
+/usr/sbin/ntpd -g -q -n -c /etc/ntp_kickstart.conf
+if [ $? -eq 0 ]; then
+    /sbin/hwclock --systohc --utc
+fi
+
+%end
+
+
+# Template from: pre_pkglist.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio-rt
+-kernel-module-wrs-avp-rt
+-kernel-rt
+-kernel-rt-kvm
+-kernel-rt-tools
+-kernel-rt-tools-libs
+-kmod-drbd-rt
+-kmod-e1000e-rt
+-kmod-i40e-rt
+-kmod-ixgbe-rt
+-kmod-tpm-rt
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-rt-modules
+-qat16-rt
+@platform-storage
+@updates-storage
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device, first chopping off the partition number
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a globally unique identifier (GUID)
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_storage.cfg
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((90*$gb)) ] ; then
+    LOG_VOL_SIZE=4000
+    SCRATCH_VOL_SIZE=4000
+    ROOTFS_SIZE=10000
+else
+    LOG_VOL_SIZE=8000
+    SCRATCH_VOL_SIZE=8000
+    ROOTFS_SIZE=20000
+fi
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --asprimary --size=500 --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_storage.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Retrieve the installation uuid from the controller we booted from
+http_port=$(get_http_port)
+INSTALL_UUID=`curl -sf http://pxecontroller:${http_port:-8080}/feed/rel-19.12/install_uuid`
+if [ $? -ne 0 ]
+then
+  INSTALL_UUID=unknown
+fi
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=storage
+subfunction=storage
+system_type=Standard
+security_profile=$secprofile
+INSTALL_UUID=$INSTALL_UUID
+EOF
+
+%end
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_storage.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+## Custom kernel options
+KERN_OPTS=" intel_iommu=off usbcore.autosuspend=-1"
+## Add kernel options to ensure selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to ensure NMI watchdog is enabled, if supported
+KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+    KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_net_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+echo "repo --name=base --baseurl=http://pxecontroller:${http_port:-8080}/feed/rel-19.12/" > /tmp/repo-include
+echo "repo --name=updates --baseurl=http://pxecontroller:${http_port:-8080}/updates/rel-19.12/" > /tmp/repo-include
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Persist the http port to the platform configuration
+echo http_port=$(get_http_port) >> /etc/platform/platform.conf
+
+# Obtain the boot interface from the PXE boot
+BOOTIF=`cat /proc/cmdline |xargs -n1 echo |grep BOOTIF=`
+if [ -d /sys/firmware/efi ] ; then
+    BOOTIF=${BOOTIF#BOOTIF=}
+else
+    BOOTIF=${BOOTIF#BOOTIF=01-}
+    BOOTIF=${BOOTIF//-/:}
+fi
+
+mgmt_dev=none
+mgmt_vlan=0
+if [ -n "$BOOTIF" ] ; then
+    ndev=`ip link show |grep -B 1 $BOOTIF |head -1 |awk '{print $2}' |sed -e 's/://'`
+    if [ -n "$ndev" ] ; then
+        mgmt_dev=$ndev
+        # Retrieve the management VLAN from sysinv if it exists
+        mgmt_vlan=`curl -sf http://pxecontroller:6385/v1/isystems/mgmtvlan`
+        if [ $? -ne 0 ]
+        then
+          report_post_failure_with_msg "ERROR: Unable to communicate with System Inventory REST API. Aborting installation."
+        fi
+    else
+        report_post_failure_with_msg "ERROR: Unable to determine mgmt interface from BOOTIF=$BOOTIF."
+    fi
+else
+    report_post_failure_with_msg "ERROR: BOOTIF is not set. Unable to determine mgmt interface."
+fi
+
+if [ $mgmt_vlan -eq 0 ] ; then
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=dhcp
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+else
+
+    # Check whether to use inet or inet6
+    ipv6_addr=$(dig +short AAAA controller)
+    if [[ -n "$ipv6_addr" ]]
+    then
+        mgmt_address_family=inet6
+        ipv6init=yes
+        dhcpv6c=yes
+        dhclientargs=-1
+    else
+        mgmt_address_family=inet
+        ipv6init=no
+        dhcpv6c=no
+        dhclientargs=
+    fi
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=vlan$mgmt_vlan >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=none
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-vlan$mgmt_vlan
+DEVICE=vlan$mgmt_vlan
+BOOTPROTO=dhcp
+DHCLIENTARGS=$dhclientargs
+IPV6INIT=$ipv6init
+DHCPV6C=$dhcpv6c
+ONBOOT=yes
+IPV6_AUTOCONF=no
+PHYSDEV=$mgmt_dev
+VLAN=yes
+LINKDELAY=20
+EOF
+
+    # Reject DHCPOFFER from DHCP server that doesn't send
+    # wrs-install-uuid option
+    echo "require wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+    echo "require dhcp6.wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+
+    # Bring up the mgmt vlan so that a dhcp lease is acquired and an address is
+    # setup prior to the post-install reboot.  This is so that the timing of the IP
+    # address allocation is similar to how normal/non-pxe installation works.
+    mgmt_iface=vlan$mgmt_vlan
+    dhclient_family=$([[ $mgmt_address_family == "inet" ]] && echo -4 || echo -6)
+    ip link add link $mgmt_dev name $mgmt_iface type vlan id $mgmt_vlan
+    ip link set up dev $mgmt_iface
+    dhclient $dhclient_family $mgmt_iface || true
+
+fi
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/net_worker_ks.cfg b/meta-stx/conf/distro/files/ks/net_worker_ks.cfg
new file mode 100644 (file)
index 0000000..79b5ec7
--- /dev/null
@@ -0,0 +1,939 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_net_common.cfg
+%pre
+
+# Setup ntp.conf and sync time
+cat <<EOF >/etc/ntp_kickstart.conf
+server pxecontroller
+EOF
+
+/usr/sbin/ntpd -g -q -n -c /etc/ntp_kickstart.conf
+if [ $? -eq 0 ]; then
+    /sbin/hwclock --systohc --utc
+fi
+
+%end
+
+
+# Template from: pre_pkglist.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio-rt
+-kernel-module-wrs-avp-rt
+-kernel-rt
+-kernel-rt-kvm
+-kernel-rt-tools
+-kernel-rt-tools-libs
+-kmod-drbd-rt
+-kmod-e1000e-rt
+-kmod-i40e-rt
+-kmod-ixgbe-rt
+-kmod-tpm-rt
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-rt-modules
+-qat16-rt
+@platform-worker
+@updates-worker
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device, first chopping off the partition number
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a globally unique identifier (GUID)
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_worker.cfg
+LOG_VOL_SIZE=4000
+SCRATCH_VOL_SIZE=4000
+BOOT_VOL_SIZE=500
+
+## LOG_VOL_SIZE = 4096
+## SCRATCH_VOL_SIZE = 4096
+## DOCKER = 30720
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+## CGTS_PV_SIZE = 4096 + 4096 + 30720 + 20480 + 10240 + 16 = 69648
+##
+## Round CGTS_PV_SIZE to the closest upper value that can be divided by 1024.
+## 69648/1024=68.01. CGTS_PV_SIZE=69*1024=70656.
+CGTS_PV_SIZE=70656
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((80*$gb)) ] ; then
+    ## Less than 80GB use a 10GB root partition
+    ROOTFS_SIZE=10000
+else
+    ## Use a 20GB root partition
+    ROOTFS_SIZE=20000
+fi
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=$BOOT_VOL_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --asprimary --size=$CGTS_PV_SIZE --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Retrieve the installation uuid from the controller we booted from
+http_port=$(get_http_port)
+INSTALL_UUID=`curl -sf http://pxecontroller:${http_port:-8080}/feed/rel-19.12/install_uuid`
+if [ $? -ne 0 ]
+then
+  INSTALL_UUID=unknown
+fi
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=worker
+subfunction=worker
+system_type=Standard
+security_profile=$secprofile
+INSTALL_UUID=$INSTALL_UUID
+EOF
+
+# mount the platform directory from the controller
+cat >> /etc/fstab <<EOF
+controller-platform-nfs:/opt/platform  /opt/platform   nfs     timeo=30,udp,rsize=1024,wsize=1024,_netdev 0 0
+EOF
+
+%end
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if it is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_aio_and_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+# Update grub with custom kernel bootargs
+source /etc/init.d/cpumap_functions.sh
+n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
+  awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
+KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
+
+KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
+
+# If this is an all-in-one system, we need at least 4 CPUs
+if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
+    report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
+fi
+
+# Add kernel options for cpu isolation / affinity
+if [ ${n_cpus} -gt 1 ]
+then
+  base_cpulist=$(platform_expanded_cpu_list)
+  base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
+  avp_cpulist=$(vswitch_expanded_cpu_list)
+  norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
+  norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
+
+  if [[ "$subfunction" =~ lowlatency ]]; then
+    KERN_OPTS="${KERN_OPTS} isolcpus=${norcu_cpulist}"
+    KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
+  else
+    KERN_OPTS="${KERN_OPTS} isolcpus=${avp_cpulist}"
+  fi
+  KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
+  KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
+  KERN_OPTS="${KERN_OPTS} irqaffinity=${base_cpulist}"
+  # Update vswitch.conf
+  sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
+fi
+
+# Add kernel options to ensure selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to set NMI watchdog
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
+else
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+fi
+
+if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
+else
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on"
+fi
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+  KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# Add kernel options to disable kvm-intel.eptad on Broadwell
+# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
+then
+  KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# Add kernel option to avoid jiffies_lock contention on real-time kernel
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} skew_tick=1"
+fi
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_net_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+http_port=$(get_http_port)
+echo "repo --name=base --baseurl=http://pxecontroller:${http_port:-8080}/feed/rel-19.12/" > /tmp/repo-include
+echo "repo --name=updates --baseurl=http://pxecontroller:${http_port:-8080}/updates/rel-19.12/" > /tmp/repo-include
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Persist the http port to the platform configuration
+echo http_port=$(get_http_port) >> /etc/platform/platform.conf
+
+# Obtain the boot interface from the PXE boot
+BOOTIF=`cat /proc/cmdline |xargs -n1 echo |grep BOOTIF=`
+if [ -d /sys/firmware/efi ] ; then
+    BOOTIF=${BOOTIF#BOOTIF=}
+else
+    BOOTIF=${BOOTIF#BOOTIF=01-}
+    BOOTIF=${BOOTIF//-/:}
+fi
+
+mgmt_dev=none
+mgmt_vlan=0
+if [ -n "$BOOTIF" ] ; then
+    ndev=`ip link show |grep -B 1 $BOOTIF |head -1 |awk '{print $2}' |sed -e 's/://'`
+    if [ -n "$ndev" ] ; then
+        mgmt_dev=$ndev
+        # Retrieve the management VLAN from sysinv if it exists
+        mgmt_vlan=`curl -sf http://pxecontroller:6385/v1/isystems/mgmtvlan`
+        if [ $? -ne 0 ]
+        then
+          report_post_failure_with_msg "ERROR: Unable to communicate with System Inventory REST API. Aborting installation."
+        fi
+    else
+        report_post_failure_with_msg "ERROR: Unable to determine mgmt interface from BOOTIF=$BOOTIF."
+    fi
+else
+    report_post_failure_with_msg "ERROR: BOOTIF is not set. Unable to determine mgmt interface."
+fi
+
+if [ $mgmt_vlan -eq 0 ] ; then
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=dhcp
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+else
+
+    # Check whether to use inet or inet6
+    ipv6_addr=$(dig +short AAAA controller)
+    if [[ -n "$ipv6_addr" ]]
+    then
+        mgmt_address_family=inet6
+        ipv6init=yes
+        dhcpv6c=yes
+        dhclientargs=-1
+    else
+        mgmt_address_family=inet
+        ipv6init=no
+        dhcpv6c=no
+        dhclientargs=
+    fi
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=vlan$mgmt_vlan >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=none
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-vlan$mgmt_vlan
+DEVICE=vlan$mgmt_vlan
+BOOTPROTO=dhcp
+DHCLIENTARGS=$dhclientargs
+IPV6INIT=$ipv6init
+DHCPV6C=$dhcpv6c
+ONBOOT=yes
+IPV6_AUTOCONF=no
+PHYSDEV=$mgmt_dev
+VLAN=yes
+LINKDELAY=20
+EOF
+
+    # Reject DHCPOFFER from DHCP server that doesn't send
+    # wrs-install-uuid option
+    echo "require wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+    echo "require dhcp6.wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+
+    # Bring up the mgmt vlan so that a dhcp lease is acquired and an address is
+    # setup prior to the post-install reboot.  This is so that the timing of the IP
+    # address allocation is similar to how normal/non-pxe installation works.
+    mgmt_iface=vlan$mgmt_vlan
+    dhclient_family=$([[ $mgmt_address_family == "inet" ]] && echo -4 || echo -6)
+    ip link add link $mgmt_dev name $mgmt_iface type vlan id $mgmt_vlan
+    ip link set up dev $mgmt_iface
+    dhclient $dhclient_family $mgmt_iface || true
+
+fi
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/net_worker_lowlatency_ks.cfg b/meta-stx/conf/distro/files/ks/net_worker_lowlatency_ks.cfg
new file mode 100644 (file)
index 0000000..1da45a5
--- /dev/null
@@ -0,0 +1,938 @@
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+# SPDX-License-Identifier: Apache-2.0
+#
+
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+%include /tmp/timezone-include
+# set to 'x' so we can use shadow password
+rootpw  --iscrypted x
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+reboot --eject
+
+
+# Template from: pre_net_common.cfg
+%pre
+
+# Setup ntp.conf and sync time
+cat <<EOF >/etc/ntp_kickstart.conf
+server pxecontroller
+EOF
+
+/usr/sbin/ntpd -g -q -n -c /etc/ntp_kickstart.conf
+if [ $? -eq 0 ]; then
+    /sbin/hwclock --systohc --utc
+fi
+
+%end
+
+
+# Template from: pre_pkglist_lowlatency.cfg
+%packages
+@core
+@base
+-kernel-module-igb-uio
+-kernel-module-wrs-avp
+-kernel
+-kernel-tools
+-kernel-tools-libs
+-kmod-drbd
+-kmod-e1000e
+-kmod-i40e
+-kmod-ixgbe
+-kmod-tpm
+-mlnx-ofa_kernel
+-mlnx-ofa_kernel-rt
+-mlnx-ofa_kernel-modules
+-qat16
+@platform-worker-lowlatency
+@updates-worker-lowlatency
+%end
+
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a unique globally identifier
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, as they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_worker.cfg
+LOG_VOL_SIZE=4000
+SCRATCH_VOL_SIZE=4000
+BOOT_VOL_SIZE=500
+
+## LOG_VOL_SIZE = 4096
+## SCRATCH_VOL_SIZE = 4096
+## DOCKER = 30720
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+## CGTS_PV_SIZE = 4096 + 4096 + 30720 + 20480 + 10240 + 16 = 69648
+##
+## Round CGTS_PV_SIZE to the closest upper value that can be divided by 1024.
+## 69648/1024=68.01. CGTS_PV_SIZE=69*1024=70656.
+CGTS_PV_SIZE=70656
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((80*$gb)) ] ; then
+    ## Less than 80GB use a 10GB root partition
+    ROOTFS_SIZE=10000
+else
+    ## Use a 20GB root partition
+    ROOTFS_SIZE=20000
+fi
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=$BOOT_VOL_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --asprimary --size=$CGTS_PV_SIZE --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_worker_lowlatency.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Retrieve the installation uuid from the controller we booted from
+http_port=$(get_http_port)
+INSTALL_UUID=`curl -sf http://pxecontroller:${http_port:-8080}/feed/rel-19.12/install_uuid`
+if [ $? -ne 0 ]
+then
+  INSTALL_UUID=unknown
+fi
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=worker
+subfunction=worker,lowlatency
+system_type=Standard
+security_profile=$secprofile
+INSTALL_UUID=$INSTALL_UUID
+EOF
+
+# mount the platform directory from the controller
+cat >> /etc/fstab <<EOF
+controller-platform-nfs:/opt/platform  /opt/platform   nfs     timeo=30,udp,rsize=1024,wsize=1024,_netdev 0 0
+EOF
+
+%end
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if it is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_aio_and_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+# Update grub with custom kernel bootargs
+source /etc/init.d/cpumap_functions.sh
+n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
+  awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
+KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
+
+KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
+
+# If this is an all-in-one system, we need at least 4 CPUs
+if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
+    report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
+fi
+
+# Add kernel options for cpu isolation / affinity
+if [ ${n_cpus} -gt 1 ]
+then
+  base_cpulist=$(platform_expanded_cpu_list)
+  base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
+  avp_cpulist=$(vswitch_expanded_cpu_list)
+  norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
+  norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
+
+  if [[ "$subfunction" =~ lowlatency ]]; then
+    KERN_OPTS="${KERN_OPTS} isolcpus=${norcu_cpulist}"
+    KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
+  else
+    KERN_OPTS="${KERN_OPTS} isolcpus=${avp_cpulist}"
+  fi
+  KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
+  KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
+  KERN_OPTS="${KERN_OPTS} irqaffinity=${base_cpulist}"
+  # Update vswitch.conf
+  sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
+fi
+
+# Add kernel options to ensure selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to set NMI watchdog
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
+else
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+fi
+
+if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
+else
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on"
+fi
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+  KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# Add kernel options to disable kvm-intel.eptad on Broadwell
+# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
+then
+  KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# Add kernel option to avoid jiffies_lock contention on real-time kernel
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} skew_tick=1"
+fi
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub2-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub2-mkconfig -o /boot/grub2/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_net_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Build /tmp/repo-include (consumed below via %include); append the updates repo so base is kept
+http_port=$(get_http_port)
+echo "repo --name=base --baseurl=http://pxecontroller:${http_port:-8080}/feed/rel-19.12/" > /tmp/repo-include
+echo "repo --name=updates --baseurl=http://pxecontroller:${http_port:-8080}/updates/rel-19.12/" >> /tmp/repo-include
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Persist the http port to the platform configuration
+echo http_port=$(get_http_port) >> /etc/platform/platform.conf
+
+# Obtain the boot interface from the PXE boot
+BOOTIF=`cat /proc/cmdline |xargs -n1 echo |grep BOOTIF=`
+if [ -d /sys/firmware/efi ] ; then
+    BOOTIF=${BOOTIF#BOOTIF=}
+else
+    BOOTIF=${BOOTIF#BOOTIF=01-}
+    BOOTIF=${BOOTIF//-/:}
+fi
+
+mgmt_dev=none
+mgmt_vlan=0
+if [ -n "$BOOTIF" ] ; then
+    ndev=`ip link show |grep -B 1 $BOOTIF |head -1 |awk '{print $2}' |sed -e 's/://'`
+    if [ -n "$ndev" ] ; then
+        mgmt_dev=$ndev
+        # Retrieve the management VLAN from sysinv if it exists
+        mgmt_vlan=`curl -sf http://pxecontroller:6385/v1/isystems/mgmtvlan`
+        if [ $? -ne 0 ]
+        then
+          report_post_failure_with_msg "ERROR: Unable to communicate with System Inventory REST API. Aborting installation."
+        fi
+    else
+        report_post_failure_with_msg "ERROR: Unable to determine mgmt interface from BOOTIF=$BOOTIF."
+    fi
+else
+    report_post_failure_with_msg "ERROR: BOOTIF is not set. Unable to determine mgmt interface."
+fi
+
+if [ $mgmt_vlan -eq 0 ] ; then
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=dhcp
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+else
+
+    # Check whether to use inet or inet6
+    ipv6_addr=$(dig +short AAAA controller)
+    if [[ -n "$ipv6_addr" ]]
+    then
+        mgmt_address_family=inet6
+        ipv6init=yes
+        dhcpv6c=yes
+        dhclientargs=-1
+    else
+        mgmt_address_family=inet
+        ipv6init=no
+        dhcpv6c=no
+        dhclientargs=
+    fi
+
+    # Persist the boot device to the platform configuration. This will get
+    # overwritten later if the management_interface is on a bonded interface.
+    echo management_interface=vlan$mgmt_vlan >> /etc/platform/platform.conf
+
+    # Build networking scripts
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-$mgmt_dev
+DEVICE=$mgmt_dev
+BOOTPROTO=none
+ONBOOT=yes
+IPV6_AUTOCONF=no
+LINKDELAY=20
+EOF
+
+    cat << EOF > /etc/sysconfig/network-scripts/ifcfg-vlan$mgmt_vlan
+DEVICE=vlan$mgmt_vlan
+BOOTPROTO=dhcp
+DHCLIENTARGS=$dhclientargs
+IPV6INIT=$ipv6init
+DHCPV6C=$dhcpv6c
+ONBOOT=yes
+IPV6_AUTOCONF=no
+PHYSDEV=$mgmt_dev
+VLAN=yes
+LINKDELAY=20
+EOF
+
+    # Reject DHCPOFFER from DHCP server that doesn't send
+    # wrs-install-uuid option
+    echo "require wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+    echo "require dhcp6.wrs-install-uuid;" >>/etc/dhcp/dhclient.conf
+
+    # Bring up the mgmt vlan so that a dhcp lease is acquired and an address is
+    # setup prior to the post-install reboot.  This is so that the timing of the IP
+    # address allocation is similar to how normal/non-pxe installation works.
+    mgmt_iface=vlan$mgmt_vlan
+    dhclient_family=$([[ $mgmt_address_family == "inet" ]] && echo -4 || echo -6)
+    ip link add link $mgmt_dev name $mgmt_iface type vlan id $mgmt_vlan
+    ip link set up dev $mgmt_iface
+    dhclient $dhclient_family $mgmt_iface || true
+
+fi
+
+%end
diff --git a/meta-stx/conf/distro/files/ks/poky_stx_aio_ks.cfg b/meta-stx/conf/distro/files/ks/poky_stx_aio_ks.cfg
new file mode 100644 (file)
index 0000000..dff25fe
--- /dev/null
@@ -0,0 +1,1061 @@
+%pre
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+%post
+# This file defines functions that can be used in %pre and %post kickstart sections, by including:
+# . /tmp/ks-functions.sh
+#
+
+cat <<END_FUNCTIONS >/tmp/ks-functions.sh
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+function get_by_path()
+{
+    local disk=\$(cd /dev ; readlink -f \$1)
+    for p in /dev/disk/by-path/*; do
+        if [ "\$disk" = "\$(readlink -f \$p)" ]; then
+            echo \$p
+            return
+        fi
+    done
+}
+
+function get_disk()
+{
+    echo \$(cd /dev ; readlink -f \$1)
+}
+
+function report_pre_failure_with_msg()
+{
+    local msg=\$1
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_msg()
+{
+    local msg=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+\$msg
+
+EOF
+    echo "\$msg" >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    echo "\$msg"
+
+    exit 1
+}
+
+function report_post_failure_with_logfile()
+{
+    local logfile=\$1
+    cat <<EOF >> /etc/motd
+
+Installation failed.
+Please see \$logfile for details of failure
+
+EOF
+    echo \$logfile >/etc/platform/installation_failed
+
+    echo -e '\n\nInstallation failed.\n'
+    cat \$logfile
+
+    exit 1
+}
+
+function get_http_port()
+{
+    echo \$(cat /proc/cmdline |xargs -n1 echo |grep '^inst.repo=' | sed -r 's#^[^/]*://[^/]*:([0-9]*)/.*#\1#')
+}
+
+END_FUNCTIONS
+
+%end
+
+# Template from: pre_common_head.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+append=
+if [ -n "$console" ] ; then
+    append="console=$console"
+fi
+
+if [ -n "$security_profile" ]; then
+    append="$append security_profile=$security_profile"
+fi
+
+#### SECURITY PROFILE HANDLING (Pre Installation) ####
+if [ -n "$security_profile" ] && [ "$security_profile" == "extended" ]; then
+    # IMA specific boot options:
+    # Enable Kernel auditing
+    append="$append audit=1"
+else
+    # we need to blacklist the IMA and Integrity Modules
+    # on standard security profile
+    append="$append module_blacklist=integrity,ima"
+    
+    # Disable Kernel auditing in Standard Security Profile mode
+    append="$append audit=0"
+fi
+
+if [ -n "$tboot" ]; then
+    append="$append tboot=$tboot"
+else
+    append="$append tboot=false"
+fi
+
+boot_device_arg=
+if [ -n "$boot_device" ] ; then
+    boot_device_arg="--boot-drive=$(get_by_path $boot_device)"
+fi
+
+echo "bootloader --location=mbr $boot_device_arg --timeout=5 --append=\"$append\"" > /tmp/bootloader-include
+
+echo "timezone --nontp --utc UTC" >/tmp/timezone-include
+%end
+
+##############################################################
+# Main kickstart
+##############################################################
+#version=DEVEL
+install
+lang en_US.UTF-8
+keyboard us
+
+# System timezone
+%include /tmp/timezone-include
+
+# Root password
+rootpw --iscrypted $6$ArDcm/wSNLJLT2OP$QdWX6kMUgBVsiibukLBLtLfRDVz0n49BQ1svT7hPEQJASvKnqkEL5zc5kqUMMzXzLrj80z6YX9DmYTD0Ysxn.1
+
+selinux --disabled
+authconfig --enableshadow --passalgo=sha512
+firewall --service=ssh
+
+# Use text mode install
+text
+
+# Use CDROM installation media
+cdrom
+
+# Run the Setup Agent on first boot
+firstboot --enable
+
+# System services
+services --enabled="lvm2-monitor.service"
+
+# Do not configure the X Window System
+skipx
+
+# The following is the partition information you requested
+# Note that any partitions you deleted are not expressed
+# here so unless you clear all partitions first, this is
+# not guaranteed to work
+zerombr
+
+# Disk layout from %pre
+%include /tmp/part-include
+
+# Bootloader parms from %pre
+%include /tmp/bootloader-include
+
+#reboot --eject
+
+%packages
+#@^stx-image-aio
+%end
+
+################################################################
+# End of Main
+################################################################
+
+# Template from: pre_disk_setup_common.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# This is a really fancy way of finding the first usable disk for the
+# install and not stomping on the USB device if it comes up first
+
+# First, parse /proc/cmdline to find the boot args
+set -- `cat /proc/cmdline`
+for I in $*; do case "$I" in *=*) eval $I 2>/dev/null;; esac; done
+
+# Find either the ISO or USB device first chopping off partition
+ISO_DEV=`readlink /dev/disk/by-label/oe_iso_boot`
+sdev=`echo $ISO_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    ISO_DEV=$sdev
+fi
+USB_DEV=`readlink /dev/disk/by-label/wr_usb_boot`
+sdev=`echo $USB_DEV | sed -e 's/.$//'`
+if [ -e /dev/disk/by-label/$sdev ] ; then
+    USB_DEV=$sdev
+fi
+
+# Temporary, until lab pxelinux.cfg files are updated to specify install devices
+if [ -z "$rootfs_device" -o -z "$boot_device" ]
+then
+    INST_HDD=""
+    # Prefer a vd* device if this is kvm/qemu
+    for e in vda vdb sda sdb nvme0n1; do
+        if [ -e /dev/$e -a "$ISO_DEV" != "../../$e" -a "$USB_DEV" != "../../$e" ] ; then
+            INST_HDD=$e
+            break
+        fi
+    done
+
+    # Set variables to $INST_HDD if not set
+    rootfs_device=${rootfs_device:-$INST_HDD}
+    boot_device=${boot_device:-$INST_HDD}
+fi
+
+# Convert to by-path
+orig_rootfs_device=$rootfs_device
+rootfs_device=$(get_by_path $rootfs_device)
+
+orig_boot_device=$boot_device
+boot_device=$(get_by_path $boot_device)
+
+if [ ! -e "$rootfs_device" -o ! -e "$boot_device" ] ; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is invalid."
+fi
+
+# Ensure specified device is not a USB drive
+udevadm info --query=property --name=$rootfs_device |grep -q '^ID_BUS=usb' || \
+    udevadm info --query=property --name=$boot_device |grep -q '^ID_BUS=usb'
+if [ $? -eq 0 ]; then
+    # Touch this file to prevent Anaconda from dying an ungraceful death
+    touch /tmp/part-include
+
+    report_pre_failure_with_msg "ERROR: Specified installation ($orig_rootfs_device) or boot ($orig_boot_device) device is a USB drive."
+fi
+
+# Deactivate existing volume groups to avoid Anaconda issues with pre-existing groups
+vgs --noheadings -o vg_name | xargs --no-run-if-empty -n 1 vgchange -an
+
+# Remove volumes and group for cgts-vg, if any
+lvremove --force cgts-vg
+pvs --select 'vg_name=cgts-vg' --noheadings -o pv_name | xargs --no-run-if-empty pvremove --force --force --yes
+vgs --select 'vg_name=cgts-vg' --noheadings -o vg_name | xargs --no-run-if-empty vgremove --force
+
+ONLYUSE_HDD=""
+if [ "$(curl -sf http://pxecontroller:6385/v1/upgrade/$(hostname)/in_upgrade 2>/dev/null)" = "true" ]; then
+    # In an upgrade, only wipe the disk with the rootfs and boot partition
+    echo "In upgrade, wiping only $rootfs_device"
+    WIPE_HDD="$(get_disk $rootfs_device)"
+    ONLYUSE_HDD="$(basename $(get_disk $rootfs_device))"
+    if [ "$(get_disk $rootfs_device)" != "$(get_disk $boot_device)" ]; then
+        WIPE_HDD="$WIPE_HDD,$(get_disk $boot_device)"
+        ONLYUSE_HDD="$ONLYUSE_HDD,$(basename $(get_disk $boot_device))"
+    fi
+else
+    # Make a list of all the hard drives that are to be wiped
+    WIPE_HDD=""
+    # Partition type OSD has a unique globally identifier
+    part_type_guid_str="Partition GUID code"
+    CEPH_OSD_GUID="4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D"
+
+    # Check if we wipe OSDs
+    if [ "$(curl -sf http://pxecontroller:6385/v1/ihosts/wipe_osds 2>/dev/null)" = "true" ]; then
+        echo "Wipe OSD data."
+        WIPE_CEPH_OSDS="true"
+    else
+        echo "Skip Ceph OSD data wipe."
+        WIPE_CEPH_OSDS="false"
+    fi
+
+    for f in /dev/disk/by-path/*
+    do
+        dev=$(readlink -f $f)
+        lsblk --nodeps --pairs $dev | grep -q 'TYPE="disk"'
+        if [ $? -ne 0 ]
+        then
+            continue
+        fi
+
+        # Avoid wiping USB drives
+        udevadm info --query=property --name=$dev |grep -q '^ID_BUS=usb' && continue
+
+        # Avoid wiping ceph osds if sysinv tells us so
+        if [ ${WIPE_CEPH_OSDS} == "false" ]; then
+            wipe_dev="true"
+            part_numbers=( `parted -s $dev print | awk '$1 == "Number" {i=1; next}; i {print $1}'` )
+            # Scanning the partitions looking for CEPH OSDs and
+            # skipping any disk found with such partitions
+            for part_number in "${part_numbers[@]}"; do
+                sgdisk_part_info=$(flock $dev sgdisk -i $part_number $dev)
+                part_type_guid=$(echo "$sgdisk_part_info" | grep "$part_type_guid_str" | awk '{print $4;}')
+                if [ "$part_type_guid" == $CEPH_OSD_GUID ]; then
+                    echo "OSD found on $dev, skipping wipe"
+                    wipe_dev="false"
+                    break
+                fi
+            done
+            if [ "$wipe_dev" == "false" ]; then
+                continue
+            fi
+        fi
+
+        # Add device to the wipe list
+        devname=$(basename $dev)
+        if [ -e $dev -a "$ISO_DEV" != "../../$devname" -a "$USB_DEV" != "../../$devname" ]; then
+            if [ -n "$WIPE_HDD" ]; then
+                WIPE_HDD=$WIPE_HDD,$dev
+            else
+                WIPE_HDD=$dev
+            fi
+        fi
+    done
+    echo "Not in upgrade, wiping disks: $WIPE_HDD"
+fi
+
+for dev in ${WIPE_HDD//,/ }
+do
+    # Clearing previous GPT tables or LVM data
+    # Delete the first few bytes at the start and end of the partition. This is required with
+    # GPT partitions, they save partition info at the start and the end of the block.
+    # Do this for each partition on the disk, as well.
+    partitions=$(lsblk -rip $dev -o TYPE,NAME |awk '$1 == "part" {print $2}')
+    for p in $partitions $dev
+    do
+        echo "Pre-wiping $p from kickstart"
+        dd if=/dev/zero of=$p bs=512 count=34
+        dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+    done
+done
+
+# Check for remaining cgts-vg PVs, which could potentially happen
+# in an upgrade where we're not wiping all disks.
+# If we ever create other volume groups from kickstart in the future,
+# include them in this search as well.
+partitions=$(pvs --select 'vg_name=cgts-vg' -o pv_name --noheading | grep -v '\[unknown\]')
+for p in $partitions
+do
+    echo "Pre-wiping $p from kickstart (cgts-vg present)"
+    dd if=/dev/zero of=$p bs=512 count=34
+    dd if=/dev/zero of=$p bs=512 count=34 seek=$((`blockdev --getsz $p` - 34))
+done
+
+let -i gb=1024*1024*1024
+
+cat<<EOF>/tmp/part-include
+clearpart --all --drives=$WIPE_HDD --initlabel
+EOF
+
+if [ -n "$ONLYUSE_HDD" ]; then
+    cat<<EOF>>/tmp/part-include
+ignoredisk --only-use=$ONLYUSE_HDD
+EOF
+fi
+
+if [ -d /sys/firmware/efi ] ; then
+    cat<<EOF>>/tmp/part-include
+part /boot/efi --fstype=efi --size=300 --ondrive=$(get_disk $boot_device)
+EOF
+else
+    cat<<EOF>>/tmp/part-include
+part biosboot --asprimary --fstype=biosboot --size=1 --ondrive=$(get_disk $boot_device)
+EOF
+fi
+
+
+# Template from: pre_disk_aio.cfg
+
+## NOTE: updates to partition sizes need to be also reflected in
+##  - stx-config/.../sysinv/conductor/manager.py:create_controller_filesystems()
+##  - stx-config/.../sysinv/common/constants.py
+##
+## NOTE: When adding partitions, we currently have a max of 4 primary partitions.
+##       If more than 4 partitions are required, we can use a max of 3 --asprimary,
+##       to allow 1 primary logical partition with extended partitions
+##
+## NOTE: Max default PV size must align with the default controllerfs sizes
+##
+## BACKUP_OVERHEAD = 20
+##
+## Physical install (for disks over 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_IMAGE_STOR_SIZE = 10
+## DEFAULT_DATABASE_STOR_SIZE = 20
+## DEFAULT_IMG_CONVERSION_STOR_SIZE = 20
+## BACKUP = DEFAULT_DATABASE_STOR_SIZE + DEFAULT_IMAGE_STOR_SIZE
+##                                     + BACKUP_OVERHEAD = 50
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+## CGCS_PV_SIZE = 10240 + 2*20480 + 20480 + 51200 + 8196 + 8196 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 233496
+##
+## small install - (for disks below 240GB)
+##  - DB size is doubled to allow for upgrades
+##
+## DEFAULT_SMALL_IMAGE_STOR_SIZE = 10
+## DEFAULT_SMALL_DATABASE_STOR_SIZE = 10
+## DEFAULT_SMALL_IMG_CONVERSION_STOR_SIZE = 10
+## DEFAULT_SMALL_BACKUP_STOR_SIZE = 40
+##
+## LOG_VOL_SIZE = 8192
+## SCRATCH_VOL_SIZE = 8192
+## RABBIT = 2048
+## PLATFORM = 2048
+## ANCHOR = 1024
+## EXTENSION = 1024
+## GNOCCHI = 5120
+## DOCKER = 30720
+## DOCKER_DIST = 16384
+## ETCD = 5120
+## CEPH_MON = 20480
+## KUBELET_VOL_SIZE = 10240
+## RESERVED_PE = 16 (based on pesize=32768)
+##
+##
+## CGCS_PV_SIZE = 10240 + 2*10240 + 10240 + 40960 + 8192 + 8192 + 2048 +
+##                2048 + 1024 + 1024 + 5120 + 30720 + 16384 + 5120 +
+##                20480 + 10240 + 16 = 192528
+##
+## NOTE: To maintain upgrade compatibility within the volume group, keep the
+## undersized LOG_VOL_SIZE and SCRATCH_VOL_SIZE, but size the minimal
+## physical volume correctly.
+##
+##  R4 AIO installations:
+##  - R4 (case #1): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), /local_pv (239G)
+##  - R4 (case #2): /boot (0.5G), / (20G),
+##                  cgts-vg PV (239G), cgts-vg (239G)
+##
+##  Upgrade migration will start with R5 install and create a partition to align
+##  above so filesystems within the volume group will be able to maintain their
+##  sizes in R5
+##    - R5 install  : /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), un-partitioned (336G)
+##    - R5 (case #1): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (97G), unpartitioned (239G)
+##    - R5 (case #2): /boot (0.5G), / (20G),
+##                    cgts-vg PV (142G), cgts-vg PV (336G)
+##
+
+sz=$(blockdev --getsize64 $(get_disk $rootfs_device))
+if [ $sz -le $((240*$gb)) ] ; then
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 192528/1024=188.01. CGCS_PV_SIZE=189*1024=193536. Using a disk with a
+    # size under 189GiB will fail.
+    CGCS_PV_SIZE=193536
+else
+    # Round CGCS_PV_SIZE to the closest upper value that can be divided by 1024.
+    # 233496/1024=228.02. CGCS_PV_SIZE=229*1024=234496.
+    CGCS_PV_SIZE=234496
+fi
+
+ROOTFS_SIZE=20000
+LOG_VOL_SIZE=8000
+SCRATCH_VOL_SIZE=8000
+
+ROOTFS_OPTIONS="defaults"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   # Enable iversion labelling for rootfs when IMA is enabled
+   ROOTFS_OPTIONS="${ROOTFS_OPTIONS},iversion"
+fi
+
+cat<<EOF>>/tmp/part-include
+part /boot --fstype=ext4 --asprimary --size=500 --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+part pv.253004 --grow --size=500 --maxsize=$CGCS_PV_SIZE --ondrive=$(get_disk $rootfs_device)
+volgroup cgts-vg --pesize=32768 pv.253004
+logvol /var/log --fstype=ext4 --vgname=cgts-vg --size=$LOG_VOL_SIZE --name=log-lv
+logvol /scratch --fstype=ext4 --vgname=cgts-vg --size=$SCRATCH_VOL_SIZE --name=scratch-lv
+part / --fstype=ext4 --asprimary --size=$ROOTFS_SIZE --ondrive=$(get_disk $rootfs_device) --fsoptions="$ROOTFS_OPTIONS"
+EOF
+
+%end
+
+
+# Template from: post_platform_conf_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Set the security profile mode
+secprofile="standard"
+profile_mode=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$profile_mode" ]; then
+   secprofile="extended"
+fi
+
+mkdir -p -m 0775 /etc/platform
+cat <<EOF > /etc/platform/platform.conf
+nodetype=controller
+subfunction=controller,worker
+system_type=All-in-one
+security_profile=$secprofile
+EOF
+
+%end
+
+######################################
+# workarounds or fixes for poky-stx
+######################################
+%post --erroronfail
+
+# Add extra users and groups
+SYSADMIN_P="4SuW8cnXFyxsk"
+groupadd -f -g 345 sys_protected
+useradd -m -g sys_protected -G root -d /home/sysadmin -p ${SYSADMIN_P} -s /bin/sh sysadmin
+
+groupadd -r -g 128 nscd
+useradd -M -o -r -d / -s /sbin/nologin -c 'NSCD Daemon' -u 28 -g nscd nscd
+
+useradd -p '' ceph
+groupadd ceph
+usermod -a -G ceph ceph
+
+useradd -p '' patching
+groupadd patching
+usermod -a -G patching patching
+
+useradd -p '' nfv
+groupadd nfv
+usermod -a -G nfv nfv
+
+usermod -a -G sys_protected sysadmin
+usermod -a -G sys_protected sysinv
+usermod -a -G sys_protected www
+usermod -a -G sys_protected nfv
+usermod -a -G sys_protected patching
+usermod -a -G sys_protected haproxy
+usermod -P root root
+
+# Extend path variable for sysadmin
+echo 'PATH=/sbin:/usr/sbin:$PATH' >> /home/sysadmin/.bashrc
+chown sysadmin:sys_protected /home/sysadmin/.bashrc
+
+# Avoid duplicate with systemd-fstab-generator
+sed -i "s|\(^.*/dev/root\)|#\1|" /etc/fstab
+
+%end
+
+
+# Template from: post_common.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Turn off locale support for i18n if it is not installed
+if [ ! -d /usr/share/i18n ] ; then
+   rm -f /etc/sysconfig/i18n
+fi
+# Unset the hostname
+rm /etc/hostname
+
+# If using a serial install make sure to add a getty on the tty1
+conarg=`cat /proc/cmdline |xargs -n1 echo |grep console= |grep ttyS`
+if [ -n "$conarg" ] ; then
+   echo "1:2345:respawn:/sbin/mingetty tty1" >> /etc/inittab
+fi
+
+#### SECURITY PROFILE HANDLING (Post Installation) ####
+# Check if the Security profile mode is enabled
+# and load the appropriate kernel modules
+secprofile=`cat /proc/cmdline |xargs -n1 echo |grep security_profile= | grep extended`
+if [ -n "$secprofile" ]; then
+   echo "In Extended Security profile mode. Loading IMA kernel module"
+   systemctl enable auditd.service
+   # Add the securityfs mount for the IMA Runtime measurement list
+   echo "securityfs     /sys/kernel/security    securityfs    defaults,nodev 0 0" >> /etc/fstab
+else
+   # Disable audit daemon in the Standard Security Profile
+   systemctl disable auditd
+fi
+
+. /etc/platform/platform.conf
+# Configure smart package manager channels
+rm -rf /var/lib/smart
+mkdir /var/lib/smart
+/usr/bin/smart channel -y \
+    --add rpmdb type=rpm-sys name="RPM Database"
+/usr/bin/smart channel -y \
+    --add base type=rpm-md name="Base" baseurl=http://controller:${http_port:-8080}/feed/rel-19.12
+/usr/bin/smart channel -y \
+    --add updates type=rpm-md name="Patches" baseurl=http://controller:${http_port:-8080}/updates/rel-19.12
+
+# Configure smart to use rpm --nolinktos option
+/usr/bin/smart config --set rpm-nolinktos=true
+
+# Configure smart to use rpm --nosignature option
+/usr/bin/smart config --set rpm-check-signatures=false
+
+# Delete the CentOS yum repo files
+rm -f /etc/yum.repos.d/CentOS-*
+
+# Persist the boot device naming as UDEV rules so that if the network device
+# order changes post-install that we will still be able to DHCP from the
+# correct interface to reach the active controller.  For most nodes only the
+# management/boot interface needs to be persisted but because we require both
+# controllers to be identically configured and controller-0 and controller-1
+# are installed differently (e.g., controller-0 from USB and controller-1 from
+# network) it is not possible to know which interface to persist for
+# controller-0.  The simplest solution is to persist all interfaces.
+#
+mkdir -p /etc/udev/rules.d
+echo "# Persisted network interfaces from anaconda installer" > /etc/udev/rules.d/70-persistent-net.rules
+for dir in /sys/class/net/*; do
+    if [ -e ${dir}/device ]; then
+       dev=$(basename ${dir})
+       mac_address=$(cat /sys/class/net/${dev}/address)
+       echo "ACTION==\"add\", SUBSYSTEM==\"net\", DRIVERS==\"?*\", ATTR{address}==\"${mac_address}\", NAME=\"${dev}\"" >> /etc/udev/rules.d/70-persistent-net.rules
+    fi
+done
+
+# Mark the sysadmin password as expired immediately
+chage -d 0 sysadmin
+
+# Lock the root password
+#passwd -l root
+
+# Enable tmpfs mount for /tmp
+# delete /var/tmp so that it can be symlinked in
+rm -rf /var/tmp
+systemctl enable tmp.mount
+
+# Disable automount of /dev/hugepages
+systemctl mask dev-hugepages.mount
+
+# Disable firewall
+systemctl disable firewalld
+
+# Disable libvirtd
+systemctl disable libvirtd.service
+
+# Enable rsyncd
+systemctl enable rsyncd.service
+
+# Allow root to run sudo from a non-tty (for scripts running as root that run sudo cmds)
+echo 'Defaults:root !requiretty' > /etc/sudoers.d/root
+
+# Make fstab just root read/writable
+chmod 600 /etc/fstab
+
+# Create first_boot flag
+touch /etc/platform/.first_boot
+
+%end
+
+# Template from: post_kernel_aio_and_worker.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+# Update grub with custom kernel bootargs
+source /etc/init.d/cpumap_functions.sh
+n_cpus=$(cat /proc/cpuinfo 2>/dev/null | \
+  awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+n_numa=$(ls -d /sys/devices/system/node/node* 2>/dev/null | wc -l)
+KERN_OPTS=" iommu=pt usbcore.autosuspend=-1"
+
+KERN_OPTS="${KERN_OPTS} hugepagesz=2M hugepages=0 default_hugepagesz=2M"
+
+# If this is an all-in-one system, we need at least 4 CPUs
+if [ "$system_type" = "All-in-one" -a ${n_cpus} -lt 4 ]; then
+    report_post_failure_with_msg "ERROR: At least 4 CPUs are required for controller+worker node."
+fi
+
+# Add kernel options for cpu isolation / affinity
+if [ ${n_cpus} -gt 1 ]
+then
+  base_cpulist=$(platform_expanded_cpu_list)
+  base_cpumap=$(cpulist_to_cpumap ${base_cpulist} ${n_cpus})
+  avp_cpulist=$(vswitch_expanded_cpu_list)
+  norcu_cpumap=$(invert_cpumap ${base_cpumap} ${n_cpus})
+  norcu_cpulist=$(cpumap_to_cpulist ${norcu_cpumap} ${n_cpus})
+
+  if [[ "$subfunction" =~ lowlatency ]]; then
+    KERN_OPTS="${KERN_OPTS} isolcpus=${norcu_cpulist}"
+    KERN_OPTS="${KERN_OPTS} nohz_full=${norcu_cpulist}"
+  else
+    KERN_OPTS="${KERN_OPTS} isolcpus=${avp_cpulist}"
+  fi
+  KERN_OPTS="${KERN_OPTS} rcu_nocbs=${norcu_cpulist}"
+  KERN_OPTS="${KERN_OPTS} kthread_cpus=${base_cpulist}"
+  KERN_OPTS="${KERN_OPTS} irqaffinity=${base_cpulist}"
+  # Update vswitch.conf
+  sed -i "s/^VSWITCH_CPU_LIST=.*/VSWITCH_CPU_LIST=\"${avp_cpulist}\"/" /etc/vswitch/vswitch.conf
+fi
+
+# Add kernel options to ensure an selinux is disabled
+KERN_OPTS="${KERN_OPTS} selinux=0 enforcing=0"
+
+# Add kernel options to set NMI watchdog
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=0 softlockup_panic=0"
+else
+  KERN_OPTS="${KERN_OPTS} nmi_watchdog=panic,1 softlockup_panic=1"
+fi
+
+if [[ "$(dmidecode -s system-product-name)" =~ ^ProLiant.*Gen8$ ]]; then
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on,eth_no_rmrr"
+else
+  KERN_OPTS="${KERN_OPTS} intel_iommu=on"
+fi
+
+# Add kernel option to disable biosdevname if enabled
+# As this may already be in GRUB_CMDLINE_LINUX, only add if it is not already present
+grep -q '^GRUB_CMDLINE_LINUX=.*biosdevname=0' /etc/default/grub
+if [ $? -ne 0 ]; then
+  KERN_OPTS="${KERN_OPTS} biosdevname=0"
+fi
+
+# Add kernel options to disable kvm-intel.eptad on Broadwell
+# Broadwell: Model: 79, Model name: Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+if grep -q -E "^model\s+:\s+79$" /proc/cpuinfo
+then
+  KERN_OPTS="${KERN_OPTS} kvm-intel.eptad=0"
+fi
+
+# k8s updates:
+#KERN_OPTS="${KERN_OPTS} cgroup_disable=memory"
+KERN_OPTS="${KERN_OPTS} user_namespace.enable=1"
+
+# Add kernel option to avoid jiffies_lock contention on real-time kernel
+if [[ "$subfunction" =~ lowlatency ]]; then
+  KERN_OPTS="${KERN_OPTS} skew_tick=1"
+fi
+
+# If the installer asked us to use security related kernel params, use
+# them in the grub line as well (until they can be configured via puppet)
+grep -q 'nopti' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nopti"
+fi
+grep -q 'nospectre_v2' /proc/cmdline
+if [ $? -eq 0 ]; then
+    KERN_OPTS="${KERN_OPTS} nospectre_v2"
+fi
+
+perl -pi -e 's/(GRUB_CMDLINE_LINUX=.*)\"/\1'"$KERN_OPTS"'\"/g' /etc/default/grub
+
+if [ -d /sys/firmware/efi ] ; then
+  grub-mkconfig -o /boot/efi/EFI/centos/grub.cfg
+else
+  grub-mkconfig -o /boot/grub/grub.cfg
+fi
+
+%end
+
+
+# Template from: post_lvm_pv_on_rootfs.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# uncomment the global_filter line in lvm.conf
+perl -0777 -i.bak -pe 's:(# This configuration option has an automatic default value\.\n)\t# global_filter:$1        global_filter:m' /etc/lvm/lvm.conf
+
+# Determine which disk we created our PV on (i.e. the root disk)
+ROOTDISK=$(get_by_path $(pvdisplay --select 'vg_name=cgts-vg' -C -o pv_name --noheadings))
+if [ -z "$ROOTDISK" ]; then
+    report_post_failure_with_msg "ERROR: failed to identify rootdisk via pvdisplay"
+fi
+# Edit the LVM config so LVM only looks for LVs on the root disk
+sed -i "s#^\( *\)global_filter = \[.*#\1global_filter = [ \"a|${ROOTDISK}|\", \"r|.*|\" ]#" /etc/lvm/lvm.conf
+%end
+
+
+# Template from: post_system_aio.cfg
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+# Source the generated platform.conf
+. /etc/platform/platform.conf
+
+## Reserve more memory for base processes since the controller has higher
+## memory requirements but cap it to better handle systems with large
+## amounts of memory
+TOTALMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024)}')
+
+if [ -e /sys/devices/system/node/node0 ]; then
+  RESERVEDMEM=$(grep MemTotal /sys/devices/system/node/node0/meminfo | awk '{printf "%d\n", $4/1024}')
+else
+  RESERVEDMEM=$(grep MemTotal /proc/meminfo | awk '{print int($2/1024/4)}')
+fi
+
+if [ ${RESERVEDMEM} -lt 6144 ]; then
+    RESERVEDMEM=6144
+elif [ ${RESERVEDMEM} -gt 14500 ]; then
+    RESERVEDMEM=14500
+elif [ ${RESERVEDMEM} -gt 8192 ]; then
+    RESERVEDMEM=8192
+fi
+
+sed -i -e "s#\(WORKER_BASE_RESERVED\)=.*#\1=(\"node0:${RESERVEDMEM}MB:1\" \"node1:2000MB:0\" \"node2:2000MB:0\" \"node3:2000MB:0\")#g" /etc/platform/worker_reserved.conf
+
+# Update WORKER_CPU_LIST
+N_CPUS=$(cat /proc/cpuinfo 2>/dev/null | awk '/^[pP]rocessor/ { n +=1 } END { print (n>0) ? n : 1}')
+sed -i "s/^WORKER_CPU_LIST=.*/WORKER_CPU_LIST=\"0-$((N_CPUS-1))\"/" /etc/platform/worker_reserved.conf
+
+%end
+
+
+# Template from: post_usb_controller.cfg
+%pre --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+touch /tmp/repo-include
+
+if [ -d ${srcdir}/patches ]; then
+    echo "repo --name=updates --baseurl=file://${srcdir}/patches/" > /tmp/repo-include
+fi
+
+%end
+
+# Repository arguments from %pre
+%include /tmp/repo-include
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+mgmt_dev=none
+
+# Persist the boot device to the platform configuration. This will get
+# overwritten when config_controller is run.
+echo management_interface=$mgmt_dev >> /etc/platform/platform.conf
+
+# persist the default http port number to platform configuration. This
+# will get overwritten when config_controller is run.
+echo http_port=8080 >> /etc/platform/platform.conf
+
+# Build networking scripts
+cat << EOF > /etc/sysconfig/network-scripts/ifcfg-lo
+DEVICE=lo
+IPADDR=127.0.0.1
+NETMASK=255.0.0.0
+NETWORK=127.0.0.0
+BROADCAST=127.255.255.255
+ONBOOT=yes
+IPV6_AUTOCONF=no
+NAME=loopback
+EOF
+
+%end
+
+
+# Note, this section is different and replaced with a wget
+# if doing the initial install off the network
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -d $srcdir/Packages ] ; then
+    mkdir -p /mnt/sysimage/www/pages/feed/rel-19.12
+    cp -r $srcdir/Packages /mnt/sysimage/www/pages/feed/rel-19.12/Packages
+    cp -r $srcdir/repodata /mnt/sysimage/www/pages/feed/rel-19.12/repodata
+    cp $srcdir/*.cfg /mnt/sysimage/www/pages/feed/rel-19.12
+fi
+
+if [ -d $srcdir/patches ]; then
+    mkdir -p /mnt/sysimage/www/pages/updates/rel-19.12
+    cp -r $srcdir/patches/Packages /mnt/sysimage/www/pages/updates/rel-19.12/Packages
+    cp -r $srcdir/patches/repodata /mnt/sysimage/www/pages/updates/rel-19.12/repodata
+    mkdir -p /mnt/sysimage/opt/patching
+    cp -r $srcdir/patches/metadata /mnt/sysimage/opt/patching/metadata
+    mkdir -p /mnt/sysimage/opt/patching/packages/19.12
+    
+    find /mnt/sysimage/www/pages/updates/rel-19.12/Packages -name '*.rpm' \
+        | xargs --no-run-if-empty -I files cp --preserve=all files /mnt/sysimage/opt/patching/packages/19.12/
+fi
+
+# Create a uuid specific to this installation
+INSTALL_UUID=`uuidgen`
+echo $INSTALL_UUID > /mnt/sysimage/www/pages/feed/rel-19.12/install_uuid
+echo "INSTALL_UUID=$INSTALL_UUID" >> /mnt/sysimage/etc/platform/platform.conf
+%end
+
+%post
+
+# This is a USB install, so set ONBOOT=yes for network devices.
+# Doing this in the %post so we don't unintentionally setup a
+# network device during the installation.
+for f in /etc/sysconfig/network-scripts/ifcfg-*; do
+    if grep -q '^ONBOOT=' ${f}; then
+        sed -i 's/^ONBOOT=.*/ONBOOT=yes/' ${f}
+    else
+        echo "ONBOOT=yes" >> ${f}
+    fi
+    if grep -q '^IPV6_AUTOCONF=' ${f}; then
+        sed -i 's/^IPV6_AUTOCONF=.*/IPV6_AUTOCONF=no/' ${f}
+    else
+        echo "IPV6_AUTOCONF=no" >> ${f}
+    fi
+done
+
+%end
+
+
+# Template from: post_usb_addon.cfg
+%pre --erroronfail
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+if [ -f ${srcdir}/ks-addon.cfg ]; then
+    cp ${srcdir}/ks-addon.cfg /tmp/
+else
+    cat <<EOF > /tmp/ks-addon.cfg
+# No custom addon included
+EOF
+fi
+%end
+
+%post --nochroot
+if [ -d /mnt/install/source ]; then
+    srcdir=/mnt/install/source
+else
+    srcdir=/run/install/repo
+fi
+
+# Store the ks-addon.cfg for debugging
+mkdir -p /mnt/sysimage/var/log/anaconda
+cp /tmp/ks-addon.cfg /mnt/sysimage/var/log/anaconda/
+%end
+
+%post --erroronfail
+
+# Source common functions
+. /tmp/ks-functions.sh
+
+%include /tmp/ks-addon.cfg
+
+%end
diff --git a/meta-stx/conf/distro/files/syslinux.cfg b/meta-stx/conf/distro/files/syslinux.cfg
new file mode 100644 (file)
index 0000000..cc23c56
--- /dev/null
@@ -0,0 +1,84 @@
+display splash.cfg
+timeout 0
+F1 help.txt
+F2 devices.txt
+F3 splash.cfg
+serial 0 115200
+
+# Pull in the menu User Interface
+ui vesamenu.c32
+
+menu title Select kernel options and boot kernel
+menu tabmsg Press [Tab] to edit, [Return] to select, [ESC] to return to previous menu
+
+# Dark grey
+menu background   #ff555555
+
+# ----------------- NOTE -----------------
+# If you are updating label numbers, make sure that controllerconfig/clone.py
+# is in sync with your changes (only serial console entries).
+#    STANDARD_STANDARD = '0'
+#    STANDARD_EXTENDED = 'S0'
+#    AIO_STANDARD = '2'
+#    AIO_EXTENDED = 'S2'
+#    AIO_LL_STANDARD = '4'
+#    AIO_LL_EXTENDED = 'S4'
+# ----------------------------------------
+
+
+# Standard Controller menu
+menu begin
+  menu title Standard Controller Configuration
+
+  # Serial Console submenu
+  label 0
+    menu label Serial Console
+    menu disable
+    kernel /bzImage
+    append initrd=/initrd rootwait console=ttyS0,115200 inst.text serial inst.stage2=hd:LABEL=oe_iso_boot boot_device=sda rootfs_device=sda biosdevname=0 usbcore.autosuspend=-1 inst.gpt security_profile=standard user_namespace.enable=1 ks=/installer-config/controller_ks.cfg
+  
+  # Graphical Console submenu
+  label 1
+    menu label Graphical Console
+    menu disable
+    kernel /bzImage
+    append initrd=/initrd rootwait console=tty0 inst.text inst.stage2=hd:LABEL=oe_iso_boot boot_device=sda rootfs_device=sda biosdevname=0 usbcore.autosuspend=-1 inst.gpt security_profile=standard user_namespace.enable=1 ks=/installer-config/controller_ks.cfg
+menu end
+
+menu SEPARATOR
+
+# AIO Controller menu
+menu begin
+  menu title All-in-one Controller Configuration
+
+  # Serial Console submenu
+  label 2
+    menu label Serial Console
+    kernel /bzImage
+    append initrd=/initrd rootwait console=ttyS0,115200 inst.text serial inst.stage2=hd:LABEL=oe_iso_boot boot_device=sda rootfs_device=sda biosdevname=0 usbcore.autosuspend=-1 inst.gpt security_profile=standard user_namespace.enable=1 ks=/installer-config/ks.cfg
+  
+  # Graphical Console submenu
+  label 3
+    menu label Graphical Console
+    kernel /bzImage
+    append initrd=/initrd rootwait console=tty0 inst.text inst.stage2=hd:LABEL=oe_iso_boot boot_device=sda rootfs_device=sda biosdevname=0 usbcore.autosuspend=-1 inst.gpt security_profile=standard user_namespace.enable=1 ks=/installer-config/ks.cfg
+menu end
+
+menu SEPARATOR
+
+# AIO (Low Latency) Controller menu
+menu begin
+  menu title All-in-one (lowlatency) Controller Configuration
+
+  # Serial Console submenu
+  label 4
+    menu label Serial Console
+    kernel /bzImage
+    append initrd=/initrd rootwait console=ttyS0,115200 inst.text serial inst.stage2=hd:LABEL=oe_iso_boot boot_device=sda rootfs_device=sda biosdevname=0 usbcore.autosuspend=-1 inst.gpt security_profile=standard user_namespace.enable=1 ks=/installer-config/aio_lowlatency_ks.cfg
+
+  # Graphical Console submenu
+  label 5
+    menu label Graphical Console
+    kernel /bzImage
+    append initrd=/initrd rootwait console=tty0 inst.text inst.stage2=hd:LABEL=oe_iso_boot boot_device=sda rootfs_device=sda biosdevname=0 usbcore.autosuspend=-1 inst.gpt security_profile=standard user_namespace.enable=1 ks=/installer-config/aio_lowlatency_ks.cfg
+menu end
diff --git a/meta-stx/conf/distro/include/stx-features.inc b/meta-stx/conf/distro/include/stx-features.inc
new file mode 100644 (file)
index 0000000..531b170
--- /dev/null
@@ -0,0 +1,69 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+EXTRA_IMAGE_FEATURES += " empty-root-password"
+EXTRA_IMAGE_FEATURES += " allow-empty-password"
+EXTRA_IMAGE_FEATURES += " allow-root-login"
+
+#Systemd system
+VIRTUAL-RUNTIME_init_manager = "systemd"
+# A leading space is required on _append values: BitBake appends verbatim,
+# so "systemd" without it would fuse with the previous feature token.
+DISTRO_FEATURES_NATIVE_append = " systemd"
+DISTRO_FEATURES_append = " systemd"
+DISTRO_FEATURES_BACKFILL_CONSIDERED_append = " sysvinit"
+
+#Security
+# INITRAMFS_IMAGE = "secure-core-image-initramfs"
+# DISTRO_FEATURES_append = " selinux"
+#DISTRO_FEATURES_NATIVE_append = "ima tpm tpm2 efi-secure-boot luks"
+#DISTRO_FEATURES_append += "ima tpm tpm2 efi-secure-boot luks modsign"
+#MACHINE_FEATURES_NATIVE_append = "efi"
+#MACHINE_FEATURES_append = "efi"
+#INITRAMFS_SCRIPTS = "initramfs-live-boot \
+#                     initramfs-live-install \
+#                    initramfs-live-install-efi \
+#                    "
+
+#SECURE_CORE_IMAGE_EXTRA_INSTALL ?= "\
+#    packagegroup-efi-secure-boot \
+#    packagegroup-tpm \
+#    packagegroup-tpm2 \
+#    packagegroup-ima \
+#    packagegroup-luks \
+#    "
+
+
+# virtualization and cloud stuff
+DISTRO_FEATURES_append = " virtualization"
+DISTRO_FEATURES_append = " kvm"
+DISTRO_FEATURES_append = " openstack"
+PREFERRED_PROVIDER_virtual/containerd = "containerd-opencontainers"
+
+#Graphics and misc
+#DISTRO_FEATURES_append = " x11 opengl"
+#EXTRA_IMAGE_FEATURES_append = " x11-base"
+#VIRTUAL-RUNTIME_graphical_init_manager = "lxdm"
+
+PACKAGE_CLASSES = "package_rpm"
+DISTRO_FEATURES_append = " bluez pam largefile opengl"
+
+#misc
+# PREFERRED_PROVIDER_virtual/containerd is already set in the
+# "virtualization and cloud stuff" section above; do not repeat it here.
+PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
+
+VIRTUAL-RUNTIME_syslog = "syslog-ng"
+VIRTUAL-RUNTIME_vim = "vim"
+
+DISTRO_FEATURES_append = " selinux"
+PREFERRED_PROVIDER_virtual/refpolicy ?= "refpolicy-mls"
diff --git a/meta-stx/conf/distro/include/stx-preferred-vers.inc b/meta-stx/conf/distro/include/stx-preferred-vers.inc
new file mode 100644 (file)
index 0000000..7310247
--- /dev/null
@@ -0,0 +1,62 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PREFERRED_VERSION_ceph = "13.2.2"
+PREFERRED_VERSION_keyutils = "1.6"
+PREFERRED_VERSION_python-voluptuous = "0.8.9"
+PREFERRED_VERSION_python3-cherrypy = "18.2.0"
+PREFERRED_VERSION_python-cheroot = "7.0.0"
+PREFERRED_VERSION_python3-cheroot = "7.0.0"
+PREFERRED_VERSION_python-cherrypy = "git"
+# Fixed variable name: the suffix must match the recipe name
+# (python-keystoneauth1); "pythonkeystoneauth1" matched no recipe,
+# so the version pin was silently ignored.
+PREFERRED_VERSION_python-keystoneauth1 = "3.17.1"
+PREFERRED_VERSION_drbd-utils = "8.4.3rc1"
+PREFERRED_VERSION_python-pyyaml = "3.13"
+PREFERRED_VERSION_python-cmd2 = "0.6.8"
+PREFERRED_VERSION_python-expect = "4.6.0"
+PREFERRED_VERSION_python-pika = "0.10.0%"
+PREFERRED_VERSION_python-keyring = "5.3"
+PREFERRED_VERSION_python-barbican = "8.0.%"
+PREFERRED_VERSION_python-keystone = "15.0.%"
+PREFERRED_VERSION_python-flask = "1.0.2"
+PREFERRED_VERSION_python-keystonemiddleware = "5.1.%"
+PREFERRED_VERSION_python-oslo.cache= "1.26.%"
+PREFERRED_VERSION_python-oslo.concurrency= "3.26.%"
+PREFERRED_VERSION_python-oslo.log= "3.38.%"
+PREFERRED_VERSION_python-oslo.middleware = "3.31.%"
+PREFERRED_VERSION_python-oslo.serialization = "2.23.%"
+PREFERRED_VERSION_python-oslo.policy = "1.43.%"
+PREFERRED_VERSION_python-pysaml2 = "4.5.%"
+PREFERRED_VERSION_python-sqlalchemy = "1.1.17"
+PREFERRED_VERSION_python-werkzeug = "0.14.%"
+PREFERRED_VERSION_python-neutronclient = "6.12.%"
+PREFERRED_VERSION_python-oslo.i18n = "3.20.%"
+PREFERRED_VERSION_python-iso8601 = "0.1.12"
+PREFERRED_VERSION_python-six = "1.11.%"
+PREFERRED_VERSION_python-docker = "3.3.0"
+PREFERRED_VERSION_python-pyudev = "0.16.1"
+PREFERRED_VERSION_python-django = "1.11.20+%"
+PREFERRED_VERSION_python-django-babel = "0.6.2+%"
+PREFERRED_VERSION_python-pysnmp = "4.2.5"
+PREFERRED_VERSION_python-dateutil = "2.8.1"
+PREFERRED_VERSION_python-adal = "1.0.2"
+# NOTE(review): python-sqlalchemy is already pinned to "1.1.17" earlier in
+# this file; this later assignment would silently override it with "1.0.2"
+# during parsing. Commented out pending confirmation of the intended version.
+#PREFERRED_VERSION_python-sqlalchemy = "1.0.2"
+PREFERRED_VERSION_python-osprofiler = "2.3.0+%"
+PREFERRED_VERSION_python-amqp = "2.5.2"
+PREFERRED_VERSION_python-ryu = "4.24+%"
+PREFERRED_VERSION_docker-distribution = "v2.6.2"
+# Fixed typo: "kuberenetes" matched no recipe, so the pin was ignored.
+PREFERRED_VERSION_kubernetes = "1.16.%"
+PREFERRED_VERSION_ruby = "2.0.0-p648"
+PREFERRED_VERSION_ruby-native = "2.0.0-p648"
+PREFERRED_VERSION_puppet = "4.8.2"
diff --git a/meta-stx/conf/distro/poky-stx.conf b/meta-stx/conf/distro/poky-stx.conf
new file mode 100644 (file)
index 0000000..8eb85dc
--- /dev/null
@@ -0,0 +1,97 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+require conf/distro/poky.conf
+DISTRO = "poky-stx"
+DISTRO_NAME = "Stx (Poky Starlingx Distro)"
+DISTRO_VERSION = "2.7.3"
+DISTRO_CODENAME = "warrior"
+
+MAINTAINER = "zbsarashki <zbsarashki@gmail.com>"
+
+require conf/distro/include/stx-features.inc
+require conf/distro/include/stx-preferred-vers.inc
+
+# For packagegroup-basic
+TASK_BASIC_SSHDAEMON = "openssh-sshd openssh-sftp openssh-sftp-server"
+
+# For iso image
+#KERNEL_FEATURES += "features/overlayfs/overlayfs.scc"
+
+# Disable services by default
+SYSTEMD_AUTO_ENABLE_pn-haproxy = "disable"
+SYSTEMD_AUTO_ENABLE_pn-lighttpd = "disable"
+
+# networking sysv service is required
+SYSTEMD_DISABLED_SYSV_SERVICES_remove = "networking"
+
+# /var/log is mounted as a logical volume for stx,
+# so do not link to /var/volatile/log.
+VOLATILE_LOG_DIR = "no"
+
+# We don't need extra space for iso image
+IMAGE_OVERHEAD_FACTOR = "1.05"
+
+# Add extra users and groups for stx
+INHERIT += " extrausers-config"
+SYSADMIN_P = "4SuW8cnXFyxsk"
+
+EXTRA_USERS_PARAMS_CONFIG = "\
+       groupadd -f -g 345 sys_protected; \
+       useradd -m -g sys_protected -G root \
+           -d /home/sysadmin -p ${SYSADMIN_P} \
+           -s /bin/sh sysadmin; \
+       useradd -p '' sysinv; \
+       groupadd sysinv; \
+       usermod -a -G sysinv sysinv; \
+       useradd -p '' ceph; \
+       groupadd ceph; \
+       usermod -a -G ceph ceph; \
+       useradd -p '' haproxy; \
+       groupadd haproxy; \
+       usermod -a -G haproxy haproxy; \
+       useradd -p '' patching; \
+       groupadd patching; \
+       usermod -a -G patching patching; \
+       useradd -p '' nfv; \
+       groupadd nfv; \
+       usermod -a -G nfv nfv; \
+       useradd -p '' www; \
+       groupadd www; \
+       usermod -a -G www www; \
+       usermod -a -G sys_protected sysadmin; \
+       usermod -a -G sys_protected sysinv; \
+       usermod -a -G sys_protected www; \
+       usermod -a -G sys_protected nfv; \
+       usermod -a -G sys_protected patching; \
+       usermod -a -G sys_protected haproxy; \
+       usermod -P root root; \
+       \
+       groupadd -r -g 128 nscd; \
+       useradd -M -o -r -d / -s /sbin/nologin -c 'NSCD Daemon' -u 28 -g nscd nscd; \
+       "
+
+# Used by:
+# neutron-init
+# glance-init
+CONTROLLER_IP = "127.0.0.1"
+HELM_TOOL_KIT_VERSION = "0.1.0"
+TIS_PATCH_VER = "7"
+HELM_REPO = "stx-platform"
+STX_REL = "19.12"
+
+# For the stx /etc/build.info
+STX_ID = "3.0"
+STX_BUILD_DATE := "${@time.strftime('%Y-%m-%d %H:%M:%S',time.gmtime())}"
diff --git a/meta-stx/conf/layer.conf b/meta-stx/conf/layer.conf
new file mode 100644 (file)
index 0000000..480f8ae
--- /dev/null
@@ -0,0 +1,68 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# We have a conf and classes directory, add to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have recipes-* directories, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+            ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "stx-layer"
+BBFILE_PATTERN_stx-layer := "^${LAYERDIR}/"
+BBFILE_PRIORITY_stx-layer = "5"
+
+# Layer dependencies for the full StarlingX build
+# (networking-layer deduplicated: it was listed twice)
+LAYERDEPENDS_STX = "\
+	core \
+	dpdk \
+	networking-layer \
+	openembedded-layer \
+	filesystems-layer \
+	perl-layer \
+	meta-python \
+	webserver \
+	virtualization-layer \
+	cloud-services-layer \
+	openstack-layer \
+"
+
+# Reduced dependency set used when building with meta-anaconda
+LAYERDEPENDS_STX_ANACONDA = "\
+	core \
+	filesystems-layer \
+	meta-python \
+	networking-layer \
+	openembedded-layer \
+"
+
+LAYERDEPENDS_stx-layer = "${@bb.utils.contains('BBFILE_COLLECTIONS', 'meta-anaconda', '${LAYERDEPENDS_STX_ANACONDA}', '${LAYERDEPENDS_STX}', d)}"
+
+# This should only be incremented on significant changes that will
+# cause compatibility issues with other layers
+# Fixed suffix: LAYERVERSION_* must use the collection name ("stx-layer")
+# declared in BBFILE_COLLECTIONS; "LAYERVERSION_stx-version" had no effect.
+LAYERVERSION_stx-layer = "1"
+LAYERSERIES_COMPAT_stx-layer = "thud warrior"
+
+LAYER_PATH_meta-stx = "${LAYERDIR}"
+
+# Masked recipes:
+BBMASK += "/meta-python2/recipes-core/images/meta-python-ptest-image.bb"
+BBMASK += "/meta-python2/recipes-core/images/meta-python-image-base.bb"
+BBMASK += "/meta-python2/recipes-core/images/meta-python-image.bb"
+BBMASK += "/meta-stak-common/recipes-containers/kubernetes/kubernetes_git.bbappend"
+BBMASK += "/meta-cloud-services/meta-openstack/recipes-devtools/python/python-cephclient_0.1.0.5.bb"
+BBMASK += "/meta-openstack/recipes-devtools/qemu/qemu_2.%.bbappend"
+BBMASK += "/meta-cloud-services/meta-openstack/recipes-kernel/linux/linux-yocto_4.18.bbappend"
+BBMASK += "/meta-selinux/recipes-graphics/mesa/mesa_%.bbappend"
+BBMASK += "/meta-virtualization/recipes-containers/docker-distribution/docker-distribution_git.bb"
diff --git a/meta-stx/recipes-bsp/grub/grub-efi_2.02.bbappend b/meta-stx/recipes-bsp/grub/grub-efi_2.02.bbappend
new file mode 100644 (file)
index 0000000..e8e0914
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+TRANSFORM_NAME = "s,grub,grub2,"
+EXTRA_OECONF += "--program-transform-name=${TRANSFORM_NAME} \
+                "
diff --git a/meta-stx/recipes-bsp/grub/grub_2.02.bbappend b/meta-stx/recipes-bsp/grub/grub_2.02.bbappend
new file mode 100644 (file)
index 0000000..df7354c
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+TRANSFORM_NAME = "s,grub,grub2,"
+EXTRA_OECONF += "--program-transform-name=${TRANSFORM_NAME}"
+
+do_install_append() {
+    for file in ${D}${bindir}/grub2-* ${D}${sbindir}/grub2-*; do
+        ln -sf $(basename ${file}) $(echo ${file}|sed 's/grub2/grub/')
+    done
+}
+
+FILES_${PN}-editenv = "${bindir}/grub2-editenv"
+
+pkg_postinst_ontarget_${PN}() {
+       grub-mkconfig -o /boot/grub/grub.cfg
+}
diff --git a/meta-stx/recipes-connectivity/etcd/etcd_git.bb b/meta-stx/recipes-connectivity/etcd/etcd_git.bb
new file mode 100644 (file)
index 0000000..478f2ab
--- /dev/null
@@ -0,0 +1,107 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "A distributed key-value store for shared config and service discovery"
+DESCRIPTION = " \
+    etcd is a distributed reliable key-value store for the most critical data \
+    of a distributed system, with a focus on being: \
+    \
+    * Simple: well-defined, user-facing API (gRPC) \
+    * Secure: automatic TLS with optional client cert authentication \
+    * Fast: benchmarked 10,000 writes/sec \
+    * Reliable: properly distributed using Raft \
+"
+HOMEPAGE = "https://github.com/coreos/etcd"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/import/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = " \
+    git://github.com/coreos/etcd.git;branch=release-3.3 \
+    "
+
+SRCREV = "94745a4eed0425653b3b4275a208d38babceeaec"
+PV = "3.3.15+git${SRCPV}"
+
+S = "${WORKDIR}/git"
+
+inherit go goarch systemd useradd
+
+USERADD_PACKAGES = "${PN}"
+USERADD_PARAM_${PN} = " \
+    --system --shell ${sbindir}/nologin --comment 'etcd user' \
+    --home-dir ${localstatedir}/lib --groups etcd --gid etcd etcd \
+    "
+GROUPADD_PARAM_${PN} = "etcd"
+
+TARGET_CC_ARCH += "${LDFLAGS}"
+GO_IMPORT = "import"
+
+do_compile() {
+       export GOARCH="${TARGET_GOARCH}"
+
+       # Setup vendor directory so that it can be used in GOPATH.
+       #
+       # Go looks in a src directory under any directory in GOPATH but
+       # uses 'vendor' instead of 'vendor/src'. We can fix this with a symlink.
+       #
+       # We also need to link in the ipallocator directory as that is not under
+       # a src directory.
+       export GOPATH="${B}/src/import/"
+       mkdir -p ${B}/src/import/src/github.com/coreos/
+       ln -s ${S}/src/import ${B}/src/import/src/github.com/coreos/etcd
+
+       # Pass the needed cflags/ldflags so that cgo
+       # can find the needed headers files and libraries
+       export CGO_ENABLED="1"
+       export CFLAGS=""
+       export LDFLAGS=""
+       export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+       export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+       if [ "${TARGET_ARCH}" = "x86_64" ]; then
+               export GOARCH="amd64"
+       elif [ "${TARGET_ARCH}" = "i586" ]; then
+               export GOARCH="386"
+       fi
+
+       ./src/import/build
+}
+
+do_install() {
+	# Install the etcd daemon and client CLI built by do_compile
+	install -d ${D}/${bindir}
+	install -m 0755 ${B}/bin/etcd ${D}/${bindir}/etcd
+	install -m 0755 ${B}/bin/etcdctl ${D}/${bindir}/etcdctl
+
+	# Ship the upstream systemd unit from the source tree
+	install -d ${D}${systemd_system_unitdir}
+	install -m 0644 ${S}/src/import/contrib/systemd/etcd.service ${D}${systemd_system_unitdir}
+
+	# etcd state is in /var/lib/etcd
+	# tmpfiles.d entry recreates the state directory (owned etcd:etcd) at boot
+	install -d ${D}${sysconfdir}/tmpfiles.d
+	echo "d ${localstatedir}/lib/${BPN} 0755 etcd etcd -" \
+		> ${D}${sysconfdir}/tmpfiles.d/${BPN}.conf
+
+	# we aren't creating a user, so we need to comment out this line
+	# NOTE(review): the comment above looks stale — this recipe inherits
+	# useradd with USERADD_PARAM that does create an 'etcd' user; confirm
+	# whether disabling User= in the unit is still required.
+	sed -i '/User/s/^/#/' ${D}${systemd_unitdir}/system/etcd.service
+}
+
+deltask compile_ptest_base
+
+RDEPENDS_${PN} = "bash"
+
+# During packaging etcd gets the warning "no GNU hash in elf binary"
+# This issue occurs due to compiling without ldflags, but a
+# solution has yet to be found. For now we ignore this error with
+# the line below.
+#INSANE_SKIP_${PN} = "ldflags"
diff --git a/meta-stx/recipes-connectivity/etcd/etcd_git.bbappend b/meta-stx/recipes-connectivity/etcd/etcd_git.bbappend
new file mode 100644 (file)
index 0000000..cc749ee
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+    file://etcd.service \
+    file://etcd.conf \
+    "
+
+SYSTEMD_PACKAGES = "${PN}"
+SYSTEMD_SERVICE_${PN} = "etcd.service"
+SYSTEMD_AUTO_ENABLE_${PN} = "disable"
+
+do_install_append() {
+       install -m 0644 ${WORKDIR}/etcd.service ${D}${systemd_system_unitdir}
+       install -d ${D}${sysconfdir}/etcd
+       install -m 0644 ${WORKDIR}/etcd.conf ${D}${sysconfdir}/etcd
+}
diff --git a/meta-stx/recipes-connectivity/etcd/files/etcd.conf b/meta-stx/recipes-connectivity/etcd/files/etcd.conf
new file mode 100644 (file)
index 0000000..687a4a2
--- /dev/null
@@ -0,0 +1,84 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+#[Member]
+#ETCD_CORS=""
+ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
+#ETCD_WAL_DIR=""
+#ETCD_LISTEN_PEER_URLS="http://localhost:2380"
+ETCD_LISTEN_CLIENT_URLS="http://localhost:2379"
+#ETCD_MAX_SNAPSHOTS="5"
+#ETCD_MAX_WALS="5"
+ETCD_NAME="default"
+#ETCD_SNAPSHOT_COUNT="100000"
+#ETCD_HEARTBEAT_INTERVAL="100"
+#ETCD_ELECTION_TIMEOUT="1000"
+#ETCD_QUOTA_BACKEND_BYTES="0"
+#ETCD_MAX_REQUEST_BYTES="1572864"
+#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
+#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
+#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
+#
+#[Clustering]
+#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380"
+ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379"
+#ETCD_DISCOVERY=""
+#ETCD_DISCOVERY_FALLBACK="proxy"
+#ETCD_DISCOVERY_PROXY=""
+#ETCD_DISCOVERY_SRV=""
+#ETCD_INITIAL_CLUSTER="default=http://localhost:2380"
+#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
+#ETCD_INITIAL_CLUSTER_STATE="new"
+#ETCD_STRICT_RECONFIG_CHECK="true"
+#ETCD_ENABLE_V2="true"
+#
+#[Proxy]
+#ETCD_PROXY="off"
+#ETCD_PROXY_FAILURE_WAIT="5000"
+#ETCD_PROXY_REFRESH_INTERVAL="30000"
+#ETCD_PROXY_DIAL_TIMEOUT="1000"
+#ETCD_PROXY_WRITE_TIMEOUT="5000"
+#ETCD_PROXY_READ_TIMEOUT="0"
+#
+#[Security]
+#ETCD_CERT_FILE=""
+#ETCD_KEY_FILE=""
+#ETCD_CLIENT_CERT_AUTH="false"
+#ETCD_TRUSTED_CA_FILE=""
+#ETCD_AUTO_TLS="false"
+#ETCD_PEER_CERT_FILE=""
+#ETCD_PEER_KEY_FILE=""
+#ETCD_PEER_CLIENT_CERT_AUTH="false"
+#ETCD_PEER_TRUSTED_CA_FILE=""
+#ETCD_PEER_AUTO_TLS="false"
+#
+#[Logging]
+#ETCD_DEBUG="false"
+#ETCD_LOG_PACKAGE_LEVELS=""
+#ETCD_LOG_OUTPUT="default"
+#
+#[Unsafe]
+#ETCD_FORCE_NEW_CLUSTER="false"
+#
+#[Version]
+#ETCD_VERSION="false"
+#ETCD_AUTO_COMPACTION_RETENTION="0"
+#
+#[Profiling]
+#ETCD_ENABLE_PPROF="false"
+#ETCD_METRICS="basic"
+#
+#[Auth]
+#ETCD_AUTH_TOKEN="simple"
diff --git a/meta-stx/recipes-connectivity/etcd/files/etcd.service b/meta-stx/recipes-connectivity/etcd/files/etcd.service
new file mode 100644 (file)
index 0000000..afe51ea
--- /dev/null
@@ -0,0 +1,18 @@
+[Unit]
+Description=Etcd Server
+After=network.target
+After=network-online.target
+Wants=network-online.target
+
+[Service]
+Type=notify
+WorkingDirectory=/var/lib/etcd/
+EnvironmentFile=-/etc/etcd/etcd.conf
+User=etcd
+# set GOMAXPROCS to number of processors
+ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-connectivity/haproxy/files/haproxy.cfg b/meta-stx/recipes-connectivity/haproxy/files/haproxy.cfg
new file mode 100644 (file)
index 0000000..9f2acb4
--- /dev/null
@@ -0,0 +1,86 @@
+# this config needs haproxy-1.1.28 or haproxy-1.2.1
+
+global
+       log 127.0.0.1   local0
+       log 127.0.0.1   local1 notice
+       #log loghost    local0 info
+       maxconn 4096
+       chroot /usr/share/haproxy
+       uid 99
+       gid 99
+       daemon
+       #debug
+       #quiet
+
+defaults
+       log     global
+       mode    http
+       option  httplog
+       option  dontlognull
+       retries 3
+       option redispatch
+       maxconn 2000
+       timeout connect 5000
+       timeout client 50000
+       timeout server 50000
+
+listen appli1-rewrite
+bind   0.0.0.0:10001
+       cookie  SERVERID rewrite
+       balance roundrobin
+       server  app1_1 192.168.34.23:8080 cookie app1inst1 check inter 2000 rise 2 fall 5
+       server  app1_2 192.168.34.32:8080 cookie app1inst2 check inter 2000 rise 2 fall 5
+       server  app1_3 192.168.34.27:8080 cookie app1inst3 check inter 2000 rise 2 fall 5
+       server  app1_4 192.168.34.42:8080 cookie app1inst4 check inter 2000 rise 2 fall 5
+
+listen appli2-insert
+bind   0.0.0.0:10002
+       option  httpchk
+       balance roundrobin
+       cookie  SERVERID insert indirect nocache
+       server  inst1 192.168.114.56:80 cookie server01 check inter 2000 fall 3
+       server  inst2 192.168.114.56:81 cookie server02 check inter 2000 fall 3
+       capture cookie vgnvisitor= len 32
+
+       option  httpclose               # disable keep-alive
+       rspidel ^Set-cookie:\ IP=       # do not let this cookie tell our internal IP address
+       
+listen appli3-relais
+bind   0.0.0.0:10003
+       dispatch 192.168.135.17:80
+
+listen appli4-backup
+bind 0.0.0.0:10004
+       option  httpchk /index.html
+       option  persist
+       balance roundrobin
+       server  inst1 192.168.114.56:80 check inter 2000 fall 3
+       server  inst2 192.168.114.56:81 check inter 2000 fall 3 backup
+
+listen ssl-relay 
+bind   0.0.0.0:8443
+       option  ssl-hello-chk
+       balance source
+       server  inst1 192.168.110.56:443 check inter 2000 fall 3
+       server  inst2 192.168.110.57:443 check inter 2000 fall 3
+       server  back1 192.168.120.58:443 backup
+
+listen appli5-backup
+bind   0.0.0.0:10005
+       option  httpchk *
+       balance roundrobin
+       cookie  SERVERID insert indirect nocache
+       server  inst1 192.168.114.56:80 cookie server01 check inter 2000 fall 3
+       server  inst2 192.168.114.56:81 cookie server02 check inter 2000 fall 3
+       server  inst3 192.168.114.57:80 backup check inter 2000 fall 3
+       capture cookie ASPSESSION len 32
+       timeout server 20000
+
+       option  httpclose               # disable keep-alive
+       option  checkcache              # block response if set-cookie & cacheable
+
+       rspidel ^Set-cookie:\ IP=       # do not let this cookie tell our internal IP address
+       
+       errorloc        502     http://192.168.114.58/error502.html
+       errorfile       503     /etc/haproxy/errors/503.http
+
diff --git a/meta-stx/recipes-connectivity/haproxy/files/haproxy.service b/meta-stx/recipes-connectivity/haproxy/files/haproxy.service
new file mode 100644 (file)
index 0000000..5d22bc4
--- /dev/null
@@ -0,0 +1,14 @@
+[Unit]
+Description=HAProxy Load Balancer
+After=network.target
+
+[Service]
+ExecStartPre=/usr/sbin/haproxy_gencert.sh
+ExecStartPre=/usr/bin/haproxy -f /etc/haproxy/haproxy.cfg -c -q
+ExecStart=/usr/bin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
+ExecReload=/bin/kill -USR2 $MAINPID
+KillMode=mixed
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-connectivity/haproxy/haproxy-1.7.11/haproxy-1.7.11-tpm-support.patch b/meta-stx/recipes-connectivity/haproxy/haproxy-1.7.11/haproxy-1.7.11-tpm-support.patch
new file mode 100644 (file)
index 0000000..6b48214
--- /dev/null
@@ -0,0 +1,324 @@
+From 2fa8fedba0968d1c6d21d2c7fa33c903f8984815 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Thu, 25 Jul 2019 15:22:49 +0800
+Subject: [PATCH] haproxy tpm support
+
+original author: Kam Nasim <kam.nasim@windriver.com>
+
+rebased for 1.7.11
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ include/types/global.h |  13 +++++
+ src/cfgparse.c         |  28 ++++++++++
+ src/haproxy.c          |  26 ++++++++-
+ src/ssl_sock.c         | 147 +++++++++++++++++++++++++++++++++++++++++++------
+ 4 files changed, 197 insertions(+), 17 deletions(-)
+
+diff --git a/include/types/global.h b/include/types/global.h
+index 10f3a3c..68f2138 100644
+--- a/include/types/global.h
++++ b/include/types/global.h
+@@ -37,6 +37,10 @@
+ #include <import/51d.h>
+ #endif
++#ifdef USE_OPENSSL
++#include <openssl/engine.h>
++#endif
++
+ #ifndef UNIX_MAX_PATH
+ #define UNIX_MAX_PATH 108
+ #endif
+@@ -79,6 +83,14 @@ enum {
+       SSL_SERVER_VERIFY_REQUIRED = 1,
+ };
++// WRS: Define a new TPM configuration structure
++struct tpm_conf {
++    char *tpm_object;
++    char *tpm_engine;
++    EVP_PKEY *tpm_key;
++    ENGINE *tpm_engine_ref;
++};
++
+ /* FIXME : this will have to be redefined correctly */
+ struct global {
+ #ifdef USE_OPENSSL
+@@ -101,6 +113,7 @@ struct global {
+       char *connect_default_ciphers;
+       int listen_default_ssloptions;
+       int connect_default_ssloptions;
++      struct tpm_conf tpm; // tpm configuration
+ #endif
+       unsigned int ssl_server_verify; /* default verify mode on servers side */
+       struct freq_ctr conn_per_sec;
+diff --git a/src/cfgparse.c b/src/cfgparse.c
+index 3489f7e..0209874 100644
+--- a/src/cfgparse.c
++++ b/src/cfgparse.c
+@@ -1923,6 +1923,34 @@ int cfg_parse_global(const char *file, int linenum, char **args, int kwm)
+                               env++;
+               }
+       }
++      else if (!strcmp(args[0], "tpm-object")) {
++              if (global.tpm.tpm_object) {
++                      free(global.tpm.tpm_object);
++              }
++#ifdef USE_OPENSSL
++              if (*(args[1]) && (access(args[1], F_OK) != -1)) {
++                      global.tpm.tpm_object = strdup(args[1]);
++              }
++#else
++              Alert("parsing [%s:%d] : '%s' is not implemented.\n", file, linenum, args[0]);
++              err_code |= ERR_ALERT | ERR_FATAL;
++              goto out;
++#endif
++    }
++      else if (!strcmp(args[0], "tpm-engine")) {
++              if (global.tpm.tpm_engine) {
++                      free(global.tpm.tpm_engine);
++              }
++#ifdef USE_OPENSSL
++              if (*(args[1]) && (access(args[1], F_OK) != -1)) {
++                      global.tpm.tpm_engine = strdup(args[1]);
++              }
++#else
++              Alert("parsing [%s:%d] : '%s' is not implemented.\n", file, linenum, args[0]);
++              err_code |= ERR_ALERT | ERR_FATAL;
++              goto out;
++#endif
++    }
+       else {
+               struct cfg_kw_list *kwl;
+               int index;
+diff --git a/src/haproxy.c b/src/haproxy.c
+index f8a0912..f61dacf 100644
+--- a/src/haproxy.c
++++ b/src/haproxy.c
+@@ -1370,6 +1370,24 @@ static void deinit_stick_rules(struct list *rules)
+       }
+ }
++static void deinit_tpm_engine()
++{
++      /*
++     * if the tpm engine is present then
++     * deinit it, this is needed to
++     * flush the TPM key handle from TPM memory
++     */
++      if (global.tpm.tpm_engine_ref) {
++              ENGINE_finish(global.tpm.tpm_engine_ref);
++      }
++
++      if (global.tpm.tpm_key) {
++              EVP_PKEY_free(global.tpm.tpm_key);
++      }
++      free(global.tpm.tpm_engine);  global.tpm.tpm_engine = NULL;
++      free(global.tpm.tpm_object);  global.tpm.tpm_object = NULL;
++}
++
+ void deinit(void)
+ {
+       struct proxy *p = proxy, *p0;
+@@ -1646,7 +1664,13 @@ void deinit(void)
+               free(uap);
+       }
+-
++    
++      /* if HAProxy was in TPM mode then deinit
++       * that configuration as well.
++       */
++      if (global.tpm.tpm_object && global.tpm.tpm_object != '\0')
++              deinit_tpm_engine();
++    
+       userlist_free(userlist);
+       cfg_unregister_sections();
+diff --git a/src/ssl_sock.c b/src/ssl_sock.c
+index 87b2584..44d0b48 100644
+--- a/src/ssl_sock.c
++++ b/src/ssl_sock.c
+@@ -51,6 +51,7 @@
+ #ifndef OPENSSL_NO_DH
+ #include <openssl/dh.h>
+ #endif
++#include <openssl/engine.h>
+ #include <import/lru.h>
+ #include <import/xxhash.h>
+@@ -2360,6 +2361,80 @@ end:
+       return ret;
+ }
++/*
++ * initialize the TPM engine and load the 
++ * TPM object as private key within the Engine.
++ * Only do this for the first bind since TPM can
++ * only load 3-4 contexes before it runs out of memory
++ */
++static int ssl_sock_load_tpm_key(SSL_CTX *ctx, char **err) {
++      if (!global.tpm.tpm_object || global.tpm.tpm_object[0] == '\0') {
++              /* not in TPM mode */
++              return -1;
++      }
++      if (!global.tpm.tpm_key) {
++              Warning ("Could not find tpm_key; initializing engine\n");
++              /* no key present; load the dynamic TPM engine */
++              if (global.tpm.tpm_engine && global.tpm.tpm_engine[0]) {
++                      ENGINE_load_dynamic();
++                      ENGINE *engine = ENGINE_by_id("dynamic");
++                      if (!engine) {
++                              memprintf(err, "%s Unable to load the dynamic engine "
++                                        "(needed for loading custom TPM engine)\n",
++                                        err && *err ? *err : "");
++                              return 1;
++            }
++
++                      ENGINE_ctrl_cmd_string(engine, "SO_PATH", global.tpm.tpm_engine, 0);
++                      ENGINE_ctrl_cmd_string(engine, "LOAD", NULL, 0);
++                      /* stow away for ENGINE cleanup */
++                      global.tpm.tpm_engine_ref = engine;
++
++                      if (ENGINE_init(engine) != 1) {
++                              const char *error_str = ERR_error_string(ERR_get_error(), NULL); 
++                              memprintf(err, "%s Unable to init the TPM engine (%s). Err: %s\n",
++                                        err && *err ? *err : "", 
++                                        global.tpm.tpm_engine, error_str);
++                              goto tpm_err;
++                      }
++                      EVP_PKEY *pkey = ENGINE_load_private_key(engine, 
++                                                               global.tpm.tpm_object,
++                                                               NULL, NULL);
++                      if (!pkey) {
++                              const char *error_str = ERR_error_string(ERR_get_error(), NULL); 
++                              memprintf(err, "%s Unable to load TPM object (%s). Err: %s\n",
++                                        err && *err ? *err : "", 
++                                        global.tpm.tpm_object, error_str);
++                              goto tpm_err;
++                      }
++                      global.tpm.tpm_key = pkey;
++              }
++              else { /* no TPM engine found */
++                      memprintf(err, "%s TPM engine option not set when TPM mode expected\n",
++                                err && *err ? *err : "");
++                      goto tpm_err;
++              }
++      } 
++
++      if (SSL_CTX_use_PrivateKey(ctx, global.tpm.tpm_key) <= 0){
++              const char *error_str = ERR_error_string(ERR_get_error(),
++                                                 NULL);
++              memprintf(err, "%s Invalid private key provided from TPM engine(%s). Err: %s\n",
++                        err && *err ? *err : "", 
++                        global.tpm.tpm_object, error_str);
++              goto tpm_err;
++      }
++
++      return 0;
++
++tpm_err:
++      ENGINE_finish(global.tpm.tpm_engine_ref);
++      global.tpm.tpm_engine_ref = NULL;
++      EVP_PKEY_free(global.tpm.tpm_key);
++      global.tpm.tpm_key = NULL;
++      return 1;
++}
++
+ static int ssl_sock_load_cert_file(const char *path, struct bind_conf *bind_conf, struct proxy *curproxy, char **sni_filter, int fcount, char **err)
+ {
+       int ret;
+@@ -2372,26 +2447,54 @@ static int ssl_sock_load_cert_file(const char *path, struct bind_conf *bind_conf
+               return 1;
+       }
+-      if (SSL_CTX_use_PrivateKey_file(ctx, path, SSL_FILETYPE_PEM) <= 0) {
+-              memprintf(err, "%sunable to load SSL private key from PEM file '%s'.\n",
+-                        err && *err ? *err : "", path);
+-              SSL_CTX_free(ctx);
+-              return 1;
++      /* NOTE (knasim-wrs): US93721: TPM support
++       * This SSL context applies to SSL frontends only.
++       * If the TPM option is set then the Private key 
++       * is stored in TPM.
++       *
++       * Launch the OpenSSL TPM engine and load the TPM
++       * Private Key. The Public key will still be located
++       * at the provided path and needs to be loaded as
++       * per usual.
++       */
++      if (global.tpm.tpm_object) {
++              ret = ssl_sock_load_tpm_key(ctx, err);
++              if (ret > 0) {
++                      /* tpm configuration failed */
++                      SSL_CTX_free(ctx);
++                      return 1;
++              }
+       }
+-
+-      ret = ssl_sock_load_cert_chain_file(ctx, path, bind_conf, sni_filter, fcount);
+-      if (ret <= 0) {
+-              memprintf(err, "%sunable to load SSL certificate from PEM file '%s'.\n",
+-                        err && *err ? *err : "", path);
+-              if (ret < 0) /* serious error, must do that ourselves */
++      else { /* non TPM mode */
++              if (SSL_CTX_use_PrivateKey_file(ctx, path, SSL_FILETYPE_PEM) <= 0) {
++                      memprintf(err, "%sunable to load SSL private key from PEM file '%s'.\n",
++                                err && *err ? *err : "", path);
+                       SSL_CTX_free(ctx);
+-              return 1;
++                      return 1;
++              }
+       }
+-      if (SSL_CTX_check_private_key(ctx) <= 0) {
+-              memprintf(err, "%sinconsistencies between private key and certificate loaded from PEM file '%s'.\n",
+-                        err && *err ? *err : "", path);
+-              return 1;
++      ret = ssl_sock_load_cert_chain_file(ctx, path, bind_conf, sni_filter, fcount);
++              if (ret <= 0) {
++                      memprintf(err, "%sunable to load SSL certificate from PEM file '%s'.\n",
++                                        err && *err ? *err : "", path);
++                      if (ret < 0) /* serious error, must do that ourselves */
++                              SSL_CTX_free(ctx);
++                      return 1;
++              }
++
++      /*
++       * only match the private key to the public key
++       * for non TPM mode. This op would never work for
++       * TPM since the private key has been wrapped, whereas
++       * the public key is still the original one.
++       */
++      if (!global.tpm.tpm_object) {
++              if (SSL_CTX_check_private_key(ctx) <= 0) {
++                      memprintf(err, "%sinconsistencies between private key and certificate loaded from PEM file '%s'.\n",
++                                err && *err ? *err : "", path);
++                      return 1;
++              }
+       }
+       /* we must not free the SSL_CTX anymore below, since it's already in
+@@ -3068,6 +3171,18 @@ int ssl_sock_prepare_srv_ctx(struct server *srv, struct proxy *curproxy)
+               cfgerr++;
+               return cfgerr;
+       }
++
++      /* NOTE (knasim-wrs): US93721: TPM support
++       * This SSL context applies to SSL backends only.
++       * Since Titanium backends don't support SSL, there
++       * is no need to offload these keys in TPM or reuse the
++       * same TPM key for the frontend engine. 
++       * 
++       * If SSL backends are to be supported in the future,
++       * over TPM, then create a new TPM Engine context and
++       * load the backend key in TPM, in a similar fashion to
++       * the frontend key.
++       */
+       if (srv->ssl_ctx.client_crt) {
+               if (SSL_CTX_use_PrivateKey_file(srv->ssl_ctx.ctx, srv->ssl_ctx.client_crt, SSL_FILETYPE_PEM) <= 0) {
+                       Alert("config : %s '%s', server '%s': unable to load SSL private key from PEM file '%s'.\n",
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-connectivity/haproxy/haproxy.inc b/meta-stx/recipes-connectivity/haproxy/haproxy.inc
new file mode 100644 (file)
index 0000000..b94dd36
--- /dev/null
@@ -0,0 +1,116 @@
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "TCP/HTTP proxy and load balancer for high availability environments"
+DESCRIPTION = " \
+HAProxy is a TCP/HTTP reverse proxy which is particularly suited for high \
+availability environments. Indeed, it can: \
+ - route HTTP requests depending on statically assigned cookies \
+ - spread load among several servers while assuring server persistence \
+   through the use of HTTP cookies \
+ - switch to backup servers in the event a main server fails \
+ - accept connections to special ports dedicated to service monitoring \
+ - stop accepting connections without breaking existing ones \
+ - add, modify, and delete HTTP headers in both directions \
+ - block requests matching particular patterns \
+ - report detailed status to authenticated users from a URI \
+   intercepted by the application \
+"
+HOMEPAGE = "http://www.haproxy.org/"
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=2d862e836f92129cdc0ecccc54eed5e0"
+
+DEPENDS = "libpcre openssl zlib"
+
+MAJOR_VER = "${@'.'.join(d.getVar('PV').split('.')[0:2])}"
+
+SRC_URI = "https://www.haproxy.org/download/${MAJOR_VER}/src/haproxy-${PV}.tar.gz \
+           file://haproxy.service \
+           file://haproxy.cfg \
+          "
+
+inherit systemd useradd
+
+# create a user for running haproxy
+HAP_USER_HOME = "${libdir}/haproxy"
+USERADD_PACKAGES = "${PN}"
+USERADD_PARAM_${PN} = "--system --home ${HAP_USER_HOME} --shell /sbin/nologin \
+                       --groups haproxy --gid haproxy haproxy"
+GROUPADD_PARAM_${PN} = "haproxy"
+
+# for haproxy 1.x
+HAP_TARGET = "linux2628"
+
+EXTRA_OEMAKE = " \
+       'CPU=generic' \
+       'TARGET=${HAP_TARGET}' \
+       'USE_GETADDRINFO=1' \
+       'USE_OPENSSL=1' \
+       'USE_PCRE=1' 'USE_PCRE_JIT=1' \
+       'USE_ZLIB=1' \
+       'USE_LINUX_TPROXY=1' \
+       "
+
+EXTRA_OEMAKE_append_x86 = " USE_REGPARM=1"
+EXTRA_OEMAKE_append_x86-64 = " USE_REGPARM=1"
+
+
+do_configure() {
+       :
+}
+
+do_compile() {
+       oe_runmake CC="${CC}" CFLAGS="${CFLAGS}" SBINDIR="${bindir}" \
+                  PREFIX="${prefix}" \
+                  ZLIB_INC=${STAGING_INCDIR} \
+                  ZLIB_LIB=${STAGING_LIBDIR} \
+                  PCRE_INC=${STAGING_INCDIR} \
+                  PCRE_LIB=${STAGING_LIBDIR} \
+                  SSL_INC=${STAGING_INCDIR} \
+                  SSL_LIB=${STAGING_LIBDIR}
+       oe_runmake -C contrib/halog halog
+       oe_runmake -C contrib/iprange iprange
+}
+
+do_install() {
+       oe_runmake install-bin \
+                  PREFIX="${prefix}" \
+                  SBINDIR="${bindir}" \
+                  DESTDIR=${D} \
+                  INCLUDEDIR=${includedir}
+
+       install -D -m 0644 ${WORKDIR}/haproxy.service ${D}${systemd_system_unitdir}/haproxy.service
+       install -D -m 0644 ${WORKDIR}/haproxy.cfg ${D}${sysconfdir}/haproxy/haproxy.cfg
+
+       # install ssl folder for certificate
+       install -m 700 -d ${D}/${sysconfdir}/ssl/haproxy
+       chown haproxy:haproxy ${D}/${sysconfdir}/ssl/haproxy
+
+       # install halog and iprange
+       install -D -m 755 contrib/halog/halog ${D}${bindir}/halog
+       install -D -m 755 contrib/iprange/iprange ${D}${bindir}/iprange
+}
+
+FILES_${PN} = " \
+       ${bindir} \
+       ${sbindir} \
+       ${sysconfdir} \
+       ${systemd_system_unitdir} \
+       "
+RDEPENDS_${PN} = "openssl"
+
+SYSTEMD_SERVICE_${PN} = "haproxy.service"
+
+INSANE_SKIP_${PN} += "already-stripped"
diff --git a/meta-stx/recipes-connectivity/haproxy/haproxy_1.7.11.bb b/meta-stx/recipes-connectivity/haproxy/haproxy_1.7.11.bb
new file mode 100644 (file)
index 0000000..3a7e029
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+include haproxy.inc
+
+SRC_URI += "file://haproxy-${PV}-tpm-support.patch"
+
+SRC_URI[md5sum] = "25be5ad717a71da89a65c3c24250e2eb"
+SRC_URI[sha256sum] = "d564b8e9429d1e8e13cb648bf4694926b472e36da1079df946bb732927b232ea"
diff --git a/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/0001-include-sys-types.h-for-getting-u_-typedefs.patch b/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/0001-include-sys-types.h-for-getting-u_-typedefs.patch
new file mode 100644 (file)
index 0000000..4ac5290
--- /dev/null
@@ -0,0 +1,27 @@
+From a5e95a42e7bceddc9ecad06694c1a0588f4bafc8 Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Tue, 14 Apr 2015 07:22:47 -0700
+Subject: [PATCH] include sys/types.h for getting u_* typedefs
+
+Upstream-Status: Pending
+
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ cfg.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/cfg.h b/cfg.h
+index d4d4cab..fe49e8f 100644
+--- a/cfg.h
++++ b/cfg.h
+@@ -33,6 +33,7 @@
+ #ifndef _CONF_H_
+ #define _CONF_H_
++#include <sys/types.h>
+ #include "queue.h"
+ struct conf_list_node {
+-- 
+2.1.4
+
diff --git a/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/Set_nobody_user_group.patch b/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/Set_nobody_user_group.patch
new file mode 100644 (file)
index 0000000..4633da9
--- /dev/null
@@ -0,0 +1,18 @@
+Set nobody user and group
+
+Upstream-Status: Inappropriate [configuration]
+
+Signed-off-by: Roy.Li <rongqing.li@windriver.com>
+--- a/idmapd.conf
++++ b/idmapd.conf
+@@ -17,8 +17,8 @@
+ [Mapping]
+-#Nobody-User = nobody
+-#Nobody-Group = nobody
++Nobody-User = nobody
++Nobody-Group = nogroup
+ [Translation]
diff --git a/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/fix-ac-prereq.patch b/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap/fix-ac-prereq.patch
new file mode 100644 (file)
index 0000000..d81c7c5
--- /dev/null
@@ -0,0 +1,13 @@
+Upstream-Status: Inappropriate [configuration]
+
+--- a/configure.in
++++ b/configure.in
+@@ -1,7 +1,7 @@
+ #                                               -*- Autoconf -*-
+ # Process this file with autoconf to produce a configure script.
+-AC_PREREQ([2.68])
++AC_PREREQ([2.65])
+ AC_INIT([libnfsidmap],[0.25],[linux-nfs@vger.kernel.org])
+ AC_CONFIG_SRCDIR([nfsidmap.h])
+ AC_CONFIG_MACRO_DIR([m4])
diff --git a/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap_0.25.bb b/meta-stx/recipes-connectivity/libnfsidmap/libnfsidmap_0.25.bb
new file mode 100644 (file)
index 0000000..5217a9a
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "NFS id mapping library"
+HOMEPAGE = "http://www.citi.umich.edu/projects/nfsv4/linux/"
+SECTION = "libs"
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://COPYING;md5=d9c6a2a0ca6017fda7cd905ed2739b37"
+
+SRC_URI = "http://www.citi.umich.edu/projects/nfsv4/linux/libnfsidmap/${BPN}-${PV}.tar.gz \
+           file://fix-ac-prereq.patch \
+           file://Set_nobody_user_group.patch \
+           file://0001-include-sys-types.h-for-getting-u_-typedefs.patch \
+          "
+
+SRC_URI[md5sum] = "2ac4893c92716add1a1447ae01df77ab"
+SRC_URI[sha256sum] = "656d245d84400e1030f8f40a5a27da76370690c4a932baf249110f047fe7efcf"
+
+UPSTREAM_CHECK_URI = "http://www.citi.umich.edu/projects/nfsv4/linux/libnfsidmap/"
+
+inherit autotools
+
+EXTRA_OECONF = "--disable-ldap"
+
+do_install_append () {
+       install -d ${D}${sysconfdir}/
+       install -m 0644 ${WORKDIR}/${BPN}-${PV}/idmapd.conf ${D}${sysconfdir}/idmapd.conf
+}
+
diff --git a/meta-stx/recipes-connectivity/nfs-utils/nfs-utils_%.bbappend b/meta-stx/recipes-connectivity/nfs-utils/nfs-utils_%.bbappend
new file mode 100644 (file)
index 0000000..bd4ca24
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append() {
+       mv ${D}/${sbindir}/sm-notify ${D}/${sbindir}/nfs-utils-client_sm-notify
+}
diff --git a/meta-stx/recipes-connectivity/openssl/openssl10_1.0.%.bbappend b/meta-stx/recipes-connectivity/openssl/openssl10_1.0.%.bbappend
new file mode 100644 (file)
index 0000000..7490731
--- /dev/null
@@ -0,0 +1,36 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SYSROOT_DIRS_BLACKLIST = " ${bindir} ${sysconfdir}"
+
+SYSROOT_PREPROCESS_FUNCS += "openssl10_avoid_conflict"
+
+openssl10_avoid_conflict () {
+
+	# For libraries remove statics and symlinks to avoid conflict
+
+       rm ${SYSROOT_DESTDIR}${libdir}/libssl.so
+       rm ${SYSROOT_DESTDIR}${libdir}/libcrypto.so
+       rm ${SYSROOT_DESTDIR}${libdir}/libssl.a
+       rm ${SYSROOT_DESTDIR}${libdir}/libcrypto.a
+       #mv ${SYSROOT_DESTDIR}${libdir}/pkgconfig/libcrypto.pc ${SYSROOT_DESTDIR}${libdir}/pkgconfig/libcrypto10.pc 
+	#mv ${SYSROOT_DESTDIR}${libdir}/pkgconfig/libssl.pc ${SYSROOT_DESTDIR}${libdir}/pkgconfig/libssl10.pc 
+       #mv ${SYSROOT_DESTDIR}${libdir}/pkgconfig/openssl.pc ${SYSROOT_DESTDIR}${libdir}/pkgconfig/openssl10.pc 
+       rm -rf ${SYSROOT_DESTDIR}${libdir}/pkgconfig
+       rm -rf ${SYSROOT_DESTDIR}${libdir}/engines 
+       # For headers
+       mkdir -p ${SYSROOT_DESTDIR}${includedir}/openssl10
+       mv ${SYSROOT_DESTDIR}${includedir}/openssl ${SYSROOT_DESTDIR}${includedir}/openssl10
+}
diff --git a/meta-stx/recipes-connectivity/qpid/files/fix-compile-through-disable-cflag-werror.patch b/meta-stx/recipes-connectivity/qpid/files/fix-compile-through-disable-cflag-werror.patch
new file mode 100644 (file)
index 0000000..a8aaf61
--- /dev/null
@@ -0,0 +1,12 @@
+diff -urN qpidc-0.20.orig/configure.ac qpidc-0.20/configure.ac
+--- qpidc-0.20.orig/configure.ac       2019-08-19 13:36:13.592214698 +0800
++++ qpidc-0.20/configure.ac    2019-08-19 13:36:29.160214501 +0800
+@@ -75,7 +75,7 @@
+       # Can't test for -Werror as whether it fails or not depends on what's in
+       # CFLAGS/CXXFLAGS. In any case it's been in gcc for a long time (since 2.95 at least)
+       if test "${enableval}" = yes; then
+-        COMPILER_FLAGS="-Werror"
++        COMPILER_FLAGS=""
+           gl_COMPILER_FLAGS(-pedantic)
+           gl_COMPILER_FLAGS(-Wall)
+           gl_COMPILER_FLAGS(-Wextra)
diff --git a/meta-stx/recipes-connectivity/qpid/qpid_0.20.bbappend b/meta-stx/recipes-connectivity/qpid/qpid_0.20.bbappend
new file mode 100644 (file)
index 0000000..ff1dea6
--- /dev/null
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += "file://fix-compile-through-disable-cflag-werror.patch"
+
+
+EXTRA_OEMAKE += " CPPFLAGS='-std=gnu++98 -Wno-error' LDFLAGS='-std=gnu++98 -Wno-error'"
+TARGET_CC_ARCH += "${LDFLAGS}"
+RDEPENDS_${PN} += " bash"
diff --git a/meta-stx/recipes-containers/docker-distribution/docker-distribution_git.bb b/meta-stx/recipes-containers/docker-distribution/docker-distribution_git.bb
new file mode 100644 (file)
index 0000000..bb56fb8
--- /dev/null
@@ -0,0 +1,93 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+HOMEPAGE = "http://github.com/docker/distribution"
+SUMMARY = "The Docker toolset to pack, ship, store, and deliver content"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d2794c0df5b907fdace235a619d80314"
+
+GO_PKG_PATH = "github.com/docker/distribution"
+GO_IMPORT = "import"
+
+SRCREV_distribution="48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
+SRC_URI = " \
+       git://github.com/docker/distribution.git;branch=release/2.6;name=distribution;destsuffix=git/src/${GO_PKG_PATH} \
+       file://${BPN}.service \
+       file://config.yml \
+       "
+
+PV = "v2.6.2"
+S = "${WORKDIR}/git/src/${GO_PKG_PATH}"
+
+inherit goarch
+inherit go
+
+# Build with an empty BUILDTAGS set.  NOTE(review): this comment/override was
+# inherited from the docker recipe; seccomp/apparmor tags may not apply here.
+EXTRA_OEMAKE="BUILDTAGS=''"
+
+do_compile() {
+       export GOARCH="${TARGET_GOARCH}"
+       export GOPATH="${WORKDIR}/git/"
+       export GOROOT="${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go"
+       # Pass the needed cflags/ldflags so that cgo
+       # can find the needed headers files and libraries
+       export CGO_ENABLED="1"
+       export CFLAGS=""
+       export LDFLAGS=""
+       export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+       export GO_GCFLAGS=""
+       export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+       cd ${S}
+
+       oe_runmake binaries
+}
+
+do_install() {
+       install -d ${D}/${bindir}
+       install ${S}/bin/registry ${D}/${bindir}
+
+       if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+           install -d ${D}${systemd_system_unitdir}
+           install -m 644 ${WORKDIR}/${BPN}.service ${D}/${systemd_system_unitdir}
+       fi
+
+       install -d ${D}/${sysconfdir}/${BPN}/registry/
+       install ${WORKDIR}/config.yml ${D}/${sysconfdir}/${BPN}/registry/config.yml
+
+       # storage for the registry containers
+       install -d ${D}/${localstatedir}/lib/registry/
+}
+
+INSANE_SKIP_${PN} += "ldflags already-stripped"
+
+FILES_${PN} = "\
+       ${bindir}/* \
+       ${systemd_system_unitdir}/${BPN}.service \
+       ${sysconfdir}/${BPN}/* \
+       ${localstatedir}/lib/registry/ \
+       "
+
+SYSTEMD_SERVICE_${BPN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','${BPN}.service','',d)}"
+SYSTEMD_AUTO_ENABLE_${BPN} = "disable"
+
+
+SYSROOT_PREPROCESS_FUNCS += "docker_distribution_sysroot_preprocess"
+
+docker_distribution_sysroot_preprocess () {
+    install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${GO_PKG_PATH}
+    cp -r ${S} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${GO_PKG_PATH})
+}
diff --git a/meta-stx/recipes-containers/docker-distribution/docker-distribution_git.bbappend b/meta-stx/recipes-containers/docker-distribution/docker-distribution_git.bbappend
new file mode 100644 (file)
index 0000000..ab01dcf
--- /dev/null
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PKG_NAME = "github.com/docker/distribution"
+
+SYSROOT_PREPROCESS_FUNCS += "docker_distribution_sysroot_preprocess"
+
+docker_distribution_sysroot_preprocess () {
+    install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+    cp -r ${S} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
diff --git a/meta-stx/recipes-containers/docker-distribution/files/config.yml b/meta-stx/recipes-containers/docker-distribution/files/config.yml
new file mode 100644 (file)
index 0000000..4683478
--- /dev/null
@@ -0,0 +1 @@
+# This is a puppet managed config file
diff --git a/meta-stx/recipes-containers/docker-distribution/files/docker-distribution.service b/meta-stx/recipes-containers/docker-distribution/files/docker-distribution.service
new file mode 100644 (file)
index 0000000..7a48e8b
--- /dev/null
@@ -0,0 +1,13 @@
+[Unit]
+Description=v2 Registry server for Docker
+
+[Service]
+Type=simple
+Environment=REGISTRY_STORAGE_DELETE_ENABLED=true
+ExecStart=/usr/bin/registry serve /etc/docker-distribution/registry/config.yml
+Restart=on-failure
+ExecStartPost=/bin/bash -c 'echo $MAINPID > /var/run/docker-distribution.pid'
+ExecStopPost=/bin/rm -f /var/run/docker-distribution.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-containers/docker-forward-journald/docker-forward-journald_git.bb b/meta-stx/recipes-containers/docker-forward-journald/docker-forward-journald_git.bb
new file mode 100644 (file)
index 0000000..979a69d
--- /dev/null
@@ -0,0 +1,53 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Forward stdin to journald"
+HOMEPAGE = "https://github.com/docker/docker"
+SECTION = "devel"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/forward-journald/LICENSE;md5=e3fc50a88d0a364313df4b21ef20c29e"
+
+PROTOCOL = "https"
+SRCNAME = "forward-journald"
+SRCREV = "77e02a9774a6ca054e41c27f6f319d701f1cbaea"
+PV = "1.10.3+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/projectatomic/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};"
+
+GO_IMPORT = "forward-journald"
+inherit go goarch
+
+do_compile() {
+        mkdir -p _build/src
+        ln -sfn ${S}/src/forward-journald ./_build/src/${SRCNAME}
+        export GOARCH=${TARGET_GOARCH}
+        export CGO_ENABLED="1"
+        export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+        export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+        export GOPATH="${WORKDIR}/build/_build:${STAGING_DIR_TARGET}/${prefix}/local/go"
+        cd _build/src/${SRCNAME}
+        export GOROOT=${STAGING_DIR_TARGET}/${prefix}/local/go
+        go build -ldflags "-B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o ${WORKDIR}/build/bin/forward-journald forward-journald
+}
+
+do_install() {
+       install -m 0755 -d ${D}/${bindir}/
+
+       install -m 0755 bin/forward-journald ${D}/${bindir}/
+}
+
+INSANE_SKIP_${PN} = "ldflags"
diff --git a/meta-stx/recipes-containers/docker/docker-ce_git.bbappend b/meta-stx/recipes-containers/docker/docker-ce_git.bbappend
new file mode 100644 (file)
index 0000000..2c96eee
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append () {
+       # remove the symlink and create actual dir
+       rm -f ${D}${sysconfdir}/docker
+       install -d -m 0755 ${D}${sysconfdir}/docker
+}
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/README b/meta-stx/recipes-containers/kubernetes/files/contrib/README
new file mode 100644 (file)
index 0000000..76bdbe1
--- /dev/null
@@ -0,0 +1,2 @@
+This was originally copied from kubernetes-contrib-v1.16.2.tar.gz at https://github.com/kubernetes/contrib,
+a repository that is now end-of-life, so the tarball can no longer be fetched directly from GitHub.
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/apiserver b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/apiserver
new file mode 100644 (file)
index 0000000..8d2a006
--- /dev/null
@@ -0,0 +1,26 @@
+###
+# kubernetes system config
+#
+# The following values are used to configure the kube-apiserver
+#
+
+# The address on the local server to listen to.
+KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
+
+# The port on the local server to listen on.
+# KUBE_API_PORT="--port=8080"
+
+# Port minions listen on
+# KUBELET_PORT="--kubelet-port=10250"
+
+# Comma separated list of nodes in the etcd cluster
+KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379,http://127.0.0.1:4001"
+
+# Address range to use for services
+KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
+
+# default admission control policies
+KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
+
+# Add your own!
+KUBE_API_ARGS=""
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/config b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/config
new file mode 100644 (file)
index 0000000..8c0a284
--- /dev/null
@@ -0,0 +1,22 @@
+###
+# kubernetes system config
+#
+# The following values are used to configure various aspects of all
+# kubernetes services, including
+#
+#   kube-apiserver.service
+#   kube-controller-manager.service
+#   kube-scheduler.service
+#   kubelet.service
+#   kube-proxy.service
+# logging to stderr means we get it in the systemd journal
+KUBE_LOGTOSTDERR="--logtostderr=true"
+
+# journal message level, 0 is debug
+KUBE_LOG_LEVEL="--v=0"
+
+# Should this cluster be allowed to run privileged docker containers
+KUBE_ALLOW_PRIV="--allow-privileged=false"
+
+# How the controller-manager, scheduler, and proxy find the apiserver
+KUBE_MASTER="--master=http://127.0.0.1:8080"
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/controller-manager b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/controller-manager
new file mode 100644 (file)
index 0000000..1a9e3f2
--- /dev/null
@@ -0,0 +1,7 @@
+###
+# The following values are used to configure the kubernetes controller-manager
+
+# defaults from config and apiserver should be adequate
+
+# Add your own!
+KUBE_CONTROLLER_MANAGER_ARGS=""
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/kubelet b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/kubelet
new file mode 100644 (file)
index 0000000..a623673
--- /dev/null
@@ -0,0 +1,17 @@
+###
+# kubernetes kubelet (minion) config
+
+# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
+KUBELET_ADDRESS="--address=127.0.0.1"
+
+# The port for the info server to serve on
+# KUBELET_PORT="--port=10250"
+
+# You may leave this blank to use the actual hostname
+KUBELET_HOSTNAME="--hostname-override=127.0.0.1"
+
+# Edit the kubelet.kubeconfig to have correct cluster server address
+KUBELET_KUBECONFIG=/etc/kubernetes/kubelet.kubeconfig
+
+# Add your own!
+KUBELET_ARGS="--cgroup-driver=systemd --fail-swap-on=false"
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/kubelet.kubeconfig b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/kubelet.kubeconfig
new file mode 100644 (file)
index 0000000..75fe1b0
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Config
+clusters:
+  - cluster:
+      server: http://127.0.0.1:8080/
+    name: local
+contexts:
+  - context:
+      cluster: local
+    name: local
+current-context: local
+
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/proxy b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/proxy
new file mode 100644 (file)
index 0000000..0342768
--- /dev/null
@@ -0,0 +1,7 @@
+###
+# kubernetes proxy config
+
+# default config should be adequate
+
+# Add your own!
+KUBE_PROXY_ARGS=""
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/scheduler b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/environ/scheduler
new file mode 100644 (file)
index 0000000..f6fc507
--- /dev/null
@@ -0,0 +1,7 @@
+###
+# kubernetes scheduler config
+
+# default config should be adequate
+
+# Add your own!
+KUBE_SCHEDULER_ARGS=""
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-apiserver.service b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-apiserver.service
new file mode 100644 (file)
index 0000000..67c1b84
--- /dev/null
@@ -0,0 +1,27 @@
+[Unit]
+Description=Kubernetes API Server
+Documentation=https://kubernetes.io/docs/concepts/overview/components/#kube-apiserver https://kubernetes.io/docs/reference/generated/kube-apiserver/
+After=network.target
+After=etcd.service
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/apiserver
+User=kube
+ExecStart=/usr/bin/kube-apiserver \
+           $KUBE_LOGTOSTDERR \
+           $KUBE_LOG_LEVEL \
+           $KUBE_ETCD_SERVERS \
+           $KUBE_API_ADDRESS \
+           $KUBE_API_PORT \
+           $KUBELET_PORT \
+           $KUBE_ALLOW_PRIV \
+           $KUBE_SERVICE_ADDRESSES \
+           $KUBE_ADMISSION_CONTROL \
+           $KUBE_API_ARGS
+Restart=on-failure
+Type=notify
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-controller-manager.service b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-controller-manager.service
new file mode 100644 (file)
index 0000000..0ffb81e
--- /dev/null
@@ -0,0 +1,18 @@
+[Unit]
+Description=Kubernetes Controller Manager
+Documentation=https://kubernetes.io/docs/concepts/overview/components/#kube-controller-manager https://kubernetes.io/docs/reference/generated/kube-controller-manager/
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/controller-manager
+User=kube
+ExecStart=/usr/bin/kube-controller-manager \
+           $KUBE_LOGTOSTDERR \
+           $KUBE_LOG_LEVEL \
+           $KUBE_MASTER \
+           $KUBE_CONTROLLER_MANAGER_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-proxy.service b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-proxy.service
new file mode 100644 (file)
index 0000000..6574a7c
--- /dev/null
@@ -0,0 +1,18 @@
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=https://kubernetes.io/docs/concepts/overview/components/#kube-proxy https://kubernetes.io/docs/reference/generated/kube-proxy/
+After=network.target
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/proxy
+ExecStart=/usr/bin/kube-proxy \
+           $KUBE_LOGTOSTDERR \
+           $KUBE_LOG_LEVEL \
+           $KUBE_MASTER \
+           $KUBE_PROXY_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-scheduler.service b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kube-scheduler.service
new file mode 100644 (file)
index 0000000..e933967
--- /dev/null
@@ -0,0 +1,18 @@
+[Unit]
+Description=Kubernetes Scheduler Plugin
+Documentation=https://kubernetes.io/docs/concepts/overview/components/#kube-scheduler https://kubernetes.io/docs/reference/generated/kube-scheduler/
+
+[Service]
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/scheduler
+User=kube
+ExecStart=/usr/bin/kube-scheduler \
+           $KUBE_LOGTOSTDERR \
+           $KUBE_LOG_LEVEL \
+           $KUBE_MASTER \
+           $KUBE_SCHEDULER_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kubelet.service b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/kubelet.service
new file mode 100644 (file)
index 0000000..1082bba
--- /dev/null
@@ -0,0 +1,24 @@
+[Unit]
+Description=Kubernetes Kubelet Server
+Documentation=https://kubernetes.io/docs/concepts/overview/components/#kubelet https://kubernetes.io/docs/reference/generated/kubelet/
+After=docker.service
+Requires=docker.service
+
+[Service]
+WorkingDirectory=/var/lib/kubelet
+EnvironmentFile=-/etc/kubernetes/config
+EnvironmentFile=-/etc/kubernetes/kubelet
+ExecStart=/usr/bin/kubelet \
+           $KUBE_LOGTOSTDERR \
+           $KUBE_LOG_LEVEL \
+           $KUBELET_KUBECONFIG \
+           $KUBELET_ADDRESS \
+           $KUBELET_PORT \
+           $KUBELET_HOSTNAME \
+           $KUBE_ALLOW_PRIV \
+           $KUBELET_ARGS
+Restart=on-failure
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/tmpfiles.d/kubernetes.conf b/meta-stx/recipes-containers/kubernetes/files/contrib/init/systemd/tmpfiles.d/kubernetes.conf
new file mode 100644 (file)
index 0000000..ff7c3a2
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+d /var/run/kubernetes 0755 kube kube -
diff --git a/meta-stx/recipes-containers/kubernetes/files/helm-upload b/meta-stx/recipes-containers/kubernetes/files/helm-upload
new file mode 100644 (file)
index 0000000..4dbacc0
--- /dev/null
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+#
+# Copyright (c) 2018 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# This script takes the names of packaged helm charts as arguments.
+# It installs them in the on-node helm chart repository and regenerates
+# the repository index.
+
+
+# We want to run as the "www" user and scripts can't be setuid.  The
+# sudoers permissions are set up to allow wrsroot to run this script
+# as the "www" user without a password.
+if [ "$USER" != "www" ]; then
+    exec sudo -u www "$0" "$@"
+fi
+
+
+RETVAL=0
+REINDEX=0
+
+REPO_BASE='/www/pages/helm_charts'
+
+# First argument is always the repo where the charts need to be placed
+if [ $# -lt 2 ]; then
+    echo "Usage: helm-upload <repo name> <chart 1> .. <chart N>"
+    exit 1
+fi
+
+# Make sure the repo directory exists
+REPO_DIR="${REPO_BASE}/$1"
+if [ ! -e "$REPO_DIR" ]; then
+    echo "$REPO_DIR doesn't exist."
+    exit 1
+fi
+
+shift 1
+
+for FILE in "$@"; do
+    if [ -r "$FILE" ]; then
+        # QUESTION:  should we disallow overwriting an existing file?
+        # The versions are embedded in the filename, so it shouldn't
+        # cause problems.
+        cp "$FILE" "$REPO_DIR"
+        if [ $? -ne 0 ]; then
+            echo Problem adding $FILE to helm chart registry.
+            RETVAL=1
+        else
+            REINDEX=1
+        fi
+    else
+        echo Cannot read file ${FILE}.
+        RETVAL=1
+    fi
+done
+
+
+# Now re-index the helm repository if we successfully copied in
+# any new charts.
+if [ $REINDEX -eq 1 ]; then
+    /usr/sbin/helm repo index "$REPO_DIR"
+fi
+
+exit $RETVAL
diff --git a/meta-stx/recipes-containers/kubernetes/files/helm.sudo b/meta-stx/recipes-containers/kubernetes/files/helm.sudo
new file mode 100644 (file)
index 0000000..48e02bf
--- /dev/null
@@ -0,0 +1,3 @@
+wrsroot ALL=(www) NOPASSWD: /usr/sbin/helm-upload
+
+Defaults lecture=never, secure_path=/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin
diff --git a/meta-stx/recipes-containers/kubernetes/files/kubeadm.conf b/meta-stx/recipes-containers/kubernetes/files/kubeadm.conf
new file mode 100644 (file)
index 0000000..ffdd12f
--- /dev/null
@@ -0,0 +1,32 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# Note: This dropin only works with kubeadm and kubelet v1.11+
+[Service]
+Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
+Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
+# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
+EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
+# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
+# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
+EnvironmentFile=-/etc/sysconfig/kubelet
+ExecStart=
+ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
+ExecStartPre=-/usr/bin/kubelet-cgroup-setup.sh
+ExecStartPost=/bin/bash -c 'echo $MAINPID > /var/run/kubelet.pid;'
+ExecStopPost=/bin/rm -f /var/run/kubelet.pid
+Restart=always
+StartLimitInterval=0
+RestartSec=10
diff --git a/meta-stx/recipes-containers/kubernetes/files/kubelet-cgroup-setup.sh b/meta-stx/recipes-containers/kubernetes/files/kubelet-cgroup-setup.sh
new file mode 100644 (file)
index 0000000..7efb27a
--- /dev/null
@@ -0,0 +1,115 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# This script does minimal cgroup setup for kubelet. This creates k8s-infra
+# cgroup for a minimal set of resource controllers, and configures cpuset
+# attributes to span all online cpus and nodes. This will do nothing if
+# the k8s-infra cgroup already exists (i.e., assume already configured).
+# NOTE: The creation of directories under /sys/fs/cgroup is volatile, and
+# does not persist reboots. The cpuset.mems and cpuset.cpus is later updated
+# by puppet kubernetes.pp manifest.
+#
+
+# Define minimal path
+PATH=/bin:/usr/bin:/usr/local/bin
+
+# Log info message to /var/log/daemon.log
+function LOG {
+    logger -p daemon.info "$0($$): $@"
+}
+
+# Log error message to /var/log/daemon.log
+function ERROR {
+    logger -s -p daemon.error "$0($$): ERROR: $@"
+}
+
+# Create minimal cgroup directories and configure cpuset attributes
+# pids should be first in the list, since it appears to get auto deleted
+function create_cgroup {
+    local cg_name=$1
+    local cg_nodeset=$2
+    local cg_cpuset=$3
+
+    local CGROUP=/sys/fs/cgroup
+    local CONTROLLERS=("pids" "cpuset" "memory" "cpu,cpuacct" "systemd")
+    local cnt=''
+    local CGDIR=''
+    local RC=0
+
+    # Create the cgroup for required controllers
+    for cnt in ${CONTROLLERS[@]}; do
+        CGDIR=${CGROUP}/${cnt}/${cg_name}
+        if [ -d ${CGDIR} ]; then
+            LOG "Nothing to do, already configured: ${CGDIR}."
+            exit ${RC}
+        fi
+        LOG "Creating: ${CGDIR}"
+        mkdir -p ${CGDIR}
+        RC=$?
+        if [ ${RC} -ne 0 ]; then
+            ERROR "Creating: ${CGDIR}, rc=${RC}"
+            exit ${RC}
+        fi
+    done
+
+    # Customize cpuset attributes
+    LOG "Configuring cgroup: ${cg_name}, nodeset: ${cg_nodeset}, cpuset: ${cg_cpuset}"
+    CGDIR=${CGROUP}/cpuset/${cg_name}
+    local CGMEMS=${CGDIR}/cpuset.mems
+    local CGCPUS=${CGDIR}/cpuset.cpus
+    local CGTASKS=${CGDIR}/tasks
+
+    # Assign cgroup memory nodeset
+    LOG "Assign nodeset ${cg_nodeset} to ${CGMEMS}"
+    /bin/echo ${cg_nodeset} > ${CGMEMS}
+    RC=$?
+    if [ ${RC} -ne 0 ]; then
+        ERROR "Unable to write to: ${CGMEMS}, rc=${RC}"
+        exit ${RC}
+    fi
+
+    # Assign cgroup cpus
+    LOG "Assign cpuset ${cg_cpuset} to ${CGCPUS}"
+    /bin/echo ${cg_cpuset} > ${CGCPUS}
+    RC=$?
+    if [ ${RC} -ne 0 ]; then
+        ERROR "Assigning: ${cg_cpuset} to ${CGCPUS}, rc=${RC}"
+        exit ${RC}
+    fi
+
+    # Set file ownership
+    chown root:root ${CGMEMS} ${CGCPUS} ${CGTASKS}
+    RC=$?
+    if [ ${RC} -ne 0 ]; then
+        ERROR "Setting owner for: ${CGMEMS}, ${CGCPUS}, ${CGTASKS}, rc=${RC}"
+        exit ${RC}
+    fi
+
+    # Set file mode permissions
+    chmod 644 ${CGMEMS} ${CGCPUS} ${CGTASKS}
+    RC=$?
+    if [ ${RC} -ne 0 ]; then
+        ERROR "Setting mode for: ${CGMEMS}, ${CGCPUS}, ${CGTASKS}, rc=${RC}"
+        exit ${RC}
+    fi
+
+    return ${RC}
+}
+
+if [ $UID -ne 0 ]; then
+    ERROR "Require sudo/root."
+    exit 1
+fi
+
+# Configure default kubepods cpuset to span all online cpus and nodes.
+ONLINE_NODESET=$(/bin/cat /sys/devices/system/node/online)
+ONLINE_CPUSET=$(/bin/cat /sys/devices/system/cpu/online)
+
+# Configure kubelet cgroup to match cgroupRoot.
+create_cgroup 'k8s-infra' ${ONLINE_NODESET} ${ONLINE_CPUSET}
+
+exit $?
+
diff --git a/meta-stx/recipes-containers/kubernetes/files/kubernetes-accounting.conf b/meta-stx/recipes-containers/kubernetes/files/kubernetes-accounting.conf
new file mode 100644 (file)
index 0000000..134a219
--- /dev/null
@@ -0,0 +1,19 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+[Manager]
+DefaultCPUAccounting=yes
+DefaultMemoryAccounting=yes
+
diff --git a/meta-stx/recipes-containers/kubernetes/helm_2.13.1.bb b/meta-stx/recipes-containers/kubernetes/helm_2.13.1.bb
new file mode 100644 (file)
index 0000000..7d700f7
--- /dev/null
@@ -0,0 +1,70 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "The Kubernetes Package Manager"
+HOMEPAGE = "https://github.com/kubernetes/helm/releases "
+SECTION = "devel"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=0c7bcb474e766c7d92924a18cd9d3878"
+
+#SRCREV = "618447cbf203d147601b4b9bd7f8c37a5d39fbb4"
+SRCNAME = "helm"
+#PROTOCOL = "https"
+#BRANCH = "release-2.13"
+S = "${WORKDIR}/linux-amd64"
+PV = "2.13.1"
+
+SRC_URI = " \
+       https://get.helm.sh/helm-v2.13.1-linux-amd64.tar.gz \
+       file://helm-upload \
+       file://helm.sudo \
+       "
+# Client: &version.Version{SemVer:"v2.13.1", GitCommit:"618447cbf203d147601b4b9bd7f8c37a5d39fbb4", GitTreeState:"clean"}
+SRC_URI[md5sum] = "ffbe37fe328d99156d14a950bbd8107c"
+SRC_URI[sha256sum] = "c1967c1dfcd6c921694b80ededdb9bd1beb27cb076864e58957b1568bc98925a"
+
+INSANE_SKIP_${PN} = "ldflags"
+INHIBIT_PACKAGE_STRIP = "1"
+INHIBIT_SYSROOT_STRIP = "1"
+INHIBIT_PACKAGE_DEBUG_SPLIT  = "1"
+
+RDEPENDS_${PN} += " bash"
+
+
+do_configure() {
+       :
+}
+
+do_compile() {
+       :
+}
+
+do_install() {
+	install -m 0755 -d ${D}/${sbindir}/
+	install -m 0750 -d ${D}/${sysconfdir}/sudoers.d
+	# sudoers.d fragments must be root-owned and non-writable; use the conventional 0440
+	install -m 0755 ${S}/helm ${D}/${sbindir}/
+	install -m 0440 ${S}/../helm.sudo ${D}/${sysconfdir}/sudoers.d/helm
+	install -m 0755 ${S}/../helm-upload ${D}/${sbindir}/
+}
+
+BBCLASSEXTEND = "native nativesdk"
+
+FILES_${PN} = " \
+       ${sbindir}/helm \
+       ${sbindir}/helm-upload \
+       ${sysconfdir}/sudoers.d \
+       ${sysconfdir}/sudoers.d/helm \
+       "
diff --git a/meta-stx/recipes-containers/kubernetes/kubernetes_git.bbappend b/meta-stx/recipes-containers/kubernetes/kubernetes_git.bbappend
new file mode 100644 (file)
index 0000000..88d6ad8
--- /dev/null
@@ -0,0 +1,114 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+PV = "1.16.2+git${SRCREV_kubernetes}"
+SRCREV_kubernetes = "c97fe5036ef3df2967d086711e6c0c405941e14b"
+
+SRC_URI = "git://github.com/kubernetes/kubernetes.git;branch=release-1.16;name=kubernetes \
+       file://0001-hack-lib-golang.sh-use-CC-from-environment.patch \
+       file://0001-cross-don-t-build-tests-by-default.patch \
+       file://kubernetes-accounting.conf \
+       file://kubeadm.conf \
+       file://kubelet-cgroup-setup.sh \
+       file://contrib/* \
+       "
+
+INSANE_SKIP_${PN} += "textrel"
+INSANE_SKIP_${PN}-misc += "textrel"
+INSANE_SKIP_kubelet += "textrel"
+
+inherit useradd
+
+USERADD_PACKAGES = "${PN}"
+USERADD_PARAM_${PN} = "-r -g kube -d / -s /sbin/nologin -c 'Kubernetes user' kube"
+GROUPADD_PARAM_${PN} = "-r kube"
+
+do_install () {
+	install -d ${D}${bindir}
+	install -d ${D}${systemd_system_unitdir}/
+
+	# Install binaries
+	install -m 755 -D ${S}/src/import/_output/local/bin/${TARGET_GOOS}/${TARGET_GOARCH}/* ${D}/${bindir}
+
+	# kubeadm:
+	install -d -m 0755 ${D}/${sysconfdir}/systemd/system/kubelet.service.d
+	install -m 0644 ${WORKDIR}/kubeadm.conf ${D}/${sysconfdir}/systemd/system/kubelet.service.d
+
+	# kubelet-cgroup-setup.sh
+	install -m 0700 ${WORKDIR}/kubelet-cgroup-setup.sh ${D}/${bindir}
+
+	# install the bash completion (NOTE(review): executes the freshly built target kubectl on the build host -- confirm for cross builds)
+	install -d -m 0755 ${D}${datadir}/bash-completion/completions/
+	${D}${bindir}/kubectl completion bash > ${D}${datadir}/bash-completion/completions/kubectl
+
+	# install config files
+	install -d -m 0755 ${D}${sysconfdir}/${BPN}
+	install -m 644 -t ${D}${sysconfdir}/${BPN} ${WORKDIR}/contrib/init/systemd/environ/*
+
+	# install service files
+	install -d -m 0755 ${D}${systemd_system_unitdir}
+	install -m 0644 -t ${D}${systemd_system_unitdir} ${WORKDIR}/contrib/init/systemd/*.service
+
+	# install the place the kubelet defaults to put volumes
+	install -d ${D}${localstatedir}/lib/kubelet
+
+	# install systemd tmpfiles
+	install -d -m 0755 ${D}${sysconfdir}/tmpfiles.d
+	install -p -m 0644 -t ${D}${sysconfdir}/tmpfiles.d ${WORKDIR}/contrib/init/systemd/tmpfiles.d/kubernetes.conf
+
+	# enable CPU and Memory accounting
+	install -d -m 0755 ${D}/${sysconfdir}/systemd/system.conf.d
+	install -m 0644 ${WORKDIR}/kubernetes-accounting.conf ${D}/${sysconfdir}/systemd/system.conf.d/
+}
+
+SYSTEMD_PACKAGES += "${PN} kube-proxy"
+SYSTEMD_SERVICE_kube-proxy = "kube-proxy.service"
+SYSTEMD_SERVICE_${PN} = "\
+       kube-scheduler.service \
+       kube-apiserver.service \
+       kube-controller-manager.service \
+       "
+SYSTEMD_AUTO_ENABLE_${PN} = "disable"
+SYSTEMD_AUTO_ENABLE_kubelet = "disable"
+SYSTEMD_AUTO_ENABLE_kube-proxy = "disable"
+
+FILES_${PN} += "\
+       ${bindir}/kube-scheduler \
+       ${bindir}/kube-apiserver \
+       ${bindir}/kube-controller-manager \
+       ${bindir}/hyperkube \
+       ${bindir}/kubelet-cgroup-setup.sh \
+       "
+
+FILES_kubectl += "\
+       ${datadir}/bash-completion/completions/kubectl \
+       "
+
+FILES_${PN}-misc = "\
+       ${bindir}/conversion-gen \
+       ${bindir}/openapi-gen \
+       ${bindir}/apiextensions-apiserver \
+       ${bindir}/defaulter-gen \
+       ${bindir}/mounter \
+       ${bindir}/deepcopy-gen \
+       ${bindir}/go-bindata \
+       ${bindir}/go2make \
+       "
+
+RDEPENDS_${PN} += "\
+       bash \
+       kube-proxy \
+       "
diff --git a/meta-stx/recipes-containers/registry-token-server/files/registry-token-server-1.0.0.tar.gz b/meta-stx/recipes-containers/registry-token-server/files/registry-token-server-1.0.0.tar.gz
new file mode 100644 (file)
index 0000000..ba20c20
Binary files /dev/null and b/meta-stx/recipes-containers/registry-token-server/files/registry-token-server-1.0.0.tar.gz differ
diff --git a/meta-stx/recipes-containers/registry-token-server/files/registry-token-server.service b/meta-stx/recipes-containers/registry-token-server/files/registry-token-server.service
new file mode 100644 (file)
index 0000000..477e85d
--- /dev/null
@@ -0,0 +1,19 @@
+[Unit]
+Description=v2 Registry token server for Docker
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/docker-distribution/registry/token_server.conf
+ExecStart=/usr/bin/registry-token-server -addr=${REGISTRY_TOKEN_SERVER_ADDR} \
+    -issuer=${REGISTRY_TOKEN_SERVER_ISSUER} \
+    -endpoint=${REGISTRY_TOKEN_SERVER_KS_ENDPOINT} \
+    -tlscert=${REGISTRY_TOKEN_SERVER_TLSCERT} \
+    -tlskey=${REGISTRY_TOKEN_SERVER_TLSKEY} \
+    -realm=${REGISTRY_TOKEN_SERVER_REALM} \
+    -key=${REGISTRY_TOKEN_SERVER_KEY}
+Restart=on-failure
+ExecStartPost=/bin/bash -c 'echo $MAINPID > /var/run/registry-token-server.pid'
+ExecStopPost=/bin/rm -f /var/run/registry-token-server.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-containers/registry-token-server/files/token-server-certificate.pem b/meta-stx/recipes-containers/registry-token-server/files/token-server-certificate.pem
new file mode 100644 (file)
index 0000000..c40df59
--- /dev/null
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDADCCAegCCQCSevkS4h7LQjANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJY
+WDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBh
+bnkgTHRkMB4XDTE4MDkyMTE0MTYwOFoXDTE5MDkyMTE0MTYwOFowQjELMAkGA1UE
+BhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBD
+b21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKtCbNJ/
+aPEMkZFEtMKRomOh9NgeOv0jYFY5i23fXghtTgdXu9//H3Huz5/KDJ+XEUp2DZgK
+YQ2UHVR+cqj2sFjCllfAVrzmv9FFR0CQpQxqKcxChefVwsMh6XsqF+GzbqzFOx67
+bT39Xb5+spAmDHctFl3nrmyA1wM6e+OXcktC0chILeN+UEyq5Xeng6/BpVnI2UaY
+J1OpfuUrffddy5t0oeuKGZ/xG2g9sL6GMGBeVslOmLg4CBOwq3knUGoOTFYSjHVx
+rU/p4YgUotIUvb4GBsXqbiI7M2NakItTR6mxfcYiKkxfjadQlptFyGucI84mMYx8
+vO3o6TFLfcTYqZ8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAHXZR0U0pyMkYIeO5
+Y/n0H9Onj/PtCJHBbYzMHZGMPlX2IbW+JAeE/0XNIYGHtAtFwlb825Tkg2p7wpa8
+8HmOBqkTyn2ywDdmPqdfjCiMu/Ge6tkLjqkmYWv2l/d4+qEMR4dUh9g8SrrtUdZg
+DP7H22B+0knQ7s04JuiJ27hqi4nPOzdwdJNpz5Przgce8vN1ihk8194pR/uoNrjP
+td3Po+DwmxFKigoKPQCHgQuD63mAFor4vVnht+IkNbB3/lQyXP6Qv7DnWVW9WDBL
+nKxgXhRwyy5mYebYmwA//JX41O/Kdp1Q6oWgv4zSLd8M9FIMtESG8k4gSl0XfUBa
+Y24p0Q==
+-----END CERTIFICATE-----
diff --git a/meta-stx/recipes-containers/registry-token-server/files/token-server-private-key.pem b/meta-stx/recipes-containers/registry-token-server/files/token-server-private-key.pem
new file mode 100644 (file)
index 0000000..4332eb3
--- /dev/null
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAq0Js0n9o8QyRkUS0wpGiY6H02B46/SNgVjmLbd9eCG1OB1e7
+3/8fce7Pn8oMn5cRSnYNmAphDZQdVH5yqPawWMKWV8BWvOa/0UVHQJClDGopzEKF
+59XCwyHpeyoX4bNurMU7HrttPf1dvn6ykCYMdy0WXeeubIDXAzp745dyS0LRyEgt
+435QTKrld6eDr8GlWcjZRpgnU6l+5St9913Lm3Sh64oZn/EbaD2wvoYwYF5WyU6Y
+uDgIE7CreSdQag5MVhKMdXGtT+nhiBSi0hS9vgYGxepuIjszY1qQi1NHqbF9xiIq
+TF+Np1CWm0XIa5wjziYxjHy87ejpMUt9xNipnwIDAQABAoIBAFHCIV+QkdHZ9TiL
+u1vT2NmFvPTb4b9tfxVK3YRziVmujPy2Zqu2CRYEMzyOYd5iaU/J8g1ujwzDdAkd
+YLHHK0MEim+UFBSUeGh4kV6CbzjxCclIzNJz20n6y5MP8ly+o4x5kBLI2YsphPJn
+W+mzMGpIrQ/hhgSosX0KE5EAgQDqOfJSlhZvSgSO5UF9nXvEn7Y9Zc8GK0XQdcwB
+Pr8iFhuhEJmmb4LrCm+3Me/fhLxFjUAOAcLSkFnqfxo2vAuRqk99OOLxFEfPYZB8
+kLkKlQ+PwhkG3pjPg6w/rOmBHqW/ZEpd87972JWeHscXYpb/cLLVmcJbZI/claos
+YOHS7CECgYEA4XKo7GzuqSkLskfaZM2pyNhHbxphqyNfk8GmW5NJnKavpmY8YiXh
+7hNXXf4HCkcHvHMn4JUCHgHVavDNhNnrHNrQAzO3KwuUrrFiBP+yP1tRyQ4BP395
+KIBSUyeEOo9vM7d3yerI8WHboio5gaoqEfeNS1dakZ6ZiOpoP94CIxECgYEAwnfW
+Drdcqkpj794gYDlXH4D279f7+qmq11eI4C0zkZzTFkExl8BGfqpy49kruaTm0e4t
+L1B23TYfKC0ei4BQskyNCHUnl/eic/JHe9gJRd6BAZi2REfV0LI4ytYGgniCu50H
+EJVvTVMXS/+wWcjZr037oV6/WiB9Wzr7Z1oFoa8CgYBlmqdG5lEpK7Z5wqhKheXe
+/pozGFCsMGUC0mOHIfoq/3RqKelM0oXgJhdZ5QKHPzvdUojGTmGF5I2qhJwbI5sy
+her5hnUmkTGRCaCDYDmVFDLnycgGNg0Ek9CGaWjOe5ZCWI1EEuw83T1++Eiyh14u
+esLTEatftXq8megh4IxWAQKBgQCTNfox27ZnJrcuXn0tulpse8jy2RJjt0qfhyET
+asRN52SXxTRQhvoWattcBgsmlmEw69cCqSvB23WMiVNFERaFUpO0olMdpBUzJmXc
+pzal0IDh/4OCfsqqGDALxCbbX3S/p2gwsp617z+EhYMvBG9dWHAywTGjfVLH3Ady
+PmBi+wKBgQCWJS/PmTpyO8LU4MYZk91mJmjHAsPlgi/9n8yEqdmins+X698IsoCr
+s2FN8rol8+UP8c3m9o4kp62ouoby2QzAZw0y3UGWcxOb3ZpoozatKodsoETSLLoL
+T//wVn2Z2MsS9tLOBLZzsZiYlHyYxTUm7UTOdxdjbSLWVdLbCpKEhg==
+-----END RSA PRIVATE KEY-----
diff --git a/meta-stx/recipes-containers/registry-token-server/files/token_server.conf b/meta-stx/recipes-containers/registry-token-server/files/token_server.conf
new file mode 100644 (file)
index 0000000..4683478
--- /dev/null
@@ -0,0 +1 @@
+# This is a puppet managed config file
diff --git a/meta-stx/recipes-containers/registry-token-server/registry-token-server_1.0.0.bb b/meta-stx/recipes-containers/registry-token-server/registry-token-server_1.0.0.bb
new file mode 100644 (file)
index 0000000..f4f6479
--- /dev/null
@@ -0,0 +1,74 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " Token server for use with Docker registry with Openstack Keystone back end"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d2794c0df5b907fdace235a619d80314"
+
+GO_IMPORT = "registry-token-server"
+
+SRC_URI = "file://registry-token-server-1.0.0.tar.gz \
+           file://registry-token-server.service \
+           file://token_server.conf \
+          "
+
+RDEPENDS_${PN}_append = " systemd"
+
+DEPENDS += "\
+        go-logrus \
+        docker-distribution \
+        go-libtrust \
+        go-patricia \
+        go-mux \
+        go-context \
+        go-phercloud \
+        "
+
+inherit go goarch ${@bb.utils.contains('VIRTUAL-RUNTIME_init_manager','systemd','systemd','', d)}
+
+do_compile() {
+        mkdir -p _build/src
+        ln -sfn ${WORKDIR}/${PN}-${PV} ./_build/src/registry-token-server
+  
+        # Pass the needed cflags/ldflags so that cgo
+        # can find the needed headers files and libraries
+        export GOARCH=${TARGET_GOARCH}
+        export CGO_ENABLED="1"
+        export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+        export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+        export GOPATH="${WORKDIR}/build/_build:${STAGING_DIR_TARGET}/${prefix}/local/go"
+        cd _build/src/${GO_IMPORT}
+        #oe_runmake registry-token-server
+        export GOROOT=${STAGING_DIR_TARGET}/${prefix}/local/go
+        go build -o ${WORKDIR}/build/bin/registry-token-server registry-token-server
+}
+
+SYSTEMD_PACKAGES = "${PN}"
+SYSTEMD_SERVICE_${PN} = "registry-token-server.service"
+SYSTEMD_AUTO_ENABLE = "enable"
+
+do_install() {
+        install -d ${D}/${sbindir}
+        install -m 0755 bin/registry-token-server ${D}/${sbindir}
+        # The unit file reads EnvironmentFile=/etc/docker-distribution/registry/token_server.conf,
+        # so the (puppet-managed) config must be staged there, not under /etc/registry-token-server.
+        install -d ${D}/${sysconfdir}/docker-distribution/registry
+        install -m 0644 ${WORKDIR}/token_server.conf ${D}/${sysconfdir}/docker-distribution/registry
+        if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+            install -d ${D}${systemd_unitdir}/system
+            install -m 0644 ${WORKDIR}/registry-token-server.service ${D}${systemd_unitdir}/system/
+        fi
+}
+
diff --git a/meta-stx/recipes-core/distributedcloud/distributedcloud-client-dcmanager_git.bb b/meta-stx/recipes-core/distributedcloud/distributedcloud-client-dcmanager_git.bb
new file mode 100644 (file)
index 0000000..fb7e22f
--- /dev/null
@@ -0,0 +1,57 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+       Client library for Distributed Cloud built on the Distributed Cloud API. \
+       It provides a command-line tool (dcmanager).  \
+       Distributed Cloud provides configuration and management of distributed clouds \
+       "
+
+HOMEPAGE = "https://opendev.org/starlingx"
+SECTION = "network"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://distributedcloud-client/LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCNAME = "distcloud-client"
+SRCREV = "8a8f01dd3701d4793dd8cbc0147e4dca49cd7c03"
+PV = "1.0.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       "
+
+inherit distutils python-dir
+
+DEPENDS += " \
+       python-pbr-native \
+       "
+
+do_configure() {
+       cd ${S}/distributedcloud-client
+       distutils_do_configure
+}
+
+do_compile() {
+       cd ${S}/distributedcloud-client
+       distutils_do_compile
+}
+
+do_install() {
+       cd ${S}/distributedcloud-client
+       distutils_do_install
+}
diff --git a/meta-stx/recipes-core/distributedcloud/distributedcloud_git.bb b/meta-stx/recipes-core/distributedcloud/distributedcloud_git.bb
new file mode 100644 (file)
index 0000000..21796f1
--- /dev/null
@@ -0,0 +1,258 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "StarlingX distributedcloud packages collection"
+HOMEPAGE = "https://opendev.org/starlingx"
+SECTION = "network"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://distributedcloud/LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCNAME = "distcloud"
+SRCREV = "8329259704a5becd036663fc7de9b7a61f4bc27e"
+PV = "1.0.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       "
+
+# TODO: 
+# 1 - Patch service unit files to create the following directories:
+#      - var/log/dcdbsync 
+#      - var/log/dcmanager
+#      - var/log/dcorch
+# python-jsonschema >= 2.0.0
+# python-keystoneauth1 >= 3.1.0
+# python-pbr >= 1.8
+# python-pecan >= 1.0.0
+# python-routes >= 1.12.3
+
+PACKAGES += "distributedcloud-dcmanager"
+PACKAGES += "distributedcloud-dcorch"
+PACKAGES += "distributedcloud-dcdbsync"
+PACKAGES += "distributedcloud-ocf"
+DESCRIPTION_distributedcloud-dcmanager = "Distributed Cloud Manager"
+DESCRIPTION_distributedcloud-dcorch = "Distributed Cloud Orchestrator"
+DESCRIPTION_distributedcloud-dcdbsync = "DC DCorch DBsync Agent"
+
+DEPENDS += " \
+       python-pbr-native \
+       "
+
+ALL_RDEPENDS = " \
+       python-pycrypto \
+       python-cryptography \
+       python-eventlet \
+       python-setuptools \
+       python-jsonschema \
+       python-keyring \
+       python-keystonemiddleware \
+       python-keystoneauth1 \
+       python-netaddr \
+       python-oslo.concurrency \
+       python-oslo.config \
+       python-oslo.context \
+       python-oslo.db \
+       python-oslo.i18n \
+       python-oslo.log \
+       python-oslo.messaging \
+       python-oslo.middleware \
+       python-oslo.policy \
+       python-oslo.rootwrap \
+       python-oslo.serialization \
+       python-oslo.service \
+       python-oslo.utils \
+       python-oslo.versionedobjects \
+       python-pbr \
+       python-pecan \
+       python-routes \
+       python-sphinx \
+       python-pyopenssl \
+       systemd \
+       python-babel \
+       distributedcloud-ocf \
+       "
+# TODO: 
+# Check dependencies
+#      python-sphinxcontrib-httpdomain
+
+RDEPENDS_distributedcloud-dcmanager += " ${ALL_RDEPENDS}"
+RDEPENDS_distributedcloud-dcorch += " ${ALL_RDEPENDS}"
+RDEPENDS_distributedcloud-dcdbsync += " ${ALL_RDEPENDS}"
+
+
+inherit distutils python-dir
+
+do_configure() {
+       cd ${S}/distributedcloud
+       distutils_do_configure
+}
+
+do_compile() {
+       cd ${S}/distributedcloud
+       distutils_do_compile
+}
+
+do_install() {
+	cd ${S}/distributedcloud
+	distutils_do_install
+
+	SRCPATH=${datadir}/starlingx/distributedcloud-config-files/
+
+	# dcmanager
+	install -d -m 0755 ${D}/var/log/dcmanager
+	install -d -m 0755 ${D}/var/cache/dcmanager
+	install -d -m 0755 ${D}/${sysconfdir}/dcmanager
+	install -d -m 0755 ${D}/${sysconfdir}/tmpfiles.d
+	install -d -m 0755 ${D}/${SRCPATH}
+	install -d -m 0755 ${D}/${systemd_system_unitdir}
+	install -d -m 0755 ${D}/opt/dc/ansible
+
+	# Install systemd unit files
+	install -p -D -m 0644 centos/files/dcmanager-api.service ${D}/${systemd_system_unitdir}/
+	install -p -D -m 0644 centos/files/dcmanager-manager.service ${D}/${systemd_system_unitdir}/
+	install -p -D -m 0644 centos/files/dcmanager.conf ${D}/${sysconfdir}/tmpfiles.d
+
+	# Install default config files
+	# defer postinst_ontarget
+	install -p -m 0644 dcmanager/config-generator.conf ${D}/${SRCPATH}/dcmanager-config-generator.conf
+
+	# dcorch
+	install -d -m 0755 ${D}/var/log/dcorch
+	install -d -m 0755 ${D}/var/cache/dcorch
+	install -d -m 0755 ${D}/${sysconfdir}/dcorch
+
+	# Install systemd unit files
+	install -p -D -m0644 centos/files/dcorch-api.service ${D}/${systemd_system_unitdir}/
+	install -p -D -m0644 centos/files/dcorch-engine.service ${D}/${systemd_system_unitdir}/
+	install -p -D -m0644 centos/files/dcorch-sysinv-api-proxy.service ${D}/${systemd_system_unitdir}/
+	install -p -D -m0644 centos/files/dcorch-snmp.service ${D}/${systemd_system_unitdir}/
+	install -p -D -m0644 centos/files/dcorch-identity-api-proxy.service ${D}/${systemd_system_unitdir}/
+	install -p -D -m0644 centos/files/dcorch.conf ${D}/${sysconfdir}/tmpfiles.d
+
+	# Install ocf scripts (resource agents are executed by the cluster manager, so 0755)
+	install -d -m 0755 ${D}/${libdir}/ocf/resource.d/openstack
+	install -m 0755 \
+		ocf/dcdbsync-api \
+		ocf/dcmanager-api \
+		ocf/dcmanager-manager \
+		ocf/dcorch-cinder-api-proxy \
+		ocf/dcorch-engine \
+		ocf/dcorch-identity-api-proxy \
+		ocf/dcorch-neutron-api-proxy \
+		ocf/dcorch-nova-api-proxy \
+		ocf/dcorch-patch-api-proxy \
+		ocf/dcorch-snmp \
+		ocf/dcorch-sysinv-api-proxy ${D}/${libdir}/ocf/resource.d/openstack/
+
+	# Install default config files
+	# defer postinst_ontarget
+	install -p -m 0644 dcorch/config-generator.conf ${D}/${SRCPATH}/dcorch-config-generator.conf
+
+	# dc dbsync agent
+	install -d -m 755 ${D}/var/log/dcdbsync
+	install -d -m 755 ${D}/var/cache/dcdbsync
+	install -d -m 755 ${D}/${sysconfdir}/dcdbsync/
+
+	# Install systemd unit files
+	install -p -D -m 644 centos/files/dcdbsync-api.service ${D}/${systemd_system_unitdir}/dcdbsync-api.service
+	# Unit file for the optional second (containerized OpenStack) dbsync instance
+	install -p -D -m 644 centos/files/dcdbsync-openstack-api.service ${D}/${systemd_system_unitdir}/dcdbsync-openstack-api.service
+	install -p -D -m 644 centos/files/dcdbsync.conf ${D}/${sysconfdir}/tmpfiles.d
+
+	# Install default config files
+	# defer postinst_ontarget
+	install -p -m 0644 dcdbsync/config-generator.conf ${D}/${SRCPATH}/dcdbsync-config-generator.conf
+}
+
+
+pkg_postinst_ontarget_distributedcloud-dcmanager() {
+	SRCPATH=${datadir}/starlingx/distributedcloud-config-files/
+	oslo-config-generator --config-file ${SRCPATH}/dcmanager-config-generator.conf \
+		--output-file ${sysconfdir}/dcmanager/dcmanager.conf.sample
+}
+
+pkg_postinst_ontarget_distributedcloud-dcorch() {
+	SRCPATH=${datadir}/starlingx/distributedcloud-config-files/
+	oslo-config-generator --config-file ${SRCPATH}/dcorch-config-generator.conf \
+		--output-file ${sysconfdir}/dcorch/dcorch.conf.sample
+}
+
+
+pkg_postinst_ontarget_distributedcloud-dcdbsync() {
+	SRCPATH=${datadir}/starlingx/distributedcloud-config-files/
+	oslo-config-generator --config-file ${SRCPATH}/dcdbsync-config-generator.conf \
+		--output-file ${sysconfdir}/dcdbsync/dcdbsync.conf.sample
+}
+
+FILES_${PN} = " \
+  /var/volatile \
+  /var/log \
+  /var/volatile/log \
+  /etc/tmpfiles.d \
+  "
+
+FILES_distributedcloud-ocf = " \
+	${libdir}/ocf/resource.d/openstack \
+	"
+
+FILES_distributedcloud-dcdbsync = " \
+	${PYTHON_SITEPACKAGES_DIR}/dcdbsync \
+	${PYTHON_SITEPACKAGES_DIR}/dcdbsync-*.egg-info \
+	/var/cache/dcdbsync \
+	/var/volatile/log/dcdbsync \
+	${bindir}/dcdbsync-api \
+	${systemd_system_unitdir}/dcdbsync-api.service \
+	${systemd_system_unitdir}/dcdbsync-openstack-api.service \
+	${sysconfdir}/dcdbsync/ \
+	${datadir}/starlingx/distributedcloud-config-files/dcdbsync-config-generator.conf \
+	"
+
+FILES_distributedcloud-dcorch = " \
+	${PYTHON_SITEPACKAGES_DIR}/dcorch \
+	${PYTHON_SITEPACKAGES_DIR}/distributedcloud-*.egg-info \
+	${bindir}/dcorch-api \
+	${systemd_system_unitdir}/dcorch-api.service \
+	${bindir}/dcorch-engine \
+	${systemd_system_unitdir}/dcorch-engine.service \
+	${bindir}/dcorch-api-proxy \
+	${systemd_system_unitdir}/dcorch-sysinv-api-proxy.service \
+	${systemd_system_unitdir}/dcorch-identity-api-proxy.service \
+	${bindir}/dcorch-manage \
+	${bindir}/dcorch-snmp \
+	${systemd_system_unitdir}/dcorch-snmp.service \
+	${sysconfdir}/tmpfiles.d/dcorch.conf \
+	/var/cache/dcorch \
+	${sysconfdir}/dcorch \
+	${datadir}/starlingx/distributedcloud-config-files/dcorch-config-generator.conf \
+	"
+
+FILES_distributedcloud-dcmanager = " \
+	${PYTHON_SITEPACKAGES_DIR}/dcmanager \
+	${PYTHON_SITEPACKAGES_DIR}/distributedcloud-*.egg-info \
+	${bindir}/dcmanager-api \
+	${systemd_system_unitdir}/dcmanager-api.service \
+	${bindir}/dcmanager-manager \
+	${systemd_system_unitdir}/dcmanager-manager.service \
+	${bindir}/dcmanager-manage \
+	${sysconfdir}/tmpfiles.d/dcmanager.conf \
+	/var/cache/dcmanager \
+	${sysconfdir}/dcmanager \
+	/var/log/dcmanager \
+	/opt/dc/ansible \
+	${datadir}/starlingx/distributedcloud-config-files/dcmanager-config-generator.conf \
+	"
diff --git a/meta-stx/recipes-core/images/stx-image-aio-installer.bb b/meta-stx/recipes-core/images/stx-image-aio-installer.bb
new file mode 100644 (file)
index 0000000..ba20787
--- /dev/null
@@ -0,0 +1,68 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "An image with Anaconda to do installation for StarlingX"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+# Support installation from initrd boot
+do_image_complete[depends] += "core-image-anaconda-initramfs:do_image_complete"
+
+DEPENDS += "isomd5sum-native"
+
+CUSTOMIZE_LOGOS ??= "yocto-compat-logos"
+
+# We override what gets set in core-image.bbclass
+IMAGE_INSTALL = "\
+    packagegroup-core-boot \
+    packagegroup-core-ssh-openssh \
+    ${@['', 'packagegroup-installer-x11-anaconda'][bool(d.getVar('XSERVER', True))]} \
+    python3-anaconda \
+    anaconda-init \
+    kernel-modules \
+    ${CUSTOMIZE_LOGOS} \
+    dhcp-client \
+    ldd \
+    rng-tools \
+"
+
+IMAGE_LINGUAS = "en-us en-gb"
+
+# Generate live image
+IMAGE_FSTYPES_remove = "wic wic.bmap"
+IMAGE_FSTYPES_append = " iso"
+
+IMAGE_ROOTFS_EXTRA_SPACE = " + 102400"
+
+inherit core-image stx-anaconda-image
+
+KICKSTART_FILE ?= "${LAYER_PATH_meta-stx}/conf/distro/files/ks/poky_stx_aio_ks.cfg"
+
+# Only the ones prefix with poky_stx_ are tested and working
+KICKSTART_FILE_EXTRA ?= " \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/poky_stx_aio_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/aio_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/aio_lowlatency_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/controller_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/net_controller_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/net_smallsystem_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/net_smallsystem_lowlatency_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/net_storage_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/net_worker_ks.cfg \
+    ${LAYER_PATH_meta-stx}/conf/distro/files/ks/net_worker_lowlatency_ks.cfg \
+"
+
+SYSLINUX_CFG_LIVE = "${LAYER_PATH_meta-stx}/conf/distro/files/syslinux.cfg"
diff --git a/meta-stx/recipes-core/images/stx-image-aio.bb b/meta-stx/recipes-core/images/stx-image-aio.bb
new file mode 100644 (file)
index 0000000..a55adc7
--- /dev/null
@@ -0,0 +1,62 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = " StarlingX Single Server"
+
+LICENSE = "MIT"
+
+CORE_IMAGE_EXTRA_INSTALL = " \
+       packagegroup-basic \
+       packagegroup-core-base-utils  \
+       "
+
+IMAGE_INSTALL_append = " \
+       ${CORE_IMAGE_BASE_INSTALL} \
+       packagegroup-core-full-cmdline \
+       packagegroup-core-lsb \
+       packagegroup-stx-upstream \
+       packagegroup-stx-puppet \
+       packagegroup-stx-fault \
+       packagegroup-stx-metal \
+       packagegroup-stx-nfv \
+       packagegroup-stx-monitoring \
+       packagegroup-stx-ha \
+       packagegroup-stx-config-files \
+       packagegroup-stx-update \
+       packagegroup-stx-integ \
+       packagegroup-stx-config \
+       packagegroup-stx-distributedcloud \
+       packagegroup-stx-utilities \
+       packagegroup-stx-armada-app \
+       starlingx-dashboard \
+       playbookconfig \
+       distributedcloud-client-dcmanager \
+       registry-token-server \
+       "
+
+IMAGE_FEATURES += " \
+       package-management \
+       ssh-server-openssh \
+       "
+
+inherit stx-postrun
+inherit core-image
+inherit distro_features_check
+inherit openstack-base
+inherit identity
+inherit monitor
+
+# We need docker-ce
+PACKAGE_EXCLUDE += " docker"
diff --git a/meta-stx/recipes-core/initrdscripts/files/init-install.sh b/meta-stx/recipes-core/initrdscripts/files/init-install.sh
new file mode 100755 (executable)
index 0000000..023a614
--- /dev/null
@@ -0,0 +1,366 @@
+#!/bin/sh -e
+#
+# install.sh [device_name] [rootfs_name] [video_mode] [vga_mode]
+#
+# Live-media installer: locates the first usable hard drive, partitions
+# it, copies the live rootfs onto it and installs a bootloader.
+#   $1: live media device/mount key under /run/media
+#   $2: rootfs image file name on the live media
+#   $3/$4: video/vga kernel parameters forwarded to the boot entry
+# NOTE(review): a fifth positional ($5, console parameters) is referenced
+# later when writing grub.cfg -- presumably passed by setup-live; confirm.
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin
+
+# Get a list of hard drives
+hdnamelist=""
+# Resolve which block device the live media lives on so it can be skipped.
+live_dev_name=`cat /proc/mounts | grep ${1%/} | awk '{print $1}'`
+live_dev_name=${live_dev_name#\/dev/}
+# Only strip the digit identifier if the device is not an mmc
+# (mmcblk0p1 / nvme0n1p1 embed digits in the parent device name itself).
+case $live_dev_name in
+    mmcblk*)
+    ;;
+    nvme*)
+    ;;
+    *)
+        live_dev_name=${live_dev_name%%[0-9]*}
+    ;;
+esac
+
+echo "Searching for hard drives ..."
+
+# Some eMMC devices have special sub devices such as mmcblk0boot0 etc
+# we're currently only interested in the root device so pick them wisely
+devices=`ls /sys/block/ | grep -v mmcblk` || true
+mmc_devices=`ls /sys/block/ | grep "mmcblk[0-9]\{1,\}$"` || true
+devices="$devices $mmc_devices"
+
+# Filter out pseudo devices and the device the live image is running from.
+for device in $devices; do
+    case $device in
+        loop*)
+            # skip loop device
+            ;;
+        sr*)
+            # skip CDROM device
+            ;;
+        ram*)
+            # skip ram device
+            ;;
+        *)
+            # skip the device LiveOS is on
+            # Add valid hard drive name to the list
+            case $device in
+                $live_dev_name*)
+                # skip the device we are running from
+                ;;
+                *)
+                    hdnamelist="$hdnamelist $device"
+                ;;
+            esac
+            ;;
+    esac
+done
+
+TARGET_DEVICE_NAME=""
+for hdname in $hdnamelist; do
+    # Display found hard drives and their basic info
+    echo "-------------------------------"
+    echo /dev/$hdname
+    if [ -r /sys/block/$hdname/device/vendor ]; then
+        echo -n "VENDOR="
+        cat /sys/block/$hdname/device/vendor
+    fi
+    if [ -r /sys/block/$hdname/device/model ]; then
+        echo -n "MODEL="
+        cat /sys/block/$hdname/device/model
+    fi
+    if [ -r /sys/block/$hdname/device/uevent ]; then
+        echo -n "UEVENT="
+        cat /sys/block/$hdname/device/uevent
+    fi
+    echo
+done
+
+# use the first one found
+for hdname in $hdnamelist; do
+    TARGET_DEVICE_NAME=$hdname
+    break
+done
+
+if [ -n "$TARGET_DEVICE_NAME" ]; then
+    echo "Installing image on /dev/$TARGET_DEVICE_NAME ..."
+else
+    echo "No hard drive found. Installation aborted."
+    exit 1
+fi
+
+device=/dev/$TARGET_DEVICE_NAME
+
+#
+# Unmount anything the automounter had mounted
+#
+
+for dir in `awk '/\/dev.* \/run\/media/{print $2}' /proc/mounts | grep $TARGET_DEVICE_NAME`; do
+	umount $dir
+done
+
+# Loop device node is needed to mount the rootfs image later on.
+if [ ! -b /dev/loop0 ] ; then
+    mknod /dev/loop0 b 7 0
+fi
+
+mkdir -p /tmp
+if [ ! -L /etc/mtab ] && [ -e /proc/mounts ]; then
+    ln -sf /proc/mounts /etc/mtab
+fi
+
+# Total target disk size in MB, parsed from parted's "Disk ...: NNNMB" line.
+disk_size=$(parted ${device} unit mb print | grep '^Disk .*: .*MB' | cut -d" " -f 3 | sed -e "s/MB//")
+
+# Major version of the installed grub (0 = legacy grub, >0 = grub 2).
+grub_version=$(grub-install -V|sed 's/.* \([0-9]\).*/\1/')
+
+if [ $grub_version -eq 0 ] ; then
+    bios_boot_size=0
+else
+    # For GRUB 2 we need separate parition to store stage2 grub image
+    # 2Mb value is chosen to align partition for best performance.
+    bios_boot_size=2
+fi
+
+# Fixed layout (MB): /boot, rootfs, and LVs carved out of the data VG.
+boot_size=512
+rootfs_size=20000
+log_vol_size=8000
+scratch_vol_size=8000
+
+data_size=$((disk_size-bios_boot_size-boot_size-rootfs_size))
+boot_start=$((bios_boot_size))
+rootfs_start=$((bios_boot_size+boot_size))
+rootfs_end=$((rootfs_start+rootfs_size))
+data_start=$((rootfs_end))
+
+# MMC devices are special in a couple of ways
+# 1) they use a partition prefix character 'p'
+# 2) they are detected asynchronously (need rootwait)
+rootwait=""
+part_prefix=""
+if [ ! "${device#/dev/mmcblk}" = "${device}" ] || \
+   [ ! "${device#/dev/nvme}" = "${device}" ]; then
+    part_prefix="p"
+    rootwait="rootwait"
+fi
+
+# USB devices also require rootwait.
+# Fix: quote the command substitution -- the previous unquoted form made
+# "[ -n ... ]" a one-argument test that is always true (rootwait was added
+# for every device) and broke with multiple matching by-id symlinks.
+if [ -n "$(readlink /dev/disk/by-id/usb* 2>/dev/null | grep $TARGET_DEVICE_NAME)" ]; then
+    rootwait="rootwait"
+fi
+
+# Partition numbering differs between legacy grub (msdos label, no BIOS
+# boot partition: boot/root/data = 1/2/3) and grub 2 (gpt label with a
+# BIOS boot partition first: 1/2/3/4).
+if [ $grub_version -eq 0 ] ; then
+    bios_boot=''
+    bootfs=${device}${part_prefix}1
+    rootfs=${device}${part_prefix}2
+    data=${device}${part_prefix}3
+    data_part_num=3
+else
+    bios_boot=${device}${part_prefix}1
+    bootfs=${device}${part_prefix}2
+    rootfs=${device}${part_prefix}3
+    data=${device}${part_prefix}4
+    data_part_num=4
+fi
+
+echo "*********************************************"
+[ $grub_version -ne 0 ] && echo "BIOS boot partition size: $bios_boot_size MB ($bios_boot)"
+echo "Boot partition size:   $boot_size MB ($bootfs)"
+echo "Rootfs partition size: $rootfs_size MB ($rootfs)"
+echo "Data partition size:   $data_size MB ($data)"
+echo "*********************************************"
+echo "Deleting partition table on ${device} ..."
+dd if=/dev/zero of=${device} bs=512 count=35
+
+# All parted invocations use -s (script mode) so parted can never block
+# waiting for interactive confirmation during an unattended install.
+echo "Creating new partition table on ${device} ..."
+if [ $grub_version -eq 0 ] ; then
+    parted ${device} -s mktable msdos
+    echo "Creating boot partition on $bootfs"
+    parted ${device} -s mkpart primary ext3 0% $boot_size
+else
+    parted ${device} -s mktable gpt
+    echo "Creating BIOS boot partition on $bios_boot"
+    parted ${device} -s mkpart bios_boot 0% $bios_boot_size
+    parted ${device} -s set 1 bios_grub on
+    echo "Creating boot partition on $bootfs"
+    # Fix: parted mkpart takes START and END positions, not a size; the
+    # boot partition must end where the rootfs partition starts
+    # ($rootfs_start = bios_boot_size + boot_size), not at $boot_size.
+    parted ${device} -s mkpart boot ext3 $boot_start $rootfs_start
+fi
+
+echo "Creating rootfs partition on $rootfs"
+[ $grub_version -eq 0 ] && pname='primary' || pname='root'
+parted ${device} -s mkpart $pname ext4 $rootfs_start $rootfs_end
+
+echo "Creating data partition on $data"
+[ $grub_version -eq 0 ] && pname='primary' || pname='data'
+parted ${device} -s mkpart $pname $data_start 100%
+# Fix: the data partition is number 3 on the msdos layout (no BIOS boot
+# partition), so don't hard-code partition 4 here.
+parted ${device} -s set $data_part_num lvm on
+
+parted ${device} print
+
+# Give the kernel up to 3 seconds to surface the new partition nodes.
+echo "Waiting for device nodes..."
+C=0
+while [ $C -ne 3 ] && [ ! -e $bootfs  -o ! -e $rootfs -o ! -e $data ]; do
+    C=$(( C + 1 ))
+    sleep 1
+done
+
+echo "Formatting $bootfs to ext3..."
+mkfs.ext3 -F $bootfs
+
+echo "Formatting $rootfs to ext4..."
+mkfs.ext4 -F $rootfs
+
+# Remaining space becomes the cgts-vg volume group holding the log and
+# scratch logical volumes.
+echo "Create LVM for $data..."
+vg_name="cgts-vg"
+
+# Disable udev scan in lvm.conf
+sed -i 's/\(md_component_detection =\).*/\1 0/' /etc/lvm/lvm.conf
+
+pvcreate -y -ff $data
+vgcreate -y -ff $vg_name $data
+
+# Start udevd (daemonized) so the LV device nodes appear under /dev.
+udevd -d
+
+lvcreate -y -n log-lv --size $log_vol_size $vg_name
+lvcreate -y -n scratch-lv --size  $scratch_vol_size $vg_name
+
+mkfs.ext4 -F /dev/$vg_name/log-lv
+mkfs.ext4 -F /dev/$vg_name/scratch-lv
+
+# Mount points: target rootfs, target /var/log LV, and the source image.
+mkdir /tgt_root
+mkdir /tgt_log
+mkdir /src_root
+mkdir -p /boot
+
+# Make sure the live media holding the rootfs image is mounted.
+if [ ! -f /run/media/$1/$2 ]; then
+    mkdir -p /run/media/$1
+    mount /dev/$1 /run/media/$1
+fi
+
+# Handling of the target root partition
+mount $rootfs /tgt_root
+mount /dev/$vg_name/log-lv /tgt_log
+mount -o rw,loop,noatime,nodiratime /run/media/$1/$2 /src_root
+echo "Copying rootfs files..."
+cp -a /src_root/* /tgt_root
+if [ -d /tgt_root/etc/ ] ; then
+    # Reference /boot by UUID on grub 2 installs for stable device naming.
+    if [ $grub_version -ne 0 ] ; then
+        boot_uuid=$(blkid -o value -s UUID ${bootfs})
+        bootdev="UUID=$boot_uuid"
+    else
+        bootdev=${bootfs}
+    fi
+    sed -i '/vfat/d' /tgt_root/etc/fstab
+    echo "$bootdev  /boot  ext3  defaults  1  2" >> /tgt_root/etc/fstab
+    echo "/dev/$vg_name/log-lv  /var/log  ext4  defaults  1  2" >> /tgt_root/etc/fstab
+    echo "/dev/$vg_name/scratch-lv  /scratch  ext4  defaults  1  2" >> /tgt_root/etc/fstab
+
+    # We dont want udev to mount our root device while we're booting...
+    if [ -d /tgt_root/etc/udev/ ] ; then
+        echo "${device}" >> /tgt_root/etc/udev/mount.blacklist
+    fi
+fi
+
+# Seed the minimal platform configuration StarlingX expects on first boot.
+INSTALL_UUID=`uuidgen`
+cat << _EOF > /tgt_root/etc/platform/platform.conf
+nodetype=controller
+subfunction=controller,worker
+system_type=All-in-one
+security_profile=standard
+management_interface=lo
+http_port=8080
+INSTALL_UUID=${INSTALL_UUID}
+_EOF
+
+# Create first_boot flag
+touch /tgt_root/etc/platform/.first_boot
+
+# The grub.cfg is created by installer, so the postinsts script is not needed.
+rm -f /tgt_root/etc/rpm-postinsts/*-grub
+
+# /var/log will be mounted to the log-lv
+# so move the all files to log-lv
+cp -rf /tgt_root/var/log/* /tgt_log
+rm -rf /tgt_root/var/log
+
+# Fake as anaconda to add info needed by stx 3.0
+# Fix: the shebang must be the FIRST line of the generated script to be
+# honored; it was previously emitted on the second line.
+cat << _EOF > /tgt_root/etc/rpm-postinsts/999-anaconda
+#!/bin/sh
+# anaconda - postinst
+set -e
+mkdir -p /var/log/anaconda/
+echo "Display mode = t" > /var/log/anaconda/anaconda.log
+_EOF
+chmod 0755 /tgt_root/etc/rpm-postinsts/999-anaconda
+
+umount /tgt_root
+umount /src_root
+# NOTE(review): /tgt_log stays mounted until the final reboot; an explicit
+# "umount /tgt_log" before the closing sync would be tidier -- confirm.
+
+echo "Looking for kernels to use as boot target.."
+# Find kernel to boot to
+# Give user options if multiple are found
+# Fix: group the -name tests with \( \) -- without the parentheses,
+# "-type f" bound only to the first -name (find's implicit -a binds
+# tighter than -o); also quote the globs so the shell cannot expand them
+# against files in the current directory before find sees them.
+kernels="$(find /run/media/$1/ -type f \
+           \( -name "bzImage*" -o -name "zImage*" \
+           -o -name "vmlinux*" -o -name "vmlinuz*" \
+           -o -name "fitImage*" \) \
+           | sed s:.*/::)"
+if [ -n "$(echo $kernels)" ]; then
+    # only one kernel entry if no space
+    if [ -z "$(echo $kernels | grep " ")" ]; then
+        kernel=$kernels
+        echo "$kernel will be used as the boot target"
+    else
+        echo "Which kernel do we want to boot by default? The following kernels were found:"
+        echo $kernels
+        read answer
+        kernel=$answer
+    fi
+else
+    echo "No kernels found, exiting..."
+    exit 1
+fi
+
+# Handling of the target boot partition
+mount $bootfs /boot
+echo "Preparing boot partition..."
+
+if [ -f /etc/grub.d/00_header -a $grub_version -ne 0 ] ; then
+    echo "Preparing custom grub2 menu..."
+    # NOTE(review): root_part_uuid is computed but unused -- the menu entry
+    # uses $rootfs directly; root=PARTUUID=$root_part_uuid would be more
+    # robust against device renaming. Confirm before changing behavior.
+    root_part_uuid=$(blkid -o value -s PARTUUID ${rootfs})
+    boot_uuid=$(blkid -o value -s UUID ${bootfs})
+    GRUBCFG="/boot/grub/grub.cfg"
+    mkdir -p $(dirname $GRUBCFG)
+    # @STX_ID@ is substituted with the build id by the recipe's
+    # do_install_append; $5/$3/$4 are console/video/vga parameters.
+    cat >$GRUBCFG <<_EOF
+timeout=5
+default=0
+menuentry "Yocto Linux with StarlingX @STX_ID@" {
+    search --no-floppy --fs-uuid $boot_uuid --set root
+    linux /$kernel root=$rootfs $rootwait rw console=tty0 console=ttyS0,115200 $5 $3 $4
+}
+_EOF
+    chmod 0444 $GRUBCFG
+fi
+grub-install ${device}
+
+# Legacy grub: generate a minimal menu.lst instead.
+if [ $grub_version -eq 0 ] ; then
+    echo "(hd0) ${device}" > /boot/grub/device.map
+    echo "Preparing custom grub menu..."
+    echo "default 0" > /boot/grub/menu.lst
+    echo "timeout 30" >> /boot/grub/menu.lst
+    echo "title Live Boot/Install-Image" >> /boot/grub/menu.lst
+    echo "root  (hd0,0)" >> /boot/grub/menu.lst
+    echo "kernel /$kernel root=$rootfs rw $3 $4 quiet" >> /boot/grub/menu.lst
+fi
+
+# Copy kernel artifacts. To add more artifacts just add to types
+# For now just support kernel types already being used by something in OE-core
+for types in bzImage zImage vmlinux vmlinuz fitImage; do
+    # Fix: quote the pattern so find receives the glob instead of the
+    # shell expanding it against the current directory.
+    for kernel in `find /run/media/$1/ -name "$types*"`; do
+        cp $kernel /boot
+    done
+done
+
+umount /boot
+
+sync
+
+echo "Remove your installation media, and press ENTER"
+
+read enter
+
+echo "Rebooting..."
+reboot -f
diff --git a/meta-stx/recipes-core/initrdscripts/initramfs-module-install_1.0.bbappend b/meta-stx/recipes-core/initrdscripts/initramfs-module-install_1.0.bbappend
new file mode 100644 (file)
index 0000000..a2b1ca7
--- /dev/null
@@ -0,0 +1,26 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+# Substitute the @STX_ID@ placeholder in the installer's generated grub
+# menu entry with the actual StarlingX build id at packaging time.
+do_install_append () {
+    sed -i -e 's/@STX_ID@/${STX_ID}/' ${D}/init.d/install.sh
+}
+
+# Tools the installer script needs at runtime (mount, uuidgen, LVM).
+RDEPENDS_${PN} += "\
+    util-linux-mount \
+    util-linux-uuidgen \
+    lvm2 \
+"
diff --git a/meta-stx/recipes-core/initrdscripts/initramfs-module-setup-live/setup-live b/meta-stx/recipes-core/initrdscripts/initramfs-module-setup-live/setup-live
new file mode 100644 (file)
index 0000000..dfbd576
--- /dev/null
@@ -0,0 +1,76 @@
+#!/bin/sh
+# Fix: shebang was "#/bin/sh" (missing '!').
+#
+# initramfs-framework module: waits for the removable medium carrying
+# rootfs.img, then hands control to /init.d/$bootparam_LABEL.sh (the
+# installer). udev_daemon, msg and the bootparam_* variables are provided
+# by the initramfs-framework environment that sources this module.
+
+_UDEV_DAEMON=`udev_daemon`
+
+# Module is unconditionally enabled.
+setup_enabled() {
+	return 0
+}
+
+setup_run() {
+	ROOT_IMAGE="rootfs.img"
+	ISOLINUX=""
+	ROOT_DISK=""
+	shelltimeout=60
+
+	if [ -z "$bootparam_root" -o "$bootparam_root" = "/dev/ram0" ]; then
+		echo "Waiting for removable media..."
+		udevadm trigger --action=add
+		udevadm settle
+		echo "Mounted filesystems"
+		mount |grep media
+		C=0
+		# Poll /run/media until the rootfs image shows up (directly or
+		# under an isolinux/ subdirectory on ISO media).
+		while true
+		do
+			for i in `ls /run/media 2>/dev/null`; do
+				if [ -f /run/media/$i/$ROOT_IMAGE ] ; then
+					found="yes"
+					ROOT_DISK="$i"
+					break
+				elif [ -f /run/media/$i/isolinux/$ROOT_IMAGE ]; then
+					found="yes"
+					ISOLINUX="isolinux"
+					ROOT_DISK="$i"
+					break
+				fi
+			done
+			if [ "$found" = "yes" ]; then
+				break;
+			fi
+			# don't wait for more than $shelltimeout seconds, if it's set
+			if [ -n "$shelltimeout" ]; then
+				echo " " $(( $shelltimeout - $C ))
+				if [ $C -ge $shelltimeout ]; then
+					echo "..."
+					echo "Mounted filesystems"
+					mount | grep media
+					echo "Available block devices"
+					cat /proc/partitions
+					msg "Cannot find $ROOT_IMAGE file in /run/media/* , dropping to a shell "
+					/bin/sh
+				fi
+				udevadm trigger --action=add
+				udevadm settle
+				echo "Mounted filesystems"
+				mount |grep media
+				C=$(( C + 1 ))
+			fi
+			sleep 1
+		done
+		killall -9 "${_UDEV_DAEMON##*/}" 2>/dev/null
+		# The existing rootfs module has no support for rootfs images. Assign the rootfs image.
+		bootparam_root="/run/media/$ROOT_DISK/$ISOLINUX/$ROOT_IMAGE"
+	fi
+
+	# NOTE(review): $video_mode/$vga_mode/$console_params are presumably
+	# exported by the framework from kernel boot parameters -- confirm.
+	if [ "$bootparam_LABEL" != "boot" -a -f /init.d/$bootparam_LABEL.sh ] ; then
+		if [ -f /run/media/$ROOT_DISK/$ISOLINUX/$ROOT_IMAGE ] ; then
+			./init.d/$bootparam_LABEL.sh $ROOT_DISK/$ISOLINUX $ROOT_IMAGE $video_mode $vga_mode $console_params
+		else
+			msg "Could not find $bootparam_LABEL script"
+			/bin/sh
+		fi
+
+		# If we're getting here, we failed...
+		msg "Target $bootparam_LABEL failed"
+		/bin/sh
+	fi
+}
diff --git a/meta-stx/recipes-core/initrdscripts/initramfs-module-setup-live_1.0.bbappend b/meta-stx/recipes-core/initrdscripts/initramfs-module-setup-live_1.0.bbappend
new file mode 100644 (file)
index 0000000..1eefa1a
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/initramfs-module-setup-live:"
diff --git a/meta-stx/recipes-core/packagegroups/packagegroup-stak-base.bb b/meta-stx/recipes-core/packagegroups/packagegroup-stak-base.bb
new file mode 100644 (file)
index 0000000..22a00ec
--- /dev/null
@@ -0,0 +1,562 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# "stak" = StarlingX + Akraino: base packagegroups shared by both stacks.
+SUMMARY = "Base rootfs for stx and akraino"
+
+PR = "r0"
+
+#
+# packages which content depend on MACHINE_FEATURES need to be MACHINE_ARCH
+#
+
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+
+inherit packagegroup
+
+PROVIDES = "${PACKAGES}"
+# One empty meta package per group; contents are pulled in via the
+# RDEPENDS_* lists below.
+PACKAGES = " \
+	packagegroup-stak-base \
+	packagegroup-stak-perl \
+	packagegroup-stak-python \
+	packagegroup-stak-ruby \
+	packagegroup-stak-puppet \
+	"
+
+# Runtime contents of the common base group.
+# Fix: duplicated entries removed (lz4, lzop, gperftools, libutempter,
+# freetype, polkit were each listed twice) and indentation normalized to
+# tabs (it previously mixed tabs and 8-space runs).
+# NOTE(review): "go-phercloud" looks like a typo of "go-gophercloud";
+# confirm against the recipe name in this layer before renaming it.
+RDEPENDS_packagegroup-stak-base = " \
+	lighttpd \
+	tcpdump \
+	tzdata \
+	numactl \
+	samba \
+	mariadb \
+	traceroute \
+	wget \
+	expect \
+	boost \
+	alsa-lib \
+	collectd \
+	conntrack-tools \
+	ding-libs \
+	dosfstools \
+	docker \
+	docker-registry \
+	containerd-opencontainers \
+	dracut \
+	drbd-utils \
+	efibootmgr \
+	efivar \
+	expat \
+	facter \
+	hwdata \
+	iscsi-initiator-utils \
+	isomd5sum \
+	jansson \
+	json-c \
+	kexec-tools \
+	keyutils \
+	kubernetes \
+	libcomerr \
+	libcgroup \
+	libdrm \
+	libedit \
+	libestr \
+	libev \
+	libgudev \
+	libjpeg-turbo \
+	libndp \
+	oath \
+	libogg \
+	libpcap \
+	libpciaccess \
+	libpipeline \
+	libpng \
+	libproxy \
+	libpwquality \
+	libseccomp \
+	libsm \
+	libss \
+	libssh2 \
+	libtomcrypt \
+	libtommath \
+	sysfsutils \
+	libteam \
+	libuser \
+	libutempter \
+	libvorbis \
+	cyrus-sasl \
+	ipmitool \
+	iperf3 \
+	iotop \
+	lsof \
+	lsscsi \
+	lz4 \
+	lzop \
+	mailcap \
+	mailx \
+	mdadm \
+	mod-wsgi \
+	mokutil \
+	mozjs \
+	mtools \
+	mysql-python \
+	libnewt-python \
+	nmap \
+	nss-pam-ldapd \
+	ntfs-3g-ntfsprogs \
+	openipmi \
+	openldap \
+	libopus \
+	ovmf-shell-efi \
+	p11-kit \
+	perf \
+	polkit \
+	popt \
+	procps \
+	pulseaudio \
+	screen \
+	seabios \
+	smartmontools \
+	snappy \
+	socat \
+	spawn-fcgi \
+	spice \
+	swig \
+	sysstat \
+	tcp-wrappers \
+	trousers \
+	xfsprogs \
+	yajl \
+	zip \
+	c-ares \
+	cifs-utils \
+	createrepo-c \
+	gdbm \
+	gmp \
+	groff \
+	libnl \
+	mcstrans \
+	linuxptp \
+	lua \
+	lldpd \
+	multipath-tools \
+	flac \
+	freetype \
+	geoip \
+	glusterfs \
+	gobject-introspection \
+	gperftools \
+	gpm \
+	hiera \
+	leveldb \
+	lksctp-tools \
+	quota \
+	radvd \
+	usbredir \
+	texinfo \
+	celt051 \
+	cryptsetup \
+	lmsensors-libsensors \
+	lmsensors-fancontrol \
+	lmsensors-isatools \
+	lmsensors-pwmconfig \
+	lmsensors-sensord \
+	lmsensors-sensors \
+	lmsensors-sensorsconfconvert \
+	lmsensors-sensorsdetect \
+	syslinux \
+	krb5 \
+	rocksdb \
+	resource-agents \
+	go-phercloud \
+	haproxy \
+	cluster-glue \
+	dpkg \
+	eventlog \
+	libibverbs \
+	libnfsidmap \
+	net-tools \
+	hardlink \
+	deltarpm \
+	dtc \
+	dtc-dev \
+	libtpms \
+	nscd \
+	openscap \
+	"
+#	libibverbs Conflicts with rdma-core
+#	qat17 
+#	docker-distribution 
+
+# Ruby runtime plus the shadow extension used for user/password handling.
+RDEPENDS_packagegroup-stak-ruby = " \
+	ruby-shadow \
+	ruby \
+	"
+
+# Puppet agent and the vswitch module used during provisioning.
+RDEPENDS_packagegroup-stak-puppet = " \
+	puppet \
+	puppet-vswitch \
+	"
+
+# Perl runtime modules.
+# Fix: filter-perl, pathtools-perl and scalar-list-utils-perl were each
+# listed twice; the duplicate entries have been removed.
+RDEPENDS_packagegroup-stak-perl = " \
+	filter-perl \
+	pathtools-perl \
+	podlators-perl \
+	scalar-list-utils-perl \
+	\
+	perl-module-carp \
+	perl-module-constant \
+	perl-module-data-dumper \
+	perl-module-encode \
+	perl-module-exporter \
+	perl-module-file-path \
+	perl-module-file-temp \
+	perl-module-getopt-long \
+	perl-module-http-tiny \
+	perl-module-parent \
+	perl-module-pod-escapes \
+	perl-module-pod-perldoc \
+	perl-module-pod-simple \
+	perl-module-pod-usage \
+	perl-module-socket \
+	perl-module-storable \
+	perl-module-text-parsewords \
+	perl-module-time-local \
+	perl-module-time-hires \
+	perl-module-threads \
+	perl-module-threads-shared \
+	libhtml-tagset-perl \
+	libmailtools-perl \
+	libsocket6-perl \
+	libtest-pod-perl \
+	libwww-perl \
+	"
+
+# Python runtime modules.
+# Fix: entries previously listed two or three times (python-pyzmq and
+# python-pyudev appeared three times; python-six, python-alembic,
+# python-pexpect, python-ptyprocess, python-traceback2, python-typing,
+# python-ujson, python-yappi, python-click, python-daemon,
+# python-linecache2, python-pep8, python-slip-dbus, python-pytz,
+# python-flask-restful, python-pyinotify, python-pygpgme and
+# python-oslo.upgradecheck twice) are now listed exactly once; the
+# original ordering is otherwise preserved.
+RDEPENDS_packagegroup-stak-python = " \
+	python-pygpgme \
+	python-pyparted \
+	python-coverage \
+	python-docker-registry-core \
+	python-flask-restful \
+	python-mox3 \
+	python-pexpect \
+	python-ptyprocess \
+	python-traceback2 \
+	python-typing \
+	python-ujson \
+	python-yappi \
+	python-hp3parclient \
+	python-click \
+	python-daemon \
+	python-ipy \
+	python-ldap \
+	python-linecache2 \
+	python-pep8 \
+	python-pyudev \
+	python-simplegeneric \
+	python-slip-dbus \
+	python-pytz \
+	python-pymysql \
+	python-pyzmq \
+	python-xstatic-jquery.tablesorter \
+	python-xstatic-jquery.quicksearch \
+	python-xstatic-magic-search \
+	python-pyperf \
+	python-urwid \
+	python-urlgrabber \
+	python-semantic-version \
+	python-repoze.lru \
+	python-pyelftools \
+	python-pycurl \
+	python-lefthandclient \
+	python-jwcrypto \
+	python-iniparse \
+	python-ethtool \
+	python-jmespath \
+	python-beaker \
+	python-cherrypy \
+	python-d2to1 \
+	python-firewall \
+	python-ldap3 \
+	python-pyngus \
+	python-requests-oauthlib \
+	python-versiontools \
+	python-dogpile.cache \
+	python-oslo.cache \
+	python-oslo.concurrency \
+	python-oslo.config \
+	python-oslo.context \
+	python-oslo.db \
+	python-oslo.i18n \
+	python-oslo.log \
+	python-oslo.middleware \
+	python-oslo.messaging \
+	python-oslo.policy \
+	python-oslo.rootwrap \
+	python-oslo.serialization \
+	python-oslo.service \
+	python-oslo.upgradecheck \
+	python-oslo.utils \
+	python-oslo.versionedobjects \
+	python-configobj \
+	python-six \
+	python-alembic \
+	python-django-debreach \
+	python-google-auth \
+	python-gunicorn \
+	python-influxdb \
+	python-ldappool \
+	python-munch \
+	python-aniso8601 \
+	python-yaql \
+	python-construct \
+	python-idna \
+	python-selectors34 \
+	python-pymisp \
+	python-scrypt \
+	python-flask \
+	python-itsdangerous \
+	python-flask-sqlalchemy \
+	python-funcsigs \
+	python-requests \
+	python-pymongo \
+	python-pytest-tempdir \
+	python-flask-migrate \
+	python-robotframework \
+	python-webencodings \
+	python-flask-nav \
+	python-lockfile \
+	python-pluggy \
+	python-configparser \
+	python-dateutil \
+	python-enum34 \
+	python-monotonic \
+	python-humanize \
+	python-unidiff \
+	python-snakefood \
+	python-babel \
+	python-jsonpatch \
+	python-cmd2 \
+	python-pyusb \
+	python-strict-rfc3339 \
+	python-robotframework-seriallibrary \
+	python-sijax \
+	python-zopeinterface \
+	python-parse-type \
+	python-semver \
+	python-bcrypt \
+	python-flask-bcrypt \
+	python-pynetlinux \
+	python-pyasn1-modules \
+	python-pyfirmata \
+	python-cryptography \
+	python-flask-babel \
+	python-urllib3 \
+	python-protobuf \
+	python-singledispatch \
+	python-pycodestyle \
+	python-blinker \
+	python-beautifulsoup4 \
+	python-pyalsaaudio \
+	python-sh \
+	python-mako \
+	python-backports-abc \
+	python-flask-script \
+	python-flask-bootstrap \
+	python-asn1crypto \
+	python-pysqlite \
+	python-pybind11 \
+	python-greenlet \
+	python-attr \
+	python-pydbus \
+	python-lazy-object-proxy \
+	python-crcmod \
+	python-pytest \
+	python-pytest-runner \
+	python-pandas \
+	python-pyinotify \
+	python-flask-wtf \
+	python-daemonize \
+	python-wtforms \
+	python-pyiface \
+	python-pretend \
+	pyrtm \
+	python-ipaddress \
+	python-bitarray \
+	python-pyflakes \
+	python-snimpy \
+	python-pysnmp \
+	python-pyjwt \
+	python-hyperlink \
+	python-sdnotify \
+	python-rfc3987 \
+	python-vobject \
+	python-serpent \
+	python-mccabe \
+	python-py \
+	python-speaklater \
+	python-intervals \
+	python-flask-mail \
+	python-ndg-httpsclient \
+	python-wcwidth \
+	python-pyparsing \
+	python-redis \
+	python-psutil \
+	python-grpcio-tools \
+	python-mock \
+	python-javaobj-py3 \
+	python-subprocess32 \
+	python-constantly \
+	python-netaddr \
+	python-pam \
+	python-stevedore \
+	python-dbusmock \
+	python-matplotlib \
+	python-attrs \
+	python-docutils \
+	python-pyopenssl \
+	python-setuptools-scm \
+	python-smbus \
+	python-dnspython \
+	python-pycparser \
+	python-pyserial \
+	python-imaging \
+	python-pint \
+	python-cython \
+	python-flask-login \
+	python-html5lib \
+	python-flask-xstatic \
+	python-chardet \
+	python-flask-uploads \
+	python-inflection \
+	python-twofish \
+	python-jsonschema \
+	python-flask-user \
+	python-behave \
+	python-jinja2 \
+	python-pyasn1 \
+	python-sparts \
+	python-epydoc \
+	python-feedparser \
+	python-pyrex \
+	python-progress \
+	python-can \
+	python-vcversioner \
+	python-markupsafe \
+	python-paho-mqtt \
+	python-grpcio \
+	python-xlrd \
+	python-visitor \
+	python-pyflame \
+	python-jsonpointer \
+	python-numeric \
+	python-pyperclip \
+	python-wrapt \
+	python-appdirs \
+	python-cheetah \
+	python-dbus \
+	python-flask-sijax \
+	python-simplejson \
+	python-webdav \
+	python-pip \
+	python-oauthlib \
+	python-flask-pymongo \
+	python-sqlalchemy \
+	python-django \
+	python-pbr \
+	python-networkx \
+	python-decorator \
+	python-anyjson \
+	python-prompt-toolkit \
+	python-pycrypto \
+	python-cffi \
+	python-djangorestframework \
+	python-future \
+	python-pyroute2 \
+	python-parse \
+	python-feedformatter \
+	python-pysocks \
+	python-pybluez \
+	python-pysmi \
+	python-gdata \
+	python-gevent \
+	python-whoosh \
+	python-xstatic-font-awesome \
+	python-iso8601 \
+	python-flask-navigation \
+	python-pystache \
+	python-lxml \
+	python-prettytable \
+	python-systemd \
+	python-cryptography-vectors \
+	python-pyhamcrest \
+	python-certifi \
+	python-ply \
+	python-webcolors \
+	python-editor \
+	python-django-south \
+	pamela \
+	python-aws-iot-device-sdk-python \
+	python-werkzeug \
+	python-isort \
+	python-periphery \
+	python-pytest-helpers-namespace \
+	python-paste \
+	python-pyyaml \
+	python-dominate \
+	python-msgpack \
+	python-xstatic \
+	python-evdev \
+	python-passlib \
+	python-horizon \
+	python-keyring \
+	python3-linux-procfs \
+	python-murano-pkg-check \
+	python-xstatic-term.js \
+	python-configshell \
+	"
+
+#      python-dogpile.core
diff --git a/meta-stx/recipes-core/packagegroups/packagegroup-stx.bb b/meta-stx/recipes-core/packagegroups/packagegroup-stx.bb
new file mode 100644 (file)
index 0000000..e4487dc
--- /dev/null
@@ -0,0 +1,253 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "StarlingX stx packages"
+
+PR = "r0"
+
+#
+# packages which content depend on MACHINE_FEATURES need to be MACHINE_ARCH
+#
+
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+inherit packagegroup
+
+PROVIDES = "${PACKAGES}"
+PACKAGES = " \
+       packagegroup-stx-upstream \
+       packagegroup-stx-puppet \
+       packagegroup-stx-fault \
+       packagegroup-stx-metal \
+       packagegroup-stx-nfv \
+       packagegroup-stx-monitoring \
+       packagegroup-stx-ha \
+       packagegroup-stx-config \
+       packagegroup-stx-config-files \
+       packagegroup-stx-distributedcloud \
+       packagegroup-stx-update \
+       packagegroup-stx-integ \
+       packagegroup-stx-utilities \
+       packagegroup-stx-armada-app \
+       "
+
+RDEPENDS_packagegroup-stx-puppet = "\
+       stx-puppet \
+       puppet-dcdbsync \
+       puppet-dcmanager \
+       puppet-dcorch \
+       puppet-fm \
+       puppet-mtce \
+       puppet-nfv \
+       puppet-patching \
+       puppet-smapi \
+       puppet-sshd \
+       puppet-sysinv \
+       puppet-manifests \
+       "
+
+RDEPENDS_packagegroup-stx-config = " \
+       config-gate-worker \
+       config-gate \
+       controllerconfig \
+       cgts-client \
+       sysinv-agent \
+       sysinv \
+       workerconfig-subfunction \
+       tsconfig \
+       "
+
+RDEPENDS_packagegroup-stx-config-files  = " \
+       audit-config \
+       dhclient-config \
+       dnsmasq-config \
+       docker-config \
+       initscripts-config \
+       filesystem-scripts \
+       haproxy-config \
+       ioscheduler-config \
+       iptables-config \
+       iscsi-initiator-utils-config \
+       lighttpd-config \
+       logrotate-config \
+       memcached-custom \
+       mlx4-config \
+       net-snmp-config \
+       nfs-utils-config \
+       ntp-config \
+       openldap-config \
+       openssh-config \
+       openvswitch-config \
+       pam-config \
+       rabbitmq-server-config \
+       rsync-config \
+       setup-config \
+       shadow-utils-config \
+       sudo-config \
+       syslog-ng-config \
+       systemd-config \
+       util-linux-config \
+       "
+
+RDEPENDS_packagegroup-stx-fault = " \
+        fm-api \
+        fm-common \
+        fm-doc \
+        fm-mgr \
+        fm-rest-api \
+        python-fmclient \
+        snmp-audittrail \
+        snmp-ext \
+        "
+
+RDEPENDS_packagegroup-stx-ha = " \
+        sm-common-libs \
+        libsm-common \
+        sm \
+        sm-db \
+        sm-api \
+        sm-client \
+        sm-tools \
+       sm-eru \
+        stx-ocf-scripts \
+        "
+
+RDEPENDS_packagegroup-stx-metal = " \
+       inventory \
+       mtce \
+       mtce-pmon \
+       mtce-hwmon \
+       mtce-hostw \
+       mtce-lmon \
+       mtce-compute \
+       mtce-control \
+       mtce-storage \
+       python-inventoryclient \
+       pxe-network-installer \
+       "
+
+RDEPENDS_packagegroup-stx-monitoring = " \
+       collectd-extensions \
+       influxdb-extensions \
+       monitor-tools \
+       vm-topology \
+       "
+
+RDEPENDS_packagegroup-stx-distributedcloud = " \
+       distributedcloud-dcmanager \
+       distributedcloud-dcorch \
+       distributedcloud-dcdbsync \
+       distributedcloud-ocf \
+       "
+
+RDEPENDS_packagegroup-stx-nfv = " \
+       nfv-common \
+       nfv-plugins \
+       nfv-tools \
+       nfv-vim \
+       nfv-client \
+       "
+
+RDEPENDS_packagegroup-stx-upstream = " \
+       barbican \
+       python-neutronclient \
+       python-aodhclient \
+       python-barbican \
+       python-barbicanclient \
+       python-cinderclient \
+       python-glanceclient \
+       python-gnocchiclient \
+       python-django-horizon \
+       python-heatclient \
+       python-ironicclient \
+       python-keystoneauth1 \
+       python-keystoneclient \
+       python-magnumclient \
+       python-muranoclient \
+       python-novaclient \
+       python-openstackclient \
+       python-openstacksdk \
+       python-pankoclient \
+       openstack-ras \
+       "
+
+RDEPENDS_packagegroup-stx-update = " \
+       cgcs-patch \
+       cgcs-patch-agent \
+       cgcs-patch-controller \
+       enable-dev-patch \
+       patch-alarm \
+       "
+
+RDEPENDS_packagegroup-stx-integ = " \
+       dpkg \
+       dtc \
+       ibsh \
+       python-redfishtool \
+       puppet-boolean \
+       puppetlabs-create-resources \
+       puppet-dnsmasq \
+       puppet-drbd \
+       puppet-filemapper \
+       puppet-ldap \
+       puppetlabs-lvm \
+       puppet-network \
+       puppet-nslcd \
+       puppetlabs-postgresql \
+       puppet-puppi \
+       mariadb \
+       drbd-utils \
+       docker-distribution \
+        docker-forward-journald \
+       etcd \
+       kubernetes \
+       ldapscripts \
+       python-3parclient \
+       python-lefthandclient \
+       python-setuptools \
+       python-ryu \
+       spectre-meltdown-checker \
+       kvm-timer-advance \
+       ceph \
+       lldpd \
+        lvm2 \
+        tzdata \
+       "
+
+RDEPENDS_packagegroup-stx-utilities = " \
+       build-info \
+       python-cephclient \
+       ceph-manager \
+       stx-ssl \
+       collector \
+       collect-engtools \
+       logmgmt \
+       namespace-utils \
+       nfscheck \
+       stx-extensions \
+       worker-utils \
+       update-motd \
+       platform-util \
+       pci-irq-affinity \
+       "
+
+RDEPENDS_packagegroup-stx-armada-app = "\
+       monitor-helm \
+       monitor-helm-elastic \
+       openstack-helm \
+       openstack-helm-infra \
+       stx-monitor-helm \
+       stx-openstack-helm \
+       stx-platform-helm \
+       "
diff --git a/meta-stx/recipes-core/stx-ansible-playbooks/files/0001-stx.3.0-rebase-adjust-path.patch b/meta-stx/recipes-core/stx-ansible-playbooks/files/0001-stx.3.0-rebase-adjust-path.patch
new file mode 100644 (file)
index 0000000..0a1628d
--- /dev/null
@@ -0,0 +1,39 @@
+From 91da8b2956e346916f092fe2ce3a18e2422023e7 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Fri, 6 Mar 2020 22:43:25 -0800
+Subject: [PATCH] stx.3.0 rebase: adjust path
+
+---
+ .../roles/bootstrap/apply-bootstrap-manifest/tasks/main.yml     | 2 +-
+ .../src/playbooks/roles/recover-ceph-data/tasks/main.yml        | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/playbookconfig/src/playbooks/roles/bootstrap/apply-bootstrap-manifest/tasks/main.yml b/playbookconfig/src/playbooks/roles/bootstrap/apply-bootstrap-manifest/tasks/main.yml
+index 23591c8..e4c905c 100644
+--- a/playbookconfig/src/playbooks/roles/bootstrap/apply-bootstrap-manifest/tasks/main.yml
++++ b/playbookconfig/src/playbooks/roles/bootstrap/apply-bootstrap-manifest/tasks/main.yml
+@@ -90,7 +90,7 @@
+ - name: Applying puppet bootstrap manifest
+   command: >
+-    /usr/local/bin/puppet-manifest-apply.sh
++    /usr/bin/puppet-manifest-apply.sh
+     {{ hieradata_workdir }}
+     {{ derived_network_params.controller_0_address }}
+     controller ansible_bootstrap > {{ manifest_apply_log }}
+diff --git a/playbookconfig/src/playbooks/roles/recover-ceph-data/tasks/main.yml b/playbookconfig/src/playbooks/roles/recover-ceph-data/tasks/main.yml
+index d92021f..a01a3f6 100644
+--- a/playbookconfig/src/playbooks/roles/recover-ceph-data/tasks/main.yml
++++ b/playbookconfig/src/playbooks/roles/recover-ceph-data/tasks/main.yml
+@@ -45,7 +45,7 @@
+       - name: Applying puppet ceph-mon DRBD manifest
+         command: >
+-          /usr/local/bin/puppet-manifest-apply.sh
++          /usr/bin/puppet-manifest-apply.sh
+           {{ puppet_permdir }}/hieradata/
+           {{ derived_network_params.controller_0_address }}
+           controller runtime /tmp/ceph-mon.yml
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-ansible-playbooks/files/0002-update_sysinv_database-do-not-fail-if-ceph-monitor-a.patch b/meta-stx/recipes-core/stx-ansible-playbooks/files/0002-update_sysinv_database-do-not-fail-if-ceph-monitor-a.patch
new file mode 100644 (file)
index 0000000..903190e
--- /dev/null
@@ -0,0 +1,29 @@
+From 6193c304ae187327fadaaa8c4f780135f0f07aa0 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Mon, 30 Mar 2020 12:03:13 +0800
+Subject: [PATCH] update_sysinv_database: do not fail if ceph monitor already
+ configured
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ .../roles/bootstrap/persist-config/tasks/update_sysinv_database.yml   | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml b/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml
+index e321710..f4d2185 100644
+--- a/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml
++++ b/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml
+@@ -73,7 +73,9 @@
+ - name: Fail if populate config script throws an exception
+   fail:
+     msg: "Failed to provision initial system configuration."
+-  when: populate_result.rc != 0
++  when:
++    - populate_result.rc != 0
++    - '"Ceph monitor already configured" not in populate_result.stderr'
+ # If this is initial play or replay with management and/or oam network
+ # config change, must wait for the keystone endpoint runtime manifest
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-core/stx-ansible-playbooks/files/0003-update_sysinv_database-wait-after-provision.patch b/meta-stx/recipes-core/stx-ansible-playbooks/files/0003-update_sysinv_database-wait-after-provision.patch
new file mode 100644 (file)
index 0000000..f61da43
--- /dev/null
@@ -0,0 +1,27 @@
+From a8310826a3939c1726f7e20ab51e3c8fb970cdc9 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Tue, 31 Mar 2020 17:15:29 +0800
+Subject: [PATCH] update_sysinv_database: wait after provision
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ .../roles/bootstrap/persist-config/tasks/update_sysinv_database.yml    | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml b/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml
+index f4d2185..105e6ef 100644
+--- a/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml
++++ b/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/update_sysinv_database.yml
+@@ -77,6 +77,9 @@
+     - populate_result.rc != 0
+     - '"Ceph monitor already configured" not in populate_result.stderr'
++- name: Wait 30s after provision initial system configuration.
++  wait_for: timeout=30
++
+ # If this is initial play or replay with management and/or oam network
+ # config change, must wait for the keystone endpoint runtime manifest
+ # to complete
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-core/stx-ansible-playbooks/files/0004-bringup_flock_services-use-systmd-for-fminit-and-add.patch b/meta-stx/recipes-core/stx-ansible-playbooks/files/0004-bringup_flock_services-use-systmd-for-fminit-and-add.patch
new file mode 100644 (file)
index 0000000..81b033b
--- /dev/null
@@ -0,0 +1,54 @@
+From e83427fbd1cca8f03adb9769c8c2ac260c5f6996 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Wed, 15 Apr 2020 09:49:34 +0800
+Subject: [PATCH] bringup_flock_services: use systemd for fminit and add retry
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ .../tasks/bringup_flock_services.yml               | 30 ++++++++++++++++++----
+ 1 file changed, 25 insertions(+), 5 deletions(-)
+
+diff --git a/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_flock_services.yml b/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_flock_services.yml
+index 462e1c2..cc239ef 100644
+--- a/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_flock_services.yml
++++ b/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_flock_services.yml
+@@ -36,11 +36,31 @@
+       regexp: "bind_host=.*$"
+       replace: "bind_host={{ controller_floating_address }}"
+-  - name: Restart FM API and bring up FM Manager
+-    command: "{{ item }}"
+-    with_items:
+-      - /etc/init.d/fm-api restart
+-      - /etc/init.d/fminit start
++  - name: Restart FM API
++    systemd:
++      name: fm-api
++      state: restarted
++
++  - name: Wait 10s after Restart FM API
++    wait_for: timeout=10
++
++  - block:
++    - name: Bring up FM Manager
++      systemd:
++        name: fminit
++        state: started
++
++    rescue:
++      - name: FM Manager failed to start, wait 10s then retry
++        wait_for: timeout=10
++      - name: Retry to restart FM Manager
++        systemd:
++          name: fminit
++          state: restarted
++        register: result
++        until: result is not failed
++        retries: 5
++        delay: 10
+   - name: Bring up Maintenance Agent
+     command: /usr/lib/ocf/resource.d/platform/mtcAgent start
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-core/stx-ansible-playbooks/files/0005-persist-config-add-retry-for-etcd.patch b/meta-stx/recipes-core/stx-ansible-playbooks/files/0005-persist-config-add-retry-for-etcd.patch
new file mode 100644 (file)
index 0000000..fd00b14
--- /dev/null
@@ -0,0 +1,36 @@
+From 6722f2ec0b7044cbdeb2e67cfd317375fa54c46d Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Wed, 15 Apr 2020 10:52:15 +0800
+Subject: [PATCH] persist-config: add retry for etcd
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ .../roles/bootstrap/persist-config/tasks/shutdown_services.yml   | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/shutdown_services.yml b/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/shutdown_services.yml
+index 4edd295..0e5fb3b 100644
+--- a/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/shutdown_services.yml
++++ b/playbookconfig/src/playbooks/roles/bootstrap/persist-config/tasks/shutdown_services.yml
+@@ -60,11 +60,16 @@
+         state: restarted
+     rescue:
+-      - name: Etcd failed to restart, try one more time
++      - name: Etcd failed to restart, wait 10s then retry
++        wait_for: timeout=10
++      - name: Retry to restart etcd
+         systemd:
+           name: etcd
+           state: restarted
+-
++        register: result
++        until: result is not failed
++        retries: 5
++        delay: 10
+ - block:  # Revert configuration to loopback interface
+   - name: Set facts derived from previous network configurations
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-core/stx-ansible-playbooks/files/0006-bringup_helm-wait-after-initialize-helm-to-avoid-tim.patch b/meta-stx/recipes-core/stx-ansible-playbooks/files/0006-bringup_helm-wait-after-initialize-helm-to-avoid-tim.patch
new file mode 100644 (file)
index 0000000..a1e6a3f
--- /dev/null
@@ -0,0 +1,27 @@
+From 7841401dd8064e4fc9c6ba3e0b2e19ba51fc29b2 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Wed, 15 Apr 2020 11:05:30 +0800
+Subject: [PATCH] bringup_helm: wait after initialize helm to avoid timeout
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ .../roles/bootstrap/bringup-essential-services/tasks/bringup_helm.yml  | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_helm.yml b/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_helm.yml
+index 32df992..de09070 100644
+--- a/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_helm.yml
++++ b/playbookconfig/src/playbooks/roles/bootstrap/bringup-essential-services/tasks/bringup_helm.yml
+@@ -193,6 +193,9 @@
+       recurse: yes
+   when: inventory_hostname != 'localhost'
++- name: Wait 10s after Initialize Helm
++  wait_for: timeout=10
++
+ - name: Generate Helm repo indicies
+   command: helm repo index "{{ source_helm_bind_dir }}/{{ item }}"
+   become_user: www
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-core/stx-ansible-playbooks/playbookconfig.bb b/meta-stx/recipes-core/stx-ansible-playbooks/playbookconfig.bb
new file mode 100644 (file)
index 0000000..4336ab6
--- /dev/null
@@ -0,0 +1,81 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/patches:${THISDIR}/files:"
+DESCRIPTION = " stx-ansible-playbooks"
+
+STABLE = "starlingx/master"
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "0ad01cd4cae7d5c85e1022b816ed465b334bb2e5"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+# The patch 0001-Treat-the-failure-as-expected-result-if-resize-using.patch
+# needs to be removed if updating to stx 2.0.0 or above.
+SRC_URI = " \
+       git://opendev.org/starlingx/ansible-playbooks.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://0001-stx.3.0-rebase-adjust-path.patch \
+       file://0002-update_sysinv_database-do-not-fail-if-ceph-monitor-a.patch \
+       file://0003-update_sysinv_database-wait-after-provision.patch \
+       file://0004-bringup_flock_services-use-systmd-for-fminit-and-add.patch \
+       file://0005-persist-config-add-retry-for-etcd.patch \
+       file://0006-bringup_helm-wait-after-initialize-helm-to-avoid-tim.patch \
+        "
+
+RDEPENDS_playbookconfig = " \
+       nscd \
+       python \
+       python-netaddr \
+       python-ptyprocess \
+       python-pexpect \
+       python-ansible \
+       sysinv \
+       grub \
+       grubby \
+       dracut \
+       openssl-bin \
+       ipset \
+       "
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install () {
+       cd ${S}/playbookconfig/src
+       oe_runmake -e \
+               DESTDIR=${D}/${datadir}/ansible/stx-ansible
+}
+
+pkg_postinst_ontarget_${PN}() { 
+       cp /etc/ansible/ansible.cfg /etc/ansible/ansible.cfg.orig
+       cp /etc/ansible/hosts /etc/ansible/hosts.orig
+       cp /usr/share/ansible/stx-ansible/playbooks/ansible.cfg /etc/ansible
+       cp /usr/share/ansible/stx-ansible/playbooks/hosts /etc/ansible
+
+}
+
+FILES_${PN} = " \
+       ${datadir} \
+       "
diff --git a/meta-stx/recipes-core/stx-config-files/config-files_1.0.0.bb b/meta-stx/recipes-core/stx-config-files/config-files_1.0.0.bb
new file mode 100644 (file)
index 0000000..33d3b77
--- /dev/null
@@ -0,0 +1,726 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-config-files"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "d778e862571957ece3c404c0c37d325769772fde"
+SRCNAME = "config-files"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+
+# TODO:
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "\
+       file://systemd-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://audit-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://docker-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://filesystem-scripts/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://filesystem-scripts/filesystem-scripts-1.0/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://io-scheduler/centos/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://iptables-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://lighttpd-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://logrotate-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://mlx4-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://ntp-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://openldap-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://openvswitch-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://shadow-utils-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://sudo-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://syslog-ng-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://systemd-config/files/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       "
+
+SRC_URI = " \
+       git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://openssh-config-rm-hmac-ripemd160.patch \
+       file://util-linux-pam-postlogin.patch \
+       file://syslog-ng-config-parse-err.patch \
+       file://syslog-ng-config-systemd-service.patch \
+       file://syslog-ng-conf-fix-the-source.patch \
+       file://syslog-ng-conf-replace-match-with-message.patch \
+       "
+
+do_configure () {
+       :
+}
+
+do_compile () {
+       :
+}
+
+do_install () {
+       install -m 0755 -d ${D}/${datadir}/starlingx/config-files
+       # for f in $(find ./ -not -path "./docker-config/*" -name '*\.spec' | cut -d '/' -f2);
+       for f in $(find ./ -name '*\.spec' | cut -d '/' -f2);
+       do 
+               tar -c $f -f - | tar -C ${D}/${datadir}/starlingx/config-files -xf -;
+       done
+       find ${D}/${datadir}/starlingx/config-files -name centos -exec rm -rf {} +
+       rm -rf ${D}/${datadir}/starlingx/config-files/centos-release-config 
+       chown -R root:root ${D}/${datadir}/starlingx/config-files/
+}
+
+PACKAGES ?= ""
+PACKAGES += "audit-config"
+PACKAGES += "dhclient-config"
+PACKAGES += "dnsmasq-config"
+PACKAGES += "docker-config"
+PACKAGES += "initscripts-config"
+PACKAGES += "filesystem-scripts"
+PACKAGES += "haproxy-config"
+PACKAGES += "ioscheduler-config"
+PACKAGES += "iptables-config"
+PACKAGES += "iscsi-initiator-utils-config"
+PACKAGES += "lighttpd-config"
+PACKAGES += "logrotate-config"
+PACKAGES += "memcached-custom"
+PACKAGES += "mlx4-config"
+PACKAGES += "net-snmp-config"
+PACKAGES += "nfs-utils-config"
+PACKAGES += "ntp-config"
+PACKAGES += "openldap-config"
+PACKAGES += "openssh-config"
+PACKAGES += "openvswitch-config"
+PACKAGES += "pam-config"
+PACKAGES += "rabbitmq-server-config"
+PACKAGES += "rsync-config"
+PACKAGES += "setup-config"
+PACKAGES += "shadow-utils-config"
+PACKAGES += "sudo-config"
+PACKAGES += "syslog-ng-config"
+PACKAGES += "systemd-config"
+PACKAGES += "util-linux-config"
+
+
+FILES_audit-config = "${datadir}/starlingx/config-files/audit-config/"
+FILES_dhclient-config = "${datadir}/starlingx/config-files/dhcp-config/"
+FILES_dnsmasq-config = "${datadir}/starlingx/config-files/dnsmasq-config/"
+FILES_docker-config = "${datadir}/starlingx/config-files/docker-config/"
+FILES_initscripts-config = "${datadir}/starlingx/config-files/initscripts-config/"
+FILES_filesystem-scripts= "${datadir}/starlingx/config-files/filesystem-scripts/"
+FILES_haproxy-config= "${datadir}/starlingx/config-files/haproxy-config/"
+FILES_ioscheduler-config= "${datadir}/starlingx/config-files/io-scheduler/"
+FILES_iptables-config= "${datadir}/starlingx/config-files/iptables-config/"
+FILES_iscsi-initiator-utils-config = "${datadir}/starlingx/config-files/iscsi-initiator-utils-config/"
+FILES_lighttpd-config= "${datadir}/starlingx/config-files/lighttpd-config/"
+FILES_logrotate-config= "${datadir}/starlingx/config-files/logrotate-config/"
+FILES_memcached-custom = "${datadir}/starlingx/config-files/memcached-custom/"
+FILES_mlx4-config= "${datadir}/starlingx/config-files/mlx4-config/"
+FILES_net-snmp-config= "${datadir}/starlingx/config-files/net-snmp-config/"
+FILES_nfs-utils-config= "${datadir}/starlingx/config-files/nfs-utils-config/"
+FILES_ntp-config= "${datadir}/starlingx/config-files/ntp-config/"
+FILES_openldap-config= "${datadir}/starlingx/config-files/openldap-config/"
+FILES_openssh-config= "${datadir}/starlingx/config-files/openssh-config/"
+FILES_openvswitch-config= "${datadir}/starlingx/config-files/openvswitch-config/"
+FILES_pam-config= "${datadir}/starlingx/config-files/pam-config/"
+FILES_rabbitmq-server-config= "${datadir}/starlingx/config-files/rabbitmq-server-config/"
+FILES_rsync-config= "${datadir}/starlingx/config-files/rsync-config/"
+FILES_setup-config= "${datadir}/starlingx/config-files/setup-config/"
+FILES_shadow-utils-config= "${datadir}/starlingx/config-files/shadow-utils-config/"
+FILES_sudo-config= "${datadir}/starlingx/config-files/sudo-config/"
+FILES_syslog-ng-config= "${datadir}/starlingx/config-files/syslog-ng-config/"
+FILES_systemd-config= "${datadir}/starlingx/config-files/systemd-config/"
+FILES_util-linux-config= "${datadir}/starlingx/config-files/util-linux-config/"
+
+RDEPENDS_audit-config += " \
+       audit \
+       auditd \
+       audit-python \
+       "
+RDEPENDS_dhclient-config += "dhcp-client"
+RDEPENDS_dnsmasq-config += ""
+RDEPENDS_docker-config += "docker-ce logrotate "
+RDEPENDS_initscripts-config += "initscripts"
+RDEPENDS_filesystem-scripts += ""
+RDEPENDS_haproxy-config += "haproxy"
+RDEPENDS_ioscheduler-config += ""
+RDEPENDS_iptables-config += "iptables"
+RDEPENDS_iscsi-initiator-utils-config += " iscsi-initiator-utils"
+RDEPENDS_lighttpd-config += " \
+       lighttpd \
+       lighttpd-module-proxy \
+       lighttpd-module-setenv \
+       "
+RDEPENDS_logrotate-config += " logrotate cronie"
+RDEPENDS_memcached-custom += "memcached"
+RDEPENDS_mlx4-config += ""
+RDEPENDS_net-snmp-config += " net-snmp"
+RDEPENDS_nfs-utils-config += " nfs-utils"
+RDEPENDS_ntp-config += " ntp"
+RDEPENDS_openldap-config += " \
+       openldap \
+       "
+RRECOMMENDS_openldap-config += " \
+       openldap-slapd \
+       openldap-backend-shell \
+       openldap-backend-passwd \
+       openldap-backend-null \
+       openldap-backend-monitor \
+       openldap-backend-meta \
+       openldap-backend-ldap \
+       openldap-backend-dnssrv \
+       openldap-staticdev \
+       openldap-locale \
+       openldap-overlay-proxycache \
+       openldap-slapd \
+       openldap-slurpd \
+       openldap-bin \
+       "
+
+RDEPENDS_openssh-config += " openssh"
+RDEPENDS_openvswitch-config += " openvswitch"
+RDEPENDS_pam-config += " \
+       libpam-runtime \
+       nss-pam-ldapd \
+       libpwquality \
+       pam-plugin-access \
+       pam-plugin-cracklib \
+       pam-plugin-debug \
+       pam-plugin-deny \
+       pam-plugin-echo \
+       pam-plugin-env \
+       pam-plugin-exec \
+       pam-plugin-faildelay \
+       pam-plugin-filter \
+       pam-plugin-ftp \
+       pam-plugin-group \
+       pam-plugin-issue \
+       pam-plugin-keyinit \
+       pam-plugin-lastlog \
+       pam-plugin-limits \
+       pam-plugin-listfile \
+       pam-plugin-localuser \
+       pam-plugin-loginuid \
+       pam-plugin-mail \
+       pam-plugin-mkhomedir \
+       pam-plugin-motd \
+       pam-plugin-namespace \
+       pam-plugin-nologin \
+       pam-plugin-permit \
+       pam-plugin-pwhistory \
+       pam-plugin-rhosts \
+       pam-plugin-rootok \
+       pam-plugin-securetty \
+       pam-plugin-shells \
+       pam-plugin-stress \
+       pam-plugin-succeed-if \
+       pam-plugin-tally \
+       pam-plugin-tally2 \
+       pam-plugin-time \
+       pam-plugin-timestamp \
+       pam-plugin-umask \
+       pam-plugin-unix \
+       pam-plugin-warn \
+       pam-plugin-wheel \
+       pam-plugin-xauth \
+       "
+RDEPENDS_rabbitmq-server-config += " rabbitmq-server"
+RDEPENDS_rsync-config += " rsync"
+RDEPENDS_setup-config += ""
+RDEPENDS_shadow-utils-config += " shadow"
+RDEPENDS_sudo-config += " sudo"
+RDEPENDS_syslog-ng-config += " syslog-ng"
+RDEPENDS_systemd-config += " systemd"
+RDEPENDS_util-linux-config += " util-linux"
+
+pkg_postinst_ontarget_audit-config() {
+       cp -f ${datadir}/starlingx/config-files/audit-config/files/syslog.conf ${sysconfdir}/audisp/plugins.d/syslog.conf
+       chmod 640 ${sysconfdir}/audisp/plugins.d/syslog.conf
+}
+
+pkg_postinst_ontarget_dhclient-config() {
+       SRCPATH=${datadir}/starlingx/config-files/dhcp-config/files
+       install -m 0755 -p ${SRCPATH}/dhclient-enter-hooks ${sysconfdir}/dhcp/dhclient-enter-hooks
+       install -m 0755 -p ${SRCPATH}/dhclient.conf ${sysconfdir}/dhcp/dhclient/dhclient.conf
+       ln -fs ${sysconfdir}/dhcp/dhclient-enter-hooks ${sysconfdir}/dhclient-enter-hooks
+}
+       
+pkg_postinst_ontarget_dnsmasq-config() {
+       install -m 755 ${datadir}/starlingx/config-files/dnsmasq-config/files/init ${sysconfdir}/init.d/dnsmasq
+}
+
+pkg_postinst_ontarget_docker-config() {
+       SRCPATH=${datadir}/starlingx/config-files/docker-config/files
+       install -d -m 0755 ${sysconfdir}/systemd/system/docker.service.d
+
+       install -D -m 644 ${SRCPATH}/docker-pmond.conf ${sysconfdir}/pmon.d/docker.conf
+       install -D -m 644 ${SRCPATH}/docker-stx-override.conf \
+                       ${sysconfdir}/systemd/system/docker.service.d/docker-stx-override.conf 
+       install -D -m 644 ${SRCPATH}/docker.logrotate ${sysconfdir}/logrotate.d/docker.logrotate
+}
+
+pkg_postinst_ontarget_filesystem-scripts() {
+       SRCPATH=${datadir}/starlingx/config-files/filesystem-scripts/filesystem-scripts-1.0
+       install -D -m 755 ${SRCPATH}/uexportfs ${sysconfdir}/init.d/uexportfs
+
+       install -d -m 0755 /usr/lib/ocf/resource.d/platform/
+       install -D -m 755 ${SRCPATH}/nfsserver-mgmt /usr/lib/ocf/resource.d/platform/nfsserver-mgmt
+
+       install -p -D -m 755 ${SRCPATH}/nfs-mount ${bindir}/nfs-mount
+       install -D -m 755 ${SRCPATH}/uexportfs.service ${systemd_system_unitdir}/uexportfs.service
+
+       systemctl enable uexportfs.service
+}
+
+
+pkg_postinst_ontarget_haproxy-config() {
+
+       install -d -m 755 ${sysconfdir}/haproxy/errors/
+       install -m 755 ${datadir}/starlingx/config-files/haproxy-config/files/503.http ${sysconfdir}/haproxy/errors/503.http
+
+       install -m 644 ${datadir}/starlingx/config-files/haproxy-config/files/haproxy.service ${sysconfdir}/systemd/system/
+       install -p -D -m 0755 ${datadir}/starlingx/config-files/haproxy-config/files/haproxy.sh ${sysconfdir}/init.d/haproxy
+
+       /bin/systemctl disable haproxy.service
+       if test -s ${sysconfdir}/logrotate.d/haproxy ; then
+           echo '#See /etc/logrotate.d/syslog for haproxy rules' > ${sysconfdir}/logrotate.d/haproxy
+       fi
+}
+
+pkg_postinst_ontarget_initscripts-config() {
+       install -d  -m 755 ${sysconfdir}/sysconfig
+       install -d  -m 755 ${sysconfdir}/init.d
+       install -d  -m 755 ${systemd_system_unitdir}
+
+       SRCPATH=${datadir}/starlingx/config-files/initscripts-config/files
+       install -m  644 ${SRCPATH}/sysctl.conf ${datadir}/starlingx/stx.sysctl.conf
+       install -m  644 ${SRCPATH}/sysconfig-network.conf ${sysconfdir}/sysconfig/network
+       install -m  755 ${SRCPATH}/mountnfs.sh ${sysconfdir}/init.d/mountnfs
+       install -m  644 ${SRCPATH}/mountnfs.service ${systemd_system_unitdir}/mountnfs.service
+
+
+       cp -f ${datadir}/starlingx/stx.sysctl.conf ${sysconfdir}/sysctl.conf
+       chmod 644 ${sysconfdir}/sysctl.conf
+}
+
+pkg_postinst_ontarget_iscsi-initiator-utils-config() {
+#      %description
+#      package StarlingX configuration files of iscsi-initiator-utils to system folder.
+
+#      install -d  ${libdir}/tmpfiles.d
+#      install -d  ${sysconfdir}/systemd/system
+#      install -d  ${datadir}/starlingx
+
+       SRCPATH=${datadir}/starlingx/config-files/iscsi-initiator-utils-config/files
+       tmpfilesdir=${libdir}/tmpfiles.d
+
+       install -m 0644 ${SRCPATH}/iscsi-cache.volatiles   ${tmpfilesdir}/iscsi-cache.conf
+       install -m 0644 ${SRCPATH}/iscsi-shutdown.service  ${sysconfdir}/systemd/system
+       install -m 0644 ${SRCPATH}/iscsid.conf             ${datadir}/starlingx/stx.iscsid.conf
+
+       cp -f ${datadir}/starlingx/stx.iscsid.conf ${sysconfdir}/iscsi/iscsid.conf
+       chmod 0750 ${sysconfdir}/iscsi
+       chmod 0640 ${sysconfdir}/iscsi/iscsid.conf
+       
+       /bin/systemctl disable iscsi-shutdown.service
+}
+
+pkg_postinst_ontarget_lighttpd-config() {
+#      %description
+#      StarlingX lighttpd configuration file
+
+       CONFDIR=${sysconfdir}/lighttpd
+       ROOTDIR=/www
+       SRCPATH=${datadir}/starlingx/config-files/lighttpd-config/files
+
+       install -d -m 1777 ${ROOTDIR}/tmp
+       install -d ${CONFDIR}/ssl
+       install -d ${ROOTDIR}/pages/dav
+	install -m640 ${SRCPATH}/lighttpd.conf          ${datadir}/starlingx/lighttpd.conf
+       install -m755 ${SRCPATH}/lighttpd.init          ${datadir}/starlingx/lighttpd.init
+       install -m644 ${SRCPATH}/lighttpd-inc.conf      ${CONFDIR}/lighttpd-inc.conf
+       install -m644 ${SRCPATH}/index.html.lighttpd    ${ROOTDIR}/pages/index.html
+
+       install -d ${sysconfdir}/logrotate.d
+       install -m644 ${SRCPATH}/lighttpd.logrotate     ${datadir}/starlingx/lighttpd.logrotate
+       chmod 02770 ${sysconfdir}/lighttpd
+
+       cp --preserve=xattr -f ${datadir}/starlingx/lighttpd.conf  ${sysconfdir}/lighttpd/lighttpd.conf
+       chmod 640 ${sysconfdir}/lighttpd/lighttpd.conf
+       cp --preserve=xattr -f ${datadir}/starlingx/lighttpd.logrotate ${sysconfdir}/logrotate.d/lighttpd
+       chmod 644 ${sysconfdir}/logrotate.d/lighttpd
+
+       # /etc/rc.d/init.d/lighttpd is not a config file, so replace it here if it doesn't match
+       cp --preserve=xattr -f ${datadir}/starlingx/lighttpd.init ${sysconfdir}/rc.d/init.d/lighttpd
+       chmod 755 ${sysconfdir}/rc.d/init.d/lighttpd
+}
+
+pkg_postinst_ontarget_logrotate-config() {
+#      %description
+#      StarlingX logrotate configuration file
+
+       SRCPATH=${datadir}/starlingx/config-files/logrotate-config/files
+
+       install -m 644 ${SRCPATH}/logrotate-cron.d ${sysconfdir}/cron.d/logrotate
+       install -m 644 ${SRCPATH}/logrotate.conf ${datadir}/starlingx/logrotate.conf
+
+       cp -f ${datadir}/starlingx/logrotate.conf ${sysconfdir}/logrotate.conf 
+       chmod 644 ${sysconfdir}/logrotate.conf
+       mv ${sysconfdir}/cron.daily/logrotate ${sysconfdir}/logrotate.cron
+       chmod 700 ${sysconfdir}/logrotate.cron
+}
+
+
+pkg_postinst_ontarget_memcached-custom() {
+#      Summary: package memcached service files to system folder.
+
+       SRCPATH=${datadir}/starlingx/config-files/memcached-custom/files
+       install -m 644 -p ${SRCPATH}/memcached.service ${sysconfdir}/systemd/system/memcached.service
+}
+
+
+pkg_postinst_ontarget_mlx4-config() {
+#      %description
+#      Wind River Mellanox port-type configuration scripts
+       SRCPATH=${datadir}/starlingx/config-files/mlx4-config/files
+
+#      /bin/systemctl disable mlx4-config.service >/dev/null 2>&1
+
+       install -m 755 ${SRCPATH}/mlx4-configure.sh     ${sysconfdir}/init.d/
+       install -m 644 ${SRCPATH}/mlx4-config.service   ${systemd_system_unitdir}/
+       install -m 555 ${SRCPATH}/mlx4_core_goenabled.sh ${sysconfdir}/goenabled.d/
+       install -m 755 ${SRCPATH}/mlx4_core_config.sh   ${bindir}/
+
+       /bin/systemctl enable mlx4-config.service >/dev/null 2>&1
+}
+
+
+pkg_postinst_ontarget_net-snmp-config() {
+#      %description
+#      package StarlingX configuration files of net-snmp to system folder.
+
+       SRCPATH=${datadir}/starlingx/config-files/net-snmp-config/files
+
+       install -d ${datadir}/snmp
+
+       install -m 644 ${SRCPATH}/stx.snmpd.conf    ${datadir}/starlingx/stx.snmpd.conf
+       install -m 755 ${SRCPATH}/stx.snmpd         ${sysconfdir}/rc.d/init.d/snmpd
+       install -m 660 ${SRCPATH}/stx.snmp.conf     ${datadir}/snmp/snmp.conf
+       install -m 644 ${SRCPATH}/snmpd.service     ${sysconfdir}/systemd/system/snmpd.service
+       
+       
+       cp -f ${datadir}/starlingx/stx.snmpd.conf   ${sysconfdir}/snmp/snmpd.conf
+       chmod 640 ${sysconfdir}/snmp/snmpd.conf
+       chmod 640 ${sysconfdir}/snmp/snmptrapd.conf
+       
+       /bin/systemctl disable snmpd.service
+}
+
+
+pkg_postinst_ontarget_nfs-utils-config() {
+#      %description
+#      package customized configuration and service files of nfs-utils to system folder.
+
+
+       SRCPATH=${datadir}/starlingx/config-files/nfs-utils-config/files
+       
+
+       install -m 755 -p -D ${SRCPATH}/nfscommon               ${sysconfdir}/init.d
+       install -m 644 -p -D ${SRCPATH}/nfscommon.service       ${systemd_system_unitdir}/
+       install -m 755 -p -D ${SRCPATH}/nfsserver               ${sysconfdir}/init.d
+       install -m 644 -p -D ${SRCPATH}/nfsserver.service       ${systemd_system_unitdir}
+       install -m 644 -p -D ${SRCPATH}/nfsmount.conf           ${datadir}/starlingx/stx.nfsmount.conf
+       
+       cp -f ${datadir}/starlingx/stx.nfsmount.conf ${sysconfdir}/nfsmount.conf
+       chmod 644 ${sysconfdir}/nfsmount.conf
+
+       # STX - disable these service files as rpc-statd is started by nfscommon
+       /bin/systemctl disable rpc-statd.service
+       /bin/systemctl disable rpc-statd-notify.service
+       /bin/systemctl disable nfs-lock.service
+       /bin/systemctl disable nfslock.service 
+
+       /bin/systemctl enable nfscommon.service  >/dev/null 2>&1 || :
+       /bin/systemctl enable nfsserver.service  >/dev/null 2>&1 || :
+
+	# For now skipping the preun rule
+       #/bin/systemctl disable nfscommon.service >/dev/null 2>&1 || :
+       #/bin/systemctl disable nfsserver.service >/dev/null 2>&1 || :
+
+}
+
+pkg_postinst_ontarget_ntp-config() {
+#      %description
+#      StarlingX ntp configuration file
+
+       SRCPATH=${datadir}/starlingx/config-files/ntp-config/files
+       install -D -m644 ${SRCPATH}/ntpd.sysconfig ${datadir}/starlingx/ntpd.sysconfig
+       install -D -m644 ${SRCPATH}/ntp.conf ${datadir}/starlingx/ntp.conf
+
+       cp -f ${datadir}/starlingx/ntpd.sysconfig ${sysconfdir}/sysconfig/ntpd
+       cp -f ${datadir}/starlingx/ntp.conf ${sysconfdir}/ntp.conf
+       chmod 644 ${sysconfdir}/sysconfig/ntpd
+       chmod 644 ${sysconfdir}/ntp.conf
+}
+
+
+pkg_postinst_ontarget_openldap-config() {
+#      $description
+#      StarlingX openldap configuration file
+
+       SRCPATH=${datadir}/starlingx/config-files/openldap-config/files
+
+       install -m 755 ${SRCPATH}/initscript ${sysconfdir}/init.d/openldap
+       install -m 600 ${SRCPATH}/slapd.conf ${sysconfdir}/openldap/slapd.conf
+
+       install -m 600 ${SRCPATH}/initial_config.ldif ${sysconfdir}/openldap/initial_config.ldif
+
+       install -m 644 ${SRCPATH}/slapd.service ${sysconfdir}/systemd/system/slapd.service
+       install -m 644 ${SRCPATH}/slapd.sysconfig ${datadir}/starlingx/slapd.sysconfig 
+
+       sed -i -e 's|/var/run|/run|' ${sysconfdir}/systemd/system/slapd.service
+       
+       cp -f ${datadir}/starlingx/slapd.sysconfig ${sysconfdir}/sysconfig/slapd
+	chmod 644 ${sysconfdir}/systemd/system/slapd.service
+}
+
+pkg_postinst_ontarget_openssh-config() {
+#      %description
+#      package StarlingX configuration files of openssh to system folder.
+
+
+       SRCPATH=${datadir}/starlingx/config-files/openssh-config/files
+
+       install -m 644 ${SRCPATH}/sshd.service  ${sysconfdir}/systemd/system/sshd.service
+       install -m 644 ${SRCPATH}/ssh_config    ${datadir}/starlingx/ssh_config
+       install -m 600 ${SRCPATH}/sshd_config   ${datadir}/starlingx/sshd_config
+
+       # remove the unsupported and deprecated options
+       sed -i -e 's/^\(GSSAPIAuthentication.*\)/#\1/' \
+              -e 's/^\(GSSAPICleanupCredentials.*\)/#\1/' \
+              -e 's/^\(UsePrivilegeSeparation.*\)/#\1/' \
+              ${datadir}/starlingx/sshd_config
+
+       sed -i -e 's/\(GSSAPIAuthentication yes\)/#\1/' ${datadir}/starlingx/ssh_config
+       
+       cp -f ${datadir}/starlingx/ssh_config  ${sysconfdir}/ssh/ssh_config
+       cp -f ${datadir}/starlingx/sshd_config ${sysconfdir}/ssh/sshd_config
+}
+
+pkg_postinst_ontarget_openvswitch-config() {
+#      %description
+#      StarlingX openvswitch configuration file
+
+       SRCPATH=${datadir}/starlingx/config-files/openvswitch-config/files
+
+       install -m 0644 ${SRCPATH}/ovsdb-server.pmon.conf ${sysconfdir}/openvswitch/ovsdb-server.pmon.conf
+       install -m 0644 ${SRCPATH}/ovs-vswitchd.pmon.conf ${sysconfdir}/openvswitch/ovs-vswitchd.pmon.conf
+       install -m 0640 ${SRCPATH}/etc_logrotate.d_openvswitch ${datadir}/starlingx/etc_logrotate.d_openvswitch
+       
+       cp -f ${datadir}/starlingx/etc_logrotate.d_openvswitch ${sysconfdir}/logrotate.d/openvswitch
+       chmod 644 ${sysconfdir}/logrotate.d/openvswitch
+}
+
+pkg_postinst_ontarget_pam-config() {
+#      %description
+#      package StarlingX configuration files of pam to system folder.
+
+       SRCPATH=${datadir}/starlingx/config-files/pam-config/files
+
+       install  -m 644 ${SRCPATH}/sshd.pam        ${datadir}/starlingx/sshd.pam
+       install  -m 644 ${SRCPATH}/common-account  ${sysconfdir}/pam.d/common-account
+       install  -m 644 ${SRCPATH}/common-auth     ${sysconfdir}/pam.d/common-auth
+       install  -m 644 ${SRCPATH}/common-password ${sysconfdir}/pam.d/common-password
+       install  -m 644 ${SRCPATH}/common-session  ${sysconfdir}/pam.d/common-session
+       install  -m 644 ${SRCPATH}/common-session-noninteractive ${sysconfdir}/pam.d/common-session-noninteractive
+       install  -m 644 ${SRCPATH}/system-auth.pamd ${datadir}/starlingx/stx.system-auth
+       
+       cp -f ${datadir}/starlingx/stx.system-auth ${sysconfdir}/pam.d/system-auth
+       cp -f ${datadir}/starlingx/sshd.pam    ${sysconfdir}/pam.d/sshd
+}
+
+pkg_postinst_ontarget_rabbitmq-server-config() {
+#      %description
+#      package StarlingX configuration files of rabbitmq-server to system folder.
+
+       SRCPATH=${datadir}/starlingx/config-files/rabbitmq-server-config/files
+
+       install -d ${libdir}/ocf/resource.d/rabbitmq
+       install -m 0755 ${SRCPATH}/rabbitmq-server.ocf              ${libdir}/ocf/resource.d/rabbitmq/stx.rabbitmq-server
+       install -m 0644 ${SRCPATH}/rabbitmq-server.service.example  ${sysconfdir}/systemd/system/rabbitmq-server.service
+       install -m 0644 ${SRCPATH}/rabbitmq-server.logrotate        ${datadir}/starlingx/stx.rabbitmq-server.logrotate
+
+       sed -i -e 's/notify/simple/' ${sysconfdir}/systemd/system/rabbitmq-server.service
+
+       cp ${datadir}/starlingx/stx.rabbitmq-server.logrotate ${sysconfdir}/logrotate.d/rabbitmq-server
+}
+
+pkg_postinst_ontarget_rsync-config() {
+#      %description
+#      package StarlingX configuration files of rsync to system folder.
+
+       SRCPATH=${datadir}/starlingx/config-files/rsync-config/files
+
+       install -m 644 ${SRCPATH}/rsyncd.conf  ${datadir}/starlingx/stx.rsyncd.conf
+       
+       cp -f ${datadir}/starlingx/stx.rsyncd.conf  ${sysconfdir}/rsyncd.conf
+}
+
+pkg_postinst_ontarget_setup-config() {
+#      %description
+#      package StarlingX configuration files of setup to system folder.
+
+       SRCPATH=${datadir}/starlingx/config-files/setup-config/files
+
+       install -m 644 ${SRCPATH}/motd          ${datadir}/starlingx/stx.motd
+       install -m 644 ${SRCPATH}/prompt.sh     ${sysconfdir}/profile.d/prompt.sh
+       install -m 644 ${SRCPATH}/custom.sh     ${sysconfdir}/profile.d/custom.sh
+
+       cp -f ${datadir}/starlingx/stx.motd    ${sysconfdir}/motd
+	chmod 600   ${sysconfdir}/exports ${sysconfdir}/fstab
+}
+
+pkg_postinst_ontarget_shadow-utils-config() {
+#      %description
+#      StarlingX shadow-utils configuration file
+
+       SRCPATH=${datadir}/starlingx/config-files/shadow-utils-config/files
+
+       install -D -m644 ${SRCPATH}/login.defs ${datadir}/starlingx/login.defs
+       install -D -m644 ${SRCPATH}/clear_shadow_locks.service  ${systemd_system_unitdir}/clear_shadow_locks.service
+
+       cp -f ${datadir}/starlingx/login.defs ${sysconfdir}/login.defs
+       chmod 644 ${sysconfdir}/login.defs
+       /bin/systemctl preset clear_shadow_locks.service
+}
+
+pkg_postinst_ontarget_sudo-config() {
+#      %description
+#      StarlingX sudo configuration file
+
+       SYSADMIN_P="4SuW8cnXFyxsk"
+       SRCPATH=${datadir}/starlingx/config-files/sudo-config/files
+
+       install -m 440 ${SRCPATH}/sysadmin.sudo  ${sysconfdir}/sudoers.d/sysadmin
+
+       getent group sys_protected >/dev/null || groupadd -f -g 345 sys_protected
+       getent passwd sysadmin > /dev/null || \
+               useradd -m -g sys_protected -G root  -d /home/sysadmin -p ${SYSADMIN_P} -s /bin/sh sysadmin 2> /dev/null || :
+}
+
+pkg_postinst_syslog-ng-config() {
+#      %description
+#      StarlingX syslog-ng configuration file
+
+       SRCPATH=$D${datadir}/starlingx/config-files/syslog-ng-config/files
+
+       install -D -m644 ${SRCPATH}/syslog-ng.conf $D${datadir}/starlingx/syslog-ng.conf
+
+       # Fix the config version to avoid warning
+       sed -i -e 's/\(@version: \).*/\1 3.19/' $D${datadir}/starlingx/syslog-ng.conf
+
+	# Workaround: comment out the udp source to avoid the service failing to start at boot time
+       sed -i -e 's/\(.*s_udp.*\)/#\1/' $D${datadir}/starlingx/syslog-ng.conf
+
+       install -D -m644 ${SRCPATH}/syslog-ng.logrotate $D${datadir}/starlingx/syslog-ng.logrotate
+       install -D -m644 ${SRCPATH}/remotelogging.conf $D${sysconfdir}/syslog-ng/remotelogging.conf
+       install -D -m700 ${SRCPATH}/fm_event_syslogger $D${sbindir}/fm_event_syslogger
+       install -D -m644 ${SRCPATH}/syslog-ng.service $D${datadir}/starlingx/syslog-ng.service
+
+       cp -f $D${datadir}/starlingx/syslog-ng.conf $D${sysconfdir}/syslog-ng/syslog-ng.conf
+       chmod 644 $D${sysconfdir}/syslog-ng/syslog-ng.conf
+       cp -f $D${datadir}/starlingx/syslog-ng.logrotate $D${sysconfdir}/logrotate.d/syslog
+       chmod 644 $D${sysconfdir}/logrotate.d/syslog
+       cp -f $D${datadir}/starlingx/syslog-ng.service $D${systemd_system_unitdir}/syslog-ng.service
+       chmod 644 $D${systemd_system_unitdir}/syslog-ng.service
+
+       # enable syslog-ng service by default
+       OPTS=""
+       if [ -n "$D" ]; then
+               OPTS="--root=$D"
+       fi
+       if [ -z "$D" ]; then
+               systemctl daemon-reload
+       fi
+
+       systemctl $OPTS enable syslog-ng.service
+
+       if [ -z "$D" ]; then
+               systemctl --no-block restart syslog-ng.service
+       fi
+
+# TODO
+#preun:
+#      %systemd_preun syslog-ng.service 
+#postun:
+#      ldconfig
+#      %systemd_postun_with_restart syslog-ng.service 
+#      systemctl daemon-reload 2>&1 || :
+#      systemctl try-restart 
+}
+
+pkg_postinst_ontarget_systemd-config() {
+#      %description
+#      StarlingX systemd configuration file
+
+       SRCPATH=${datadir}/starlingx/config-files/systemd-config/files
+
+       install -m644 ${SRCPATH}/60-persistent-storage.rules ${sysconfdir}/udev/rules.d/60-persistent-storage.rules
+       install -m644 ${SRCPATH}/journald.conf ${datadir}/starlingx/journald.conf
+       install -m644 ${SRCPATH}/systemd.conf.tmpfiles.d ${sysconfdir}/tmpfiles.d/systemd.conf
+       install -m644 ${SRCPATH}/tmp.conf.tmpfiles.d ${sysconfdir}/tmpfiles.d/tmp.conf
+       install -m644 ${SRCPATH}/tmp.mount ${sysconfdir}/systemd/system/tmp.mount
+
+       cp -f ${datadir}/starlingx/journald.conf ${sysconfdir}/systemd/journald.conf
+       chmod 644 ${sysconfdir}/systemd/journald.conf
+}
+
+pkg_postinst_ontarget_util-linux-config() {
+#      %description
+#      package StarlingX configuration files of util-linux to system folder.
+
+       SRCPATH=${datadir}/starlingx/config-files/util-linux-config/files
+
+       install -m 644 ${SRCPATH}/stx.su     ${datadir}/starlingx/stx.su
+       install -m 644 ${SRCPATH}/stx.login  ${datadir}/starlingx/stx.login
+       install -m 644 ${SRCPATH}/stx.postlogin ${datadir}/starlingx/stx.postlogin
+
+       cp -f ${datadir}/starlingx/stx.su ${sysconfdir}/pam.d/su
+       cp -f ${datadir}/starlingx/stx.login  ${sysconfdir}/pam.d/login
+       cp -f ${datadir}/starlingx/stx.postlogin  ${sysconfdir}/pam.d/postlogin
+
+}
+
+pkg_postinst_ontarget_ioscheduler-config() {
+#      %description
+#      CGCS io scheduler configuration and tuning.
+
+       SRCPATH=${datadir}/starlingx/config-files/io-scheduler/
+
+       install -m 644 ${SRCPATH}/60-io-scheduler.rules ${sysconfdir}/udev/rules.d/60-io-scheduler.rules
+
+       /bin/udevadm control --reload-rules
+       /bin/udevadm trigger --type=devices --subsystem-match=block
+}
+
+pkg_postinst_ontarget_iptables-config() {
+#      %description
+#      StarlingX iptables configuration file
+
+       SRCPATH=${datadir}/starlingx/config-files/iptables-config/files
+       
+       install -m 600 ${SRCPATH}/iptables.rules ${datadir}/starlingx/iptables.rules
+       install -m 600 ${SRCPATH}/ip6tables.rules ${datadir}/starlingx/ip6tables.rules 
+       
+	cp -f ${datadir}/starlingx/iptables.rules ${sysconfdir}/sysconfig/iptables
+       chmod 600 ${sysconfdir}/sysconfig/iptables
+       cp -f ${datadir}/starlingx/ip6tables.rules ${sysconfdir}/sysconfig/ip6tables
+       chmod 600 ${sysconfdir}/sysconfig/ip6tables
+       /bin/systemctl enable iptables.service ip6tables.service >/dev/null 2>&1
+}
diff --git a/meta-stx/recipes-core/stx-config-files/files/openssh-config-rm-hmac-ripemd160.patch b/meta-stx/recipes-core/stx-config-files/files/openssh-config-rm-hmac-ripemd160.patch
new file mode 100644 (file)
index 0000000..85b2fb9
--- /dev/null
@@ -0,0 +1,11 @@
+diff --git a/openssh-config/files/sshd_config b/openssh-config/files/sshd_config
+index 0dfc0e2..b767509 100644
+--- a/openssh-config/files/sshd_config
++++ b/openssh-config/files/sshd_config
+@@ -144,5 +144,5 @@ DenyUsers admin secadmin operator
+ # TODO (aning): once openssh is updated to 7.5, an explicit exclusion list
+ # using "-" should be used for cipher, MAC and kex excluded suites.
+ Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com
+-MACs hmac-sha1,hmac-sha2-256,hmac-sha2-512,hmac-ripemd160,hmac-ripemd160@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha1-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-64-etm@openssh.com,umac-128-etm@openssh.com
++MACs hmac-sha1,hmac-sha2-256,hmac-sha2-512,umac-64@openssh.com,umac-128@openssh.com,hmac-sha1-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-64-etm@openssh.com,umac-128-etm@openssh.com
+ KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256
diff --git a/meta-stx/recipes-core/stx-config-files/files/syslog-ng-conf-fix-the-source.patch b/meta-stx/recipes-core/stx-config-files/files/syslog-ng-conf-fix-the-source.patch
new file mode 100644 (file)
index 0000000..a493093
--- /dev/null
@@ -0,0 +1,28 @@
+From 466367eb5ebc4ea1b9b1457d496834c57ed1dd66 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Thu, 26 Mar 2020 23:41:59 +0800
+Subject: [PATCH] syslog-ng.conf: fix the source
+
+ERROR: Using /dev/log Unix socket with systemd is not possible.
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ syslog-ng-config/files/syslog-ng.conf | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/syslog-ng-config/files/syslog-ng.conf b/syslog-ng-config/files/syslog-ng.conf
+index b1bab2c..1e06c02 100644
+--- a/syslog-ng-config/files/syslog-ng.conf
++++ b/syslog-ng-config/files/syslog-ng.conf
+@@ -62,7 +62,7 @@ options { chain_hostnames(off); flush_lines(0); use_dns(no); use_fqdn(no);
+ # This is the default behavior of sysklogd package
+ # Logs may come from unix stream, but not from another machine.
+ #
+-source s_src { unix-dgram("/dev/log"   ); internal();
++source s_src { systemd_journal(); internal();
+                file("/proc/kmsg" program_override("kernel")   );
+ };
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-core/stx-config-files/files/syslog-ng-conf-replace-match-with-message.patch b/meta-stx/recipes-core/stx-config-files/files/syslog-ng-conf-replace-match-with-message.patch
new file mode 100644 (file)
index 0000000..7d5b6fb
--- /dev/null
@@ -0,0 +1,62 @@
+From 78029f7aa8f8231fc657a094f41fb4ae1baba95e Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Sat, 28 Mar 2020 22:53:47 +0800
+Subject: [PATCH] syslog-ng.conf: replace match with message
+
+Fix the warning:
+ WARNING: the match() filter without the use of the value() option is
+ deprecated and hinders performance, please use a more specific filter
+ like message() and/or program() instead;
+ location='/etc/syslog-ng/syslog-ng.conf:255:80'
+ [2020-03-26T09:55:01.825267] WARNING: With use-dns(no), dns-cache()
+ will be forced to 'no' too!;
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ syslog-ng-config/files/syslog-ng.conf | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/syslog-ng-config/files/syslog-ng.conf b/syslog-ng-config/files/syslog-ng.conf
+index 1e06c02..7373031 100644
+--- a/syslog-ng-config/files/syslog-ng.conf
++++ b/syslog-ng-config/files/syslog-ng.conf
+@@ -252,7 +252,7 @@ filter f_newsnotice { facility(news) and filter(f_notice); };
+ #filter f_syslog3 { not facility(auth, authpriv, mail) and not filter(f_debug); };
+ filter f_syslog   { facility(syslog); };
+ filter f_user     { facility(user) and not filter(f_vim) and not filter(f_vim_api)
+-                    and not filter(f_vim_webserver) and not match("fmClientCli");
++                    and not filter(f_vim_webserver) and not message("fmClientCli");
+                     and not program("^(-)?(ba)?(su|sh)$"); };
+ filter f_uucp     { facility(uucp); };
+@@ -303,12 +303,12 @@ filter f_fm_event { facility(local5) and program(fmManager); };
+ filter f_fm_manager { facility(local1) and program(fmManager); };
+ # IMA Filters
+-filter f_ima         { facility(auth) and program(audispd) and match("type=INTEGRITY_") ; };
+-filter f_ima_appraise  { filter(f_ima) and match("appraise_data") ; };
++filter f_ima         { facility(auth) and program(audispd) and message("type=INTEGRITY_") ; };
++filter f_ima_appraise  { filter(f_ima) and message("appraise_data") ; };
+ # Sysinv Log Filter
+-filter f_sysinv    { facility(local6) and program(sysinv) and not match("sysinv.api.hooks.auditor"); };
+-filter f_sysinvapi { facility(local6) and program(sysinv) and     match("sysinv.api.hooks.auditor"); };
++filter f_sysinv    { facility(local6) and program(sysinv) and not message("sysinv.api.hooks.auditor"); };
++filter f_sysinvapi { facility(local6) and program(sysinv) and     message("sysinv.api.hooks.auditor"); };
+ # Distributed Cloud Log Filters
+ filter f_dcmanagermanager    { facility(local2) and program(dcmanager-manager); };
+@@ -324,8 +324,8 @@ filter f_dcdbsyncopenstackapi    { facility(local3) and program(dcdbsync-api); }
+ # Openstack Log Filters
+ filter f_horizon       { facility(local7) };
+ filter f_libvirtd      { program(libvirtd) };
+-filter f_keystoneall   { facility(local2) and message("keystone.*") and not match("keystone.common.wsgi"); };
+-filter f_keystoneapi   { facility(local2) and  match("keystone.common.wsgi"); };
++filter f_keystoneall   { facility(local2) and message("keystone.*") and not message("keystone.common.wsgi"); };
++filter f_keystoneapi   { facility(local2) and  message("keystone.common.wsgi"); };
+ filter f_barbicanapi              { facility(local2) and program(barbican-api); };
+ filter f_barbicandbsync           { facility(local2) and program(barbican-dbsync); };
+ filter f_barbicankeystonelistener { facility(local2) and program(barbican-keystone-listener); };
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-core/stx-config-files/files/syslog-ng-config-parse-err.patch b/meta-stx/recipes-core/stx-config-files/files/syslog-ng-config-parse-err.patch
new file mode 100644 (file)
index 0000000..2ecbff5
--- /dev/null
@@ -0,0 +1,13 @@
+diff --git a/syslog-ng-config/files/syslog-ng.conf b/syslog-ng-config/files/syslog-ng.conf
+index 43a4067..b1bab2c 100644
+--- a/syslog-ng-config/files/syslog-ng.conf
++++ b/syslog-ng-config/files/syslog-ng.conf
+@@ -183,7 +183,7 @@ destination d_newscrit   { file("/var/log/news/news.crit"); };
+ destination d_newserr    { file("/var/log/news/news.err"); };
+ destination d_newsnotice { file("/var/log/news/news.notice"); };
+-# Some `catch-all' logfiles.
++# Some 'catch-all' logfiles.
+ #
+ destination d_debug    { file("/var/log/debug"); };
+ destination d_error    { file("/var/log/error"); };
diff --git a/meta-stx/recipes-core/stx-config-files/files/syslog-ng-config-systemd-service.patch b/meta-stx/recipes-core/stx-config-files/files/syslog-ng-config-systemd-service.patch
new file mode 100644 (file)
index 0000000..c536696
--- /dev/null
@@ -0,0 +1,13 @@
+diff --git a/syslog-ng-config/files/syslog-ng.service b/syslog-ng-config/files/syslog-ng.service
+index 606a967..b8d90d2 100644
+--- a/syslog-ng-config/files/syslog-ng.service
++++ b/syslog-ng-config/files/syslog-ng.service
+@@ -5,7 +5,7 @@ Documentation=man:syslog-ng(8)
+ [Service]
+ Type=notify
+ Sockets=syslog.socket
+-ExecStartPre=-/usr/bin/mkdir -p /var/run/syslog-ng/
++ExecStartPre=-/bin/mkdir -p /var/run/syslog-ng/
+ ExecStart=/usr/sbin/syslog-ng -F -p /var/run/syslog-ng/syslog-ng.pid
+ ExecReload=/bin/kill -HUP $MAINPID
+ StandardOutput=null
diff --git a/meta-stx/recipes-core/stx-config-files/files/util-linux-pam-postlogin.patch b/meta-stx/recipes-core/stx-config-files/files/util-linux-pam-postlogin.patch
new file mode 100644 (file)
index 0000000..e34b7b3
--- /dev/null
@@ -0,0 +1,9 @@
+diff --git a/util-linux-config/files/stx.postlogin b/util-linux-config/files/stx.postlogin
+new file mode 100644
+index 0000000..292bd8a
+--- /dev/null
++++ b/util-linux-config/files/stx.postlogin
+@@ -0,0 +1,3 @@
++session     [default=1]   pam_lastlog.so nowtmp showfailed
++session     optional      pam_lastlog.so silent noupdate showfailed
++
diff --git a/meta-stx/recipes-core/stx-config/cgts-client.inc b/meta-stx/recipes-core/stx-config/cgts-client.inc
new file mode 100644 (file)
index 0000000..dcefd54
--- /dev/null
@@ -0,0 +1,53 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " cgts-client"
+
+inherit setuptools distutils python-dir
+DEPENDS += " \
+       python-pbr-native \
+       "
+
+RDEPENDS_cgts-client += " \
+       python-prettytable \
+       bash-completion \
+       python-neutronclient \
+       python-keystoneclient \
+       python-six \
+       python-httplib2 \
+       "
+
+do_configure_append() {
+       cd ${S}/sysinv/cgts-client/cgts-client
+       distutils_do_configure
+} 
+
+do_compile_append() {
+       cd ${S}/sysinv/cgts-client/cgts-client
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/sysinv/cgts-client/cgts-client
+       distutils_do_install
+}
+
+FILES_cgts-client = " \
+       ${bindir}/system \
+       ${PYTHON_SITEPACKAGES_DIR}/sysinv/ \
+       ${PYTHON_SITEPACKAGES_DIR}/cgtsclient/ \
+       ${PYTHON_SITEPACKAGES_DIR}/cgtsclient-*.egg-info \
+       ${PYTHON_SITEPACKAGES_DIR}/sysinv-*.egg-info \
+       "
diff --git a/meta-stx/recipes-core/stx-config/config-gate.inc b/meta-stx/recipes-core/stx-config/config-gate.inc
new file mode 100644 (file)
index 0000000..ca412c4
--- /dev/null
@@ -0,0 +1,36 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " config-gate-worker"
+PACKAGES += " config-gate"
+
+RDEPENDS_config-gate-worker += " bash"
+RDEPENDS_config-gate += " bash"
+
+do_install_append() {
+       cd ${S}/config-gate/files
+       oe_runmake -e \
+                 SBINDIR=${D}/${sbindir} SYSTEMDDIR=${D}/${systemd_system_unitdir} \
+                install
+}
+
+FILES_config-gate-worker = " \
+       ${sbindir}/wait_for_worker_config_init.sh \
+       ${systemd_system_unitdir}/worker-config-gate.service \
+       "
+FILES_config-gate = " \
+       ${sbindir}/wait_for_config_init.sh \
+       ${systemd_system_unitdir}/config.service \
+       "
diff --git a/meta-stx/recipes-core/stx-config/controllerconfig.inc b/meta-stx/recipes-core/stx-config/controllerconfig.inc
new file mode 100644 (file)
index 0000000..4690c6f
--- /dev/null
@@ -0,0 +1,99 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " controllerconfig"
+
+RDEPENDS_controllerconfig += " \
+       bash \
+       fm-api \
+       systemd \
+       tsconfig \
+       python-iso8601 \
+       python-keyring \
+       python-netaddr \
+       python-netifaces \
+       python-pyudev \
+       python-six \
+       python-cryptography \
+       python-oslo.utils \
+       python-pysnmp \
+       python-ruamel.yaml \
+       "
+
+do_configure_prepend() {
+       cd ${S}/controllerconfig/controllerconfig
+       distutils_do_configure
+} 
+
+do_compile_prepend() {
+       cd ${S}/controllerconfig/controllerconfig
+       distutils_do_compile
+}
+
+do_install_prepend () {
+       cd ${S}/controllerconfig/controllerconfig
+       distutils_do_install
+
+        install -p -D -m 700 scripts/keyringstaging ${D}/${bindir}/keyringstaging
+        install -p -D -m 700 scripts/openstack_update_admin_password ${D}/${bindir}/openstack_update_admin_password
+        install -p -D -m 700 scripts/install_clone.py ${D}/${bindir}/install_clone.py
+        install -p -D -m 700 scripts/finish_install_clone.sh ${D}/${bindir}/finish_install_clone.sh
+
+       install -d -m 700 ${D}/${sysconfdir}/goenabled.d
+       install -d -m 700 ${D}/${sysconfdir}/init.d
+        install -p -D -m 700 scripts/config_goenabled_check.sh ${D}/${sysconfdir}/goenabled.d/config_goenabled_check.sh
+        install -p -D -m 755 scripts/controller_config ${D}/${sysconfdir}/init.d/controller_config
+
+        ## Install Upgrade scripts
+       install -d -m 755 ${D}/${sysconfdir}/upgrade.d
+        install -p -m 755 upgrade-scripts/16-neutron-move-bindings-off-controller-1.py ${D}/${sysconfdir}/upgrade.d
+        install -p -m 755 upgrade-scripts/20-sysinv-retire-ceph-cache-tier-sp.py ${D}/${sysconfdir}/upgrade.d
+       
+
+        install -p -D -m 664 scripts/controllerconfig.service ${D}/${sysconfdir}/systemd/system/controllerconfig.service
+
+       sed -i -e 's|/usr/local/bin|${bindir}|' \
+               ${D}${libdir}/python2.7/site-packages/controllerconfig/utils.py \
+               ${D}${libdir}/python2.7/site-packages/controllerconfig/upgrades/utils.py \
+               ${D}${sysconfdir}/init.d/controller_config
+}
+
+
+FILES_controllerconfig = " \
+               ${sysconfdir}/goenabled.d/config_goenabled_check.sh  \
+               ${sysconfdir}/upgrade.d/20-sysinv-retire-ceph-cache-tier-sp.py \
+               ${sysconfdir}/upgrade.d/16-neutron-move-bindings-off-controller-1.py \
+               ${sysconfdir}/init.d/controller_config \
+               ${bindir}/keyringstaging \
+               ${bindir}/openstack_update_admin_password \
+               ${bindir}/install_clone.py \
+               ${bindir}/finish_install_clone.sh \
+               ${bindir}/finish_install_clone.sh \
+               ${bindir}/upgrade_controller \
+               ${bindir}/config_region \
+               ${bindir}/config_subcloud \
+               ${bindir}/config_management \
+               ${bindir}/keyringstaging \
+               ${bindir}/tidy_storage_post_restore \
+               ${bindir}/config_controller \
+               ${bindir}/upgrade_controller_simplex \
+               ${sysconfdir}/systemd/system/controllerconfig.service \
+               ${libdir}/python2.7/site-packages/controllerconfig*.egg-info/ \
+               ${libdir}/python2.7/site-packages/controllerconfig/ \
+               "
+
+SYSTEMD_PACKAGES += "controllerconfig"
+SYSTEMD_SERVICE_controllerconfig = "controllerconfig.service"
+SYSTEMD_AUTO_ENABLE_controllerconfig = "enable"
diff --git a/meta-stx/recipes-core/stx-config/files/0001-puppet-manifests-adjust-path-variable.patch b/meta-stx/recipes-core/stx-config/files/0001-puppet-manifests-adjust-path-variable.patch
new file mode 100644 (file)
index 0000000..aea9672
--- /dev/null
@@ -0,0 +1,27 @@
+From 54a94a0caf6db9e041e3769ae2be830cb0d7ea2f Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Thu, 26 Dec 2019 11:42:23 -0800
+Subject: [PATCH] puppet-manifests: adjust path variable
+
+Issue 54:
+Without this patch, mount command is not found
+---
+ puppet-manifests/src/modules/platform/manifests/filesystem.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/puppet-manifests/src/modules/platform/manifests/filesystem.pp b/puppet-manifests/src/modules/platform/manifests/filesystem.pp
+index a324ae82..a75f57b5 100644
+--- a/puppet-manifests/src/modules/platform/manifests/filesystem.pp
++++ b/puppet-manifests/src/modules/platform/manifests/filesystem.pp
+@@ -64,7 +64,7 @@ define platform::filesystem (
+   -> exec { "mount ${device}":
+     unless  => "mount | awk '{print \$3}' | grep -Fxq ${mountpoint}",
+     command => "mount ${mountpoint}",
+-    path    => '/usr/bin'
++    path    => '/usr/bin:/usr/sbin:/bin:/sbin'
+   }
+ }
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-config/files/0001-puppet-manifests-integ-set-correct-ldap-module-path.patch b/meta-stx/recipes-core/stx-config/files/0001-puppet-manifests-integ-set-correct-ldap-module-path.patch
new file mode 100644 (file)
index 0000000..c6e7199
--- /dev/null
@@ -0,0 +1,26 @@
+From 32efd8d303556b43005f26715bd8264886e17420 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Wed, 11 Dec 2019 17:24:17 -0800
+Subject: [PATCH] puppet-manifests integ: set correct ldap module path
+
+OE installs ldap modules under libexec.
+---
+ puppet-manifests/src/modules/platform/manifests/ldap.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/puppet-manifests/src/modules/platform/manifests/ldap.pp b/puppet-manifests/src/modules/platform/manifests/ldap.pp
+index b3d6ee71..7205e7d3 100644
+--- a/puppet-manifests/src/modules/platform/manifests/ldap.pp
++++ b/puppet-manifests/src/modules/platform/manifests/ldap.pp
+@@ -53,7 +53,7 @@ class platform::ldap::server::local
+                           -e 's:serverID.*:serverID ${server_id}:' \\
+                           -e 's:credentials.*:credentials=${admin_pw}:' \\
+                           -e 's:^rootpw .*:rootpw ${admin_hashed_pw}:' \\
+-                          -e 's:modulepath .*:modulepath /usr/lib64/openldap:' \\
++                          -e 's:modulepath .*:modulepath /usr/libexec/openldap:' \\
+                           /etc/openldap/slapd.conf",
+     onlyif  => '/usr/bin/test -e /etc/openldap/slapd.conf'
+   }
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-config/files/0001-stx-config-puppet-manifests-cast-to-Integer.patch b/meta-stx/recipes-core/stx-config/files/0001-stx-config-puppet-manifests-cast-to-Integer.patch
new file mode 100644 (file)
index 0000000..9e0ab81
--- /dev/null
@@ -0,0 +1,30 @@
+From bd8abad80bb0b24ed3556e51d345e4364a129c6b Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Mon, 13 Jan 2020 13:11:52 -0800
+Subject: [PATCH] stx-config: puppet-manifests cast to Integer
+
+Use cast operator to convert string to a number
+---
+ puppet-manifests/src/modules/platform/manifests/params.pp | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/puppet-manifests/src/modules/platform/manifests/params.pp b/puppet-manifests/src/modules/platform/manifests/params.pp
+index 9ef50eb2..d99cc609 100644
+--- a/puppet-manifests/src/modules/platform/manifests/params.pp
++++ b/puppet-manifests/src/modules/platform/manifests/params.pp
+@@ -35,9 +35,9 @@ class platform::params (
+   $protected_group_name = 'sys_protected'
+   $protected_group_id = '345'
+-  # PUPPET 4 treats custom facts as strings. We convert to int by adding zero.
+-  $phys_core_count = 0 + $::physical_core_count
+-  $plat_res_mem = 0 + $::platform_res_mem
++  # To convert a string to a number, cast the type by declaring a new Numeric object.
++  $phys_core_count = Integer($::physical_core_count)
++  $plat_res_mem = Integer($::platform_res_mem)
+   # Engineering parameters common to openstack services:
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-config/files/0001-stx-config-remove-argparse-requirement-from-sysinv.patch b/meta-stx/recipes-core/stx-config/files/0001-stx-config-remove-argparse-requirement-from-sysinv.patch
new file mode 100644 (file)
index 0000000..8b8a286
--- /dev/null
@@ -0,0 +1,25 @@
+From 06a156e04df427f51b42b1093577823c558166ad Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Tue, 25 Feb 2020 13:50:37 -0800
+Subject: [PATCH] stx-config: remove argparse requirement from sysinv.
+
+argparse is part of stdlib
+---
+ sysinv/sysinv/sysinv/requirements.txt | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/sysinv/sysinv/sysinv/requirements.txt b/sysinv/sysinv/sysinv/requirements.txt
+index fb4ef6a1..12a341c7 100644
+--- a/sysinv/sysinv/sysinv/requirements.txt
++++ b/sysinv/sysinv/sysinv/requirements.txt
+@@ -2,7 +2,6 @@ pbr>=0.5
+ SQLAlchemy
+ amqplib>=0.6.1
+ anyjson>=0.3.3
+-argparse
+ boto3
+ botocore>=1.11.0
+ cryptography!=2.0  # BSD/Apache-2.0
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-config/storageconfig.inc b/meta-stx/recipes-core/stx-config/storageconfig.inc
new file mode 100644 (file)
index 0000000..3ad6ea1
--- /dev/null
@@ -0,0 +1,34 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += "storageconfig"
+RDEPENDS_storageconfig += " bash"
+
+do_install_append () {
+       cd ${S}/storageconfig/storageconfig/
+        oe_runmake GOENABLEDDIR=${D}/${sysconfdir}/goenabled.d  INITDDIR=${D}/${sysconfdir}/init.d \
+                               SYSTEMDDIR=${D}/${systemd_system_unitdir} install
+       sed -i -e 's:/usr/local/bin/:/usr/bin/:g' ${D}/${sysconfdir}/init.d/storage_config
+}
+
+FILES_storageconfig = " \
+               ${sysconfdir}/init.d/storage_config \
+               ${sysconfdir}/goenabled.d/config_goenabled_check.sh \
+               ${systemd_system_unitdir}/storageconfig.service \
+               "
+
+SYSTEMD_PACKAGES += "storageconfig"
+SYSTEMD_SERVICE_storageconfig = "storageconfig.service"
+SYSTEMD_AUTO_ENABLE_storageconfig = "enable"
diff --git a/meta-stx/recipes-core/stx-config/stx-config.bb b/meta-stx/recipes-core/stx-config/stx-config.bb
new file mode 100644 (file)
index 0000000..a5e1440
--- /dev/null
@@ -0,0 +1,65 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-config"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "b51e4ef738e0020f11f164fd3f86399872caf3c6"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = " \
+       git://opendev.org/starlingx/config.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://0001-stx-config-remove-argparse-requirement-from-sysinv.patch \
+       "
+
+DEPENDS = "\
+       puppet \
+       python \
+       python-pbr-native \
+       "
+
+inherit setuptools distutils
+
+# Other packages depend on tsconfig build it first
+require tsconfig.inc
+require config-gate.inc
+require controllerconfig.inc
+require storageconfig.inc
+require cgts-client.inc
+require sysinv.inc
+require sysinv-agent.inc
+require workerconfig.inc
+
+do_configure() {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install() {
+       :
+}
+
+FILES_${PN} = " "
+
+DISTRO_FEATURES_BACKFILL_CONSIDERED_remove = "sysvinit"
diff --git a/meta-stx/recipes-core/stx-config/sysinv-agent.inc b/meta-stx/recipes-core/stx-config/sysinv-agent.inc
new file mode 100644 (file)
index 0000000..d639536
--- /dev/null
@@ -0,0 +1,51 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " sysinv-agent"
+
+RDEPENDS_sysinv-agent += " python"
+
+
+do_configure_append() {
+       :
+} 
+
+do_compile_append() {
+               :
+}
+
+do_install_append() {
+
+       echo "Installing sysinv..."
+       cd ${S}/sysinv/sysinv-agent
+
+       install -d -m 755 ${D}${sysconfdir}/init.d
+       install -p -D -m 755 ${S}/sysinv/sysinv-agent/sysinv-agent ${D}/${sysconfdir}/init.d/sysinv-agent
+
+       install -d -m 755 ${D}${sysconfdir}/pmon.d
+       install -p -D -m 644 ${S}/sysinv/sysinv-agent/sysinv-agent.conf ${D}/${sysconfdir}/pmon.d/sysinv-agent.conf
+       install -p -D -m 644 ${S}/sysinv/sysinv-agent/sysinv-agent.service ${D}/${systemd_system_unitdir}/sysinv-agent.service
+
+}
+
+FILES_sysinv-agent = " \
+       ${systemd_system_unitdir}/sysinv-agent.service  \
+       ${sysconfdir}/pmon.d/sysinv-agent.conf \
+       ${sysconfdir}/init.d/sysinv-agent \
+       "
+
+SYSTEMD_PACKAGES += "sysinv-agent"
+SYSTEMD_SERVICE_sysinv-agent = "sysinv-agent.service"
+SYSTEMD_AUTO_ENABLE_sysinv-agent = "disable"
diff --git a/meta-stx/recipes-core/stx-config/sysinv.inc b/meta-stx/recipes-core/stx-config/sysinv.inc
new file mode 100644 (file)
index 0000000..34fbb25
--- /dev/null
@@ -0,0 +1,156 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " sysinv"
+
+RDEPENDS_sysinv += " python"
+RDEPENDS_sysinv += " bash"
+
+inherit setuptools distutils python-dir systemd
+
+DEPENDS += " \
+       python-pbr-native \
+       "
+RDEPENDS_sysinv += " \
+       python-anyjson \
+       python-amqp \
+       python-amqplib \
+       python-passlib \
+       python-websockify \
+       python-pyparted \
+       python-boto3 \
+       python-botocore \
+       python-coverage \
+       python-docker \
+       python-eventlet \
+       python-ipaddr \
+       python-keyring \
+       python-kubernetes \
+       python-netaddr \
+       python-pyudev \
+       python-pbr \
+       python-webtest \
+       python-wsme \
+       python-six \
+       python-django \
+       python-mox3 \
+       python-oslo.i18n \
+       python-oslo.config \
+       python-oslo.concurrency \
+       python-oslo.db \
+       python-oslo.log \
+       python-oslo.utils \
+       python-pecan \
+       python2-rpm \
+       python-pyghmi \
+       python-paramiko \
+       tsconfig \
+       resource-agents \
+       gptfdisk \
+       "
+
+do_configure_append() {
+       echo "Configure sysinv..."
+       cd ${S}/sysinv/sysinv/sysinv
+       distutils_do_configure
+} 
+
+do_compile_append() {
+       
+       echo "Building sysinv..."
+       cd ${S}/sysinv/sysinv/sysinv
+       distutils_do_compile
+}
+
+do_install_append() {
+
+       echo "Installing sysinv..."
+       cd ${S}/sysinv/sysinv/sysinv
+       distutils_do_install
+
+       install -d -m 755 ${D}${sysconfdir}/goenabled.d
+       install -p -D -m 755 etc/sysinv/sysinv_goenabled_check.sh ${D}${sysconfdir}/goenabled.d/sysinv_goenabled_check.sh
+       
+       install -d -m 755 ${D}${sysconfdir}/sysinv
+       install -p -D -m 755 etc/sysinv/policy.json ${D}${sysconfdir}/sysinv/policy.json
+       install -p -D -m 640 etc/sysinv/profileSchema.xsd ${D}${sysconfdir}/sysinv/profileSchema.xsd
+       
+       install -p -D -m 644 etc/sysinv/crushmap-storage-model.txt ${D}${sysconfdir}/sysinv/crushmap-storage-model.txt
+       install -p -D -m 644 etc/sysinv/crushmap-controller-model.txt ${D}${sysconfdir}/sysinv/crushmap-controller-model.txt
+       install -p -D -m 644 etc/sysinv/crushmap-aio-sx.txt ${D}${sysconfdir}/sysinv/crushmap-aio-sx.txt
+       
+       install -d -m 755 ${D}${sysconfdir}/motd.d
+       install -p -D -m 755 etc/sysinv/motd-system ${D}${sysconfdir}/motd.d/10-system
+       
+       install -d -m 755 ${D}${sysconfdir}/sysinv/upgrades
+       install -p -D -m 755 etc/sysinv/delete_load.sh ${D}${sysconfdir}/sysinv/upgrades/delete_load.sh
+       
+       install -m 755 -p -D scripts/sysinv-api ${D}/usr/lib/ocf/resource.d/platform/sysinv-api
+       install -m 755 -p -D scripts/sysinv-conductor ${D}/usr/lib/ocf/resource.d/platform/sysinv-conductor
+       
+       install -m 644 -p -D scripts/sysinv-api.service ${D}${systemd_system_unitdir}/sysinv-api.service
+       install -m 644 -p -D scripts/sysinv-conductor.service ${D}${systemd_system_unitdir}/sysinv-conductor.service
+       
+       #install -p -D -m 755 ${D}/usr/bin/sysinv-api ${D}/usr/bin/sysinv-api
+       #install -p -D -m 755 ${D}/usr/bin/sysinv-agent ${D}/usr/bin/sysinv-agent
+       #install -p -D -m 755 ${D}/usr/bin/sysinv-conductor ${D}/usr/bin/sysinv-conductor
+       
+       install -d -m 755 ${D}${bindir}/
+       install -p -D -m 755 sysinv/cmd/partition_info.sh ${D}${bindir}/partition_info.sh
+       install -p -D -m 755 sysinv/cmd/manage-partitions ${D}${bindir}/manage-partitions
+       install -p -D -m 755 sysinv/cmd/query_pci_id ${D}${bindir}/query_pci_id
+
+       sed -i -e 's|/usr/local/bin|${bindir}|' \
+               ${D}${libdir}/python2.7/site-packages/sysinv/common/constants.py \
+               ${D}${libdir}/python2.7/site-packages/sysinv/puppet/common.py
+}
+
+FILES_sysinv = " \
+       ${bindir}/sysinv-agent \
+       ${bindir}/sysinv-utils \
+       ${bindir}/sysinv-conductor \
+       ${bindir}/sysinv-api \
+       ${bindir}/sysinv-helm \
+       ${bindir}/sysinv-dbsync \
+       ${bindir}/sysinv-dnsmasq-lease-update \
+       ${bindir}/sysinv-puppet \
+       ${bindir}/sysinv-rootwrap \
+       ${bindir}/sysinv-upgrade \
+       ${PYTHON_SITEPACKAGES_DIR}/sysinv/ \
+       ${bindir}/manage-partitions \
+       ${bindir}/query_pci_id \
+       ${bindir}/partition_info.sh \
+       ${libdir}/ocf \
+       ${libdir}/ocf/resource.d \
+       ${libdir}/ocf/resource.d/platform \
+       ${libdir}/ocf/resource.d/platform/sysinv-conductor \
+       ${libdir}/ocf/resource.d/platform/sysinv-api \
+       ${sysconfdir}/motd.d \
+       ${sysconfdir}/sysinv \
+       ${sysconfdir}/motd.d/10-system \
+       ${sysconfdir}/sysinv/profileSchema.xsd \
+       ${sysconfdir}/sysinv/crushmap-controller-model.txt \
+       ${sysconfdir}/sysinv/crushmap-storage-model.txt \
+       ${sysconfdir}/sysinv/crushmap-aio-sx.txt \
+       ${sysconfdir}/sysinv/policy.json \
+       ${sysconfdir}/sysinv/upgrades \
+       ${sysconfdir}/sysinv/upgrades/delete_load.sh \
+       ${sysconfdir}/goenabled.d/sysinv_goenabled_check.sh \
+       ${systemd_system_unitdir}/sysinv-api.service \
+       ${systemd_system_unitdir}/sysinv-conductor.service \
+       "
+
+#pkg_postinst_ontarget_sysinv() {
+#}
diff --git a/meta-stx/recipes-core/stx-config/tsconfig.inc b/meta-stx/recipes-core/stx-config/tsconfig.inc
new file mode 100644 (file)
index 0000000..99bd207
--- /dev/null
@@ -0,0 +1,46 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " tsconfig"
+
+inherit distutils
+
+RDEPENDS_tsconfig_append = " bash"
+
+do_configure_append () {
+       cd ${S}/tsconfig/tsconfig
+       distutils_do_configure
+} 
+
+do_compile_append () {
+       cd ${S}/tsconfig/tsconfig
+       distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/tsconfig/tsconfig
+       distutils_do_install
+
+
+       install -m 755 -d ${D}/${bindir}
+       install -m 500 scripts/tsconfig ${D}/${bindir}/
+
+}
+
+FILES_tsconfig = " \
+       ${PYTHON_SITEPACKAGES_DIR}/tsconfig \
+       ${PYTHON_SITEPACKAGES_DIR}/tsconfig*.egg-info \
+       ${bindir}/tsconfig \
+       "
diff --git a/meta-stx/recipes-core/stx-config/workerconfig.inc b/meta-stx/recipes-core/stx-config/workerconfig.inc
new file mode 100644 (file)
index 0000000..949409c
--- /dev/null
@@ -0,0 +1,97 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " \
+       workerconfig \
+       workerconfig-standalone \
+       workerconfig-subfunction \
+       "
+
+RDEPENDS_workerconfig += "bash"
+RDEPENDS_workerconfig-standalone += "workerconfig"
+RDEPENDS_workerconfig-subfunction += "workerconfig"
+
+systemddir = "${sysconfdir}/systemd/system"
+
+do_install_append () {
+       cd ${S}/workerconfig/workerconfig/
+       oe_runmake GOENABLEDDIR=${D}/${sysconfdir}/goenabled.d  INITDDIR=${D}/${sysconfdir}/init.d \
+               SYSTEMDDIR=${D}/${systemddir} install
+
+       sed -i -e 's|/usr/local/bin|${bindir}|' \
+               ${D}${sysconfdir}/init.d/worker_config
+}
+
+FILES_workerconfig += " \
+       ${sysconfdir}/init.d/worker_config \
+       "
+
+FILES_workerconfig-standalone += " \
+       ${systemddir}/config/workerconfig-standalone.service \
+       ${sysconfdir}/goenabled.d/config_goenabled_check.sh \
+       "
+
+FILES_workerconfig-subfunction += "  \
+       ${sysconfdir}/systemd/system/config/workerconfig-combined.service \
+       "
+
+pkg_postinst_workerconfig-standalone () {
+       if [ ! -e $D${systemddir}/workerconfig.service ]; then
+               cp $D${systemddir}/config/workerconfig-standalone.service $D${systemddir}/workerconfig.service
+       else
+               rm -f $D${systemddir}/workerconfig.service
+               cp $D${systemddir}/config/workerconfig-standalone.service $D${systemddir}/workerconfig.service
+       fi
+
+       # enable workerconfig service by default
+       OPTS=""
+       if [ -n "$D" ]; then
+               OPTS="--root=$D"
+       fi
+       if [ -z "$D" ]; then
+               systemctl daemon-reload
+       fi
+
+       systemctl $OPTS enable workerconfig.service
+
+       if [ -z "$D" ]; then
+               systemctl --no-block restart workerconfig.service
+       fi
+}
+
+
+pkg_postinst_workerconfig-subfunction () {
+       if [ ! -e $D${systemddir}/workerconfig.service ]; then
+               cp $D${systemddir}/config/workerconfig-combined.service $D${systemddir}/workerconfig.service
+       else
+               rm -f $D${systemddir}/workerconfig.service
+               cp $D${systemddir}/config/workerconfig-combined.service $D${systemddir}/workerconfig.service
+       fi
+
+       # enable workerconfig service by default
+       OPTS=""
+       if [ -n "$D" ]; then
+               OPTS="--root=$D"
+       fi
+       if [ -z "$D" ]; then
+               systemctl daemon-reload
+       fi
+
+       systemctl $OPTS enable workerconfig.service
+
+       if [ -z "$D" ]; then
+               systemctl --no-block restart workerconfig.service
+       fi
+}
diff --git a/meta-stx/recipes-core/stx-fault/files/0001-Honor-the-build-system-LDFLAGS.patch b/meta-stx/recipes-core/stx-fault/files/0001-Honor-the-build-system-LDFLAGS.patch
new file mode 100644 (file)
index 0000000..9765455
--- /dev/null
@@ -0,0 +1,32 @@
+From 78585fade2d47fc0aa98b7e2fb494385466007da Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Thu, 27 Jun 2019 14:36:59 -0700
+Subject: [PATCH 1/2] Honor the build system LDFLAGS
+
+---
+ fm-common/sources/Makefile | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fm-common/sources/Makefile b/fm-common/sources/Makefile
+index 7b1ea03..7c7e4c7 100755
+--- a/fm-common/sources/Makefile
++++ b/fm-common/sources/Makefile
+@@ -25,12 +25,13 @@ build: lib fmClientCli
+       $(CXX) $(CCFLAGS) $(INCLUDES) $(EXTRACCFLAGS) -c $< -o $@
+ lib: $(OBJS)
+-      $(CXX) -Wl,-soname,$(LIBFMCOMMON_SO).$(MAJOR) -o $(LIBFMCOMMON_SO).$(MAJOR).$(MINOR) -shared $(OBJS) $(EXTRAARFLAGS) ${LDLIBS}
++      $(LDCXXSHARED) $(LDFLAGS) -Wl,-soname,$(LIBFMCOMMON_SO).$(MAJOR) \
++              -o $(LIBFMCOMMON_SO).$(MAJOR).$(MINOR) $(OBJS) $(EXTRAARFLAGS) ${LDLIBS}
+       ln -sf $(LIBFMCOMMON_SO).$(MAJOR).$(MINOR) $(LIBFMCOMMON_SO).$(MAJOR)
+       ln -sf $(LIBFMCOMMON_SO).$(MAJOR).$(MINOR) $(LIBFMCOMMON_SO)
+ fmClientCli: $(CLI_OBJS) lib
+-      $(CXX) -o $@ $(CLI_OBJS) -L./ -lfmcommon
++      $(CXX) $(LDFLAGS) -o $@ $(CLI_OBJS) -L./ -lfmcommon
+ clean:
+       @rm -f $(OBJ) *.o *.so fmClientCli
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-fault/files/0001-Use-build-systems-LDFLAGS.patch b/meta-stx/recipes-core/stx-fault/files/0001-Use-build-systems-LDFLAGS.patch
new file mode 100644 (file)
index 0000000..bdca16f
--- /dev/null
@@ -0,0 +1,25 @@
+From ee51474dd6756492d1487304ce83b878b8867f6b Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Mon, 8 Jul 2019 21:50:24 -0700
+Subject: [PATCH] Use build systems LDFLAGS
+
+---
+ snmp-audittrail/sources/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/snmp-audittrail/sources/Makefile b/snmp-audittrail/sources/Makefile
+index 6f7469c..bb55765 100644
+--- a/snmp-audittrail/sources/Makefile
++++ b/snmp-audittrail/sources/Makefile
+@@ -16,7 +16,7 @@ build: lib
+       $(CXX) $(CCFLAGS) $(INCLUDES) -c $< -o $@
+ lib: $(OBJS)
+-      $(CXX) $(OBJS) $(LDLIBS) -o $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) -shared
++      $(LDCXXSHARED) $(OBJS) $(LDFLAGS) $(LDLIBS) -o $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) 
+       ln -sf $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) $(LIBCGTSAGENT_SO).$(MAJOR)
+       ln -sf $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) $(LIBCGTSAGENT_SO)
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-fault/files/0001-snmp-ext-use-build-systems-LDFLAGS.patch b/meta-stx/recipes-core/stx-fault/files/0001-snmp-ext-use-build-systems-LDFLAGS.patch
new file mode 100644 (file)
index 0000000..5bb780b
--- /dev/null
@@ -0,0 +1,25 @@
+From e452f7eaf630f7ab5de240c51ddce14c9329d580 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Mon, 8 Jul 2019 21:56:11 -0700
+Subject: [PATCH] snmp-ext use build systems LDFLAGS
+
+---
+ snmp-ext/sources/Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/snmp-ext/sources/Makefile b/snmp-ext/sources/Makefile
+index 408ea34..c00febe 100644
+--- a/snmp-ext/sources/Makefile
++++ b/snmp-ext/sources/Makefile
+@@ -23,7 +23,7 @@ LIBCGTSAGENT_SO := libcgtsAgentPlugin.so
+       $(CXX) $(CCFLAGS) $(INCLUDES) -c $< -o $@
+ lib: $(OBJS)
+-      $(CXX) $(OBJS) $(LDLIBS) -o $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) -shared
++      $(LDCXXSHARED) $(OBJS) $(LDFLAGS) $(LDLIBS) -o $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) 
+       ln -sf $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) $(LIBCGTSAGENT_SO).$(MAJOR)
+       ln -sf $(LIBCGTSAGENT_SO).$(MAJOR).$(MINOR).$(PATCH) $(LIBCGTSAGENT_SO)
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-fault/fm-api.inc b/meta-stx/recipes-core/stx-fault/fm-api.inc
new file mode 100644 (file)
index 0000000..a41b7ac
--- /dev/null
@@ -0,0 +1,39 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " fm-api"
+
+do_configure_append () {
+       cd ${S}/fm-api
+       distutils_do_configure
+} 
+
+do_compile_append() {
+       cd ${S}/fm-api
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/fm-api
+       distutils_do_install
+
+       # fix the path for binaries
+       sed -i -e 's|/usr/local/bin|${bindir}|' ${D}${libdir}/python2.7/site-packages/fm_api/constants.py
+}
+
+FILES_fm-api_append = " \
+       ${libdir}/python2.7/site-packages/fm_api-1.0.0-py2.7.egg-info/ \
+       ${libdir}/python2.7/site-packages/fm_api \
+       "
diff --git a/meta-stx/recipes-core/stx-fault/fm-common.inc b/meta-stx/recipes-core/stx-fault/fm-common.inc
new file mode 100644 (file)
index 0000000..f0d1304
--- /dev/null
@@ -0,0 +1,62 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " fm-common"
+RDEPENDS_fm-common = " python"
+
+do_configure_append () {
+       cd ${S}/fm-common/sources
+
+       # fix the hardcoded path
+       sed -i -e 's|/usr/local/bin|${bindir}|' fmConstants.h
+
+       distutils_do_configure
+}
+
+do_compile_append() {
+       cd ${S}/fm-common/sources
+       oe_runmake -e BINDIR=${bindir} \
+                       INCLUDES="-I./ " \
+                       EXTRACCFLAGS=" " \
+                       CCFLAGS="${CXXFLAGS} ${CCSHARED}" \
+                       LIBDIR=${libdir} INCDIR=${includedir} \
+                       CGCS_DOC_DEPLOY=${cgcs_doc_deploy}
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/fm-common/sources
+       oe_runmake -e DESTDIR=${D} BINDIR=${bindir} \
+               LIBDIR=${libdir} INCDIR=${includedir} \
+               CGCS_DOC_DEPLOY=${cgcs_doc_deploy} install
+       distutils_do_install
+}
+
+
+FILES_fm-common = " \
+       ${cgcs_doc_deploy}/fmAlarm.h \
+       ${bindir}/fm_db_sync_event_suppression.py \
+       ${bindir}/fmClientCli \
+       ${libdir}/python2.7/site-packages/fm_core*.egg-info \
+       ${libdir}/python2.7/site-packages/fm_core.so \
+       ${libdir}/libfmcommon.so.1.0 \
+       ${libdir}/libfmcommon.so.1 \
+       ${includedir}/fmConfig.h \
+       ${includedir}/fmLog.h \
+       ${includedir}/fmThread.h \
+       ${includedir}/fmDbAPI.h \
+       ${includedir}/fmAPI.h \
+       ${includedir}/fmAlarm.h \
+       "
diff --git a/meta-stx/recipes-core/stx-fault/fm-doc.inc b/meta-stx/recipes-core/stx-fault/fm-doc.inc
new file mode 100644 (file)
index 0000000..8aafd5e
--- /dev/null
@@ -0,0 +1,35 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += "fm-doc"
+
+do_install_append () {
+       cd ${S}/fm-doc/fm_doc/
+       install -d -m 755 ${D}/${cgcs_doc_deploy}
+       install -d -m 755 ${D}/${sysconfdir}/fm
+       install -m 744 events.yaml ${D}/${sysconfdir}/fm/
+       install -m 644 events.yaml ${D}/${cgcs_doc_deploy}
+       install -m 755 checkEventYaml ${D}/${cgcs_doc_deploy}
+       install -m 644 parseEventYaml.py ${D}/${cgcs_doc_deploy}
+       install -m 644 check_missing_alarms.py ${D}/${cgcs_doc_deploy}
+}
+
+FILES_fm-doc = " \
+       ${sysconfdir}/fm/events.yaml \
+       ${cgcs_doc_deploy}/events.yaml \
+       ${cgcs_doc_deploy}/checkEventYaml \
+       ${cgcs_doc_deploy}/parseEventYaml.py \
+       ${cgcs_doc_deploy}/check_missing_alarms.py \
+       "
diff --git a/meta-stx/recipes-core/stx-fault/fm-mgr.inc b/meta-stx/recipes-core/stx-fault/fm-mgr.inc
new file mode 100644 (file)
index 0000000..18b3fed
--- /dev/null
@@ -0,0 +1,52 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " fm-mgr"
+
+###################
+# DEPENDS = " fm-common"
+###################
+
+do_compile_append() {
+       cd ${S}/fm-mgr/sources/
+       oe_runmake -e \
+               LDFLAGS="${LDFLAGS} -L${S}/fm-common/sources" \
+               CCFLAGS="${CXXFLAGS}" \
+               INCLUDES="-I. -I${S}/fm-common/sources" \
+               build
+}
+
+do_install_append () {
+       cd ${S}/fm-mgr/sources/
+	install -d -m0755 ${D}/${systemd_system_unitdir}
+       oe_runmake -e DESTDIR=${D} BINDIR=${bindir} \
+               LIBDIR=${libdir} UNITDIR=${systemd_system_unitdir} \
+                       SYSCONFDIR=${sysconfdir} \
+                       install
+       rm -rf ${D}/usr/lib/systemd
+
+       # fix the path for init scripts
+       sed -i -e 's|rc.d/||' ${D}/${systemd_system_unitdir}/*.service
+
+       # fix the path for binaries
+       sed -i -e 's|/usr/local/bin/|${bindir}/|' ${D}${sysconfdir}/init.d/fminit
+}
+
+FILES_fm-mgr = "  \
+       ${bindir}/fmManager \
+       ${systemd_system_unitdir}/fminit.service \
+       ${sysconfdir}/init.d/fminit \
+       ${sysconfdir}/logrotate.d/fm.logrotate \
+       "
diff --git a/meta-stx/recipes-core/stx-fault/fm-rest-api.inc b/meta-stx/recipes-core/stx-fault/fm-rest-api.inc
new file mode 100644 (file)
index 0000000..ce1e282
--- /dev/null
@@ -0,0 +1,52 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " fm-rest-api"
+
+do_configure_append () {
+       cd ${S}/fm-rest-api/fm
+       distutils_do_configure
+}
+
+do_compile_append() {
+       cd ${S}/fm-rest-api/fm
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/fm-rest-api/fm
+       distutils_do_install
+       install -d -m 755 ${D}/${systemd_system_unitdir}
+       install -p -D -m 644 scripts/fm-api.service ${D}/${systemd_system_unitdir}
+       install -p -D -m 755 scripts/fm-api ${D}/${sysconfdir}/init.d/fm-api
+       install -p -D -m 644 fm-api-pmond.conf ${D}/${sysconfdir}/pmon.d/fm-api.conf
+
+       # fix the path for init scripts
+       sed -i -e 's|rc.d/||' ${D}/${systemd_system_unitdir}/*.service
+}
+
+FILES_fm-rest-api = " \
+       ${systemd_system_unitdir}/fm-api.service \
+       ${sysconfdir}/init.d/fm-api \
+       ${sysconfdir}/pmon.d/fm-api.conf \
+       ${libdir}/python2.7/site-packages/fm \
+       ${libdir}/python2.7/site-packages/fm-1.0.0-py2.7.egg-info \
+       ${bindir}/fm-dbsync \
+       ${bindir}/fm-api \
+       "
+
+#pkg_postinst_ontarget_fm-rest-api() {
+#      /usr/bin/systemctl enable fm-api.service
+#}
diff --git a/meta-stx/recipes-core/stx-fault/python-fmclient.inc b/meta-stx/recipes-core/stx-fault/python-fmclient.inc
new file mode 100644 (file)
index 0000000..7f6ec26
--- /dev/null
@@ -0,0 +1,41 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " python-fmclient"
+
+do_configure_append () {
+       cd ${S}/python-fmclient/fmclient
+       distutils_do_configure
+}
+
+do_compile_append() {
+       cd ${S}/python-fmclient/fmclient
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/python-fmclient/fmclient
+       distutils_do_install
+
+       install -d -m 0755 ${D}/${sysconfdir}/bash_completion.d/
+       install -p -D -m 664 tools/fm.bash_completion ${D}/${sysconfdir}/bash_completion.d/fm.bash_completion
+}
+
+FILES_python-fmclient = " \
+       ${bindir}/fm \
+       ${libdir}/python2.7/site-packages/fmclient-*.egg-info/ \
+       ${libdir}/python2.7/site-packages/fmclient/ \
+       ${sysconfdir}/bash_completion.d/fm.bash_completion \
+       "
diff --git a/meta-stx/recipes-core/stx-fault/snmp-audittrail.inc b/meta-stx/recipes-core/stx-fault/snmp-audittrail.inc
new file mode 100644 (file)
index 0000000..680516d
--- /dev/null
@@ -0,0 +1,40 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " snmp-audittrail"
+
+##################
+# DEPENDS = " fm-common net-snmp"
+##################
+
+do_compile_append() {
+       cd ${S}/snmp-audittrail/sources
+       oe_runmake -e \
+               LDFLAGS="${LDFLAGS} -L${S}/fm-common/sources" \
+               CCFLAGS="${CXXFLAGS} -fPIC" \
+               INCLUDES="-I. -I${S}/fm-common/sources" \
+               build
+}
+
+do_install_append () {
+       cd ${S}/snmp-audittrail/sources
+       oe_runmake -e DESTDIR=${D} LIB_DIR=${libdir} MIBVER=0 PATCH=0 install
+}
+
+FILES_snmp-audittrail = " \
+       ${libdir}/libsnmpAuditPlugin.so.1.0.0 \
+       ${libdir}/libsnmpAuditPlugin.so.1 \
+       "
+
diff --git a/meta-stx/recipes-core/stx-fault/snmp-ext.inc b/meta-stx/recipes-core/stx-fault/snmp-ext.inc
new file mode 100644 (file)
index 0000000..f0a4b98
--- /dev/null
@@ -0,0 +1,40 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " snmp-ext"
+
+####################
+# DEPENDS = " fm-common snmp-audittrail"
+####################
+
+do_compile_append() {
+       cd ${S}/snmp-ext/sources
+       oe_runmake -e PATCH=0 \
+               INCLUDES="-I. -I${S}/fm-common/sources" \
+               LDFLAGS="${LDFLAGS} -L${S}/fm-common/sources" \
+               CCFLAGS="${CXXFLAGS} -fPIC" lib
+}
+
+do_install_append () {
+       cd ${S}/snmp-ext/sources
+       oe_runmake -e DEST_DIR=${D} LIB_DIR=${libdir} MIBVER=0 PATCH=0 install
+}
+
+FILES_snmp-ext_append = " \
+       ${datadir}/snmp/mibs/wrsAlarmMib.mib.txt \
+       ${datadir}/snmp/mibs/wrsEnterpriseReg.mib.txt \
+       ${libdir}/libcgtsAgentPlugin.so.1.0.0 \
+       ${libdir}/libcgtsAgentPlugin.so.1 \
+       "
diff --git a/meta-stx/recipes-core/stx-fault/stx-fault.bb b/meta-stx/recipes-core/stx-fault/stx-fault.bb
new file mode 100644 (file)
index 0000000..803cf87
--- /dev/null
@@ -0,0 +1,77 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-fault"
+
+INSANE_SKIP_${PN} = "ldflags"
+
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "2025f585c5b92890c8cb32c480b0151c7c1cb545"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = " \
+       git://opendev.org/starlingx/fault.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://0001-Honor-the-build-system-LDFLAGS.patch \
+       file://0001-Use-build-systems-LDFLAGS.patch \
+       file://0001-snmp-ext-use-build-systems-LDFLAGS.patch \
+       "
+
+inherit setuptools
+DEPENDS = " \
+       util-linux \
+       postgresql \
+       python \
+       python-pbr-native \
+       python-six \
+       python-oslo.i18n \
+       python-oslo.utils \
+       python-requests \
+       bash \
+       net-snmp \
+"
+
+RDEPENDS_${PN} += " bash"
+
+cgcs_doc_deploy = "/opt/deploy/cgcs_doc"
+
+require fm-common.inc
+require fm-api.inc
+require fm-doc.inc
+require fm-mgr.inc
+require fm-rest-api.inc
+require python-fmclient.inc
+require snmp-audittrail.inc
+require snmp-ext.inc
+
+do_configure() {
+       :
+}
+
+do_compile() {
+       :
+}
+
+do_install() {
+       :
+}
+
+FILES_${PN} = " "
diff --git a/meta-stx/recipes-core/stx-gui/stx-gui.bb b/meta-stx/recipes-core/stx-gui/stx-gui.bb
new file mode 100644 (file)
index 0000000..5325f85
--- /dev/null
@@ -0,0 +1,77 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-gui"
+
+PACKAGES = "starlingx-dashboard"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "d1c22e49a95f92e91049b96f44e685f46785977c"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+
+
+SRC_URI = "git://opendev.org/starlingx/gui.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit distutils python-dir
+
+DEPENDS = "\
+       python \
+       python-pbr-native \
+       "
+
+#RDEPENDS_starlingx-dashboard += " \
+#      openstack-dashboard \
+#      "
+
+RDEPENDS_starlingx-dashboard_append = " \
+       ${PYTHON_PN}-cephclient \
+       "
+
+do_configure () {
+       cd ${S}/starlingx-dashboard/starlingx-dashboard
+       distutils_do_configure
+}
+
+
+do_compile () {
+       cd ${S}/starlingx-dashboard/starlingx-dashboard
+       distutils_do_compile
+}
+
+
+do_install () {
+       cd ${S}/starlingx-dashboard/starlingx-dashboard
+       distutils_do_install
+       #install -d -m 0755 ${D}/${datadir}/openstack-dashboard/openstack_dashboard/enabled/
+       #install -d -m 0755 ${D}/${datadir}/openstack-dashboard/openstack_dashboard/themes/starlingx/
+       #install -d -m 0755 ${D}/${datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.d
+       #install -p -D -m 755 build/lib/starlingx_dashboard/enabled/* \
+       #               ${D}/${datadir}/openstack-dashboard/openstack_dashboard/enabled/
+       #install -p -D -m 755 build/lib/starlingx_dashboard/themes/* \
+       #               ${D}/${datadir}/openstack-dashboard/openstack_dashboard/themes/starlingx_dashboard
+       #install -p -D -m 755 build/lib/starlingx_dashboard/local/local_settings.d/* \
+       #       ${D}/${datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.d
+}
+
+FILES_starlingx-dashboard = " \
+       ${PYTHON_SITEPACKAGES_DIR} \
+       "
diff --git a/meta-stx/recipes-core/stx-ha/files/0001-Allow-user-to-define-destination-libdir.patch b/meta-stx/recipes-core/stx-ha/files/0001-Allow-user-to-define-destination-libdir.patch
new file mode 100644 (file)
index 0000000..1e371b2
--- /dev/null
@@ -0,0 +1,47 @@
+From 128062759147c9a903d14ad7edbe6ead04f95812 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Fri, 24 Jan 2020 13:10:02 -0800
+Subject: [PATCH 1/2] Allow user to define destination libdir
+
+---
+ service-mgmt/sm-db/src/Makefile | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/service-mgmt/sm-db/src/Makefile b/service-mgmt/sm-db/src/Makefile
+index a29b1ac..379ea00 100644
+--- a/service-mgmt/sm-db/src/Makefile
++++ b/service-mgmt/sm-db/src/Makefile
+@@ -33,6 +33,10 @@ EXTRACCFLAGS= -D__STDC_FORMAT_MACROS
+ LDLIBS= -lsqlite3 -lglib-2.0 -luuid -lrt -lsm_common
+ LDFLAGS = -shared -rdynamic
++LIB_DIR = /usr/lib64
++BIN_DIR = /usr/bin
++INC_DIR = /usr/include
++
+ build: libsm_db.so sm_db_build
+ .c.o:
+@@ -51,12 +55,13 @@ sm_db_build: ${OBJS}
+       $(CXX) $(INCLUDES) $(CCFLAGS) $(EXTRACCFLAGS) $(OBJS) $(LDLIBS) -o sm_db_build
+ install:
+-      install -d ${DEST_DIR}/usr/lib64
+-      install libsm_db.so.${VER} $(DEST_DIR)/usr/lib64
+-      cp -P libsm_db.so libsm_db.so.$(VER_MJR) $(DEST_DIR)/usr/lib64
+-      install -d ${DEST_DIR}/usr/include
+-      install -m 0644 *.h ${DEST_DIR}/usr/include
+-      install -d 755 ${DEST_DIR}/usr/bin
++      install -d ${DEST_DIR}/${LIB_DIR}
++      install libsm_db.so.${VER} $(DEST_DIR)/${LIB_DIR}
++      cp -P libsm_db.so libsm_db.so.$(VER_MJR) $(DEST_DIR)/${LIB_DIR}
++      install -d ${DEST_DIR}/${INC_DIR}
++      install -m 0644 *.h ${DEST_DIR}/${INC_DIR}
++	install -d -m 755 ${DEST_DIR}/${BIN_DIR}
++      install -m 0744 sm_db_build ${DEST_DIR}/${BIN_DIR}
+ clean:
+       @rm -f *.o *.a *.so *.so.*
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-ha/files/0002-Install-sm-eru-sm-eru-dump-and-sm-eru-watchdog.patch b/meta-stx/recipes-core/stx-ha/files/0002-Install-sm-eru-sm-eru-dump-and-sm-eru-watchdog.patch
new file mode 100644 (file)
index 0000000..f020f37
--- /dev/null
@@ -0,0 +1,30 @@
+From bfde9f6164bd729764ad3f85b0c1aacce01ba4d0 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Fri, 24 Jan 2020 13:11:25 -0800
+Subject: [PATCH 2/2] Install sm-eru, sm-eru-dump, and sm-eru-watchdog
+
+---
+ service-mgmt/sm-common/src/Makefile | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/service-mgmt/sm-common/src/Makefile b/service-mgmt/sm-common/src/Makefile
+index 99f736d..7d138f9 100644
+--- a/service-mgmt/sm-common/src/Makefile
++++ b/service-mgmt/sm-common/src/Makefile
+@@ -69,8 +69,11 @@ install:
+       # install of these 3 are in the .spec file so that they can be
+       # renamed with '-' like they are in the bitbake file.
+       #
+-      # install -d $(DEST_DIR)$(BIN_DIR)
+-      # install sm_watchdog sm_eru sm_eru_dump $(DEST_DIR)$(BIN_DIR)
++
++      install -d -m 755 $(DEST_DIR)$(BIN_DIR)
++	install -p -m 755 sm_eru $(DEST_DIR)$(BIN_DIR)/sm-eru
++      install -p -m 755 sm_eru_dump $(DEST_DIR)$(BIN_DIR)/sm-eru-dump
++      install -p -m 755 sm_watchdog $(DEST_DIR)$(BIN_DIR)/sm-watchdog
+       install -d $(DEST_DIR)$(LIB_DIR)
+       install libsm_common.so.${VER} $(DEST_DIR)$(LIB_DIR)
+       cp -P libsm_common.so libsm_common.so.$(VER_MJR) $(DEST_DIR)$(LIB_DIR)
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-ha/files/0003-pragma-ignore-Wunused-result-errors-with-gcc-8.3.patch b/meta-stx/recipes-core/stx-ha/files/0003-pragma-ignore-Wunused-result-errors-with-gcc-8.3.patch
new file mode 100644 (file)
index 0000000..bc28593
--- /dev/null
@@ -0,0 +1,36 @@
+From b990cc6c5fad7fb0599dbdbb0ea693dca885d1ed Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Fri, 24 Jan 2020 14:16:16 -0800
+Subject: [PATCH] pragma ignore -Wunused-result errors with gcc 8.3
+
+---
+ service-mgmt/sm/src/sm_node_api.cpp | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/service-mgmt/sm/src/sm_node_api.cpp b/service-mgmt/sm/src/sm_node_api.cpp
+index 179ec77..2866dbe 100644
+--- a/service-mgmt/sm/src/sm_node_api.cpp
++++ b/service-mgmt/sm/src/sm_node_api.cpp
+@@ -829,7 +829,9 @@ static bool sm_node_api_reboot_timeout( SmTimerIdT timer_id, int64_t user_data )
+         return( true );
+     }
++#pragma GCC diagnostic ignored "-Wunused-result"
+     write( sysrq_handler_fd, "1", 1 );
++#pragma GCC diagnostic error "-Wunused-result"
+     close( sysrq_handler_fd );
+     // Trigger sysrq command.
+@@ -856,7 +858,9 @@ static bool sm_node_api_reboot_timeout( SmTimerIdT timer_id, int64_t user_data )
+               "************************************" );
+     sleep(5); // wait 5 seconds before a forced reboot.
++#pragma GCC diagnostic ignored "-Wunused-result"
+     write( sysrq_tigger_fd, "b", 1 ); 
++#pragma GCC diagnostic error "-Wunused-result"
+     close( sysrq_tigger_fd );
+     return( true );
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-ha/files/0004-Cast-size_t-to-int-to-silence-gcc-8.3.patch b/meta-stx/recipes-core/stx-ha/files/0004-Cast-size_t-to-int-to-silence-gcc-8.3.patch
new file mode 100644 (file)
index 0000000..1557cee
--- /dev/null
@@ -0,0 +1,25 @@
+From 5d23f7c297943f8ce89ace74f34c6c4ee0e87ee3 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Fri, 24 Jan 2020 14:33:20 -0800
+Subject: [PATCH] Cast size_t to int to silence gcc 8.3
+
+---
+ service-mgmt/sm/src/sm_cluster_hbs_info_msg.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/service-mgmt/sm/src/sm_cluster_hbs_info_msg.cpp b/service-mgmt/sm/src/sm_cluster_hbs_info_msg.cpp
+index ca91c18..cfb9034 100644
+--- a/service-mgmt/sm/src/sm_cluster_hbs_info_msg.cpp
++++ b/service-mgmt/sm/src/sm_cluster_hbs_info_msg.cpp
+@@ -182,7 +182,7 @@ void SmClusterHbsInfoMsg::_cluster_hbs_info_msg_received( int selobj, int64_t us
+             }
+             return;
+         }
+-        DPRINTFD("msg received %d bytes. buffer size %d", bytes_read, sizeof(msg));
++        DPRINTFD("msg received %d bytes. buffer size %d", bytes_read, (int)sizeof(msg));
+         if(size_of_msg_header > (unsigned int)bytes_read)
+         {
+             DPRINTFE("size not right, msg size %d, expected not less than %d",
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-ha/sm-api.inc b/meta-stx/recipes-core/stx-ha/sm-api.inc
new file mode 100644 (file)
index 0000000..36914ab
--- /dev/null
@@ -0,0 +1,63 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " sm-api"
+
+RDEPENDS_sm-api += " \
+       bash \
+       python-six \
+       chkconfig \
+       mtce-pmon \
+       "
+
+do_configure_append () {
+       cd ${S}/service-mgmt-api/sm-api
+       distutils_do_configure
+}
+
+do_compile_append () {
+       cd ${S}/service-mgmt-api/sm-api
+       distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/service-mgmt-api/sm-api
+       distutils_do_install
+       install -d -m 0755 ${D}/${sysconfdir}/sm
+       install -d -m 0755 ${D}/${sysconfdir}/init.d
+       install -d -m 0755 ${D}/${sysconfdir}/pmon.d
+       install -d -m 0755 ${D}/${sysconfdir}/sm-api
+       install -d -m 0755 ${D}/${systemd_system_unitdir}
+       install -m 644 scripts/sm_api.ini ${D}/${sysconfdir}/sm
+       install -m 755 scripts/sm-api ${D}/${sysconfdir}/init.d
+       install -m 644 scripts/sm-api.service ${D}/${systemd_system_unitdir}
+       install -m 644 scripts/sm-api.conf ${D}/${sysconfdir}/pmon.d
+       install -m 644 etc/sm-api/policy.json ${D}/${sysconfdir}/sm-api
+}
+
+FILES_sm-api = " \
+       ${libdir}/python2.7/site-packages/sm_api*.egg-info/ \
+       ${libdir}/python2.7/site-packages/sm_api/ \
+       ${systemd_system_unitdir}/sm-api.service \
+       ${sysconfdir}/init.d/sm-api \
+       ${sysconfdir}/pmon.d/sm-api.conf \
+       ${sysconfdir}/sm/sm_api.ini \
+       ${sysconfdir}/sm-api/policy.json \
+       ${bindir}/sm-api \
+       "
+
+SYSTEMD_PACKAGES += "sm-api"
+SYSTEMD_SERVICE_sm-api = "sm-api.service"
+SYSTEMD_AUTO_ENABLE_sm-api = "enable"
diff --git a/meta-stx/recipes-core/stx-ha/sm-client.inc b/meta-stx/recipes-core/stx-ha/sm-client.inc
new file mode 100644 (file)
index 0000000..8d044c3
--- /dev/null
@@ -0,0 +1,44 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " sm-client"
+
+RDEPENDS_sm-client += " \
+       python \
+       python-six \
+       "
+
+do_configure_append () {
+       cd ${S}/service-mgmt-client/sm-client
+       distutils_do_configure
+}
+
+do_compile_append() {
+       cd ${S}/service-mgmt-client/sm-client
+       distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/service-mgmt-client/sm-client
+       distutils_do_install
+       install -d -m 0755 ${D}/${bindir}
+       install -m 755 usr/bin/smc ${D}/${bindir}
+}
+
+FILES_sm-client = " \
+       ${bindir}/smc \
+       ${libdir}/python2.7/site-packages/sm_client*.egg-info/ \
+       ${libdir}/python2.7/site-packages/sm_client/ \
+       "
diff --git a/meta-stx/recipes-core/stx-ha/sm-common.inc b/meta-stx/recipes-core/stx-ha/sm-common.inc
new file mode 100644 (file)
index 0000000..0b4ec9a
--- /dev/null
@@ -0,0 +1,87 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " libsm-common"
+PACKAGES += " sm-common-libs"
+PACKAGES += " sm-eru"
+
+RDEPENDS_sm-common-libs += " \
+       bash \
+       sqlite \
+       util-linux \
+       systemd \
+       "
+RDEPENDS_sm-eru = " sm-common-libs"
+RDEPENDS_libsm-common = " sm-common-libs"
+
+
+do_configure_prepend () {
+       :
+}
+
+do_compile_prepend () {
+       cd ${S}/service-mgmt/sm-common/src
+       oe_runmake -e VER=0 VER_MJR=1 \
+               INCLUDES="-I. $(pkg-config --cflags glib-2.0)" \
+               CCFLAGS="${CXXFLAGS} -fPIC" LDFLAGS="${LDFLAGS} -shared -rdynamic" \
+		EXTRACCFLAGS="${LDFLAGS}"
+
+}
+
+do_install_prepend () {
+       cd ${S}/service-mgmt/sm-common/src
+       oe_runmake -e DEST_DIR=${D} BIN_DIR=${bindir} UNIT_DIR=${systemd_system_unitdir} \
+                       LIB_DIR=${libdir} INC_DIR=${includedir} VER=0 VER_MJR=1 install
+       cd ${S}/service-mgmt/sm-common/
+       install -d -m 755 ${D}/${systemd_system_unitdir}
+       # install -m 755 -d ${D}/${sysconfdir}/pmon.d
+       # install -m 755 -d ${D}/${sysconfdir}/init.d
+
+       install -m 644 -p -D scripts/sm-eru.service ${D}/${systemd_system_unitdir}/sm-eru.service
+       install -m 644 -p -D scripts/sm-watchdog.service ${D}/${systemd_system_unitdir}/sm-watchdog.service
+
+       install -m 640 -p -D scripts/sm-eru.conf ${D}/${sysconfdir}/pmon.d/sm-eru.conf
+       install -m 640 -p -D scripts/sm-watchdog.conf ${D}/${sysconfdir}/pmon.d/sm-watchdog.conf
+       install -m 750 -p -D scripts/sm-eru ${D}/${sysconfdir}/init.d/sm-eru
+       install -m 750 -p -D scripts/sm-watchdog ${D}/${sysconfdir}/init.d/sm-watchdog
+}
+
+FILES_libsm-common = " \
+       ${libdir}/libsm_common.so.0 \
+       ${libdir}/libsm_common.so.1 \
+       ${libdir}/libsm_common.so \
+       "
+
+FILES_sm-common-libs = " \
+       var/lib/sm/watchdog/modules/libsm_watchdog_nfs.so.0 \
+       var/lib/sm/watchdog/modules/libsm_watchdog_nfs.so.1 \
+       "
+
+FILES_sm-eru = " \
+       ${bindir}/sm-eru \
+       ${bindir}/sm-eru-dump \
+       ${bindir}/sm-watchdog \
+       ${systemd_system_unitdir}/sm-eru.service \
+       ${systemd_system_unitdir}/sm-watchdog.service \
+       ${sysconfdir}/init.d/sm-eru \
+       ${sysconfdir}/pmon.d/sm-eru.conf \
+       ${sysconfdir}/init.d/sm-watchdog \
+       ${sysconfdir}/pmon.d/sm-watchdog.conf \
+       "
+
+SYSTEMD_PACKAGES += "sm-eru"
+SYSTEMD_SERVICE_sm-eru = "sm-eru.service sm-watchdog.service"
+SYSTEMD_AUTO_ENABLE_sm-eru = "enable"
diff --git a/meta-stx/recipes-core/stx-ha/sm-db.inc b/meta-stx/recipes-core/stx-ha/sm-db.inc
new file mode 100644 (file)
index 0000000..b8e9923
--- /dev/null
@@ -0,0 +1,47 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " sm-db"
+
+DEPENDS =+ "sqlite3-native"
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append () {
+       cd ${S}/service-mgmt/sm-db
+       sqlite3 database/sm.db < database/create_sm_db.sql
+       sqlite3 database/sm.hb.db < database/create_sm_hb_db.sql
+
+       oe_runmake -e VER=0 VER_MJR=1 \
+               INCLUDES="-I. -I${S}/service-mgmt/sm-common/src " \
+               CCFLAGS="${CXXFLAGS} -fPIC" \
+               LDFLAGS="${LDFLAGS} -shared -rdynamic -L${S}/service-mgmt/sm-common/src " \
+               EXTRACCFLAGS="-D_TEST__AA__BB ${LDFLAGS} -L${S}/service-mgmt/sm-common/src "
+}
+
+do_install_append () {
+       cd ${S}/service-mgmt/sm-db
+       oe_runmake -e DEST_DIR=${D} BIN_DIR=${bindir} UNIT_DIR=${systemd_system_unitdir} \
+                       LIB_DIR=${libdir} INC_DIR=${includedir} VER=0 VER_MJR=1 install
+}
+
+FILES_sm-db = " \
+       ${bindir}/sm_db_build \
+       ${libdir}/libsm_db.so.1 \
+       ${libdir}/libsm_db.so.0 \
+       ${localstatedir}/lib/sm \
+       "
diff --git a/meta-stx/recipes-core/stx-ha/sm-tools.inc b/meta-stx/recipes-core/stx-ha/sm-tools.inc
new file mode 100644 (file)
index 0000000..9cf5c2c
--- /dev/null
@@ -0,0 +1,48 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " sm-tools"
+
+RDEPENDS_sm-tools += " python"
+
+do_configure_append () {
+       cd ${S}/service-mgmt-tools/sm-tools
+       distutils_do_configure
+} 
+
+do_compile_append() {
+       cd ${S}/service-mgmt-tools/sm-tools
+       distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/service-mgmt-tools/sm-tools
+       distutils_do_install
+}
+
+FILES_sm-tools = " \
+       ${bindir}/sm-query \
+       ${bindir}/sm-provision \
+       ${bindir}/sm-restart \
+       ${bindir}/sm-unmanage \
+       ${bindir}/sm-dump \
+       ${bindir}/sm-iface-state \
+       ${bindir}/sm-configure \
+       ${bindir}/sm-manage \
+       ${bindir}/sm-deprovision \
+       ${bindir}/sm-restart-safe \
+       ${libdir}/python2.7/site-packages/sm_tools-*.egg-info/ \
+       ${libdir}/python2.7/site-packages/sm_tools/ \
+       "
diff --git a/meta-stx/recipes-core/stx-ha/sm.inc b/meta-stx/recipes-core/stx-ha/sm.inc
new file mode 100644 (file)
index 0000000..b59d416
--- /dev/null
@@ -0,0 +1,85 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " sm"
+
+RDEPENDS_sm += " \
+       bash \
+       python \
+       logrotate \
+       time \
+       systemd \
+       chkconfig \
+       mtce-pmon \
+       "
+#      aaa_base
+
+do_configure_append () {
+       :
+}
+
+do_compile_append() {
+	cd ${S}/service-mgmt/sm/src
+	oe_runmake -e -j1 VER=0 VER_MJR=1 \
+		INCLUDES="-I. -I${S}/service-mgmt/sm-common/src \
+			-I${S}/service-mgmt/sm-db/src $(pkg-config --cflags glib-2.0)" \
+		EXTRACCFLAGS="-I. -I${S}/service-mgmt/sm-common/src \
+			-I${S}/service-mgmt/sm-db/src $(pkg-config --cflags glib-2.0) \
+			-L${S}/service-mgmt/sm-common/src -L${S}/service-mgmt/sm-db/src \
+				$(pkg-config --libs glib-2.0)" \
+		CCFLAGS="${CXXFLAGS} -std=c++11" LDFLAGS="${LDFLAGS} -rdynamic"
+}
+
+do_install_append () {
+	cd ${S}/service-mgmt/sm/src
+	oe_runmake -e DEST_DIR=${D} BIN_DIR=${bindir} UNIT_DIR=${systemd_system_unitdir} \
+		LIB_DIR=${libdir} INC_DIR=${includedir} VER=0 VER_MJR=1 install
+	cd ${S}/service-mgmt/sm/scripts
+
+	install -d ${D}/${sysconfdir}/init.d
+	install sm ${D}/${sysconfdir}/init.d/sm
+	install sm.shutdown ${D}/${sysconfdir}/init.d/sm-shutdown
+	install -d -m0755 ${D}/${sysconfdir}/pmon.d
+	install -m 644 sm.conf ${D}/${sysconfdir}/pmon.d/sm.conf
+	install -d ${D}/${sysconfdir}/logrotate.d
+	install -m 644 sm.logrotate ${D}/${sysconfdir}/logrotate.d/sm.logrotate
+	install -d -m 755 ${D}/${sbindir}
+	install sm.notify ${D}/${sbindir}/stx-ha-sm-notify
+	install sm.troubleshoot ${D}/${sbindir}/sm-troubleshoot
+	install sm.notification ${D}/${sbindir}/sm-notification
+	install -d ${D}/${systemd_system_unitdir}
+	install -m 644 *.service ${D}/${systemd_system_unitdir}
+}
+
+FILES_sm = " \
+       ${bindir}/sm \
+       ${sysconfdir}/init.d/sm \
+       ${sysconfdir}/init.d/sm-shutdown \
+       ${sysconfdir}/pmon.d/sm.conf \
+       ${sysconfdir}/logrotate.d/sm.logrotate \
+       ${sbindir}/stx-ha-sm-notify \
+       ${sbindir}/sm-troubleshoot \
+       ${sbindir}/sm-notification \
+       ${systemd_system_unitdir}/sm-shutdown.service \
+       ${systemd_system_unitdir}/sm.service \
+       "
+
+pkg_postinst_ontarget_sm_append () {
+       /usr/bin/update-alternatives --install /usr/sbin/sm-notify sm-notify /usr/sbin/stx-ha-sm-notify 5
+}
+
+SYSTEMD_PACKAGES += "sm"
+SYSTEMD_SERVICE_sm = "sm.service sm-shutdown.service"
+SYSTEMD_AUTO_ENABLE_sm = "enable"
diff --git a/meta-stx/recipes-core/stx-ha/stx-ha.bb b/meta-stx/recipes-core/stx-ha/stx-ha.bb
new file mode 100644 (file)
index 0000000..cdbdeb3
--- /dev/null
@@ -0,0 +1,93 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-ha"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCNAME = "ha"
+SRCREV = "a7b7d35b9922a3f2a8462492b7f1958f135a612d"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+
+#TODO:
+#3b83ef96387f14655fc854ddc3c6bd57  ./LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./service-mgmt-api/sm-api/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./service-mgmt-client/LICENSE
+#1dece7821bf3fd70fe1309eaa37d52a2  ./service-mgmt-client/sm-client/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./service-mgmt-tools/sm-tools/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./service-mgmt/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./service-mgmt/sm-common/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./service-mgmt/sm-db/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./service-mgmt/sm/LICENSE
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = "git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://0001-Allow-user-to-define-destination-libdir.patch \
+       file://0002-Install-sm-eru-sm-eru-dump-and-sm-eru-watchdog.patch \
+       file://0003-pragma-ignore-Wunused-result-errors-with-gcc-8.3.patch \
+       file://0004-Cast-size_t-to-int-to-silence-gcc-8.3.patch \
+       "
+
+inherit setuptools
+inherit pkgconfig
+inherit systemd
+
+DISTRO_FEATURES_BACKFILL_CONSIDERED_remove = "sysvinit"
+
+DEPENDS += " \
+       stx-fault \
+       stx-metal \
+       sqlite3 \
+       python \
+       python-pbr-native \
+        glib-2.0 \
+       sqlite3 \
+       "
+
+require sm-common.inc
+require sm-db.inc
+require sm.inc
+require sm-api.inc
+require sm-client.inc
+require sm-tools.inc
+require stx-ocf-scripts.inc
+
+#TODO: Shouldn't have to do this
+LDFLAGS_remove = "-Wl,--as-needed"
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :       
+}
+
+do_install () {
+       :
+}
+
+FILES_${PN} = " "
+FILES_${PN}-dev += " \
+       var/lib/sm/watchdog/modules/libsm_watchdog_nfs.so \
+       "
+#      var/lib/sm/watchdog/modules/libsm_watchdog_nfs.so.1 \
+#      var/lib/sm/watchdog/modules/libsm_watchdog_nfs.so.0 \
+#      ${libdir}/libsm_common.so.1 \
+#      ${libdir}/libsm_common.so.0 
diff --git a/meta-stx/recipes-core/stx-ha/stx-ocf-scripts.inc b/meta-stx/recipes-core/stx-ha/stx-ocf-scripts.inc
new file mode 100644 (file)
index 0000000..7e49966
--- /dev/null
@@ -0,0 +1,40 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " stx-ocf-scripts"
+
+RDEPENDS_stx-ocf-scripts += " \
+       bash \
+       openstack-ras \
+       "
+
+do_configure_append () {
+       :
+}
+
+do_compile_append() {
+       :
+}
+
+do_install_append () {
+	cd ${S}/stx-ocf-scripts/src/ocf
+
+	install -d -m 755 ${D}/usr/lib/ocf/resource.d/openstack
+	install -p -D -m 755 $(find . -type f) ${D}/usr/lib/ocf/resource.d/openstack/
+}
+
+FILES_stx-ocf-scripts = " \
+	/usr/lib/ocf/resource.d/openstack/ \
+	"
diff --git a/meta-stx/recipes-core/stx-integ-kubernetes/cloud-provider-openstack.bb b/meta-stx/recipes-core/stx-integ-kubernetes/cloud-provider-openstack.bb
new file mode 100644 (file)
index 0000000..8ef02d8
--- /dev/null
@@ -0,0 +1,59 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "cloud-provider-openstack"
+
+STABLE = "starlingx/master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "70609a3d55e5b7d2be82667fc35792505f9013c4"
+S = "${WORKDIR}/git"
+PV = "19.05"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = "git://opendev.org/starlingx/config.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+RDEPENDS_${PN} += " bash"
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install () {
+
+	cd ${S}/filesystem/cloud-provider-openstack/
+	install -d -m755 ${D}/${bindir}
+	install -d -m755 ${D}/${systemd_system_unitdir}
+
+	install -D -m755 files/cloud-provider-openstack.sh ${D}/${bindir}
+	install -D -m644 files/cloud-provider-openstack.service ${D}/${systemd_system_unitdir}
+
+
+}
+
+#pkg_postinst_ontarget_${PN} () {
+#      /usr/bin/systemctl enable cloud-provider-openstack.service
+#}     
+
+FILES_${PN}_append = " \
+		${systemd_system_unitdir} \
+		"
diff --git a/meta-stx/recipes-core/stx-integ/files/base/0001-cgcs-users-with-patch-ibsh-patches.patch b/meta-stx/recipes-core/stx-integ/files/base/0001-cgcs-users-with-patch-ibsh-patches.patch
new file mode 100644 (file)
index 0000000..7ce1650
--- /dev/null
@@ -0,0 +1,2191 @@
+From b62415943878891ce000b9e0b414354b60047876 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Tue, 2 Jul 2019 14:09:28 -0700
+Subject: [PATCH 1/2] cgcs-users with patch ibsh patches
+
+Applied ibsh-0.3e-cgcs.patch and copyright patch.
+---
+ base/cgcs-users/cgcs-users-1.0/BUGS           |  19 +
+ base/cgcs-users/cgcs-users-1.0/CONTRIBUTORS   |   7 +
+ base/cgcs-users/cgcs-users-1.0/COPYING        | 340 ++++++++++++++++++
+ base/cgcs-users/cgcs-users-1.0/COPYRIGHT      |  17 +
+ base/cgcs-users/cgcs-users-1.0/INSTALL        |  23 ++
+ base/cgcs-users/cgcs-users-1.0/Makefile       |  56 +++
+ base/cgcs-users/cgcs-users-1.0/README         |  29 ++
+ base/cgcs-users/cgcs-users-1.0/Release        |  17 +
+ base/cgcs-users/cgcs-users-1.0/TODO           |  10 +
+ base/cgcs-users/cgcs-users-1.0/VERSION        |   1 +
+ base/cgcs-users/cgcs-users-1.0/antixploit.c   | 131 +++++++
+ base/cgcs-users/cgcs-users-1.0/command.c      | 209 +++++++++++
+ base/cgcs-users/cgcs-users-1.0/config.c       | 179 +++++++++
+ base/cgcs-users/cgcs-users-1.0/delbadfiles.c  | 239 ++++++++++++
+ .../cgcs-users-1.0/example.allowall.xtns      |  28 ++
+ .../cgcs-users-1.0/example.denyall.xtns       |   2 +
+ base/cgcs-users/cgcs-users-1.0/execute.c      | 159 ++++++++
+ base/cgcs-users/cgcs-users-1.0/globals.cmds   |   8 +
+ base/cgcs-users/cgcs-users-1.0/globals.xtns   |   3 +
+ base/cgcs-users/cgcs-users-1.0/ibsh.h         | 126 +++++++
+ base/cgcs-users/cgcs-users-1.0/jail.c         | 101 ++++++
+ base/cgcs-users/cgcs-users-1.0/main.c         | 239 ++++++++++++
+ base/cgcs-users/cgcs-users-1.0/misc.c         |  52 +++
+ 23 files changed, 1995 insertions(+)
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/BUGS
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/CONTRIBUTORS
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/COPYING
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/COPYRIGHT
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/INSTALL
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/Makefile
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/README
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/Release
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/TODO
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/VERSION
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/antixploit.c
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/command.c
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/config.c
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/delbadfiles.c
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/example.allowall.xtns
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/example.denyall.xtns
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/execute.c
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/globals.cmds
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/globals.xtns
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/ibsh.h
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/jail.c
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/main.c
+ create mode 100644 base/cgcs-users/cgcs-users-1.0/misc.c
+
+diff --git a/base/cgcs-users/cgcs-users-1.0/BUGS b/base/cgcs-users/cgcs-users-1.0/BUGS
+new file mode 100644
+index 0000000..7dacaab
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/BUGS
+@@ -0,0 +1,19 @@
++** Open BUGS **
++None, so far.
++
++** Fixed BUGS **
++- Input length checking on all inputs, string copies, etc. is fixed.
++- The myscanf function will no longer accept more then 80 chars at once,
++so ibsh hopefully wont crash on a too long input.
++- Added signal.h in the header file, the lack of it caused compilation
++problems on some systems.
++- Fixed the infinite loop in DelBadFiles. This function is temporarily 
++taken out of the project
++- Removed the involvment of /bin/sh from system. Added path checking.
++- In jail root, not only ../ is not allowed, but .. too.
++- Fixed a bug, that happened on bsd, when the user pressed ^D.
++- Fixed a bug with opendir
++- Fixed a format string vulnerability in logprintbadfile(). Thanks to
++Kim Streich for the report.
++
++2005.05.23
+diff --git a/base/cgcs-users/cgcs-users-1.0/CONTRIBUTORS b/base/cgcs-users/cgcs-users-1.0/CONTRIBUTORS
+new file mode 100644
+index 0000000..35ca436
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/CONTRIBUTORS
+@@ -0,0 +1,7 @@
++CONTRIBUTORS TO PROJECT IBSH
++
++Kim Streich <kstreich at gmail.com>
++      * bug finder, debugger, tester.
++
++RazoR (Nikolay Alexandrov) <Nikolay@Alexandrov.ws>
++      * bug finder, debugger, tester.
+diff --git a/base/cgcs-users/cgcs-users-1.0/COPYING b/base/cgcs-users/cgcs-users-1.0/COPYING
+new file mode 100644
+index 0000000..d60c31a
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/COPYING
+@@ -0,0 +1,340 @@
++                  GNU GENERAL PUBLIC LICENSE
++                     Version 2, June 1991
++
++ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
++     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++                          Preamble
++
++  The licenses for most software are designed to take away your
++freedom to share and change it.  By contrast, the GNU General Public
++License is intended to guarantee your freedom to share and change free
++software--to make sure the software is free for all its users.  This
++General Public License applies to most of the Free Software
++Foundation's software and to any other program whose authors commit to
++using it.  (Some other Free Software Foundation software is covered by
++the GNU Library General Public License instead.)  You can apply it to
++your programs, too.
++
++  When we speak of free software, we are referring to freedom, not
++price.  Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++this service if you wish), that you receive source code or can get it
++if you want it, that you can change the software or use pieces of it
++in new free programs; and that you know you can do these things.
++
++  To protect your rights, we need to make restrictions that forbid
++anyone to deny you these rights or to ask you to surrender the rights.
++These restrictions translate to certain responsibilities for you if you
++distribute copies of the software, or if you modify it.
++
++  For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must give the recipients all the rights that
++you have.  You must make sure that they, too, receive or can get the
++source code.  And you must show them these terms so they know their
++rights.
++
++  We protect your rights with two steps: (1) copyright the software, and
++(2) offer you this license which gives you legal permission to copy,
++distribute and/or modify the software.
++
++  Also, for each author's protection and ours, we want to make certain
++that everyone understands that there is no warranty for this free
++software.  If the software is modified by someone else and passed on, we
++want its recipients to know that what they have is not the original, so
++that any problems introduced by others will not reflect on the original
++authors' reputations.
++
++  Finally, any free program is threatened constantly by software
++patents.  We wish to avoid the danger that redistributors of a free
++program will individually obtain patent licenses, in effect making the
++program proprietary.  To prevent this, we have made it clear that any
++patent must be licensed for everyone's free use or not licensed at all.
++
++  The precise terms and conditions for copying, distribution and
++modification follow.
++\f
++                  GNU GENERAL PUBLIC LICENSE
++   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
++
++  0. This License applies to any program or other work which contains
++a notice placed by the copyright holder saying it may be distributed
++under the terms of this General Public License.  The "Program", below,
++refers to any such program or work, and a "work based on the Program"
++means either the Program or any derivative work under copyright law:
++that is to say, a work containing the Program or a portion of it,
++either verbatim or with modifications and/or translated into another
++language.  (Hereinafter, translation is included without limitation in
++the term "modification".)  Each licensee is addressed as "you".
++
++Activities other than copying, distribution and modification are not
++covered by this License; they are outside its scope.  The act of
++running the Program is not restricted, and the output from the Program
++is covered only if its contents constitute a work based on the
++Program (independent of having been made by running the Program).
++Whether that is true depends on what the Program does.
++
++  1. You may copy and distribute verbatim copies of the Program's
++source code as you receive it, in any medium, provided that you
++conspicuously and appropriately publish on each copy an appropriate
++copyright notice and disclaimer of warranty; keep intact all the
++notices that refer to this License and to the absence of any warranty;
++and give any other recipients of the Program a copy of this License
++along with the Program.
++
++You may charge a fee for the physical act of transferring a copy, and
++you may at your option offer warranty protection in exchange for a fee.
++
++  2. You may modify your copy or copies of the Program or any portion
++of it, thus forming a work based on the Program, and copy and
++distribute such modifications or work under the terms of Section 1
++above, provided that you also meet all of these conditions:
++
++    a) You must cause the modified files to carry prominent notices
++    stating that you changed the files and the date of any change.
++
++    b) You must cause any work that you distribute or publish, that in
++    whole or in part contains or is derived from the Program or any
++    part thereof, to be licensed as a whole at no charge to all third
++    parties under the terms of this License.
++
++    c) If the modified program normally reads commands interactively
++    when run, you must cause it, when started running for such
++    interactive use in the most ordinary way, to print or display an
++    announcement including an appropriate copyright notice and a
++    notice that there is no warranty (or else, saying that you provide
++    a warranty) and that users may redistribute the program under
++    these conditions, and telling the user how to view a copy of this
++    License.  (Exception: if the Program itself is interactive but
++    does not normally print such an announcement, your work based on
++    the Program is not required to print an announcement.)
++\f
++These requirements apply to the modified work as a whole.  If
++identifiable sections of that work are not derived from the Program,
++and can be reasonably considered independent and separate works in
++themselves, then this License, and its terms, do not apply to those
++sections when you distribute them as separate works.  But when you
++distribute the same sections as part of a whole which is a work based
++on the Program, the distribution of the whole must be on the terms of
++this License, whose permissions for other licensees extend to the
++entire whole, and thus to each and every part regardless of who wrote it.
++
++Thus, it is not the intent of this section to claim rights or contest
++your rights to work written entirely by you; rather, the intent is to
++exercise the right to control the distribution of derivative or
++collective works based on the Program.
++
++In addition, mere aggregation of another work not based on the Program
++with the Program (or with a work based on the Program) on a volume of
++a storage or distribution medium does not bring the other work under
++the scope of this License.
++
++  3. You may copy and distribute the Program (or a work based on it,
++under Section 2) in object code or executable form under the terms of
++Sections 1 and 2 above provided that you also do one of the following:
++
++    a) Accompany it with the complete corresponding machine-readable
++    source code, which must be distributed under the terms of Sections
++    1 and 2 above on a medium customarily used for software interchange; or,
++
++    b) Accompany it with a written offer, valid for at least three
++    years, to give any third party, for a charge no more than your
++    cost of physically performing source distribution, a complete
++    machine-readable copy of the corresponding source code, to be
++    distributed under the terms of Sections 1 and 2 above on a medium
++    customarily used for software interchange; or,
++
++    c) Accompany it with the information you received as to the offer
++    to distribute corresponding source code.  (This alternative is
++    allowed only for noncommercial distribution and only if you
++    received the program in object code or executable form with such
++    an offer, in accord with Subsection b above.)
++
++The source code for a work means the preferred form of the work for
++making modifications to it.  For an executable work, complete source
++code means all the source code for all modules it contains, plus any
++associated interface definition files, plus the scripts used to
++control compilation and installation of the executable.  However, as a
++special exception, the source code distributed need not include
++anything that is normally distributed (in either source or binary
++form) with the major components (compiler, kernel, and so on) of the
++operating system on which the executable runs, unless that component
++itself accompanies the executable.
++
++If distribution of executable or object code is made by offering
++access to copy from a designated place, then offering equivalent
++access to copy the source code from the same place counts as
++distribution of the source code, even though third parties are not
++compelled to copy the source along with the object code.
++\f
++  4. You may not copy, modify, sublicense, or distribute the Program
++except as expressly provided under this License.  Any attempt
++otherwise to copy, modify, sublicense or distribute the Program is
++void, and will automatically terminate your rights under this License.
++However, parties who have received copies, or rights, from you under
++this License will not have their licenses terminated so long as such
++parties remain in full compliance.
++
++  5. You are not required to accept this License, since you have not
++signed it.  However, nothing else grants you permission to modify or
++distribute the Program or its derivative works.  These actions are
++prohibited by law if you do not accept this License.  Therefore, by
++modifying or distributing the Program (or any work based on the
++Program), you indicate your acceptance of this License to do so, and
++all its terms and conditions for copying, distributing or modifying
++the Program or works based on it.
++
++  6. Each time you redistribute the Program (or any work based on the
++Program), the recipient automatically receives a license from the
++original licensor to copy, distribute or modify the Program subject to
++these terms and conditions.  You may not impose any further
++restrictions on the recipients' exercise of the rights granted herein.
++You are not responsible for enforcing compliance by third parties to
++this License.
++
++  7. If, as a consequence of a court judgment or allegation of patent
++infringement or for any other reason (not limited to patent issues),
++conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License.  If you cannot
++distribute so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you
++may not distribute the Program at all.  For example, if a patent
++license would not permit royalty-free redistribution of the Program by
++all those who receive copies directly or indirectly through you, then
++the only way you could satisfy both it and this License would be to
++refrain entirely from distribution of the Program.
++
++If any portion of this section is held invalid or unenforceable under
++any particular circumstance, the balance of the section is intended to
++apply and the section as a whole is intended to apply in other
++circumstances.
++
++It is not the purpose of this section to induce you to infringe any
++patents or other property right claims or to contest validity of any
++such claims; this section has the sole purpose of protecting the
++integrity of the free software distribution system, which is
++implemented by public license practices.  Many people have made
++generous contributions to the wide range of software distributed
++through that system in reliance on consistent application of that
++system; it is up to the author/donor to decide if he or she is willing
++to distribute software through any other system and a licensee cannot
++impose that choice.
++
++This section is intended to make thoroughly clear what is believed to
++be a consequence of the rest of this License.
++\f
++  8. If the distribution and/or use of the Program is restricted in
++certain countries either by patents or by copyrighted interfaces, the
++original copyright holder who places the Program under this License
++may add an explicit geographical distribution limitation excluding
++those countries, so that distribution is permitted only in or among
++countries not thus excluded.  In such case, this License incorporates
++the limitation as if written in the body of this License.
++
++  9. The Free Software Foundation may publish revised and/or new versions
++of the General Public License from time to time.  Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++Each version is given a distinguishing version number.  If the Program
++specifies a version number of this License which applies to it and "any
++later version", you have the option of following the terms and conditions
++either of that version or of any later version published by the Free
++Software Foundation.  If the Program does not specify a version number of
++this License, you may choose any version ever published by the Free Software
++Foundation.
++
++  10. If you wish to incorporate parts of the Program into other free
++programs whose distribution conditions are different, write to the author
++to ask for permission.  For software which is copyrighted by the Free
++Software Foundation, write to the Free Software Foundation; we sometimes
++make exceptions for this.  Our decision will be guided by the two goals
++of preserving the free status of all derivatives of our free software and
++of promoting the sharing and reuse of software generally.
++
++                          NO WARRANTY
++
++  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
++FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
++OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
++PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
++OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
++TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
++PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
++REPAIR OR CORRECTION.
++
++  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
++REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
++INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
++OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
++TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
++YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
++PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGES.
++
++                   END OF TERMS AND CONDITIONS
++\f
++          How to Apply These Terms to Your New Programs
++
++  If you develop a new program, and you want it to be of the greatest
++possible use to the public, the best way to achieve this is to make it
++free software which everyone can redistribute and change under these terms.
++
++  To do so, attach the following notices to the program.  It is safest
++to attach them to the start of each source file to most effectively
++convey the exclusion of warranty; and each file should have at least
++the "copyright" line and a pointer to where the full notice is found.
++
++    <one line to give the program's name and a brief idea of what it does.>
++    Copyright (C) <year>  <name of author>
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++
++
++Also add information on how to contact you by electronic and paper mail.
++
++If the program is interactive, make it output a short notice like this
++when it starts in an interactive mode:
++
++    Gnomovision version 69, Copyright (C) year  name of author
++    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
++    This is free software, and you are welcome to redistribute it
++    under certain conditions; type `show c' for details.
++
++The hypothetical commands `show w' and `show c' should show the appropriate
++parts of the General Public License.  Of course, the commands you use may
++be called something other than `show w' and `show c'; they could even be
++mouse-clicks or menu items--whatever suits your program.
++
++You should also get your employer (if you work as a programmer) or your
++school, if any, to sign a "copyright disclaimer" for the program, if
++necessary.  Here is a sample; alter the names:
++
++  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
++  `Gnomovision' (which makes passes at compilers) written by James Hacker.
++
++  <signature of Ty Coon>, 1 April 1989
++  Ty Coon, President of Vice
++
++This General Public License does not permit incorporating your program into
++proprietary programs.  If your program is a subroutine library, you may
++consider it more useful to permit linking proprietary applications with the
++library.  If this is what you want to do, use the GNU Library General
++Public License instead of this License.
+diff --git a/base/cgcs-users/cgcs-users-1.0/COPYRIGHT b/base/cgcs-users/cgcs-users-1.0/COPYRIGHT
+new file mode 100644
+index 0000000..7507d05
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/COPYRIGHT
+@@ -0,0 +1,17 @@
++This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++Copyright (C) 2005  Attila Nagyidai
++
++This program is free software; you can redistribute it and/or
++modify it under the terms of the GNU General Public License
++as published by the Free Software Foundation; either version 2
++of the License, or (at your option) any later version.
++
++This program is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; if not, write to the Free Software
++Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/INSTALL b/base/cgcs-users/cgcs-users-1.0/INSTALL
+new file mode 100644
+index 0000000..42b1866
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/INSTALL
+@@ -0,0 +1,23 @@
++Installing ibsh is really easy, so no need for the usual sections
++in this document. There is no configure script either, so if
++something is wrong, make will fail.
++
++# make ibsh
++# make ibsh_install
++
++Optionally:
++
++# make clean
++
++
++To uninstall ibsh:
++
++# make ibsh_uninstall
++
++
++Of course you will have to enable this shell by:
++# echo /bin/ibsh >> /etc/shells
++or however you like it.
++And make sure the permissions read 0755 !
++
++2005.03.24.
+diff --git a/base/cgcs-users/cgcs-users-1.0/Makefile b/base/cgcs-users/cgcs-users-1.0/Makefile
+new file mode 100644
+index 0000000..ed37d00
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/Makefile
+@@ -0,0 +1,56 @@
++# This is the makefile for ibsh 0.3e
++CC = gcc
++OBJECTS = main.o command.o jail.o execute.o config.o misc.o antixploit.o delbadfiles.o
++
++ibsh: ${OBJECTS} ibsh.h
++      ${CC} -o ibsh ${OBJECTS}
++
++main.o: main.c ibsh.h
++      ${CC} -c main.c
++
++command.o: command.c ibsh.h
++      ${CC} -c command.c
++
++jail.o: jail.c ibsh.h
++      ${CC} -c jail.c
++
++execute.o: execute.c ibsh.h
++      ${CC} -c execute.c
++
++config.o: config.c ibsh.h
++      ${CC} -c config.c
++
++misc.o: misc.c ibsh.h
++      ${CC} -c misc.c
++
++antixploit.o: antixploit.c ibsh.h
++      ${CC} -c antixploit.c
++
++delbadfiles.o: delbadfiles.c ibsh.h
++      ${CC} -c delbadfiles.c
++
++ibsh_install:
++      cp ./ibsh /bin/
++      mkdir /etc/ibsh
++      mkdir /etc/ibsh/cmds
++      mkdir /etc/ibsh/xtns
++      cp ./globals.cmds /etc/ibsh/
++      cp ./globals.xtns /etc/ibsh/
++
++ibsh_uninstall:
++      rm -rf /etc/ibsh/globals.cmds
++      rm -rf /etc/ibsh/globals.xtns
++      rm -rf /etc/ibsh/cmds/*.*
++      rm -rf /etc/ibsh/xtns/*.*
++      rmdir /etc/ibsh/cmds
++      rmdir /etc/ibsh/xtns
++      rmdir /etc/ibsh
++      rm -rf /bin/ibsh
++
++clean:
++      rm -rf ibsh
++      rm -rf *.o
++
++
++# 13:49 2005.04.06.
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/README b/base/cgcs-users/cgcs-users-1.0/README
+new file mode 100644
+index 0000000..2035e57
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/README
+@@ -0,0 +1,29 @@
++      Iron Bars SHell - a restricted interactive shell.
++
++Overview
++
++      For a long time I searched for a decent restricted shell, but in vain.
++      The few I found were really easy to hack, and there were quite a few docs
++      around on the web about hacking restricted shells with a menu interface.
++      By my definition, a restricted shell must not only prevent the user from
++      escaping her jail, but also from accessing any files outside the jail.
++      The system administrator must have total control over the restricted shell.
++      These are the major features incorporated and realized by ibsh.
++
++
++Features
++
++      Please read the changelog.      
++
++
++Installation
++
++      Read the INSTALL file.
++
++
++Contact
++      See Authors file.
++
++
++Attila Nagyidai
++2005.05.23.
+diff --git a/base/cgcs-users/cgcs-users-1.0/Release b/base/cgcs-users/cgcs-users-1.0/Release
+new file mode 100644
+index 0000000..e6cb9f3
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/Release
+@@ -0,0 +1,17 @@
++This release introduces minor bugfixes, and important new and renewed features.
++Erasing evil files in the home directory of the user is incorporated again, with 
++many improvements. First of all: no file will be erased! Only the access to them
++will be blocked. The extension policy has changed, now ibsh blocks those extensions,
++that are NOT listed. This goes in sync with the usual method of operation of ibsh.
++The execute permission of files in the user space, will be removed.
++New customizing features were added: each user now can have her own commands and 
++extensions file, created and maintained by the system administrator. Some users
++(employees) may require access to special programs. User configuration files allow
++this access only to those who need it, not to everybody.
++Ibsh now scans not only the extensions of files, but their content too! Whatever
++permissions a certain file has, if it contains source code or is a Linux binary,
++access will be blocked.
++The absolute path for the users is now limited to 255 characters. Longer, already
++existing filenames will be renamed.
++
++06/04/2005 
+diff --git a/base/cgcs-users/cgcs-users-1.0/TODO b/base/cgcs-users/cgcs-users-1.0/TODO
+new file mode 100644
+index 0000000..9a8de60
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/TODO
+@@ -0,0 +1,10 @@
++TODO
++
++      - tab completion.
++      - shell variables.
++      - some changes to the prompt, maybe variable prompt.
++      - history
++      - to be able to use corporate, or other large/complicated programs in a safe
++      working environment, yet be able to share files/work with others.
++
++2005.05.23.
+diff --git a/base/cgcs-users/cgcs-users-1.0/VERSION b/base/cgcs-users/cgcs-users-1.0/VERSION
+new file mode 100644
+index 0000000..aaf9552
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/VERSION
+@@ -0,0 +1 @@
++IBSH v0.3e
+diff --git a/base/cgcs-users/cgcs-users-1.0/antixploit.c b/base/cgcs-users/cgcs-users-1.0/antixploit.c
+new file mode 100644
+index 0000000..79ac9e4
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/antixploit.c
+@@ -0,0 +1,131 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++
++void lshift( char *line )
++{
++  int i = 0;
++  
++  for (i=0; i<strlen(line) - 1; i++) {
++      line[i] = line[i+1];
++  }
++}
++
++
++/* Checks if a part of the command line contains */
++/* a file in the jail, that is a source code. */
++/* Source codes must not be compiled, interpreted, */
++/* or otherwise used. */
++/* abspath is loggedin.udir, if the token is a path. */
++/* otherwise abspath is realpath. */
++int antixploit( const char *abspath, char *token )
++{
++  char jailfile[STRING_SIZE], line[LINE_SIZE], temp[BUFFER_SIZE];
++  struct stat info;
++  FILE *fp;
++  int retval = 0;
++
++  
++  snprintf(jailfile, STRING_SIZE, "%s/%s/%s", loggedin.udir, abspath, token);
++#ifdef DEBUG
++  printf("jailfile: %s\n", jailfile);
++#endif
++  if ( (lstat(jailfile, &info)) == -1 ) {
++      // this is not a file
++      return 0;
++  }
++
++  fp = fopen(jailfile, "rb");
++  if ( fp == NULL ) {
++      // if i cant open it, gcc cant open it
++      return 0;
++  }
++
++  fgets(line, LINE_SIZE, fp);
++  while ( line[0] != '\0' ) {
++#ifdef DEBUG
++      printf("Line: %s\n", line);
++#endif
++      // c, c++
++      if ( (strncmp(line, C_CODE, strlen(C_CODE) - 1)) == 0 ) {
++          retval = 1;
++          break;
++      }
++      // perl, sh, python
++      if ( (strncmp(line, SHELL_CODE, strlen(SHELL_CODE) - 1)) == 0 ) {
++          retval = 1;
++          break;
++      }
++      // python
++      if ( (strncmp(line, PYTHON_CODE, strlen(PYTHON_CODE) - 1)) == 0 ) {
++          retval = 1;
++          break;
++      }
++      // ada
++      if ( (strncmp(line, ADA_CODE, strlen(ADA_CODE) - 1)) == 0 ) {
++          retval = 1;
++          break;
++      }
++      // eiffel
++      if ( (strncmp(line, EIFFEL_CODE, strlen(EIFFEL_CODE) - 1)) == 0 ) {
++          retval = 1;
++          break;
++      }
++      // lisp
++      if ( (strncmp(line, LISP_CODE, strlen(LISP_CODE) - 1)) == 0 ) {
++          retval = 1;
++          break;
++      }
++      // elf
++      lshift(line);
++      if ( (strncmp(line, ELF_CODE, strlen(ELF_CODE) - 1)) == 0 ) {
++          retval = 1;
++          break;
++      }
++      bzero(line, LINE_SIZE);
++      fgets(line, LINE_SIZE, fp);
++  }
++  fclose(fp);
++#ifdef DEBUG
++  printf("retval: %d\n", retval);
++#endif
++  return retval;
++}
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/command.c b/base/cgcs-users/cgcs-users-1.0/command.c
+new file mode 100644
+index 0000000..91d9d8f
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/command.c
+@@ -0,0 +1,209 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++extern Strng commands[MAX_ITEMS];
++
++/* This is my version of scanf. It offers good protection against */
++/* buffer overflow attacks. */
++/* Technical Description: */
++/* Variables: the char to read to from stdin, the sum of chars typed in, */
++/* and an integer for length checking. */
++/* Read characters from stdin in a loop, until Enter is pressed. */
++/* The line size check wont stop accepting characters, but it will */
++/* break long commands to LINE_SIZEd pieces. */
++/* Update: */
++/* Read only 80 characters. If the current path + this 80 chars + / */
++/* are longer then 255, read less chars. */
++void myscanf( char *vptr, char *abspath )
++{
++    int c;
++              char chars[STRING_SIZE];
++    int i = 0, j = 0;
++    int linesize = LINE_SIZE;
++
++              fflush(stdin);
++    if ( (strlen(abspath)) > (255 - 80 - 1) ) {
++        linesize = 255 - strlen(abspath) - 1; /* thats for the / */
++    }
++
++    while ( ((c = getchar()) != 10) && ( i < linesize ) ) {
++                              //printf("%d", c);
++                              if  (c > 127) {
++                                               c = 0;
++                                              //ungetc(c, stdin);
++                                              //fflush(stdin);
++                                              break;
++                              }
++            if ( c < 0 ) {
++                ungetc(c, stdin);
++                openlog("ibsh", LOG_PID, LOG_AUTH);
++            syslog(LOG_INFO, "user %s has logged out.", loggedin.uname);
++            closelog();
++            exit(0);
++            }
++        chars[i] = c;
++        /* the user is not allowed to pass long lines of trash to ibsh */
++        i++;
++    }
++    chars[i] = '\0';
++   strncpy(vptr,chars,STRING_SIZE-1);
++   vptr[STRING_SIZE-1] = '\0';
++}
++
++
++/* Checks, if the user command, is blacklisted, a hack attempt, */
++/* or it is a real and allowed command. */
++/* Technical Description: */
++/* Variables: pointer for the strtok, temporary strings, counters, */
++/* and an integer to check, how deep is the user in the jail. That is */
++/* the level of subdirectories below jail root. It is set to -1, because */
++/* there is always a / in the jailpath. So only subdir /'s are counted. */
++/* Check if the command contains special characters, if yes, quit. */
++/* If the user is in jailroot, a ../ is not appropriate! */
++/* Count the slashes in the jailpath, so if the user uses way too many */
++/* ../ 's, then we will know. Split the command to particles by the spaces. */
++/* Count the dirups (../); if a token starts with a /, paste the homedir path */
++/* right in front of it. Thats your root booby, not / !!!! */
++/* Finally check the command against the COMMANDS_LIST. */
++int CommandOK( const char *thecommand, const char *rootdir,
++const char *jailpath, char *newcommand )
++{
++  char *tok;
++  char temp1[STRING_SIZE], *temp2;
++  int i = 0, j = 0;
++  int subdirlevel = -1; /* jailpath always starts with a / */
++  int dirupfound = 0;
++  int listed = 0;
++
++  /* First, get the fancy stuff: */
++  /* ../ out of the jailroot, too many ../ out of some */
++  /* subdirectory in the jail, multiple commands, pipes. */
++  bzero(newcommand,STRING_SIZE);
++
++  if ( (strstr(thecommand, ";")) != NULL ) {
++      return 0;
++  }
++  if ( (strstr(thecommand, "|")) != NULL ) {
++      return 0;
++  }
++  if ( (strstr(thecommand, "&")) != NULL ) {
++      return 0;
++  }
++  if ( (strstr(thecommand, "&&")) != NULL ) {
++      return 0;
++  }
++  if ( (strstr(thecommand, "||")) != NULL ) {
++      return 0;
++  }
++  /* The user is in the jailroot. */
++  if ( (strcmp(jailpath, "/")) == 0 ) {
++      /* Does the user wish to get out ? */
++      if ( (strstr(thecommand, "..")) != NULL ) {
++             return 0;
++      }
++  }
++  /* The user is deeper, than the jailroot, and */
++  /* this is a problem. How deep is he, how many */
++  /* ../ do we allow ?? */
++  else {
++      for (i = 0; i < strlen(jailpath); i++) {
++          if ( jailpath[i] == '/' ) {
++              subdirlevel++;
++          }
++      }
++  }
++
++  /* Split the command */
++  for (tok = strtok((void *) thecommand, " "); tok; tok = strtok(0, " ")) {
++      /* Separate parts of the command with a space */
++      if ( (strlen(newcommand)) > 0 ) {
++          strncat(newcommand," ", STRING_SIZE-strlen(newcommand)-1);
++      }
++      
++      /* He wants to get to the real root, does he ? */
++      /* In that case, add the jailroot to the left. */
++      if ( tok[0] == '/' ) {
++        strncat(newcommand,rootdir,STRING_SIZE-strlen(newcommand)-1);
++      }
++
++      /* how many ../ are here */
++      /* if too many, that is more, then how deep */
++      /* the user in the subdirs inside the jail is, */
++      /* cancel the execution of the command. */
++      if ( (strstr(tok, "../")) != NULL ) {
++        strncpy(temp1,tok,sizeof(temp1)-1);
++        temp1[sizeof(temp1)-1] = '\0';
++      
++      while (1) {
++              temp2 = strstr(temp1, "../");
++              if ( temp2 == NULL ) {
++                  break;
++              }
++              LTrim3(temp2, temp1);
++              dirupfound++;
++          }
++          if ( dirupfound > subdirlevel ) {
++              return 0;
++          }
++          /* replace dirups with real path */
++          for (i = 0; i < dirupfound; i++) {
++              PathMinusOne(jailpath, tok, subdirlevel,sizeof(tok));
++          }
++      }
++          /* if command is not listed, return 0 */
++      i = 0;
++      while ( ((strlen(commands[i])) > 0) && ( j == 0 ) ) {
++      if ( (strcmp(tok, commands[i])) == 0 ) {
++                listed = 1;
++                break;
++          }
++          i++;
++      }
++      j++;
++      strncat(newcommand,tok,STRING_SIZE-strlen(newcommand)-1);
++ 
++}
++#ifdef DEBUG
++  printf("old: %s; new: %s; ok: %d\n", thecommand, newcommand, listed);
++#endif
++  return listed;
++}
++
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/config.c b/base/cgcs-users/cgcs-users-1.0/config.c
+new file mode 100644
+index 0000000..8e2af23
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/config.c
+@@ -0,0 +1,179 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++extern Strng commands[MAX_ITEMS];
++extern Strng extensions[MAX_ITEMS];
++
++/* Shy's improved version of the original (and not well working) loadconfig. */
++/* Reads both config files, and parses the contents into arrays. */
++/* This one effectively dismisses every comment from the files. */
++/* Technical Description: */
++/* Variables: file pointer, counters, temporary string arrays. */
++/* The method is the same for both files. First open the file, catch */
++/* any errors. Read file 'til eof. Not read comments (starting with '#'), */
++/* remove trailing newline character. Copy the finished item to the */
++/* pass-by-address arguments. */
++int LoadConfig( void )
++{
++  FILE *fp;
++  int i = 0;
++  char *file_user;
++  
++  Strng tmp[MAX_ITEMS];
++  Strng tmp2[MAX_ITEMS];
++
++  /* COMMAND CONFIG !!!! */
++  file_user = (char *)malloc(strlen(loggedin.uname) + strlen(COMMANDS_DIR) + strlen(".cmds") + 2);
++
++  if(loggedin.uname != NULL)
++      sprintf(file_user, "%s/%s.cmds", COMMANDS_DIR, loggedin.uname);
++  else{
++        free(file_user);
++        return -1;
++  }
++
++  /* Open global config,if not present go out !!! */
++  if((fp = fopen(COMMANDS_FILE,"r")) == NULL) {
++      OPENLOG;
++      syslog(LOG_ERR, "ibsh panic! Global commands file %s can not be read.", COMMANDS_FILE);
++      CLOSELOG;
++      exit(0);
++  }
++
++  while (!feof(fp) && (i<MAX_ITEMS)) {
++        fgets(tmp[i],STRING_SIZE,fp);
++        if ( tmp[i][0] != '#' ) {
++                /* Delete '\n' */
++                tmp[i][strlen(tmp[i]) - 1] = '\0';
++                strncpy(commands[i],tmp[i],strlen(tmp[i]));
++      
++#ifdef DEBUG
++                printf("COMMANDS %s\n",commands[i]);
++#endif
++              i++;
++      }
++  }
++  fclose(fp);
++
++
++#ifdef DEBUG
++  printf("FILE USER %s\n",file_user);
++#endif
++  /* Add the user command */
++  if ((fp = fopen(file_user,"r")) == NULL) {
++            free(file_user);
++      }
++  else {
++  i--;
++  
++  while (!feof(fp) && (i<MAX_ITEMS)) {
++        fgets(tmp[i],STRING_SIZE,fp);
++        if ( tmp[i][0] != '#' ) {
++                // Delete '\n'
++                tmp[i][strlen(tmp[i]) - 1] = '\0';
++                strncpy(commands[i],tmp[i],strlen(tmp[i]));
++#ifdef DEBUG
++                printf("COMMANDS %s\n",commands[i]);
++#endif
++
++              i++;
++      }
++  }
++  fclose(fp);
++  free(file_user);
++  }
++
++  i = 0;
++  
++  /* EXTENSIONS CONFIG !!!!*/
++  
++  file_user = (char *)malloc(strlen(loggedin.uname) + strlen(EXTENSIONS_DIR) + strlen(".xtns") + 2);
++  
++  sprintf(file_user, "%s/%s.xtns", EXTENSIONS_DIR, loggedin.uname);
++
++    /* Open global config,if not present go out !!! */
++  if((fp = fopen(EXTENSIONS_FILE,"r")) == NULL) {
++      OPENLOG;
++      syslog(LOG_ERR, "ibsh panic! Global extensions file %s can not be read.", EXTENSIONS_FILE);
++      CLOSELOG;
++      printf("heyxtns");
++      exit(0);
++  }
++
++  while (!feof(fp) && (i<MAX_ITEMS)) {
++    fgets(tmp2[i],STRING_SIZE,fp);
++        if ( tmp2[i][0] != '#' ) {
++                /* Delete '\n' */
++                tmp2[i][strlen(tmp2[i]) - 1] = '\0';
++                strncpy(extensions[i],tmp2[i],strlen(tmp2[i]));
++#ifdef DEBUG
++                printf("EXTENSIONS %s\n",extensions[i]);
++#endif
++              i++;
++      }
++  }
++  fclose(fp);
++
++  
++  /* Add the user extensions */
++  if ((fp = fopen(file_user,"r")) == NULL) {
++            free(file_user);
++            return 0;
++      }
++
++  i--;
++  
++  while (!feof(fp) && (i<MAX_ITEMS)) {
++        fgets(tmp2[i],STRING_SIZE,fp);
++        if ( tmp2[i][0] != '#' ) {
++                // Delete '\n'
++                tmp2[i][strlen(tmp2[i]) - 1] = '\0';
++                strncpy(extensions[i],tmp2[i],strlen(tmp2[i]));
++              i++;
++      }
++  }
++  fclose(fp);
++  free(file_user);
++  
++  return 0;
++}
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/delbadfiles.c b/base/cgcs-users/cgcs-users-1.0/delbadfiles.c
+new file mode 100644
+index 0000000..fc09f4b
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/delbadfiles.c
+@@ -0,0 +1,239 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++extern Strng extensions[MAX_ITEMS];
++
++/* witzy's fix of the DelBadFiles function, making it work and adding features.
++   It drops rights of the files which don't end with an extension present into the extensions array,
++   deletes symbolic links pointing out of the jail, and makes unexecutable the files that are executable.
++   A message is printed each time such a file is found, explaining what was done.
++   Don't forget that this function chdirs to the path given into parameter, so when DelBadFiles returns,
++   your current working directory is this path.
++*/
++void DelBadFiles (const char *basedir)
++{
++  DIR *dp;
++  struct dirent *list;
++  struct stat info, attr;
++  char tmp[STRING_SIZE];
++  int i, allowed;
++
++  strncpy(tmp, basedir, STRING_SIZE - 1);
++  if ( lstat( tmp, &info ) == -1 ) {
++      return;
++  }
++
++  if ( !S_ISDIR(info.st_mode) ) {
++      return;
++  }
++
++  if ( (dp = opendir(tmp)) == NULL ) {
++          closedir( dp );
++      return;
++  }
++
++  if ( chdir(tmp) == -1 ) {
++      return;
++  }
++
++  while ( (list = readdir(dp)) != NULL ) {
++#ifdef DEBUG
++    printf("direntry: %s\n", list->d_name);
++#endif
++    if ( (lstat(list->d_name, &attr)) < 0 )
++      continue;
++    
++    // rename long path names
++#ifdef DEBUG
++    printf("length: %d;\n", (strlen(basedir) + strlen(list->d_name) + 2) );
++#endif
++    if ( (strlen(basedir) + strlen(list->d_name) + 2) > 255 ) {
++      snprintf(tmp, 255 - strlen(basedir) - 2, "%s", list->d_name);
++      rename(list->d_name, tmp);
++#ifdef DEBUG
++      printf("%s renamed to %s !\n", list->d_name, tmp);
++#endif
++      if ( (antixploit(basedir, tmp)) == 1 ) {
++      removeAllRights(list->d_name, &attr);
++      }
++      if (isExecutable(&attr)) {
++      makeUnexecutable(tmp, &attr);
++      }
++      continue;
++    }
++
++    if ( S_ISDIR(attr.st_mode) ) { /* in the case of a directory */
++      if ( ((strcmp(list->d_name, ".")) != 0) && ((strcmp(list->d_name, "..")) != 0) ) {
++#ifdef DEBUG
++      printf("recursive call for %s\n", list->d_name);
++#endif
++      DelBadFiles(list->d_name); /* recursively look for bad files in this directory */
++      chdir ("..");
++      }
++    } else if ( S_ISLNK(attr.st_mode) ) { /* in the case of a symlink */
++      if ( symlinkGoesOuttaJail(list->d_name) ) {
++#ifdef DEBUG
++      printf("symlinkoutofjail: %s\n", list->d_name);
++#endif
++      if (unlink(list->d_name) == 0) {
++        bzero (tmp, sizeof(tmp));
++        snprintf (tmp, sizeof(tmp)-1, "Illegal symbolic link %s was erased. Contact the sysadmin for policy.\n", list->d_name);
++        logPrintBadfile (tmp);
++      }
++      }
++    } else if (hasSomeRwxRights(&attr)) { /* other cases (in particular a file), only if there are some rights on it */
++#ifdef DEBUG
++      printf("%s has some rights\n", list->d_name);
++#endif
++      /* check the runnability of the file */
++      if (isExecutable(&attr)) {
++#ifdef DEBUG
++      printf("%s executable\n", list->d_name);
++#endif
++      if (makeUnexecutable(list->d_name, &attr) == 0) {
++        bzero (tmp, sizeof(tmp));
++        snprintf (tmp, sizeof(tmp)-1, "Executable file %s is not anymore. Contact the sysadmin for policy.\n", list->d_name);
++        logPrintBadfile (tmp);
++      }
++      }
++      
++      if ( (antixploit(basedir, list->d_name)) == 1 ) {
++      if (removeAllRights(list->d_name, &attr) == 0) {
++        bzero (tmp, sizeof(tmp));
++        snprintf (tmp, sizeof(tmp)-1, "Illegal file %s got its rights dropped. Contact the sysadmin for policy.\n", list->d_name);
++        logPrintBadfile (tmp);
++        continue;
++      }
++      }
++      
++      /* check if the file has a permitted extension */
++      for (i = 0, allowed = 0; (strlen(extensions[i])) > 0 && !allowed; i++) {
++      if ( (strstr(list->d_name, extensions[i])) != NULL ) {
++#ifdef DEBUG
++        printf("filename: %s; extension: %s\n", list->d_name, extensions[i]);
++#endif
++        allowed = 1;
++      }
++      } /* for */
++      if (!allowed) { /* if the file hasn't an allowed extension */
++#ifdef DEBUG
++      printf("not allowed extension for %s\n", list->d_name);
++#endif
++      if (removeAllRights(list->d_name, &attr) == 0) {
++        bzero (tmp, sizeof(tmp));
++        snprintf (tmp, sizeof(tmp)-1, "Illegal file %s got its rights dropped. Contact the sysadmin for policy.\n", list->d_name);
++        logPrintBadfile (tmp);
++      }
++      }
++    } /* else */
++
++  } /* while */
++  
++  closedir( dp );
++}
++
++/* takes a symlink location, resolves it and returns :
++   1 if the symlink points out of the jail
++   0 else, meaning the symlink is ok
++*/
++int symlinkGoesOuttaJail (const char * sl)
++{
++  char fPnted[PATH_MAX];
++  char rslvdPath[PATH_MAX]; /* size of PATH_MAX because of realpath() behavior */
++  int i;
++  
++  i = readlink (sl, fPnted, PATH_MAX);
++  if ( i > 0 && i < PATH_MAX ) {
++    fPnted[i] = '\0';
++    if (realpath (fPnted, rslvdPath) == rslvdPath) {
++      if ( strncmp (loggedin.udir, rslvdPath, strlen(loggedin.udir)) == 0 )
++      return 0;
++      else
++      return 1;
++    }
++  }
++  return 1; /* if this line is reached, there was a problem with the processing of the symlink, 
++             e.g. the path is too long, so we should consider that the symlink is bad, 
++             and may be deleted by the calling function */
++}
++
++/* takes a stat structure, and returns
++   1 if at least one of the user/group/other execution bits or suid/guid are set
++   0 if no such bit is set at all
++ */
++int isExecutable (struct stat * s)
++{
++  if ( ((s->st_mode & S_IXUSR) == S_IXUSR)
++       | ((s->st_mode & S_IXGRP) == S_IXGRP)
++       | ((s->st_mode & S_IXOTH) == S_IXOTH)
++       | ((s->st_mode & S_ISUID) == S_ISUID)
++       | ((s->st_mode & S_ISGID) == S_ISGID) )
++    return 1;
++  return 0;
++}
++
++int hasSomeRwxRights (struct stat * s)
++{
++  if ( ((s->st_mode & S_IRWXU) != 0)
++       | ((s->st_mode & S_IRWXG) != 0)
++       | ((s->st_mode & S_IRWXO) != 0) )
++    return 1;
++  return 0;
++}
++
++int makeUnexecutable (const char * filename, struct stat * s)
++{
++  return chmod (filename,
++              s->st_mode & ~(S_IXUSR | S_IXGRP | S_IXOTH | S_ISUID | S_ISGID) );
++}
++
++int removeAllRights (const char * filename, struct stat * s)
++{
++  return chmod (filename, 
++              s->st_mode & ~(S_IRWXU | S_IRWXG | S_IRWXO | S_ISUID | S_ISGID) );
++}
++
++void logPrintBadfile (const char * msg)
++{
++  OPENLOG;
++  syslog(LOG_WARNING, "%s", msg);
++  CLOSELOG;
++  //  printf ("ibsh: %s\n", msg);
++}
+diff --git a/base/cgcs-users/cgcs-users-1.0/example.allowall.xtns b/base/cgcs-users/cgcs-users-1.0/example.allowall.xtns
+new file mode 100644
+index 0000000..d0963cd
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/example.allowall.xtns
+@@ -0,0 +1,28 @@
++# Add any extension the user may use.
++q
++w
++e
++r
++t
++y
++u
++i
++o
++p
++a
++s
++d
++f
++g
++h
++j
++k
++l
++z
++x
++c
++v
++b
++n
++m
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/example.denyall.xtns b/base/cgcs-users/cgcs-users-1.0/example.denyall.xtns
+new file mode 100644
+index 0000000..9dead3a
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/example.denyall.xtns
+@@ -0,0 +1,2 @@
++# Add any extension the user may use.
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/execute.c b/base/cgcs-users/cgcs-users-1.0/execute.c
+new file mode 100644
+index 0000000..2d80366
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/execute.c
+@@ -0,0 +1,159 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++/* Counts the spaces in the command */
++int nbspace(const char *command)
++{
++int i=0;
++int nbspace=0;
++
++while(command[i] != '\0'){
++      if(command[i] == ' ')
++              nbspace++;
++      i++;
++}
++
++return nbspace;
++}
++
++/* Shy's improved and secured version of hhsystem, originally taken from */
++/* the book: Linux Unix Systemprogramming by Helmut Herold. */
++int  hhsystem(const char *user_command)   /*--- Version ohne Signalbehandlung ---*/
++{
++   pid_t   pid;
++   int     status;
++   int i=0;
++   int find = 0;
++
++   char *field;
++
++   char path[STRING_SIZE];
++
++   char *current_path;
++   char *fieldspath;
++   char *params[nbspace(user_command) + 1];
++
++   DIR *currentdir;
++   struct dirent *pdirent;
++
++   if (user_command == NULL)
++      return(1);   /* In Unix ist immer Kommandoprozessor vorhanden */
++
++   if ( (pid=fork()) < 0)
++      status = -1;
++
++   else if (pid == 0) {
++       /* Split the command */
++      field = strtok((char *)user_command," ");
++      while(field != NULL){
++#ifdef DEBUG
++              printf("CHAMPS %s\n",field);
++#endif
++              params[i] = malloc(strlen(field) + 1);
++              bzero(params[i],strlen(field)+1);
++              strncpy(params[i],field,strlen(field));
++              i++;
++              field = strtok(NULL," ");
++
++      }
++      /* Put NULL at the end for execve */
++      params[i] = NULL;
++
++      /* Get PATH */
++      current_path = getenv("PATH");
++
++#ifdef DEBUG
++      printf("PATH %s %s\n",current_path,loggedin.udir);
++#endif
++
++      /* Parse the PATH if the command is in the home dir it's skip !! */
++        fieldspath = strtok((char *)current_path,":");
++              while((fieldspath != NULL) && find != 1){
++#ifdef DEBUG
++                      printf("FIELD PATH %s\n",fieldspath);
++#endif
++                      if(!strstr(fieldspath,loggedin.udir)){
++                              if((currentdir = opendir(fieldspath)) != NULL){
++
++                                      while(((pdirent = readdir(currentdir)) != NULL) && find != 1){
++                                              if(!strncmp(pdirent->d_name,params[0],sizeof(params[0]))){
++#ifdef DEBUG
++                                              printf("TROUVE %s!!!!\n",pdirent->d_name);
++#endif
++                                              find = 1;
++
++                                      }
++                              }
++                      }
++                              closedir(currentdir);
++                      }
++                      if(find == 0)
++                              fieldspath = strtok(NULL,":");
++                      
++              }
++
++      /* Construct the real command with the correct path */
++      if(find == 1 && ((strlen(fieldspath)+strlen(params[0])+1) < sizeof(path))){
++              bzero(path,sizeof(path));
++              snprintf(path,sizeof(path)-1,"%s/%s",fieldspath,params[0]);
++              path[sizeof(path)-1] = '\0';
++
++#ifdef DEBUG
++    printf("PATH FINAL %s %d ok!\n",path,strlen(path));
++              printf("PARAMS[0] %s\n",params[0]);
++              printf("PARAMS[1] %s\n",params[1]);
++#endif
++              execve(path,params,environ);
++      }
++      /* The command is in the home dir :( bad for you guys !! */
++      else{
++              status = -1;
++      }
++      _exit(127);
++
++   } else
++      while (waitpid(pid, &status, 0) < 0)
++         if (errno != EINTR) {
++            status = -1;
++            break;
++         }
++
++   return(status);
++}
+diff --git a/base/cgcs-users/cgcs-users-1.0/globals.cmds b/base/cgcs-users/cgcs-users-1.0/globals.cmds
+new file mode 100644
+index 0000000..8c9b7a4
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/globals.cmds
+@@ -0,0 +1,8 @@
++# Add any commands the user may execute. Even shell commands.
++# You have to allow logout and/or exit, so the user can logout!
++# cd and pwd should also be allowed. Note: other shell builtin
++# commands are not yet implemented!
++cd
++pwd
++logout
++exit
+diff --git a/base/cgcs-users/cgcs-users-1.0/globals.xtns b/base/cgcs-users/cgcs-users-1.0/globals.xtns
+new file mode 100644
+index 0000000..71f86f8
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/globals.xtns
+@@ -0,0 +1,3 @@
++# Add any extension the user may use.
++.doc
++.txt
+diff --git a/base/cgcs-users/cgcs-users-1.0/ibsh.h b/base/cgcs-users/cgcs-users-1.0/ibsh.h
+new file mode 100644
+index 0000000..9d9d692
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/ibsh.h
+@@ -0,0 +1,126 @@
++/*
++  Created: 03.19.05 11:15:21 by Attila Nagyidai
++
++  $Id: C\040Header.h,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++#ifndef _IBSH_H
++#define _IBSH_H
++
++/* Insert Code here */
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <time.h>
++#include <unistd.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <sys/wait.h>
++#include <syslog.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <dirent.h>
++#include <pwd.h>
++#include <grp.h>
++#include <limits.h>
++#include <glob.h>
++#include <signal.h>
++
++#define PAM_SIZE    8
++#define LINE_SIZE   80
++#define STRING_SIZE 255
++#define BUFFER_SIZE 4096
++#define PATH_MAX 4096
++#define MAX_ITEMS   50
++#define COMMANDS_DIR "/etc/ibsh/cmds"
++#define COMMANDS_FILE "/etc/ibsh/globals.cmds"
++#define EXTENSIONS_DIR "/etc/ibsh/xtns"
++#define EXTENSIONS_FILE "/etc/ibsh/globals.xtns"
++
++/* Antixploit */
++#define C_CODE  "#include"
++#define SHELL_CODE  "#!/"
++#define PYTHON_CODE "import"
++#define ADA_CODE  "package body"
++#define EIFFEL_CODE "feature --"
++#define LISP_CODE "(defun"
++#define ELF_CODE "ELF"
++
++/* Logging */
++#define OPENLOG     openlog("ibsh", LOG_PID, LOG_AUTH)
++#define CLOSELOG    closelog()
++
++/* Typedefs, structs, globals */
++typedef struct theuser {
++    char uname[STRING_SIZE];
++    uid_t uid;
++    char udir[STRING_SIZE];
++    struct passwd *record;
++} theuser;
++
++typedef char Strng[STRING_SIZE];
++
++theuser loggedin; /* user info */
++
++//static Strng commands[MAX_ITEMS];  /* permitted commands */
++Strng commands[MAX_ITEMS];
++Strng extensions[MAX_ITEMS];
++/*static Strng extensions[MAX_ITEMS];   permitted extensions */
++char real_path[STRING_SIZE];    /* absolute path */
++char jail_path[STRING_SIZE];    /* path inside the jail */
++char user_command[STRING_SIZE];   /* whatever the user types */
++char filtered_command[STRING_SIZE]; /* this one will be executed */
++int exitcode;
++extern char **environ;
++
++
++int CommandOK( const char *thecommand, const char *rootdir,
++const char *jailpath, char *newcommand );
++void LTrim3( const char *base, char *result );
++void GetPositionInJail( const char *abspath, const char *rootdir, char *relpath );
++int LoadConfig( void );
++void myscanf( char *vptr, char *abspath );
++int  hhsystem(const char *kdozeile);
++void PathMinusOne( const char *basepath, char *evalpath, int slashcount,size_t nevalpath);
++void log_attempt( const char *username );
++int nbspace(const char *command);
++void lshift( char *line );
++int antixploit( const char *abspath, char *token );
++void logPrintBadfile (const char * msg);
++int removeAllRights (const char * filename, struct stat * s);
++int makeUnexecutable (const char * filename, struct stat * s);
++int hasSomeRwxRights (struct stat * s);
++int isExecutable (struct stat * s);
++int symlinkGoesOuttaJail (const char * sl);
++void DelBadFiles (const char *basedir);
++
++
++#endif /* _IBSH_H */
+diff --git a/base/cgcs-users/cgcs-users-1.0/jail.c b/base/cgcs-users/cgcs-users-1.0/jail.c
+new file mode 100644
+index 0000000..ab3300a
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/jail.c
+@@ -0,0 +1,101 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++
++
++/* Remove the path of the Jail root dir from the displayed paths! */
++/* Return the jail path from the absolute path. */
++/* Copy characters from absolute path to jail path, starting, where the */
++/* jail root ends. */
++void GetPositionInJail( const char *abspath, const char *rootdir, char *relpath )
++{
++  int i = 0;
++  int j = 0;
++  
++  bzero(relpath, strlen(relpath));
++  for (i = strlen(rootdir); i < strlen(abspath); i++) {
++      relpath[j] = abspath[i];
++      j++;
++  }
++  relpath[j] = '\0';
++}
++
++/* Take 3 characters from left off a string. */
++/* It practically removes one ../ . */
++void LTrim3( const char *base, char *result )
++{
++  int i = 0;
++  int j = 0;
++
++  bzero(result, strlen(result));
++  for (i = 3; i < strlen(base); i++) {
++      result[j] = base[i];
++      j++;
++  }
++  result[j] = '\0';
++}
++
++/* Remove one subdirectory from the path in the argument. */
++/* In case the user uses ../ 's in his command. */
++/* Technical Description: */
++/* Variables: string pointer for the strtok function, and an */
++/* integer to stop the removing. */
++/* Disassemble the path by the slashes. And glue the required parts */
++/* together. Number of required parts = number of all parts - 1 . */
++void PathMinusOne( const char *basepath, char *evalpath, int slashcount,size_t nevalpath )
++{
++  char *tok;
++  int j = 1;
++  
++  bzero(evalpath, strlen(evalpath));
++  if ( slashcount == 1 ) {
++      strncpy(evalpath,"/",nevalpath-1);
++      evalpath[nevalpath-1] = '\0';
++  }
++  else {
++      for (tok = strtok((void *) basepath, "/"); tok; tok = strtok(0, "/")) {
++          if ( j < slashcount ) {
++              strncat(evalpath,tok,nevalpath-strlen(evalpath)-1);
++               strncat(evalpath,"/",nevalpath-strlen(evalpath)-1);
++          }
++          j++;
++      }
++  }
++}
+diff --git a/base/cgcs-users/cgcs-users-1.0/main.c b/base/cgcs-users/cgcs-users-1.0/main.c
+new file mode 100644
+index 0000000..1d92899
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/main.c
+@@ -0,0 +1,239 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++#include "stdlib.h"
++
++/* Main: */
++/* Handle arguments, read config files, start command processing. */
++/* IBSH doesnt use any command line arguments, but my text editor */
++/* uses this code in all new c files to create. And i didnt have the */
++/* heart to remove it. ;p */
++/* Technical Description: */
++/* Get the passwd entry for the user. The uid is easily aquired, since */
++/* it is the real user id. After that, grab the passwd file entry upon */
++/* the id, and copy the information to the loggedin struct. */
++/* Add some signal handlers too. */
++/* The infinite loop: */
++/* Get the current directory, the full path. Compute the jailpath from that, */
++/* that is the directories below the users homedir, which is the jail root. */
++/* The jail ceiling if you like. Print some prompt to the user with the jailpath, */
++/* and read stdin for incoming commands. Filter out the bad commands, typos, the */
++/* not allowed commands. If the command is ok, execute it. If it is a shell builtin, */
++/* use our builtin code, otherwise use execve. After execve, check if the user didnt */
++/* use the last command to create some illegal content. If yes, erase that. Give the */
++/* notice only afterwards. */
++
++void ALRMhandler(int sig) {
++    OPENLOG;
++    syslog(LOG_INFO, "CLI timeout, user %s has logged out.", loggedin.uname);
++    CLOSELOG;
++    exit(0);
++}
++
++int main(int argc, char **argv)
++{
++  char temp[STRING_SIZE], *buf;
++  struct stat info;
++  uid_t ruid, euid;
++  gid_t rgid, egid;
++  unsigned int tout_cli = 0;
++
++  const char* tout = getenv("TMOUT");
++  if (tout)
++    tout_cli = atoi(tout);
++  else
++    //default to 5 mins
++    tout_cli = 300;
++
++  /* setuid protection */
++  ruid = getuid();
++  euid = geteuid();
++  rgid = getgid();
++  egid = getegid();
++  if ( (ruid!=euid) || (ruid==0) || (euid==0) || (rgid!=egid) || (rgid==0) || (egid==0) ) {
++      OPENLOG;
++      syslog(LOG_ERR, "setuid/setgid violation!");
++      CLOSELOG;
++      printf("ibsh: setuid/setgid violation!! exiting...\n");
++#ifdef DEBUG
++      printf("ruid: %d;euid: %d;rgid: %d;egid: %d\n", ruid,euid,rgid,egid);
++#endif
++      exit(0);
++  }
++
++  /* To Do: The code of your application goes here */
++  /* First part: */
++  /* Get essential information about the user who got this shell: */
++  /* first the username, then the user id. Upon this, retrieve the */
++  /* user's record in the passwd file. */
++  bzero(&loggedin, sizeof(loggedin));
++  loggedin.uid = getuid();
++  loggedin.record = getpwuid(loggedin.uid);
++  if ( loggedin.record == NULL ) {
++      loggedin.record = getpwnam(loggedin.uname);
++      if ( loggedin.record == NULL ) {
++          openlog(loggedin.uname, LOG_PID, LOG_AUTH);
++          syslog(LOG_ERR, "Can not obtain user information");
++          closelog();
++          exit(0);
++      }
++  }
++  strncpy(loggedin.uname, loggedin.record->pw_name, PAM_SIZE);
++  strncpy(loggedin.udir, loggedin.record->pw_dir, STRING_SIZE);
++
++  /* Second part: */
++  /* Handle some signal catching. Read the configuration files. */
++  signal( SIGINT, SIG_IGN );
++  signal( SIGQUIT, SIG_IGN );
++  signal( SIGTERM, SIG_IGN );
++  signal( SIGTSTP, SIG_IGN );
++  signal( SIGALRM, ALRMhandler );
++  LoadConfig();
++
++  /* Command mode */
++  if(argc == 3) {
++      if ( argv[1][1] == 'c' ) {
++          if ( CommandOK(argv[2], loggedin.udir, "/", filtered_command) == 1) {
++                exitcode = hhsystem(filtered_command);
++                OPENLOG;
++                syslog(LOG_INFO, "command %s ordered, command %s has been executed.",
++                argv[2], filtered_command);
++                CLOSELOG;
++                exit(exitcode);
++          }
++          exit(0);
++      }
++      else {
++        exit(0);
++      }
++  }
++
++  OPENLOG;
++  syslog(LOG_INFO, "user %s has logged in.", loggedin.uname);
++  CLOSELOG;
++
++
++  DelBadFiles(loggedin.udir);
++  chdir (loggedin.udir);
++
++
++  /* Third part: */
++  /* Start reading and processing the user issued commands. */
++  /* Split the command by the spaces, filter out anything, */
++  /* that would allow the user to access files outside the */
++  /* jail. Filter out multiples and pipes as well. No program */
++  /* will be allowed to run, unless it is mentioned in the */
++  /* config files. Files that are created with an extension */
++  /* that is listed in the other config file, must be deleted! */
++  alarm(tout_cli);
++  for ( ; ; ) {
++    /* Where is he ? */
++    getcwd(real_path, STRING_SIZE);
++    GetPositionInJail(real_path, loggedin.udir, jail_path);
++    if ( (strlen(jail_path)) == 0 ) {
++        strncpy(jail_path, "/", 2);
++    }
++    /* We don't want the user to know where he actually is. */
++    /* This is the prompt! */
++    printf("[%s]%% ", loggedin.uname);
++    /* scanf("%s", user_command); */
++    myscanf(user_command, real_path);
++    alarm(tout_cli);
++    /* Command interpretation and execution. */
++    if ( (CommandOK(user_command, loggedin.udir, jail_path, filtered_command)) == 0 ) {
++        log_attempt(loggedin.uname);  /* v0.2a */
++        continue;
++    }
++    /* If the user issued command starts with a shell builtin. */
++    bzero(temp, strlen(temp));
++    if ( (buf = strstr(filtered_command, "cd")) != NULL ) {
++      if ( (strcmp(buf, filtered_command)) == 0 ) {
++          LTrim3(filtered_command, temp);
++          if ( (strcmp(temp, real_path)) != 0 ) {
++            if ( (strcmp(temp, "..")) == 0 ) {
++                PathMinusOne(jail_path, temp, 1,sizeof(temp));
++            }
++            if ( (strcmp(temp, "/")) == 0 ) {
++                strncpy(temp, loggedin.udir, LINE_SIZE);
++            }
++            exitcode = chdir(temp);
++            if ( exitcode == -1 ) {
++                printf("ibsh: cd: %s: No such file or directory\n", temp);
++            }
++          }
++          continue;
++      }
++    }
++    else if ( (buf = strstr(filtered_command, "pwd")) != NULL ) {
++     if ( (strcmp(buf, filtered_command)) == 0 ) {
++          printf("%s\n", jail_path);
++          continue;
++      }
++    }
++    else if ( (buf = strstr(filtered_command, "logout")) != NULL ) {
++      if ( (strcmp(buf, filtered_command)) == 0 ) {
++          OPENLOG;
++          syslog(LOG_INFO, "user %s has logged out.", loggedin.uname);
++          CLOSELOG;
++          break;
++      }
++    }
++    else if ( (buf = strstr(filtered_command, "exit")) != NULL ) {
++      if ( (strcmp(buf, filtered_command)) == 0 ) {
++          OPENLOG;
++          syslog(LOG_INFO, "user %s has logged out.", loggedin.uname);
++          CLOSELOG;
++          break;
++      }
++    }
++    else {
++        exitcode = hhsystem(filtered_command);
++        if ( exitcode < 0 ) {
++            printf("%s\n", strerror(errno));
++        }
++    }
++    getcwd(real_path, STRING_SIZE);
++    DelBadFiles(loggedin.udir);
++    chdir (real_path);
++  }
++  return 0;
++}
++
+diff --git a/base/cgcs-users/cgcs-users-1.0/misc.c b/base/cgcs-users/cgcs-users-1.0/misc.c
+new file mode 100644
+index 0000000..d73ddb8
+--- /dev/null
++++ b/base/cgcs-users/cgcs-users-1.0/misc.c
+@@ -0,0 +1,52 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++  
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++/* If the command is not ok, there is a possible hack attempt. */
++/* Can also be a typo, but we're not taking any chances. v0.2a */
++void log_attempt( const char *username )
++{
++  char logmsg[STRING_SIZE];
++
++  snprintf(logmsg, 50, "Possible hack attempt by %s.", username);
++  
++  OPENLOG;
++  syslog(LOG_WARNING, "%s", logmsg);
++  CLOSELOG;
++}
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-integ/files/base/0002-Add-DESTDIR-CFLAGS-and-LDFLAGS.patch b/meta-stx/recipes-core/stx-integ/files/base/0002-Add-DESTDIR-CFLAGS-and-LDFLAGS.patch
new file mode 100644 (file)
index 0000000..46137c5
--- /dev/null
@@ -0,0 +1,132 @@
+From edd5d4208db67d560afd3b5873b6ffc17fd5d79f Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Tue, 2 Jul 2019 14:29:52 -0700
+Subject: [PATCH 2/2] Add DESTDIR, CFLAGS and LDFLAGS.
+
+---
+ base/cgcs-users/cgcs-users-1.0/Makefile | 115 ++++++++++++------------
+ 1 file changed, 59 insertions(+), 56 deletions(-)
+
+diff --git a/base/cgcs-users/cgcs-users-1.0/Makefile b/base/cgcs-users/cgcs-users-1.0/Makefile
+index ed37d00..205f653 100644
+--- a/base/cgcs-users/cgcs-users-1.0/Makefile
++++ b/base/cgcs-users/cgcs-users-1.0/Makefile
+@@ -1,56 +1,59 @@
+-# This is the makefile for ibsh 0.3e
+-CC = gcc
+-OBJECTS = main.o command.o jail.o execute.o config.o misc.o antixploit.o delbadfiles.o
+-
+-ibsh: ${OBJECTS} ibsh.h
+-      ${CC} -o ibsh ${OBJECTS}
+-
+-main.o: main.c ibsh.h
+-      ${CC} -c main.c
+-
+-command.o: command.c ibsh.h
+-      ${CC} -c command.c
+-
+-jail.o: jail.c ibsh.h
+-      ${CC} -c jail.c
+-
+-execute.o: execute.c ibsh.h
+-      ${CC} -c execute.c
+-
+-config.o: config.c ibsh.h
+-      ${CC} -c config.c
+-
+-misc.o: misc.c ibsh.h
+-      ${CC} -c misc.c
+-
+-antixploit.o: antixploit.c ibsh.h
+-      ${CC} -c antixploit.c
+-
+-delbadfiles.o: delbadfiles.c ibsh.h
+-      ${CC} -c delbadfiles.c
+-
+-ibsh_install:
+-      cp ./ibsh /bin/
+-      mkdir /etc/ibsh
+-      mkdir /etc/ibsh/cmds
+-      mkdir /etc/ibsh/xtns
+-      cp ./globals.cmds /etc/ibsh/
+-      cp ./globals.xtns /etc/ibsh/
+-
+-ibsh_uninstall:
+-      rm -rf /etc/ibsh/globals.cmds
+-      rm -rf /etc/ibsh/globals.xtns
+-      rm -rf /etc/ibsh/cmds/*.*
+-      rm -rf /etc/ibsh/xtns/*.*
+-      rmdir /etc/ibsh/cmds
+-      rmdir /etc/ibsh/xtns
+-      rmdir /etc/ibsh
+-      rm -rf /bin/ibsh
+-
+-clean:
+-      rm -rf ibsh
+-      rm -rf *.o
+-
+-
+-# 13:49 2005.04.06.
+-
++# This is the makefile for ibsh 0.3e
++CC = gcc
++OBJECTS = main.o command.o jail.o execute.o config.o misc.o antixploit.o delbadfiles.o
++
++ibsh: ${OBJECTS} ibsh.h
++      ${CC} ${CFLAGS} ${LDFLAGS} -o ibsh ${OBJECTS}
++
++main.o: main.c ibsh.h
++      ${CC} ${CFLAGS} -c main.c
++
++command.o: command.c ibsh.h
++      ${CC} ${CFLAGS} -c command.c
++
++jail.o: jail.c ibsh.h
++      ${CC} ${CFLAGS} -c jail.c
++
++execute.o: execute.c ibsh.h
++      ${CC} ${CFLAGS} -c execute.c
++
++config.o: config.c ibsh.h
++      ${CC} ${CFLAGS} -c config.c
++
++misc.o: misc.c ibsh.h
++      ${CC} ${CFLAGS} -c misc.c
++
++antixploit.o: antixploit.c ibsh.h
++      ${CC} ${CFLAGS} -c antixploit.c
++
++delbadfiles.o: delbadfiles.c ibsh.h
++      ${CC} -c delbadfiles.c
++
++ibsh_install:
++      install -d -m 0755 ${DESTDIR}/bin
++      install -d -m 0755 ${DESTDIR}/etc/cmds
++      install -d -m 0755 ${DESTDIR}/etc/xtns
++      cp ./ibsh ${DESTDIR}/bin/
++      mkdir ${DESTDIR}/etc/ibsh
++      mkdir ${DESTDIR}/etc/ibsh/cmds
++      mkdir ${DESTDIR}/etc/ibsh/xtns
++      cp ./globals.cmds ${DESTDIR}/etc/ibsh/
++      cp ./globals.xtns ${DESTDIR}/etc/ibsh/
++
++ibsh_uninstall:
++      rm -rf ${DESTDIR}/etc/ibsh/globals.cmds
++      rm -rf ${DESTDIR}/etc/ibsh/globals.xtns
++      rm -rf ${DESTDIR}/etc/ibsh/cmds/*.*
++      rm -rf ${DESTDIR}/etc/ibsh/xtns/*.*
++      rmdir ${DESTDIR}/etc/ibsh/cmds
++      rmdir ${DESTDIR}/etc/ibsh/xtns
++      rmdir ${DESTDIR}/etc/ibsh
++      rm -rf ${DESTDIR}/bin/ibsh
++
++clean:
++      rm -rf ibsh
++      rm -rf *.o
++
++
++# 13:49 2005.04.06.
++
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/LICENSE b/meta-stx/recipes-core/stx-integ/files/ibsh/LICENSE
new file mode 100644 (file)
index 0000000..a6a2331
--- /dev/null
@@ -0,0 +1,346 @@
+GNU GENERAL PUBLIC LICENSE
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed.
+
+Preamble
+
+The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+    General Public License applies to most of the Free Software
+    Foundation's software and to any other program whose authors commit to
+    using it.  (Some other Free Software Foundation software is covered by
+    the GNU Library General Public License instead.)  You can apply it to
+    your programs, too.
+
+    When we speak of free software, we are referring to freedom, not
+    price.  Our General Public Licenses are designed to make sure that you
+    have the freedom to distribute copies of free software (and charge for
+    this service if you wish), that you receive source code or can get it
+    if you want it, that you can change the software or use pieces of it
+    in new free programs; and that you know you can do these things.
+
+    To protect your rights, we need to make restrictions that forbid
+    anyone to deny you these rights or to ask you to surrender the rights.
+    These restrictions translate to certain responsibilities for you if you
+    distribute copies of the software, or if you modify it.
+
+    For example, if you distribute copies of such a program, whether
+    gratis or for a fee, you must give the recipients all the rights that
+    you have.  You must make sure that they, too, receive or can get the
+    source code.  And you must show them these terms so they know their
+    rights.
+
+    We protect your rights with two steps: (1) copyright the software, and
+    (2) offer you this license which gives you legal permission to copy,
+    distribute and/or modify the software.
+
+    Also, for each author's protection and ours, we want to make certain
+    that everyone understands that there is no warranty for this free
+        software.  If the software is modified by someone else and passed on, we
+            want its recipients to know that what they have is not the original, so
+            that any problems introduced by others will not reflect on the original
+            authors' reputations.
+
+            Finally, any free program is threatened constantly by software
+            patents.  We wish to avoid the danger that redistributors of a free
+            program will individually obtain patent licenses, in effect making the
+            program proprietary.  To prevent this, we have made it clear that any
+            patent must be licensed for everyone's free use or not licensed at all.
+
+            The precise terms and conditions for copying, distribution and
+                modification follow.
+                \f
+                GNU GENERAL PUBLIC LICENSE
+                TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+                0. This License applies to any program or other work which contains
+                a notice placed by the copyright holder saying it may be distributed
+                under the terms of this General Public License.  The "Program", below,
+                refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+                that is to say, a work containing the Program or a portion of it,
+                either verbatim or with modifications and/or translated into another
+                language.  (Hereinafter, translation is included without limitation in
+                            the term "modification".)  Each licensee is addressed as "you".
+
+                    Activities other than copying, distribution and modification are not
+                    covered by this License;
+they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+    Whether that is true depends on what the Program does.
+
+    1. You may copy and distribute verbatim copies of the Program's
+    source code as you receive it, in any medium, provided that you
+    conspicuously and appropriately publish on each copy an appropriate
+    copyright notice and disclaimer of warranty; keep intact all the
+    notices that refer to this License and to the absence of any warranty;
+    and give any other recipients of the Program a copy of this License
+    along with the Program.
+
+    You may charge a fee for the physical act of transferring a copy, and
+    you may at your option offer warranty protection in exchange for a fee.
+
+    2. You may modify your copy or copies of the Program or any portion
+    of it, thus forming a work based on the Program, and copy and
+    distribute such modifications or work under the terms of Section 1
+    above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+    \f
+    These requirements apply to the modified work as a whole.  If
+    identifiable sections of that work are not derived from the Program,
+    and can be reasonably considered independent and separate works in
+    themselves, then this License, and its terms, do not apply to those
+    sections when you distribute them as separate works.  But when you
+    distribute the same sections as part of a whole which is a work based
+    on the Program, the distribution of the whole must be on the terms of
+    this License, whose permissions for other licensees extend to the
+    entire whole, and thus to each and every part regardless of who wrote it.
+
+    Thus, it is not the intent of this section to claim rights or contest
+    your rights to work written entirely by you; rather, the intent is to
+    exercise the right to control the distribution of derivative or
+    collective works based on the Program.
+
+    In addition, mere aggregation of another work not based on the Program
+    with the Program (or with a work based on the Program) on a volume of
+    a storage or distribution medium does not bring the other work under
+    the scope of this License.
+
+    3. You may copy and distribute the Program (or a work based on it,
+    under Section 2) in object code or executable form under the terms of
+    Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+    The source code for a work means the preferred form of the work for
+    making modifications to it.  For an executable work, complete source
+    code means all the source code for all modules it contains, plus any
+    associated interface definition files, plus the scripts used to
+    control compilation and installation of the executable.  However, as a
+    special exception, the source code distributed need not include
+    anything that is normally distributed (in either source or binary
+    form) with the major components (compiler, kernel, and so on) of the
+    operating system on which the executable runs, unless that component
+    itself accompanies the executable.
+
+    If distribution of executable or object code is made by offering
+    access to copy from a designated place, then offering equivalent
+    access to copy the source code from the same place counts as
+    distribution of the source code, even though third parties are not
+    compelled to copy the source along with the object code.
+    \f
+    4. You may not copy, modify, sublicense, or distribute the Program
+    except as expressly provided under this License.  Any attempt
+    otherwise to copy, modify, sublicense or distribute the Program is
+    void, and will automatically terminate your rights under this License.
+    However, parties who have received copies, or rights, from you under
+    this License will not have their licenses terminated so long as such
+    parties remain in full compliance.
+
+    5. You are not required to accept this License, since you have not
+    signed it.  However, nothing else grants you permission to modify or
+    distribute the Program or its derivative works.  These actions are
+    prohibited by law if you do not accept this License.  Therefore, by
+    modifying or distributing the Program (or any work based on the
+    Program), you indicate your acceptance of this License to do so, and
+    all its terms and conditions for copying, distributing or modifying
+    the Program or works based on it.
+
+    6. Each time you redistribute the Program (or any work based on the
+    Program), the recipient automatically receives a license from the
+    original licensor to copy, distribute or modify the Program subject to
+    these terms and conditions.  You may not impose any further
+    restrictions on the recipients' exercise of the rights granted herein.
+    You are not responsible for enforcing compliance by third parties to
+    this License.
+
+    7. If, as a consequence of a court judgment or allegation of patent
+    infringement or for any other reason (not limited to patent issues),
+                         conditions are imposed on you (whether by court order, agreement or
+                                 otherwise) that contradict the conditions of this License, they do not
+                             excuse you from the conditions of this License.  If you cannot
+                             distribute so as to satisfy simultaneously your obligations under this
+                             License and any other pertinent obligations, then as a consequence you
+                             may not distribute the Program at all.  For example, if a patent
+                             license would not permit royalty-free redistribution of the Program by
+                             all those who receive copies directly or indirectly through you, then
+                             the only way you could satisfy both it and this License would be to
+                             refrain entirely from distribution of the Program.
+
+                             If any portion of this section is held invalid or unenforceable under
+                             any particular circumstance, the balance of the section is intended to
+                             apply and the section as a whole is intended to apply in other
+                             circumstances.
+
+                             It is not the purpose of this section to induce you to infringe any
+                             patents or other property right claims or to contest validity of any
+                             such claims;
+this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system;
+it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+\f
+8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+    9. The Free Software Foundation may publish revised and/or new versions
+    of the General Public License from time to time.  Such new versions will
+    be similar in spirit to the present version, but may differ in detail to
+    address new problems or concerns.
+
+    Each version is given a distinguishing version number.  If the Program
+    specifies a version number of this License which applies to it and "any
+    later version", you have the option of following the terms and conditions
+    either of that version or of any later version published by the Free
+    Software Foundation.  If the Program does not specify a version number of
+    this License, you may choose any version ever published by the Free Software
+    Foundation.
+
+    10. If you wish to incorporate parts of the Program into other free
+    programs whose distribution conditions are different, write to the author
+    to ask for permission.  For software which is copyrighted by the Free
+        Software Foundation, write to the Free Software Foundation;
+we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+        TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+        YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+        PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+    POSSIBILITY OF SUCH DAMAGES.
+
+    END OF TERMS AND CONDITIONS
+    \f
+    How to Apply These Terms to Your New Programs
+
+    If you develop a new program, and you want it to be of the greatest
+    possible use to the public, the best way to achieve this is to make it
+    free software which everyone can redistribute and change under these terms.
+
+    To do so, attach the following notices to the program.  It is safest
+        to attach them to the start of each source file to most effectively
+        convey the exclusion of warranty;
+and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+<one line to give the program's name and a brief idea of what it does.>
+Copyright (C) <year>  <name of author>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+Gnomovision version 69, Copyright (C) year  name of author
+Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+This is free software, and you are welcome to redistribute it
+under certain conditions;
+type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+`Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+<signature of Ty Coon>, 1 April 1989
+Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/admin.cmds b/meta-stx/recipes-core/stx-integ/files/ibsh/admin.cmds
new file mode 100644 (file)
index 0000000..1f891ea
--- /dev/null
@@ -0,0 +1,11 @@
+# Add any commands the user may execute. Even shell commands.
+# You have to allow logout and/or exit, so the user can logout!
+# cd and pwd should also be allowed. Note: other shell builtin
+# commands are not yet implemented!
+nova
+system
+neutron
+cinder
+glance
+ceilometer
+heat
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/admin.xtns b/meta-stx/recipes-core/stx-integ/files/ibsh/admin.xtns
new file mode 100644 (file)
index 0000000..5828add
--- /dev/null
@@ -0,0 +1,6 @@
+# Add any extension the user may use.
+.doc
+.txt
+.tgz
+.tar
+
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e-cgcs-copyright.patch b/meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e-cgcs-copyright.patch
new file mode 100644 (file)
index 0000000..1becb73
--- /dev/null
@@ -0,0 +1,26 @@
+diff --git a/config.c b/config.c
+index c1087a5..add7c53 100644
+--- a/config.c
++++ b/config.c
+@@ -6,6 +6,8 @@
+   This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
+   Copyright (C) 2005  Attila Nagyidai
++  Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
++
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either version 2
+diff --git a/main.c b/main.c
+index cf3ae9e..6cda04e 100644
+--- a/main.c
++++ b/main.c
+@@ -6,6 +6,8 @@
+   This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
+   Copyright (C) 2005  Attila Nagyidai
++  Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
++
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either version 2
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e-cgcs.patch b/meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e-cgcs.patch
new file mode 100644 (file)
index 0000000..de22da0
--- /dev/null
@@ -0,0 +1,84 @@
+Index: cgcs-users-1.0-r0/main.c
+===================================================================
+--- cgcs-users-1.0-r0.orig/main.c
++++ cgcs-users-1.0-r0/main.c
+@@ -37,6 +37,7 @@
+ /* Header files */
+ #include "ibsh.h"
++#include "stdlib.h"
+ /* Main: */
+ /* Handle arguments, read config files, start command processing. */
+@@ -57,13 +58,28 @@
+ /* use our builtin code, otherwise use execve. After execve, check if the user didnt */
+ /* use the last command to create some illegal content. If yes, erase that. Give the */
+ /* notice only afterwards. */
++
++void ALRMhandler(int sig) {
++    OPENLOG;
++    syslog(LOG_INFO, "CLI timeout, user %s has logged out.", loggedin.uname);
++    CLOSELOG;
++    exit(0);
++}
++
+ int main(int argc, char **argv)
+ {
+   char temp[STRING_SIZE], *buf;
+   struct stat info;
+   uid_t ruid, euid;
+   gid_t rgid, egid;
++  unsigned int tout_cli = 0;
++  const char* tout = getenv("TMOUT");
++  if (tout)
++    tout_cli = atoi(tout);
++  else
++    //default to 5 mins
++    tout_cli = 300;
+   /* setuid protection */
+   ruid = getuid();
+@@ -107,6 +123,7 @@ int main(int argc, char **argv)
+   signal( SIGQUIT, SIG_IGN );
+   signal( SIGTERM, SIG_IGN );
+   signal( SIGTSTP, SIG_IGN );
++  signal( SIGALRM, ALRMhandler );
+   LoadConfig();
+   /* Command mode */
+@@ -144,6 +161,7 @@ int main(int argc, char **argv)
+   /* will be allowed to run, unless it is mentioned in the */
+   /* config files. Files that are created with an extension */
+   /* that is listed in the other config file, must be deleted! */
++  alarm(tout_cli);
+   for ( ; ; ) {
+     /* Where is he ? */
+     getcwd(real_path, STRING_SIZE);
+@@ -153,12 +171,12 @@ int main(int argc, char **argv)
+     }
+     /* We don't want the user to know where he actually is. */
+     /* This is the prompt! */
+-    printf("[%s]%% ", jail_path);
++    printf("[%s]%% ", loggedin.uname);
+     /* scanf("%s", user_command); */
+     myscanf(user_command, real_path);
++    alarm(tout_cli);
+     /* Command interpretation and execution. */
+     if ( (CommandOK(user_command, loggedin.udir, jail_path, filtered_command)) == 0 ) {
+-        printf("Sorry, can't let you do that!\n");
+         log_attempt(loggedin.uname);  /* v0.2a */
+         continue;
+     }
+Index: cgcs-users-1.0-r0/config.c
+===================================================================
+--- cgcs-users-1.0-r0.orig/config.c
++++ cgcs-users-1.0-r0/config.c
+@@ -166,7 +166,6 @@ int LoadConfig( void )
+                 // Delete '\n'
+                 tmp2[i][strlen(tmp2[i]) - 1] = '\0';
+                 strncpy(extensions[i],tmp2[i],strlen(tmp2[i]));
+-                printf("EXTENSIONS %s\n",extensions[i]);
+               i++;
+       }
+   }
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e.patch b/meta-stx/recipes-core/stx-integ/files/ibsh/ibsh-0.3e.patch
new file mode 100644 (file)
index 0000000..23fe072
--- /dev/null
@@ -0,0 +1,860 @@
+Index: cgcs-users-1.0-r0/AUTHORS.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/AUTHORS.orig
+@@ -0,0 +1,15 @@
++AUTHORS OF PROJECT IBSH
++
++Attila Nagyidai <attila at ibsh.net>
++      * Original program author, project admin, developer.
++
++Shy <shy at ibsh.net>
++      * Developer, debugger, tester, and many more.
++
++Witzy <witzy at ibsh.net>
++      * Developer, debugger, tester, and many more.
++
++http://www.ibsh.net
++irc:
++irc.freenode.net #ibsh
++irc.geek-power.org #ibsh
+Index: cgcs-users-1.0-r0/BUGS.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/BUGS.orig
+@@ -0,0 +1,19 @@
++** Open BUGS **
++None, so far.
++
++** Fixed BUGS **
++- Input length checking on all inputs, string copies, etc. is fixed.
++- The myscanf function will no longer accept more then 80 chars at once,
++so ibsh hopefully wont crash on a too long input.
++- Added signal.h in the header file, the lack of it caused compilation
++problems on some systems.
++- Fixed the infinite loop in DelBadFiles. This function is temporarily 
++taken out of the project
++- Removed the involvment of /bin/sh from system. Added path checking.
++- In jail root, not only ../ is not allowed, but .. too.
++- Fixed a bug, that happened on bsd, when the user pressed ^D.
++- Fixed a bug with opendir
++- Fixed a format string vulnerability in logprintbadfile(). Thanks to
++Kim Streich for the report.
++
++2005.05.23
+Index: cgcs-users-1.0-r0/ChangeLog.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/ChangeLog.orig
+@@ -0,0 +1,34 @@
++0.3e - a buffer overflow and a string bug, both found by RazoR (Nikolay Alexandrov), fixed.
++0.3d - a format string vulnerability, found by Kim Streich, is fixed.
++0.3b-0.3c - bugfixes.
++0.3a  - The admin has the opportunity, to create separate cmds file for each user. 
++      This way the sysadmin has complete control over sensitive applications, which
++      should only be allowed to a selected few.
++      - The admin has the opportunity, to create separate xtns file for each user. 
++      - The extensions policy has been changed. Now both globals.xtns and the user
++      extension files will list the extensions, that are _allowed_ ! In earlier versions,
++      the forbidden extensions were listed, that is allow everything, except to deny a few.
++      From this version on, it's deny everything, except allow the ones, listed in these files.
++      - While the code for the search of illegal/dangerous material stored in user space is
++      back, it will not erase any files any more. Instead, it will remove all
++      rights from that file, so it can not be executed, or read. Files, with the +x bit set,
++      will be chmodded to -x. This is another "defense line" to stop the user to execute
++      programs, stored in user space.
++      - The access to all linux binaries, and source code files, stored in user space, if any, 
++      will be blocked.
++      - Absolute path for restricted users can not be longer then 255 characters. All files,
++      that are longer (with full path), will be renamed.
++      - Minor bug fixes.
++
++0.2a  - Major bug fixes. 
++      - User activities are logged with syslog.
++      - hhsytem revised, hardened. /bin/sh isnt involved anymore into program starting.
++      If the home directory is in the PATH, it's ignored.
++      - erasing illegal content is temporarily suspended and removed.
++
++0.1b  - Major bug fixes.
++      - The config files are accidentally missing from this release!
++
++0.1a  - The first version of the program.
++
++2005.05.23.
+Index: cgcs-users-1.0-r0/CONTRIBUTORS.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/CONTRIBUTORS.orig
+@@ -0,0 +1,7 @@
++CONTRIBUTORS TO PROJECT IBSH
++
++Kim Streich <kstreich at gmail.com>
++      * bug finder, debugger, tester.
++
++RazoR (Nikolay Alexandrov) <Nikolay@Alexandrov.ws>
++      * bug finder, debugger, tester.
+Index: cgcs-users-1.0-r0/COPYING.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/COPYING.orig
+@@ -0,0 +1,340 @@
++                  GNU GENERAL PUBLIC LICENSE
++                     Version 2, June 1991
++
++ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
++     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++                          Preamble
++
++  The licenses for most software are designed to take away your
++freedom to share and change it.  By contrast, the GNU General Public
++License is intended to guarantee your freedom to share and change free
++software--to make sure the software is free for all its users.  This
++General Public License applies to most of the Free Software
++Foundation's software and to any other program whose authors commit to
++using it.  (Some other Free Software Foundation software is covered by
++the GNU Library General Public License instead.)  You can apply it to
++your programs, too.
++
++  When we speak of free software, we are referring to freedom, not
++price.  Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++this service if you wish), that you receive source code or can get it
++if you want it, that you can change the software or use pieces of it
++in new free programs; and that you know you can do these things.
++
++  To protect your rights, we need to make restrictions that forbid
++anyone to deny you these rights or to ask you to surrender the rights.
++These restrictions translate to certain responsibilities for you if you
++distribute copies of the software, or if you modify it.
++
++  For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must give the recipients all the rights that
++you have.  You must make sure that they, too, receive or can get the
++source code.  And you must show them these terms so they know their
++rights.
++
++  We protect your rights with two steps: (1) copyright the software, and
++(2) offer you this license which gives you legal permission to copy,
++distribute and/or modify the software.
++
++  Also, for each author's protection and ours, we want to make certain
++that everyone understands that there is no warranty for this free
++software.  If the software is modified by someone else and passed on, we
++want its recipients to know that what they have is not the original, so
++that any problems introduced by others will not reflect on the original
++authors' reputations.
++
++  Finally, any free program is threatened constantly by software
++patents.  We wish to avoid the danger that redistributors of a free
++program will individually obtain patent licenses, in effect making the
++program proprietary.  To prevent this, we have made it clear that any
++patent must be licensed for everyone's free use or not licensed at all.
++
++  The precise terms and conditions for copying, distribution and
++modification follow.
++\f
++                  GNU GENERAL PUBLIC LICENSE
++   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
++
++  0. This License applies to any program or other work which contains
++a notice placed by the copyright holder saying it may be distributed
++under the terms of this General Public License.  The "Program", below,
++refers to any such program or work, and a "work based on the Program"
++means either the Program or any derivative work under copyright law:
++that is to say, a work containing the Program or a portion of it,
++either verbatim or with modifications and/or translated into another
++language.  (Hereinafter, translation is included without limitation in
++the term "modification".)  Each licensee is addressed as "you".
++
++Activities other than copying, distribution and modification are not
++covered by this License; they are outside its scope.  The act of
++running the Program is not restricted, and the output from the Program
++is covered only if its contents constitute a work based on the
++Program (independent of having been made by running the Program).
++Whether that is true depends on what the Program does.
++
++  1. You may copy and distribute verbatim copies of the Program's
++source code as you receive it, in any medium, provided that you
++conspicuously and appropriately publish on each copy an appropriate
++copyright notice and disclaimer of warranty; keep intact all the
++notices that refer to this License and to the absence of any warranty;
++and give any other recipients of the Program a copy of this License
++along with the Program.
++
++You may charge a fee for the physical act of transferring a copy, and
++you may at your option offer warranty protection in exchange for a fee.
++
++  2. You may modify your copy or copies of the Program or any portion
++of it, thus forming a work based on the Program, and copy and
++distribute such modifications or work under the terms of Section 1
++above, provided that you also meet all of these conditions:
++
++    a) You must cause the modified files to carry prominent notices
++    stating that you changed the files and the date of any change.
++
++    b) You must cause any work that you distribute or publish, that in
++    whole or in part contains or is derived from the Program or any
++    part thereof, to be licensed as a whole at no charge to all third
++    parties under the terms of this License.
++
++    c) If the modified program normally reads commands interactively
++    when run, you must cause it, when started running for such
++    interactive use in the most ordinary way, to print or display an
++    announcement including an appropriate copyright notice and a
++    notice that there is no warranty (or else, saying that you provide
++    a warranty) and that users may redistribute the program under
++    these conditions, and telling the user how to view a copy of this
++    License.  (Exception: if the Program itself is interactive but
++    does not normally print such an announcement, your work based on
++    the Program is not required to print an announcement.)
++\f
++These requirements apply to the modified work as a whole.  If
++identifiable sections of that work are not derived from the Program,
++and can be reasonably considered independent and separate works in
++themselves, then this License, and its terms, do not apply to those
++sections when you distribute them as separate works.  But when you
++distribute the same sections as part of a whole which is a work based
++on the Program, the distribution of the whole must be on the terms of
++this License, whose permissions for other licensees extend to the
++entire whole, and thus to each and every part regardless of who wrote it.
++
++Thus, it is not the intent of this section to claim rights or contest
++your rights to work written entirely by you; rather, the intent is to
++exercise the right to control the distribution of derivative or
++collective works based on the Program.
++
++In addition, mere aggregation of another work not based on the Program
++with the Program (or with a work based on the Program) on a volume of
++a storage or distribution medium does not bring the other work under
++the scope of this License.
++
++  3. You may copy and distribute the Program (or a work based on it,
++under Section 2) in object code or executable form under the terms of
++Sections 1 and 2 above provided that you also do one of the following:
++
++    a) Accompany it with the complete corresponding machine-readable
++    source code, which must be distributed under the terms of Sections
++    1 and 2 above on a medium customarily used for software interchange; or,
++
++    b) Accompany it with a written offer, valid for at least three
++    years, to give any third party, for a charge no more than your
++    cost of physically performing source distribution, a complete
++    machine-readable copy of the corresponding source code, to be
++    distributed under the terms of Sections 1 and 2 above on a medium
++    customarily used for software interchange; or,
++
++    c) Accompany it with the information you received as to the offer
++    to distribute corresponding source code.  (This alternative is
++    allowed only for noncommercial distribution and only if you
++    received the program in object code or executable form with such
++    an offer, in accord with Subsection b above.)
++
++The source code for a work means the preferred form of the work for
++making modifications to it.  For an executable work, complete source
++code means all the source code for all modules it contains, plus any
++associated interface definition files, plus the scripts used to
++control compilation and installation of the executable.  However, as a
++special exception, the source code distributed need not include
++anything that is normally distributed (in either source or binary
++form) with the major components (compiler, kernel, and so on) of the
++operating system on which the executable runs, unless that component
++itself accompanies the executable.
++
++If distribution of executable or object code is made by offering
++access to copy from a designated place, then offering equivalent
++access to copy the source code from the same place counts as
++distribution of the source code, even though third parties are not
++compelled to copy the source along with the object code.
++\f
++  4. You may not copy, modify, sublicense, or distribute the Program
++except as expressly provided under this License.  Any attempt
++otherwise to copy, modify, sublicense or distribute the Program is
++void, and will automatically terminate your rights under this License.
++However, parties who have received copies, or rights, from you under
++this License will not have their licenses terminated so long as such
++parties remain in full compliance.
++
++  5. You are not required to accept this License, since you have not
++signed it.  However, nothing else grants you permission to modify or
++distribute the Program or its derivative works.  These actions are
++prohibited by law if you do not accept this License.  Therefore, by
++modifying or distributing the Program (or any work based on the
++Program), you indicate your acceptance of this License to do so, and
++all its terms and conditions for copying, distributing or modifying
++the Program or works based on it.
++
++  6. Each time you redistribute the Program (or any work based on the
++Program), the recipient automatically receives a license from the
++original licensor to copy, distribute or modify the Program subject to
++these terms and conditions.  You may not impose any further
++restrictions on the recipients' exercise of the rights granted herein.
++You are not responsible for enforcing compliance by third parties to
++this License.
++
++  7. If, as a consequence of a court judgment or allegation of patent
++infringement or for any other reason (not limited to patent issues),
++conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License.  If you cannot
++distribute so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you
++may not distribute the Program at all.  For example, if a patent
++license would not permit royalty-free redistribution of the Program by
++all those who receive copies directly or indirectly through you, then
++the only way you could satisfy both it and this License would be to
++refrain entirely from distribution of the Program.
++
++If any portion of this section is held invalid or unenforceable under
++any particular circumstance, the balance of the section is intended to
++apply and the section as a whole is intended to apply in other
++circumstances.
++
++It is not the purpose of this section to induce you to infringe any
++patents or other property right claims or to contest validity of any
++such claims; this section has the sole purpose of protecting the
++integrity of the free software distribution system, which is
++implemented by public license practices.  Many people have made
++generous contributions to the wide range of software distributed
++through that system in reliance on consistent application of that
++system; it is up to the author/donor to decide if he or she is willing
++to distribute software through any other system and a licensee cannot
++impose that choice.
++
++This section is intended to make thoroughly clear what is believed to
++be a consequence of the rest of this License.
++\f
++  8. If the distribution and/or use of the Program is restricted in
++certain countries either by patents or by copyrighted interfaces, the
++original copyright holder who places the Program under this License
++may add an explicit geographical distribution limitation excluding
++those countries, so that distribution is permitted only in or among
++countries not thus excluded.  In such case, this License incorporates
++the limitation as if written in the body of this License.
++
++  9. The Free Software Foundation may publish revised and/or new versions
++of the General Public License from time to time.  Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++Each version is given a distinguishing version number.  If the Program
++specifies a version number of this License which applies to it and "any
++later version", you have the option of following the terms and conditions
++either of that version or of any later version published by the Free
++Software Foundation.  If the Program does not specify a version number of
++this License, you may choose any version ever published by the Free Software
++Foundation.
++
++  10. If you wish to incorporate parts of the Program into other free
++programs whose distribution conditions are different, write to the author
++to ask for permission.  For software which is copyrighted by the Free
++Software Foundation, write to the Free Software Foundation; we sometimes
++make exceptions for this.  Our decision will be guided by the two goals
++of preserving the free status of all derivatives of our free software and
++of promoting the sharing and reuse of software generally.
++
++                          NO WARRANTY
++
++  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
++FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
++OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
++PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
++OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
++TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
++PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
++REPAIR OR CORRECTION.
++
++  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
++REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
++INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
++OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
++TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
++YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
++PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGES.
++
++                   END OF TERMS AND CONDITIONS
++\f
++          How to Apply These Terms to Your New Programs
++
++  If you develop a new program, and you want it to be of the greatest
++possible use to the public, the best way to achieve this is to make it
++free software which everyone can redistribute and change under these terms.
++
++  To do so, attach the following notices to the program.  It is safest
++to attach them to the start of each source file to most effectively
++convey the exclusion of warranty; and each file should have at least
++the "copyright" line and a pointer to where the full notice is found.
++
++    <one line to give the program's name and a brief idea of what it does.>
++    Copyright (C) <year>  <name of author>
++
++    This program is free software; you can redistribute it and/or modify
++    it under the terms of the GNU General Public License as published by
++    the Free Software Foundation; either version 2 of the License, or
++    (at your option) any later version.
++
++    This program is distributed in the hope that it will be useful,
++    but WITHOUT ANY WARRANTY; without even the implied warranty of
++    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++    GNU General Public License for more details.
++
++    You should have received a copy of the GNU General Public License
++    along with this program; if not, write to the Free Software
++    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++
++
++Also add information on how to contact you by electronic and paper mail.
++
++If the program is interactive, make it output a short notice like this
++when it starts in an interactive mode:
++
++    Gnomovision version 69, Copyright (C) year  name of author
++    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
++    This is free software, and you are welcome to redistribute it
++    under certain conditions; type `show c' for details.
++
++The hypothetical commands `show w' and `show c' should show the appropriate
++parts of the General Public License.  Of course, the commands you use may
++be called something other than `show w' and `show c'; they could even be
++mouse-clicks or menu items--whatever suits your program.
++
++You should also get your employer (if you work as a programmer) or your
++school, if any, to sign a "copyright disclaimer" for the program, if
++necessary.  Here is a sample; alter the names:
++
++  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
++  `Gnomovision' (which makes passes at compilers) written by James Hacker.
++
++  <signature of Ty Coon>, 1 April 1989
++  Ty Coon, President of Vice
++
++This General Public License does not permit incorporating your program into
++proprietary programs.  If your program is a subroutine library, you may
++consider it more useful to permit linking proprietary applications with the
++library.  If this is what you want to do, use the GNU Library General
++Public License instead of this License.
+Index: cgcs-users-1.0-r0/COPYRIGHT.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/COPYRIGHT.orig
+@@ -0,0 +1,17 @@
++This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++Copyright (C) 2005  Attila Nagyidai
++
++This program is free software; you can redistribute it and/or
++modify it under the terms of the GNU General Public License
++as published by the Free Software Foundation; either version 2
++of the License, or (at your option) any later version.
++
++This program is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; if not, write to the Free Software
++Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
+Index: cgcs-users-1.0-r0/INSTALL.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/INSTALL.orig
+@@ -0,0 +1,23 @@
++Installing ibsh is really easy, so no need for the usual sections
++in this document. There is no configure script either, so if 
++something wrong, make will fail.
++
++# make ibsh
++# make ibsh_install
++
++Optionally:
++
++# make clean
++
++
++To uninstall ibsh:
++
++# make ibsh_uninstall
++
++
++Of course you will have to enable this shell by:
++# echo /bin/ibsh >> /etc/shells
++or however you like it.
++And make sure the permissions read 0755 !
++
++2005.03.24.
+Index: cgcs-users-1.0-r0/main.c.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/main.c.orig
+@@ -0,0 +1,233 @@
++/*
++  Created: 03.19.05 11:34:57 by Attila Nagyidai
++
++  $Id: C\040Console.c,v 1.1.2.1 2003/08/13 00:38:46 neum Exp $
++
++  This file is part of IBSH (Iron Bars Shell) , a restricted Unix shell
++  Copyright (C) 2005  Attila Nagyidai
++
++  This program is free software; you can redistribute it and/or
++  modify it under the terms of the GNU General Public License
++  as published by the Free Software Foundation; either version 2
++  of the License, or (at your option) any later version.
++
++  This program is distributed in the hope that it will be useful,
++  but WITHOUT ANY WARRANTY; without even the implied warranty of
++  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++  GNU General Public License for more details.
++
++  You should have received a copy of the GNU General Public License
++  along with this program; if not, write to the Free Software
++  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
++
++  Author: Attila Nagyidai
++  Email: na@ent.hu
++
++  Co-Author: Shy
++  Email: shy@cpan.org
++
++  Co-Author: Witzy
++  Email: stazzz@altern.org
++
++  URL: http://ibsh.sourceforge.net
++  IRC: irc.freenode.net #ibsh
++  RSS, Statistics, etc: http://sourceforge.net/projects/ibsh/
++
++*/
++
++/* Header files */
++#include "ibsh.h"
++
++/* Main: */
++/* Handle arguments, read config files, start command processing. */
++/* IBSH doesnt use any command line arguments, but my text editor */
++/* uses this code in all new c files to create. And i didnt have the */
++/* heart to remove it. ;p */
++/* Technical Description: */
++/* Get the passwd entry for the user. The uid is easily aquired, since */
++/* it is the real user id. After that, grab the passwd file entry upon */
++/* the id, and copy the information to the loggedin struct. */
++/* Add some signal handlers too. */
++/* The infinite loop: */
++/* Get the current directory, the full path. Compute the jailpath from that, */
++/* that is the directories below the users homedir, which is the jail root. */
++/* The jail ceiling if you like. Print some prompt to the user with the jailpath, */
++/* and read stdin for incoming commands. Filter out the bad commands, typos, the */
++/* not allowed commands. It the command is ok, execute it. If it is a shell builtin, */
++/* use our builtin code, otherwise use execve. After execve, check if the user didnt */
++/* use the last command to create some illegal content. If yes, erase that. Give the */
++/* notice only afterwards. */
++int main(int argc, char **argv)
++{
++    char temp[STRING_SIZE], *buf;
++    struct stat info;
++    uid_t ruid, euid;
++    gid_t rgid, egid;
++
++
++    /* setuid protection */
++    ruid = getuid();
++    euid = geteuid();
++    rgid = getgid();
++    egid = getegid();
++    if ( (ruid!=euid) || (ruid==0) || (euid==0) || (rgid!=egid) || (rgid==0) || (egid==0) ) {
++        OPENLOG;
++        syslog(LOG_ERR, "setuid/setgid violation!");
++        CLOSELOG;
++        printf("ibsh: setuid/setgid violation!! exiting...\n");
++#ifdef DEBUG
++        printf("ruid: %d;euid: %d;rgid: %d;egid: %d\n", ruid,euid,rgid,egid);
++#endif
++        exit(0);
++    }
++
++    /* To Do: The code of your application goes here */
++    /* First part: */
++    /* Get essential information about the user who got this shell: */
++    /* first the username, then the user id. Upon this, retrieve the */
++    /* user's record in the passwd file. */
++    bzero(&loggedin, sizeof(loggedin));
++    loggedin.uid = getuid();
++    loggedin.record = getpwuid(loggedin.uid);
++    if ( loggedin.record == NULL ) {
++        loggedin.record = getpwnam(loggedin.uname);
++        if ( loggedin.record == NULL ) {
++            openlog(loggedin.uname, LOG_PID, LOG_AUTH);
++            syslog(LOG_ERR, "Can not obtain user information");
++            printf("Can not obtain user information\n");
++            closelog();
++            exit(0);
++        }
++    }
++    strncpy(loggedin.uname, loggedin.record->pw_name, PAM_SIZE);
++    strncpy(loggedin.udir, loggedin.record->pw_dir, STRING_SIZE);
++
++    /* Second part: */
++    /* Handle some signal catching. Read the configuration files. */
++    signal( SIGINT, SIG_IGN );
++    signal( SIGQUIT, SIG_IGN );
++    signal( SIGTERM, SIG_IGN );
++    signal( SIGTSTP, SIG_IGN );
++    LoadConfig();
++
++    /* Command mode */
++    if(argc == 3) {
++        if ( argv[1][1] == 'c' ) {
++            if ( CommandOK(argv[2], loggedin.udir, "/", filtered_command) == 1) {
++                exitcode = hhsystem(filtered_command);
++                OPENLOG;
++                syslog(LOG_INFO, "command %s ordered, command %s has been executed.",
++                       argv[2], filtered_command);
++                printf("command %s ordered, command %s has been executed.\n",
++                       argv[2], filtered_command);
++                CLOSELOG;
++                exit(exitcode);
++            }
++            printf("CommandOK failed (%s/%s)\n", loggedin.udir, filtered_command);
++            exit(0);
++        }
++        else {
++            printf("Invalid are (%s)\n", argv[1]);
++            exit(0);
++        }
++    }
++
++    OPENLOG;
++    syslog(LOG_INFO, "user %s has logged in.", loggedin.uname);
++    CLOSELOG;
++
++
++#ifdef INCLUDE_DELETE_BAD_FILES
++    DelBadFiles(loggedin.udir);
++#endif
++    if ( chdir (loggedin.udir) < 0 )
++        return -1;
++
++
++    /* Third part: */
++    /* Start reading and processing the user issued commands. */
++    /* Split the command by the spaces, filter out anything, */
++    /* that would allow the user to access files outside the */
++    /* jail. Filter out multiples and pipes as well. No program */
++    /* will be allowed to run, unless it is mentioned in the */
++    /* config files. Files that are created with an extension */
++    /* that is listed in the other config file, must be deleted! */
++    for ( ; ; ) {
++        /* Where is he ? */
++       if ( getcwd(real_path, STRING_SIZE) == NULL )
++           return -1;
++        GetPositionInJail(real_path, loggedin.udir, jail_path);
++        if ( (strlen(jail_path)) == 0 ) {
++            strncpy(jail_path, "/", 2);
++        }
++        /* We don't want the user to know where he actually is. */
++        /* This is the prompt! */
++        printf("[%s]%% ", jail_path);
++        /* scanf("%s", user_command); */
++        myscanf(user_command, real_path);
++        /* Command interpretation and execution. */
++        if ( (CommandOK(user_command, loggedin.udir, jail_path, filtered_command)) == 0 ) {
++            printf("Sorry, can't let you do that!\n");
++            log_attempt(loggedin.uname);  /* v0.2a */
++            continue;
++        }
++        /* If the user issued command starts with a shell builtin. */
++        bzero(temp, strlen(temp));
++        if ( (buf = strstr(filtered_command, "cd")) != NULL ) {
++            if ( (strcmp(buf, filtered_command)) == 0 ) {
++                LTrim3(filtered_command, temp);
++                if ( (strcmp(temp, real_path)) != 0 ) {
++                    if ( (strcmp(temp, "..")) == 0 ) {
++                        PathMinusOne(jail_path, temp, 1,sizeof(temp));
++                    }
++                    if ( (strcmp(temp, "/")) == 0 ) {
++                        strncpy(temp, loggedin.udir, LINE_SIZE);
++                    }
++                    exitcode = chdir(temp);
++                    if ( exitcode == -1 ) {
++                        printf("ibsh: cd: %s: No such file or directory\n", temp);
++                    }
++                }
++                continue;
++            }
++        }
++        else if ( (buf = strstr(filtered_command, "pwd")) != NULL ) {
++            if ( (strcmp(buf, filtered_command)) == 0 ) {
++                printf("%s\n", jail_path);
++                continue;
++            }
++        }
++        else if ( (buf = strstr(filtered_command, "logout")) != NULL ) {
++            if ( (strcmp(buf, filtered_command)) == 0 ) {
++                OPENLOG;
++                syslog(LOG_INFO, "user %s has logged out.", loggedin.uname);
++                CLOSELOG;
++                break;
++            }
++        }
++        else if ( (buf = strstr(filtered_command, "exit")) != NULL ) {
++            if ( (strcmp(buf, filtered_command)) == 0 ) {
++                OPENLOG;
++                syslog(LOG_INFO, "user %s has logged out.", loggedin.uname);
++                printf("user %s has logged out\n", loggedin.uname);
++                CLOSELOG;
++                break;
++            }
++        }
++        else {
++            exitcode = hhsystem(filtered_command);
++            if ( exitcode < 0 ) {
++                printf("%s\n", strerror(errno));
++            }
++        }
++        if ( getcwd(real_path, STRING_SIZE) == NULL ) 
++            return -1;
++#ifdef INCLUDE_BAD_FILES
++        DelBadFiles(loggedin.udir);
++#endif
++        if ( chdir (real_path) < 0 )
++            return 1;
++    }
++    return 0;
++}
++
+Index: cgcs-users-1.0-r0/Makefile.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/Makefile.orig
+@@ -0,0 +1,56 @@
++# This is the makefile for ibsh 0.3e
++CC = gcc -g -O3
++OBJECTS = main.o command.o jail.o execute.o config.o misc.o antixploit.o delbadfiles.o
++
++all ibsh: ${OBJECTS} ibsh.h
++      ${CC} -o ibsh ${OBJECTS}
++
++main.o: main.c ibsh.h
++      ${CC} -c main.c
++
++command.o: command.c ibsh.h
++      ${CC} -c command.c
++
++jail.o: jail.c ibsh.h
++      ${CC} -c jail.c
++
++execute.o: execute.c ibsh.h
++      ${CC} -c execute.c
++
++config.o: config.c ibsh.h
++      ${CC} -c config.c
++
++misc.o: misc.c ibsh.h
++      ${CC} -c misc.c
++
++antixploit.o: antixploit.c ibsh.h
++      ${CC} -c antixploit.c
++
++delbadfiles.o: delbadfiles.c ibsh.h
++      ${CC} -c delbadfiles.c
++
++ibsh_install:
++      cp ./ibsh /bin/
++      mkdir /etc/ibsh
++      mkdir /etc/ibsh/cmds
++      mkdir /etc/ibsh/xtns
++      cp ./globals.cmds /etc/ibsh/
++      cp ./globals.xtns /etc/ibsh/
++
++ibsh_uninstall:
++      rm -rf /etc/ibsh/globals.cmds
++      rm -rf /etc/ibsh/globals.xtns
++      rm -rf /etc/ibsh/cmds/*.*
++      rm -rf /etc/ibsh/xtns/*.*
++      rmdir /etc/ibsh/cmds
++      rmdir /etc/ibsh/xtns
++      rmdir /etc/ibsh
++      rm -rf /bin/ibsh
++
++clean:
++      rm -rf ibsh
++      rm -rf *.o
++
++
++# 13:49 2005.04.06.
++
+Index: cgcs-users-1.0-r0/README.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/README.orig
+@@ -0,0 +1,29 @@
++      Iron Bars SHell - a restricted interactive shell.
++
++Overview
++
++      For long i have been in the search of a decent restricted shell, but in vain.
++      The few i found, were really easy to hack, and there were quite a few docs
++      around on the web about hacking restricted shells with a menu interface.
++      For my definitions, a restricted shell must not only prevent the user to 
++      escape her jail, but also not to access any files outside the jail.
++      The system administrator must have total control over the restricted shell.
++      These are the major features incorporated and realized by ibsh.
++
++
++Features
++
++      Please read the changelog.      
++
++
++Installation
++
++      Read the INSTALL file.
++
++
++Contact
++      See Authors file.
++
++
++Attila Nagyidai
++2005.05.23.
+Index: cgcs-users-1.0-r0/Release.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/Release.orig
+@@ -0,0 +1,17 @@
++This release introduces minor bugfixes, and important new and renewed features.
++Erasing evil files in the home directory of the user is incorporated again, with 
++many improvements. First of all: no file will be erased! Only the access to them
++will be blocked. The extension policy has changed, now ibsh blocks those extensions,
++that are NOT listed. This goes in sync with the usual method of operation of ibsh.
++The execute permission of files in the user space, will be removed.
++New customizing features were added: each user now can have her own commands and 
++extensions file, created and maintained by the system administrator. Some users
++(employees) may require access to special programs. User configuration files allow
++this access only those, who need it, not for everybody.
++Ibsh now scans not only the extensions of files, but the content too! Whatever the permission
++for a certain file exists, if that contains source code, or is a linux binary, access
++will be blocked.
++The absolute path for the users is now limited to 255 characters. Longer, already
++existing filenames will be renamed.
++
++06/04/2005 
+Index: cgcs-users-1.0-r0/TODO.orig
+===================================================================
+--- /dev/null
++++ cgcs-users-1.0-r0/TODO.orig
+@@ -0,0 +1,10 @@
++TODO
++
++      - tab completion.
++      - shell variables.
++      - some changes to the prompt, maybe variable prompt.
++      - history
++      - to be able to use corporate, or other large/complicated programs in a safe
++      working environment, yet be able to share files/work with others.
++
++2005.05.23.
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/operator.cmds b/meta-stx/recipes-core/stx-integ/files/ibsh/operator.cmds
new file mode 100644 (file)
index 0000000..6a0198e
--- /dev/null
@@ -0,0 +1,7 @@
+# Add any commands the user may execute. Even shell commands.
+# You have to allow logout and/or exit, so the user can logout!
+# cd and pwd should also be allowed. Note: other shell builtin
+# commands are not yet implemented!
+touch
+vi
+
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/operator.xtns b/meta-stx/recipes-core/stx-integ/files/ibsh/operator.xtns
new file mode 100644 (file)
index 0000000..ececf5d
--- /dev/null
@@ -0,0 +1,4 @@
+# Add any extension the user may use.
+.doc
+.txt
+
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/secadmin.cmds b/meta-stx/recipes-core/stx-integ/files/ibsh/secadmin.cmds
new file mode 100644 (file)
index 0000000..ae2bfa8
--- /dev/null
@@ -0,0 +1,12 @@
+# Add any commands the user may execute. Even shell commands.
+# You have to allow logout and/or exit, so the user can logout!
+# cd and pwd should also be allowed. Note: other shell builtin
+# commands are not yet implemented!
+#
+touch
+tar
+scp
+sftp
+ssh
+vi
+
diff --git a/meta-stx/recipes-core/stx-integ/files/ibsh/secadmin.xtns b/meta-stx/recipes-core/stx-integ/files/ibsh/secadmin.xtns
new file mode 100644 (file)
index 0000000..5828add
--- /dev/null
@@ -0,0 +1,6 @@
+# Add any extension the user may use.
+.doc
+.txt
+.tgz
+.tar
+
diff --git a/meta-stx/recipes-core/stx-integ/filesystem/filesystem-scripts.inc b/meta-stx/recipes-core/stx-integ/filesystem/filesystem-scripts.inc
new file mode 100644 (file)
index 0000000..23ee1c3
--- /dev/null
@@ -0,0 +1,52 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " filesystem-scripts"
+
+RDEPENDS_filesystem-scripts += " bash"
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append () {
+       :
+}
+
+do_install_append () {
+
+       cd ${S}/filesystem/filesystem-scripts/filesystem-scripts-1.0
+       install -d -m755 ${D}/${sysconfdir}/init.d
+       install -d -m755 ${D}/${libdir}/ocf/resource.d/platform
+       install -d -m755 ${D}/${bindir}
+       install -d -m755 ${D}/${systemd_system_unitdir}
+
+       install -D -m644 uexportfs  ${D}/${sysconfdir}/init.d
+       install -D -m644 nfsserver-mgmt ${D}/${libdir}/ocf/resource.d/platform
+       install -D -m644 nfs-mount ${D}/${bindir}
+       install -D -m644 uexportfs.service ${D}/${systemd_system_unitdir}
+
+}
+
+FILES_filesystem-scripts_append = " \
+       ${sysconfdir}/init.d/uexportfs \
+       ${libdir}/ocf/resource.d/platform/nfsserver-mgmt \
+       ${bindir}/nfs-mount \
+       ${systemd_system_unitdir}/uexportfs.service \
+       "
+
+#pkg_postinst_ontarget_filesystem-scripts() {
+#      /usr/bin/systemctl enable uexportfs.service
+#}
diff --git a/meta-stx/recipes-core/stx-integ/filesystem/iscsi-initiator-utils-config.inc b/meta-stx/recipes-core/stx-integ/filesystem/iscsi-initiator-utils-config.inc
new file mode 100644 (file)
index 0000000..7085048
--- /dev/null
@@ -0,0 +1,44 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " iscsi-initiator-utils-config"
+
+RDEPENDS_iscsi-initiator-utils-config_append += " bash"
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append () {
+       :
+}
+
+do_install_append () {
+
+       cd ${S}/filesystem/iscsi-initiator-utils-config/
+       install -d -m755 ${D}/${libdir}/tempfiles.d
+       install -d -m755 ${D}/${systemd_system_unitdir}
+       install -d -m755 ${D}/${datadir}/starlingx
+
+       install -D -m644 files/iscsi-cache.volatiles ${D}/${libdir}/tempfiles.d
+       install -D -m644 files/iscsi-shutdown.service ${D}/${systemd_system_unitdir}
+       install -m 0644 files/iscsid.conf ${D}/${datadir}/starlingx/stx.iscsid.conf
+}
+
+FILES_iscsi-initiator-utils-config_append = " \
+       ${libdir}/tempfiles.d/iscsi-cache.volatiles \
+       ${systemd_system_unitdir}/iscsi-shutdown.service \
+       ${datadir}/starlingx/stx.iscsid.conf \
+       "
diff --git a/meta-stx/recipes-core/stx-integ/filesystem/nfs-utils-config.inc b/meta-stx/recipes-core/stx-integ/filesystem/nfs-utils-config.inc
new file mode 100644 (file)
index 0000000..3e6e64d
--- /dev/null
@@ -0,0 +1,61 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfs-utils-config"
+
+RDEPENDS_nfs-utils-config_append  += " bash"
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append () {
+       :
+}
+
+do_install_append () {
+
+       cd ${S}/filesystem/nfs-utils-config/
+       install -d -m755 ${D}/${sysconfdir}/init.d
+       install -d -m755 ${D}/${datadir}/starlingx
+       install -d -m755 ${D}/${systemd_system_unitdir}
+
+       install -D -m644 files/nfscommon ${D}/${sysconfdir}/init.d/stx-nfscommon
+       install -D -m644 files/nfscommon.service ${D}/${systemd_system_unitdir}/stx-nfscommon.service
+       install -D -m644 files/nfsserver ${D}/${sysconfdir}/init.d/stx-nfsserver
+       install -D -m644 files/nfsserver.service ${D}/${systemd_system_unitdir}/stx-nfsserver.service
+       install -D -m644 files/nfsmount.conf ${D}/${datadir}/starlingx/stx-nfsmount.conf
+       install -D -m644 files/nfsmount.conf ${D}/${sysconfdir}/stx-nfsmount.conf
+
+}
+
+#pkg_postinst_ontarget_nfs-utils-config () {
+#      systemctl disable rpc-statd.service
+#      systemctl disable rpc-statd-notify.service
+#      systemctl disable nfs-lock.service
+#      systemctl disable nfslock.service
+#
+#      systemctl enable nfscommon.service  >/dev/null 2>&1 || :
+#      systemctl enable nfsserver.service  >/dev/null 2>&1 || :
+#}     
+
+FILES_nfs-utils-config_append = " \
+       ${sysconfdir}/init.d/stx-nfscommon \
+       ${systemd_system_unitdir}/stx-nfscommon.service \
+       ${sysconfdir}/init.d/stx-nfsserver \
+       ${systemd_system_unitdir}/stx-nfsserver.service \
+       ${datadir}/starlingx/stx-nfsmount.conf \
+       ${sysconfdir}/stx-nfsmount.conf \
+       " 
diff --git a/meta-stx/recipes-core/stx-integ/filesystem/nfscheck.inc b/meta-stx/recipes-core/stx-integ/filesystem/nfscheck.inc
new file mode 100644 (file)
index 0000000..66f0d09
--- /dev/null
@@ -0,0 +1,47 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfscheck"
+
+RDEPENDS_nfscheck_append += " bash"
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append () {
+       :
+}
+
+do_install_append () {
+
+       cd ${S}/filesystem/nfscheck/
+       install -d -m755 ${D}/${bindir}
+       install -d -m755 ${D}/${systemd_system_unitdir}
+
+       install -D -m644 files/nfscheck.sh ${D}/${bindir}
+       install -D -m644 files/nfscheck.service ${D}/${systemd_system_unitdir}
+
+
+}
+
+#pkg_postinst_ontarget_nfscheck () {
+#      /bin/systemctl enable nfscheck.service
+#}     
+
+FILES_nfscheck_append = " \
+               ${systemd_system_unitdir}/nfscheck.service \
+               ${bindir}/nfscheck.sh \
+               "
diff --git a/meta-stx/recipes-core/stx-integ/ibsh_0.3e.bbappend b/meta-stx/recipes-core/stx-integ/ibsh_0.3e.bbappend
new file mode 100644 (file)
index 0000000..6cd9673
--- /dev/null
@@ -0,0 +1,39 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+       file://${PN}/ibsh-0.3e.patch \
+       file://${PN}/ibsh-0.3e-cgcs.patch \
+       file://${PN}/ibsh-0.3e-cgcs-copyright.patch \
+       file://${PN}/admin.cmds \
+       file://${PN}/admin.xtns \
+       file://${PN}/LICENSE \
+       file://${PN}/operator.cmds \
+       file://${PN}/operator.xtns \
+       file://${PN}/secadmin.cmds \
+       file://${PN}/secadmin.xtns \
+       "
+
+do_install_append() {
+       cp ${WORKDIR}/${PN}/admin.cmds ${D}/${sysconfdir}/ibsh/cmds/
+       cp ${WORKDIR}/${PN}/admin.xtns ${D}/${sysconfdir}/ibsh/xtns/
+       cp ${WORKDIR}/${PN}/operator.cmds ${D}/${sysconfdir}/ibsh/cmds/
+       cp ${WORKDIR}/${PN}/operator.xtns ${D}/${sysconfdir}/ibsh/xtns/
+       cp ${WORKDIR}/${PN}/secadmin.cmds ${D}/${sysconfdir}/ibsh/cmds/
+       cp ${WORKDIR}/${PN}/secadmin.xtns ${D}/${sysconfdir}/ibsh/xtns/
+}
+
diff --git a/meta-stx/recipes-core/stx-integ/logging/logmgmt.inc b/meta-stx/recipes-core/stx-integ/logging/logmgmt.inc
new file mode 100644 (file)
index 0000000..f825336
--- /dev/null
@@ -0,0 +1,70 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " logmgmt"
+
+do_configure_append () {
+       cd ${S}/logging/logmgmt/logmgmt
+       distutils_do_configure
+} 
+
+do_compile_append () {
+       cd ${S}/logging/logmgmt/logmgmt
+       distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/logging/logmgmt/logmgmt
+       distutils_do_install
+       cd ${S}/logging/logmgmt/
+       install -d -m 755 ${D}/${bindir}
+       install -p -D -m 700 scripts/bin/logmgmt ${D}/${bindir}/logmgmt
+       install -p -D -m 700 scripts/bin/logmgmt_postrotate ${D}/${bindir}/logmgmt_postrotate
+       install -p -D -m 700 scripts/bin/logmgmt_prerotate ${D}/${bindir}/logmgmt_prerotate
+
+       install -d -m 755 ${D}/${sysconfdir}/init.d
+       install -p -D -m 700 scripts/init.d/logmgmt ${D}/${sysconfdir}/init.d/logmgmt
+
+       install -d -m 755 ${D}/${sysconfdir}/pmon.d/
+       install -p -D -m 644 scripts/pmon.d/logmgmt ${D}/${sysconfdir}/pmon.d/logmgmt
+
+       install -p -D -m 664 scripts/etc/systemd/system/logmgmt.service ${D}/${systemd_system_unitdir}/logmgmt.service
+}
+
+
+#pkg_postinst_ontarget_logmgmt () { 
+#      /usr/bin/systemctl enable logmgmt.service >/dev/null 2>&1
+#}
+
+FILES_logmgmt_append = " \
+       ${bindir}/logmgmt \
+       ${bindir}/logmgmt_postrotate \
+       ${bindir}/logmgmt_prerotate \
+       ${sysconfdir}/init.d/logmgmt \
+       ${sysconfdir}/pmon.d/logmgmt \
+       ${systemd_system_unitdir}/logmgmt.service \
+       ${libdir}/python2.7/site-packages/logmgmt-1.0.0-py2.7.egg-info \
+       ${libdir}/python2.7/site-packages/logmgmt \
+       ${libdir}/python2.7/site-packages/logmgmt-1.0.0-py2.7.egg-info/PKG-INFO \
+       ${libdir}/python2.7/site-packages/logmgmt-1.0.0-py2.7.egg-info/top_level.txt \
+       ${libdir}/python2.7/site-packages/logmgmt-1.0.0-py2.7.egg-info/dependency_links.txt \
+       ${libdir}/python2.7/site-packages/logmgmt-1.0.0-py2.7.egg-info/SOURCES.txt \
+       ${libdir}/python2.7/site-packages/logmgmt/prepostrotate.pyc \
+       ${libdir}/python2.7/site-packages/logmgmt/__init__.pyc \
+       ${libdir}/python2.7/site-packages/logmgmt/logmgmt.pyc \
+       ${libdir}/python2.7/site-packages/logmgmt/__init__.py \
+       ${libdir}/python2.7/site-packages/logmgmt/logmgmt.py \
+       ${libdir}/python2.7/site-packages/logmgmt/prepostrotate.py \
+       "
diff --git a/meta-stx/recipes-core/stx-integ/logging/logrotate-config.inc b/meta-stx/recipes-core/stx-integ/logging/logrotate-config.inc
new file mode 100644 (file)
index 0000000..94132e3
--- /dev/null
@@ -0,0 +1,39 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " logrotate-config"
+
+RDEPENDS_logrotate-config_append = " bash"
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append () {
+       :
+}
+
+do_install_append () {
+       cd ${S}/logging/logrotate-config/files/
+       install -d ${D}/${datadir}/starlingx
+       mkdir -p ${D}/${sysconfdir}/cron.d
+       install -m 644 logrotate-cron.d ${D}/${sysconfdir}/cron.d/logrotate
+       install -m 644 logrotate.conf ${D}/${datadir}/starlingx/logrotate.conf
+}
+
+FILES_logrotate-config_append = " \
+       ${sysconfdir}/cron.d/logrotate \
+       ${datadir}/starlingx/logrotate.conf \
+       "
diff --git a/meta-stx/recipes-core/stx-integ/spectre-meltdown-checker_git.bb b/meta-stx/recipes-core/stx-integ/spectre-meltdown-checker_git.bb
new file mode 100644 (file)
index 0000000..29a6380
--- /dev/null
@@ -0,0 +1,40 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+A shell script to tell if your system is vulnerable against the several \
+\"speculative execution\" CVEs that were made public since 2018. \
+"
+
+SUMMARY = "Spectre and Meltdown Checker"
+HOMEPAGE = "https://github.com/speed47/spectre-meltdown-checker"
+LICENSE = "GPL-3.0"
+LIC_FILES_CHKSUM = "file://spectre-meltdown-checker.sh;beginline=1;endline=5;md5=0113e62a200ec9a5f5ebdd7ad4329133"
+
+SRCREV = "3d21dae16864f8e8262d7a35bd4de300452b274d"
+SRCNAME = "spectre-meltdown-checker"
+BRANCH = "master"
+PROTOCOL = "https"
+PV = "v0.43+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/speed47/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+do_install() {
+       install -d -p -m 0755 ${D}/${sbindir} 
+       install -m 0644 ${S}/${SRCNAME}.sh ${D}/${sbindir}/${SRCNAME}.sh
+}
+
+FILES_${PN} = "${sbindir}/"
diff --git a/meta-stx/recipes-core/stx-integ/stx-collector.inc b/meta-stx/recipes-core/stx-integ/stx-collector.inc
new file mode 100644 (file)
index 0000000..f4b04b4
--- /dev/null
@@ -0,0 +1,92 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " collector"
+
+RDEPENDS_collector = " bash"
+DEPENDS += " ceph"
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       :
+}
+
+do_install_prepend () {
+
+       cd ${S}/tools/collector/scripts
+       install -m 755 -d ${D}/${sysconfdir}/collect.d
+       install -m 755 -d ${D}/${sysconfdir}/collect
+       install -m 755 -d ${D}/${sbindir}
+       install -m 755 -d ${D}/${bindir}
+       install -m 755 -d ${D}/${sbindir}
+       
+       install -m 755 collect ${D}/${sbindir}/collect
+       install -m 755 collect_host ${D}/${sbindir}/collect_host
+       install -m 755 collect_date ${D}/${sbindir}/collect_date
+       install -m 755 collect_utils ${D}/${sbindir}/collect_utils
+       install -m 755 collect_parms ${D}/${sbindir}/collect_parms
+       install -m 755 collect_mask_passwords ${D}/${sbindir}/collect_mask_passwords
+       install -m 755 expect_done ${D}/${sbindir}/expect_done
+       
+       install -m 755 collect_sysinv.sh ${D}/${sysconfdir}/collect.d/collect_sysinv
+       install -m 755 collect_psqldb.sh ${D}/${sysconfdir}/collect.d/collect_psqldb
+       install -m 755 collect_openstack.sh ${D}/${sysconfdir}/collect.d/collect_openstack
+       install -m 755 collect_networking.sh ${D}/${sysconfdir}/collect.d/collect_networking
+       install -m 755 collect_ceph.sh ${D}/${sysconfdir}/collect.d/collect_ceph
+       install -m 755 collect_sm.sh ${D}/${sysconfdir}/collect.d/collect_sm
+       install -m 755 collect_tc.sh ${D}/${sysconfdir}/collect.d/collect_tc
+       install -m 755 collect_nfv_vim.sh ${D}/${sysconfdir}/collect.d/collect_nfv_vim
+       install -m 755 collect_ovs.sh ${D}/${sysconfdir}/collect.d/collect_ovs
+       install -m 755 collect_patching.sh ${D}/${sysconfdir}/collect.d/collect_patching
+       install -m 755 collect_coredump.sh ${D}/${sysconfdir}/collect.d/collect_coredump
+       install -m 755 collect_crash.sh ${D}/${sysconfdir}/collect.d/collect_crash
+       install -m 755 collect_ima.sh ${D}/${sysconfdir}/collect.d/collect_ima
+       install -m 755 collect_fm.sh ${D}/${sysconfdir}/collect.d/collect_fm
+       
+       install -m 755 etc.exclude ${D}/${sysconfdir}/collect/etc.exclude
+       install -m 755 run.exclude ${D}/${sysconfdir}/collect/run.exclude
+       
+#      ln -sf /${sbindir}/collect ${D}//${bindir}/collect
+#      ln -sf /${sbindir}/collect ${D}/%{_sbindir}/collect
+}
+       
+FILES_collector_append += " \
+       ${sbindir}/collect \
+       ${sbindir}/collect_host \
+       ${sbindir}/collect_date \
+       ${sbindir}/collect_utils \
+       ${sbindir}/collect_parms \
+       ${sbindir}/collect_mask_passwords \
+       ${sbindir}/expect_done \
+       ${sysconfdir}/collect.d/collect_sysinv \
+       ${sysconfdir}/collect.d/collect_psqldb \
+       ${sysconfdir}/collect.d/collect_openstack \
+       ${sysconfdir}/collect.d/collect_networking \
+       ${sysconfdir}/collect.d/collect_ceph \
+       ${sysconfdir}/collect.d/collect_sm \
+       ${sysconfdir}/collect.d/collect_tc \
+       ${sysconfdir}/collect.d/collect_nfv_vim \
+       ${sysconfdir}/collect.d/collect_ovs \
+       ${sysconfdir}/collect.d/collect_patching \
+       ${sysconfdir}/collect.d/collect_coredump \
+       ${sysconfdir}/collect.d/collect_crash \
+       ${sysconfdir}/collect.d/collect_ima \
+       ${sysconfdir}/collect.d/collect_fm \
+       ${sysconfdir}/collect/etc.exclude \
+       ${sysconfdir}/collect/run.exclude \
+        "
diff --git a/meta-stx/recipes-core/stx-integ/stx-config-files.inc b/meta-stx/recipes-core/stx-integ/stx-config-files.inc
new file mode 100644 (file)
index 0000000..d20c807
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " stx-integ-config-files"
+
+require config-files/audit-config.inc
+require config-files/io-scheduler.inc
+require config-files/docker-config.inc
+require config-files/iptables-config.inc
+require config-files/memcached-custom.inc
+require config-files/ntp-config.inc
+require config-files/pam-config.inc
+require config-files/shadow-utils-config.inc
+require config-files/rsync-config.inc
+require config-files/sudo-config.inc
+require config-files/syslog-ng-config.inc
+require config-files/util-linux-config.inc
diff --git a/meta-stx/recipes-core/stx-integ/stx-filesystem.inc b/meta-stx/recipes-core/stx-integ/stx-filesystem.inc
new file mode 100644 (file)
index 0000000..3a9a270
--- /dev/null
@@ -0,0 +1,21 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-filesystem"
+
+require filesystem/filesystem-scripts.inc
+require filesystem/iscsi-initiator-utils-config.inc
+require filesystem/nfscheck.inc
+require filesystem/nfs-utils-config.inc
diff --git a/meta-stx/recipes-core/stx-integ/stx-integ.bb b/meta-stx/recipes-core/stx-integ/stx-integ.bb
new file mode 100644 (file)
index 0000000..cd2892b
--- /dev/null
@@ -0,0 +1,70 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-integ"
+
+STABLE = "starlingx/master"
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCNAME = "integ"
+SRCREV = "0bf4b546df8c7fdec8cfc6cb6f71b9609ee54306"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0 & GPL-2.0"
+LIC_FILES_CHKSUM = " \
+       file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://base/cgcs-users/cgcs-users-1.0/LICENSE;md5=3c7b4ff77c7d469e869911fde629c35c \
+       file://virt/kvm-timer-advance/files/LICENSE;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
+       file://tools/storage-topology/storage-topology/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       "
+
+SRC_URI = "git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       "
+inherit distutils setuptools
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install () {
+       
+# kvm-timer-advance
+
+       install -p -D -m 0755 ${S}/virt/kvm-timer-advance/files/setup_kvm_timer_advance.sh \
+                       ${D}/${bindir}/setup_kvm_timer_advance.sh
+       install -p -D -m 444 ${S}/virt/kvm-timer-advance/files/kvm_timer_advance_setup.service \
+                       ${D}/${systemd_system_unitdir}/kvm_timer_advance_setup.service
+
+}
+
+FILES_${PN} = " "
+
+PACKAGES += " kvm-timer-advance"
+DESCRIPTION_kvm-timer-advance = "StarlingX KVM Timer Advance Package"
+
+RDEPENDS_kvm-timer-advance += " \
+       systemd \
+       bash \
+       "
+# RDEPENDS_kvm-timer-advance += " bash"
+FILES_kvm-timer-advance = " \
+       ${bindir}/setup_kvm_timer_advance.sh \
+       ${systemd_system_unitdir}/kvm_timer_advance_setup.service \
+       "
diff --git a/meta-stx/recipes-core/stx-integ/stx-ldap.inc b/meta-stx/recipes-core/stx-integ/stx-ldap.inc
new file mode 100644 (file)
index 0000000..55d91b4
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DEPENDS += " ldapscripts"
diff --git a/meta-stx/recipes-core/stx-integ/stx-logging.inc b/meta-stx/recipes-core/stx-integ/stx-logging.inc
new file mode 100644 (file)
index 0000000..eda8654
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-logging"
+
+
+require logging/logrotate-config.inc
+require logging/logmgmt.inc
diff --git a/meta-stx/recipes-core/stx-integ/stx-networking.inc b/meta-stx/recipes-core/stx-integ/stx-networking.inc
new file mode 100644 (file)
index 0000000..c05d16b
--- /dev/null
@@ -0,0 +1,19 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-networking"
+
+
+#require networking/lldpd.inc
diff --git a/meta-stx/recipes-core/stx-integ/stx-utilities.inc b/meta-stx/recipes-core/stx-integ/stx-utilities.inc
new file mode 100644 (file)
index 0000000..655a5f0
--- /dev/null
@@ -0,0 +1,21 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-utilities"
+
+
+require utilities/platform-util.inc
+require utilities/namespace-utils.inc
+#require utilities/buildinfo.inc
diff --git a/meta-stx/recipes-core/stx-integ/utilities/buildinfo.inc b/meta-stx/recipes-core/stx-integ/utilities/buildinfo.inc
new file mode 100644 (file)
index 0000000..5f01ada
--- /dev/null
@@ -0,0 +1,17 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+# Implement this through class ?
diff --git a/meta-stx/recipes-core/stx-integ/utilities/namespace-utils.inc b/meta-stx/recipes-core/stx-integ/utilities/namespace-utils.inc
new file mode 100644 (file)
index 0000000..3daf0ad
--- /dev/null
@@ -0,0 +1,41 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " namespace-utils"
+
+RDEPENDS_namespace-utils_append = " bash"
+
+do_configure_append() {
+       :
+}
+
+do_compile_append() {
+       cd ${S}/utilities/namespace-utils/namespace-utils
+       ${CC} ${TARGET_CFLAGS} -c -o bashns.o bashns.c
+       ${CC} ${TARGET_LDFLAGS} -o bashns bashns.o
+}
+
+do_install_append() {
+       cd ${S}/utilities/namespace-utils/namespace-utils
+
+       install -d ${D}/${sbindir}
+       install -m 500 bashns  ${D}/${sbindir}
+       install -m 500 umount-in-namespace ${D}/${sbindir}
+}
+
+FILES_namespace-utils_append = "\
+       ${sbindir}/bashns \
+       ${sbindir}/umount-in-namespace \
+       "
diff --git a/meta-stx/recipes-core/stx-integ/utilities/platform-util.inc b/meta-stx/recipes-core/stx-integ/utilities/platform-util.inc
new file mode 100644 (file)
index 0000000..eb2bacc
--- /dev/null
@@ -0,0 +1,64 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " platform-util"
+
+RDEPENDS_platform-util_append = " bash"
+
+do_configure_append() {
+       distutils_do_configure
+}
+
+do_compile_append() {
+       cd ${S}/utilities/platform-util/platform-util
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/utilities/platform-util/platform-util
+       distutils_do_install
+
+       install -d ${D}/${bindir}
+       install ${S}/utilities/platform-util/scripts/cgcs_tc_setup.sh ${D}/${bindir}
+       install ${S}/utilities/platform-util/scripts/remotelogging_tc_setup.sh ${D}/${bindir}
+       install ${S}/utilities/platform-util/scripts/connectivity_test ${D}/${bindir}
+
+       install -d ${D}/${sysconfdir}/init.d
+       install ${S}/utilities/platform-util/scripts/log_functions.sh ${D}/${sysconfdir}/init.d
+
+       install -d ${D}/${sbindir}
+       install -m 700 -p -D ${S}/utilities/platform-util/scripts/patch-restart-mtce ${D}/${sbindir}
+       install -m 700 -p -D ${S}/utilities/platform-util/scripts/patch-restart-processes ${D}/${sbindir}
+       install -m 700 -p -D ${S}/utilities/platform-util/scripts/patch-restart-haproxy ${D}/${sbindir}
+
+       install -d ${D}/${systemd_system_unitdir}
+       install -m 644 -p -D ${S}/utilities/platform-util/scripts/opt-platform.mount ${D}/${systemd_system_unitdir}
+       install -m 644 -p -D ${S}/utilities/platform-util/scripts/opt-platform.service  ${D}/${systemd_system_unitdir}
+}
+
+FILES_platform-util = "\
+       ${libdir}/python2.7/site-packages/platform_util \
+       ${libdir}/python2.7/site-packages/platform_util-1.0.0-py2.7.egg-info \
+       ${bindir}/cgcs_tc_setup.sh  \
+       ${bindir}/remotelogging_tc_setup.sh  \
+       ${bindir}/connectivity_test  \
+       ${bindir}/verify-license \
+       ${sysconfdir}/init.d/log_functions.sh  \
+       ${sbindir}/patch-restart-mtce \
+       ${sbindir}/patch-restart-processes \
+       ${sbindir}/patch-restart-haproxy \
+       ${systemd_system_unitdir}/opt-platform.mount \
+       ${systemd_system_unitdir}/opt-platform.service  \
+       "
diff --git a/meta-stx/recipes-core/stx-metal/files/0001-Use-LDFLAGS-when-linking-and-pass-flags-down-to-subm.patch b/meta-stx/recipes-core/stx-metal/files/0001-Use-LDFLAGS-when-linking-and-pass-flags-down-to-subm.patch
new file mode 100644 (file)
index 0000000..91cd83e
--- /dev/null
@@ -0,0 +1,199 @@
+From 0db72ac0c57a9f15ae6999e4ccf85a9db1e752fd Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Thu, 27 Jun 2019 22:25:15 -0700
+Subject: [PATCH] Use LDFLAGS when linking and pass flags down to submakes
+
+---
+ mtce/src/Makefile             | 27 +++++++++++++++------------
+ mtce/src/alarm/Makefile       |  2 +-
+ mtce/src/fsmon/Makefile       |  2 +-
+ mtce/src/fsync/Makefile       |  2 +-
+ mtce/src/heartbeat/Makefile   |  7 +++++--
+ mtce/src/hostw/Makefile       |  2 +-
+ mtce/src/hwmon/Makefile       |  2 +-
+ mtce/src/lmon/Makefile        |  2 +-
+ mtce/src/maintenance/Makefile |  4 ++--
+ mtce/src/mtclog/Makefile      |  2 +-
+ mtce/src/pmon/Makefile        |  2 +-
+ 11 files changed, 30 insertions(+), 24 deletions(-)
+
+diff --git a/mtce/src/Makefile b/mtce/src/Makefile
+index a9ec0e2..8c256f1 100755
+--- a/mtce/src/Makefile
++++ b/mtce/src/Makefile
+@@ -8,18 +8,21 @@ VER=1
+ VER_MJR=1
+ build:
+-      (cd public ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd common ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd alarm  ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd heartbeat ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd maintenance ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd hwmon  ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd mtclog ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd lmon   ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd pmon   ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd fsmon  ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd hostw  ; make build VER=$(VER) VER_MJR=$(VER_MJR))
+-      (cd fsync  ; make build VER=$(VER) VER_MJR=$(VER_MJR))
++      ( \
++      cd public;\
++      make -e CCFLAGS="$(CCFLAGS) -fPIC" LDFLAGS="$(LDFLAGS) -shared" INCLUDES="$(INCLUDES)" lib VER=$(VER) VER_MJR=$(VER_MJR) \
++      )
++      (cd common ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" lib VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd alarm  ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd heartbeat ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd maintenance ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd hwmon  ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd mtclog ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd lmon   ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd pmon   ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd fsmon  ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd hostw  ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
++      (cd fsync  ; make -e LDFLAGS="$(LDFLAGS)" CCFLAGS="$(CCFLAGS)" INCLUDES="$(INCLUDES)" build VER=$(VER) VER_MJR=$(VER_MJR))
+ clean:
+       @( cd common ; make clean )
+diff --git a/mtce/src/alarm/Makefile b/mtce/src/alarm/Makefile
+index a27b0fb..cff233c 100755
+--- a/mtce/src/alarm/Makefile
++++ b/mtce/src/alarm/Makefile
+@@ -43,7 +43,7 @@ library:
+        ar rcs libalarm.a alarm.o $(EXTRAARFLAGS)
+ process: $(OBJS)
+-      $(CXX) $(CCFLAGS) ${OBJS} -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o mtcalarmd
++      $(CXX) $(CCFLAGS) $(LDFLAGS) ${OBJS} -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o mtcalarmd
+ clean_bins:
+       @rm -v -f $(BINS)
+diff --git a/mtce/src/fsmon/Makefile b/mtce/src/fsmon/Makefile
+index b53cce0..33256ab 100644
+--- a/mtce/src/fsmon/Makefile
++++ b/mtce/src/fsmon/Makefile
+@@ -28,7 +28,7 @@ else
+ endif
+ build: clean static_analysis $(OBJS)
+-      $(CXX) $(CCFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o fsmond
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o fsmond
+ common:
+       ( cd ../common ; make clean ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+diff --git a/mtce/src/fsync/Makefile b/mtce/src/fsync/Makefile
+index b041f15..85c1543 100644
+--- a/mtce/src/fsync/Makefile
++++ b/mtce/src/fsync/Makefile
+@@ -16,7 +16,7 @@ all: build
+       $(CC) $(INCLUDES) $(CCFLAGS) $(EXTRACCFLAGS) -c $< -o $@
+ build: $(OBJS)
+-      $(CC) $(OBJS) $(LDLIBS) -o fsync
++      $(CC) $(CCFLAGS) $(LDFLAGS) $(OBJS) $(LDLIBS) -o fsync
+ clean: 
+       @rm -v -f $(OBJS) fsync
+diff --git a/mtce/src/heartbeat/Makefile b/mtce/src/heartbeat/Makefile
+index a625f20..b8deda7 100755
+--- a/mtce/src/heartbeat/Makefile
++++ b/mtce/src/heartbeat/Makefile
+@@ -33,8 +33,11 @@ endif
+ all: static_analysis common agent client
+ build: static_analysis $(OBJS)
+-      $(CXX) $(CCFLAGS) hbsAlarm.o hbsAgent.o hbsUtil.o hbsCluster.o hbsStubs.o ../common/nodeClass.o -L../public -L../alarm $(LDLIBS) $(EXTRALDFLAGS) -o hbsAgent
+-      $(CXX) $(CCFLAGS) hbsClient.o hbsPmon.o hbsUtil.o -L../public -L../alarm $(LDLIBS) $(EXTRALDFLAGS) -o hbsClient
++      $(CXX) $(CCFLAGS) $(LDFLAGS) \
++              hbsAlarm.o hbsAgent.o hbsUtil.o hbsCluster.o hbsStubs.o ../common/nodeClass.o -L../public -L../alarm \
++                      $(LDLIBS) $(EXTRALDFLAGS) -o hbsAgent
++
++      $(CXX) $(CCFLAGS) $(LDFLAGS) hbsClient.o hbsPmon.o hbsUtil.o -L../public -L../alarm $(LDLIBS) $(EXTRALDFLAGS) -o hbsClient
+ common:
+       ( cd ../common ; make clean ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+diff --git a/mtce/src/hostw/Makefile b/mtce/src/hostw/Makefile
+index d72708c..af18059 100755
+--- a/mtce/src/hostw/Makefile
++++ b/mtce/src/hostw/Makefile
+@@ -28,7 +28,7 @@ else
+ endif
+ build: clean static_analysis $(OBJS)
+-      $(CXX) $(CCFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o hostwd
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o hostwd
+ common:
+       ( cd ../common ; make clean ; make -j1 lib VER=$(VER) VER_MJR=$(VER_MJR))
+diff --git a/mtce/src/hwmon/Makefile b/mtce/src/hwmon/Makefile
+index 6d7cee3..c168c43 100644
+--- a/mtce/src/hwmon/Makefile
++++ b/mtce/src/hwmon/Makefile
+@@ -45,7 +45,7 @@ else
+ endif
+ build: clean static_analysis $(OBJS)
+-      $(CXX) $(CCFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o $(BIN)
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o $(BIN)
+ common:
+       ( cd ../common ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+diff --git a/mtce/src/lmon/Makefile b/mtce/src/lmon/Makefile
+index 9e328a4..a1a8519 100755
+--- a/mtce/src/lmon/Makefile
++++ b/mtce/src/lmon/Makefile
+@@ -29,7 +29,7 @@ else
+ endif
+ build: clean static_analysis $(OBJS)
+-      $(CXX) $(CCFLAGS) $(OBJS) $(LDPATH) $(LDLIBS) $(EXTRALDFLAGS) -o lmond
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(OBJS) $(LDPATH) $(LDLIBS) $(EXTRALDFLAGS) -o lmond
+ clean:
+       @rm -v -f $(OBJ) lmond *.o *.a
+diff --git a/mtce/src/maintenance/Makefile b/mtce/src/maintenance/Makefile
+index 85c2db6..e6a8892 100755
+--- a/mtce/src/maintenance/Makefile
++++ b/mtce/src/maintenance/Makefile
+@@ -77,10 +77,10 @@ common:
+       ( cd ../common ; make clean ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+ mtcAgent: $(OBJS)
+-      $(CXX) $(CONTROL_OBJS) -L../public -L../alarm $(LDLIBS) $(EXTRALDFLAGS) -o mtcAgent
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(CONTROL_OBJS) -L../public -L../alarm $(LDLIBS) $(EXTRALDFLAGS) -o mtcAgent
+ mtcClient: $(OBJS)
+-      $(CXX) $(COMPUTE_OBJS) -L../public -L../alarm $(LDLIBS) $(EXTRALDFLAGS) -o mtcClient
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(COMPUTE_OBJS) -L../public -L../alarm $(LDLIBS) $(EXTRALDFLAGS) -o mtcClient
+ remove_bins:
+       @rm -v -f $(BINS)
+diff --git a/mtce/src/mtclog/Makefile b/mtce/src/mtclog/Makefile
+index 1d49aa4..fb513d9 100644
+--- a/mtce/src/mtclog/Makefile
++++ b/mtce/src/mtclog/Makefile
+@@ -30,7 +30,7 @@ else
+ endif
+ build: clean static_analysis $(OBJS)
+-      $(CXX) $(CCFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o mtclogd
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o mtclogd
+ common:
+       ( cd ../common ; make clean ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+diff --git a/mtce/src/pmon/Makefile b/mtce/src/pmon/Makefile
+index f75f2be..c34e204 100755
+--- a/mtce/src/pmon/Makefile
++++ b/mtce/src/pmon/Makefile
+@@ -29,7 +29,7 @@ else
+ endif
+ build: clean static_analysis $(OBJS)
+-      $(CXX) $(CCFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o pmond
++      $(CXX) $(CCFLAGS) $(LDFLAGS) $(OBJS) -L../daemon -L../common $(LDLIBS) $(EXTRALDFLAGS) -o pmond
+ common:
+       ( cd ../common ; make clean ; make lib VER=$(VER) VER_MJR=$(VER_MJR))
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-metal/files/0001-Use-snprintf-to-avoid-overflowing-amon.tx_buf.patch b/meta-stx/recipes-core/stx-metal/files/0001-Use-snprintf-to-avoid-overflowing-amon.tx_buf.patch
new file mode 100644 (file)
index 0000000..6afb971
--- /dev/null
@@ -0,0 +1,26 @@
+From 3060369f60b2dc9e6d138cabd18d7f206c7bb0f5 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Thu, 27 Jun 2019 21:40:34 -0700
+Subject: [PATCH] Use snprintf to avoid overflowing amon.tx_buf
+
+---
+ mtce/src/public/amon.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/mtce/src/public/amon.c b/mtce/src/public/amon.c
+index b9146a2..941f905 100644
+--- a/mtce/src/public/amon.c
++++ b/mtce/src/public/amon.c
+@@ -257,7 +257,8 @@ int  active_monitor_dispatch ( void )
+             }
+             memset ( amon.tx_buf, 0 , AMON_MAX_LEN );
+-            sprintf( amon.tx_buf, "%s %8x %d%c", str, magic, seq, '\0' );
++            if (snprintf( amon.tx_buf, AMON_MAX_LEN,  "%s %8x %d%c", str, magic, seq, '\0' ) >= AMON_MAX_LEN)
++              fprintf(stderr,"%s:%d amon.tx_buf overflow detected!\n", __func__, __LINE__);
+             
+             if ( strcmp ( str, amon.name ) )
+             {
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-metal/files/0001-mtce-compute-dont-install-empty-directory-unless-nee.patch b/meta-stx/recipes-core/stx-metal/files/0001-mtce-compute-dont-install-empty-directory-unless-nee.patch
new file mode 100644 (file)
index 0000000..37b2456
--- /dev/null
@@ -0,0 +1,24 @@
+From 90d95ca75997a935587ff3c774d0f5375c3f505a Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Tue, 9 Jul 2019 12:42:53 -0700
+Subject: [PATCH] mtce-compute dont install empty directory unless needed
+
+---
+ mtce-compute/src/Makefile | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/mtce-compute/src/Makefile b/mtce-compute/src/Makefile
+index 9aea4b0..ed9a8a9 100755
+--- a/mtce-compute/src/Makefile
++++ b/mtce-compute/src/Makefile
+@@ -16,7 +16,6 @@ install:
+       # # Compute-Only Init Scripts (source group x)
+       install -m 755 -p -D scripts/$(SOURCE1) $(buildroot)/$(_sysconfdir)/init.d/goenabledWorker
+       # # Compute-Only Process Monitor Config files (source group 1x)
+-      install -m 755 -d $(buildroot)/$(local_etc_pmond)
+       # # Compute-Only Go Enabled Test (source group 2x)
+       install -m 755 -d $(buildroot)/$(local_etc_goenabledd)
+       install -m 755 -p -D scripts/$(SOURCE22) $(buildroot)/$(local_etc_goenabledd)/virt-support-goenabled.sh
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-metal/files/0001-mtce-control-dont-install-empty-directory-unless-nee.patch b/meta-stx/recipes-core/stx-metal/files/0001-mtce-control-dont-install-empty-directory-unless-nee.patch
new file mode 100644 (file)
index 0000000..bb84cfd
--- /dev/null
@@ -0,0 +1,23 @@
+From e00a8a4ad31c696d2f5dc866f178af0a9f987c12 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Tue, 9 Jul 2019 13:35:35 -0700
+Subject: [PATCH] mtce-control dont install empty directory unless needed
+
+---
+ mtce-control/src/Makefile | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/mtce-control/src/Makefile b/mtce-control/src/Makefile
+index aaa3de7..cb225ce 100755
+--- a/mtce-control/src/Makefile
++++ b/mtce-control/src/Makefile
+@@ -27,6 +27,3 @@ install:
+       # Controller-Only Heartbeat Service file
+       install -m 644 -p -D scripts/$(SOURCE5) $(buildroot)/$(_unitdir)/hbsAgent.service
+-      # Controller-Only Go Enabled Test
+-      install -m 755 -d $(buildroot)/$(local_etc_goenabledd)
+-
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-metal/files/0001-mtce-storage-dont-install-empty-directory-unless-nee.patch b/meta-stx/recipes-core/stx-metal/files/0001-mtce-storage-dont-install-empty-directory-unless-nee.patch
new file mode 100644 (file)
index 0000000..454827a
--- /dev/null
@@ -0,0 +1,31 @@
+From 5d180eb6a5b60d75b4b8ba5af265fc973b6776ca Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Tue, 9 Jul 2019 13:51:46 -0700
+Subject: [PATCH] mtce-storage dont install empty directory unless needed
+
+---
+ mtce-storage/src/Makefile | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/mtce-storage/src/Makefile b/mtce-storage/src/Makefile
+index 842a76c..85c14e0 100755
+--- a/mtce-storage/src/Makefile
++++ b/mtce-storage/src/Makefile
+@@ -12,11 +12,11 @@ install:
+       # Storage-Only Init Scripts
+       install -m 755 -p -D scripts/$(SOURCE1) $(buildroot)/$(_sysconfdir)/init.d/goenabledStorage
+       # Storage-Only Process Monitor Config files
+-      install -m 755 -d $(buildroot)/$(local_etc_pmond)
++      # install -m 755 -d $(buildroot)/$(local_etc_pmond)
+       # Storage-Only Go Enabled Tests
+-      install -m 755 -d $(buildroot)/$(local_etc_goenabledd)
++      # install -m 755 -d $(buildroot)/$(local_etc_goenabledd)
+       # Storage-Only Services
+-      install -m 755 -d $(buildroot)/$(local_etc_servicesd)/storage
++      # install -m 755 -d $(buildroot)/$(local_etc_servicesd)/storage
+       # Install systemd dir
+       install -m 644 -p -D scripts/$(SOURCE2) $(buildroot)/$(_unitdir)/goenabled-storage.service
+       # for license
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-core/stx-metal/files/0001-stx-metal-remove-argparse-requirement-from-inventory.patch b/meta-stx/recipes-core/stx-metal/files/0001-stx-metal-remove-argparse-requirement-from-inventory.patch
new file mode 100644 (file)
index 0000000..eda8033
--- /dev/null
@@ -0,0 +1,24 @@
+From d42dc102aaca386f314808e771a56b9ecbd29876 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Tue, 25 Feb 2020 13:58:15 -0800
+Subject: [PATCH] stx-metal: remove argparse requirement from inventory
+
+---
+ inventory/inventory/requirements.txt | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/inventory/inventory/requirements.txt b/inventory/inventory/requirements.txt
+index 993f224..82170c3 100644
+--- a/inventory/inventory/requirements.txt
++++ b/inventory/inventory/requirements.txt
+@@ -6,7 +6,6 @@ pbr>=2.0 # Apache-2.0
+ SQLAlchemy
+ amqplib>=0.6.1
+ anyjson>=0.3.3
+-argparse
+ eventlet==0.20.0
+ futurist>=1.2.0 # Apache-2.0
+ greenlet>=0.3.2 # MIT
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/stx-metal/files/stx-warrior-adjust-paths.patch b/meta-stx/recipes-core/stx-metal/files/stx-warrior-adjust-paths.patch
new file mode 100644 (file)
index 0000000..b5e5b94
--- /dev/null
@@ -0,0 +1,237 @@
+diff --git a/mtce/src/alarm/scripts/mtcalarm.init b/mtce/src/alarm/scripts/mtcalarm.init
+index 57f348f..419da8a 100644
+--- a/mtce/src/alarm/scripts/mtcalarm.init
++++ b/mtce/src/alarm/scripts/mtcalarm.init
+@@ -20,7 +20,7 @@
+ . /etc/init.d/functions
+ DAEMON_NAME="mtcalarmd"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ PIDFILE="/var/run/${DAEMON_NAME}.pid"
+ PLATFORM_CONF="/etc/platform/platform.conf"
+diff --git a/mtce/src/fsmon/scripts/fsmon b/mtce/src/fsmon/scripts/fsmon
+index 6d8abcc..de9da01 100644
+--- a/mtce/src/fsmon/scripts/fsmon
++++ b/mtce/src/fsmon/scripts/fsmon
+@@ -20,7 +20,7 @@
+ . /etc/init.d/functions
+ DAEMON_NAME="fsmond"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ PIDFILE="/var/run/${DAEMON_NAME}.pid"
+ # Linux Standard Base (LSB) Error Codes
+diff --git a/mtce/src/hostw/scripts/hostw b/mtce/src/hostw/scripts/hostw
+index 82a4aaf..4946092 100644
+--- a/mtce/src/hostw/scripts/hostw
++++ b/mtce/src/hostw/scripts/hostw
+@@ -17,12 +17,12 @@
+ # Short-Description: host watchdog daemon
+ ### END INIT INFO
+-# echo "7:3:respawn:/usr/local/bin/hostwd" >> /etc/inittab
++# echo "7:3:respawn:/usr/bin/hostwd" >> /etc/inittab
+ . /etc/init.d/functions
+ DAEMON_NAME="hostwd"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ IFACE=""
+ if [ ! -e "$DAEMON" ] ; then
+diff --git a/mtce/src/hwmon/scripts/lsb/hwmon b/mtce/src/hwmon/scripts/lsb/hwmon
+index 4596a36..ba459c1 100644
+--- a/mtce/src/hwmon/scripts/lsb/hwmon
++++ b/mtce/src/hwmon/scripts/lsb/hwmon
+@@ -20,7 +20,7 @@
+ . /etc/init.d/functions
+ DAEMON_NAME="hwmond"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ PIDFILE="/var/run/${DAEMON_NAME}.pid"
+ # Linux Standard Base (LSB) Error Codes
+diff --git a/mtce/src/hwmon/scripts/ocf/hwmon b/mtce/src/hwmon/scripts/ocf/hwmon
+index 14f52a2..b71a77f 100644
+--- a/mtce/src/hwmon/scripts/ocf/hwmon
++++ b/mtce/src/hwmon/scripts/ocf/hwmon
+@@ -45,7 +45,7 @@ OCF_RESKEY_state_default="standby"
+ : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
+ : ${OCF_RESKEY_state=${OCF_RESKEY_state_default}}
+-mydaemon="/usr/local/bin/${OCF_RESKEY_binary}"
++mydaemon="/usr/bin/${OCF_RESKEY_binary}"
+ statusfile="/var/run/${OCF_RESKEY_binary}.info"
+ #######################################################################
+@@ -157,9 +157,9 @@ hwmond_validate() {
+         ocf_log info "hwmond:validate"
+    fi
+-    check_binary "/usr/local/bin/${OCF_RESKEY_binary}"
+-    check_binary "/usr/local/bin/mtcAgent"
+-    check_binary "/usr/local/bin/mtcClient"
++    check_binary "/usr/bin/${OCF_RESKEY_binary}"
++    check_binary "/usr/bin/mtcAgent"
++    check_binary "/usr/bin/mtcClient"
+     check_binary sysinv-api
+     check_binary pidof
+diff --git a/mtce/src/lmon/scripts/lmon b/mtce/src/lmon/scripts/lmon
+index 6024e16..21538ee 100644
+--- a/mtce/src/lmon/scripts/lmon
++++ b/mtce/src/lmon/scripts/lmon
+@@ -20,7 +20,7 @@
+ . /etc/init.d/functions
+ DAEMON_NAME="lmond"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ IFACE=""
+ if [ ! -e "$DAEMON" ] ; then
+diff --git a/mtce/src/maintenance/mtcCompMsg.cpp b/mtce/src/maintenance/mtcCompMsg.cpp
+index b5b221e..daaea9b 100755
+--- a/mtce/src/maintenance/mtcCompMsg.cpp
++++ b/mtce/src/maintenance/mtcCompMsg.cpp
+@@ -541,7 +541,7 @@ int mtc_service_command ( mtc_socket_type * sock_ptr, int interface )
+             {
+                 ilog ("Disk wipe in progress (%s)\n", interface_name.c_str());
+                 daemon_log ( NODE_RESET_FILE, "wipedisk command" );
+-                rc = system("/usr/local/bin/wipedisk --force");
++                rc = system("/usr/bin/wipedisk --force");
+                 ilog ("Disk wipe complete - Forcing Reboot ...\n");
+                 rc = system("/usr/bin/systemctl reboot --force");
+                 exit (0);
+diff --git a/mtce/src/pmon/scripts/pmon b/mtce/src/pmon/scripts/pmon
+index bdc3ab7..4cb813e 100644
+--- a/mtce/src/pmon/scripts/pmon
++++ b/mtce/src/pmon/scripts/pmon
+@@ -17,12 +17,12 @@
+ # Short-Description: process Monitor daemon 
+ ### END INIT INFO
+-# echo "7:3:respawn:/usr/local/bin/pmond" >> /etc/inittab
++# echo "7:3:respawn:/usr/bin/pmond" >> /etc/inittab
+ . /etc/init.d/functions
+ DAEMON_NAME="pmond"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ IFACE=""
+ if [ ! -e "$DAEMON" ] ; then
+diff --git a/mtce/src/scripts/hbsAgent b/mtce/src/scripts/hbsAgent
+index d1a2f4f..e8157f0 100755
+--- a/mtce/src/scripts/hbsAgent
++++ b/mtce/src/scripts/hbsAgent
+@@ -43,7 +43,7 @@ OCF_RESKEY_state_default="active"
+ : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
+ : ${OCF_RESKEY_state=${OCF_RESKEY_state_default}}
+-mydaemon="/usr/local/bin/${OCF_RESKEY_binary}"
++mydaemon="/usr/bin/${OCF_RESKEY_binary}"
+ statusfile="/var/run/${OCF_RESKEY_binary}.info"
+ virtualhostfile="/var/run/virtual.host"
+ facterexec="/usr/bin/facter"
+@@ -148,8 +148,8 @@ hbsAgent_validate() {
+         ocf_log info "hbsAgent:validate"  
+     fi
+-    check_binary "/usr/local/bin/${OCF_RESKEY_binary}"
+-    check_binary "/usr/local/bin/mtcAgent"
++    check_binary "/usr/bin/${OCF_RESKEY_binary}"
++    check_binary "/usr/bin/mtcAgent"
+     check_binary pidof
+     if [ ! -f ${OCF_RESKEY_config} ] ; then
+diff --git a/mtce/src/scripts/hbsClient b/mtce/src/scripts/hbsClient
+index 67d7e84..7c412b1 100644
+--- a/mtce/src/scripts/hbsClient
++++ b/mtce/src/scripts/hbsClient
+@@ -20,7 +20,7 @@
+ . /etc/init.d/functions
+ DAEMON_NAME="hbsClient"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ PIDFILE="/var/run/${DAEMON_NAME}.pid"
+ IFACE=""
+diff --git a/mtce/src/scripts/mtcAgent b/mtce/src/scripts/mtcAgent
+index 6e75ace..885bd3d 100755
+--- a/mtce/src/scripts/mtcAgent
++++ b/mtce/src/scripts/mtcAgent
+@@ -45,7 +45,7 @@ OCF_RESKEY_state_default="standby"
+ : ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
+ : ${OCF_RESKEY_state=${OCF_RESKEY_state_default}}
+-mydaemon="/usr/local/bin/${OCF_RESKEY_binary}"
++mydaemon="/usr/bin/${OCF_RESKEY_binary}"
+ statusfile="/var/run/${OCF_RESKEY_binary}.info"
+ #######################################################################
+@@ -157,10 +157,10 @@ mtcAgent_validate() {
+         ocf_log info "mtcAgent:validate"
+    fi
+-    check_binary "/usr/local/bin/${OCF_RESKEY_binary}"
+-    check_binary "/usr/local/bin/hbsAgent"
+-    check_binary "/usr/local/bin/mtcClient"
+-    check_binary "/usr/local/bin/hbsClient"
++    check_binary "/usr/bin/${OCF_RESKEY_binary}"
++    check_binary "/usr/bin/hbsAgent"
++    check_binary "/usr/bin/mtcClient"
++    check_binary "/usr/bin/hbsClient"
+     check_binary sysinv-api
+     check_binary pidof
+diff --git a/mtce/src/scripts/mtcClient b/mtce/src/scripts/mtcClient
+index 1113689..de4f407 100644
+--- a/mtce/src/scripts/mtcClient
++++ b/mtce/src/scripts/mtcClient
+@@ -20,7 +20,7 @@
+ . /etc/init.d/functions
+ DAEMON_NAME="mtcClient"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ PIDFILE="/var/run/${DAEMON_NAME}.pid"
+ PLATFORM_CONF="/etc/platform/platform.conf"
+diff --git a/mtce/src/scripts/mtcinit b/mtce/src/scripts/mtcinit
+index 8ff9bbb..4672e2f 100755
+--- a/mtce/src/scripts/mtcinit
++++ b/mtce/src/scripts/mtcinit
+@@ -9,10 +9,10 @@
+ . /etc/init.d/functions
+ MTCCLIENT_NAME="mtcClient"
+-MTCCLIENT="/usr/local/bin/${MTCCLIENT_NAME}"
++MTCCLIENT="/usr/bin/${MTCCLIENT_NAME}"
+ HBSCLIENT_NAME="hbsClient"
+-HBSCLIENT="/usr/local/bin/${HBSCLIENT_NAME}"
++HBSCLIENT="/usr/bin/${HBSCLIENT_NAME}"
+ IFACE=""
+diff --git a/mtce/src/scripts/mtclog b/mtce/src/scripts/mtclog
+index 80db22a..f904c4d 100644
+--- a/mtce/src/scripts/mtclog
++++ b/mtce/src/scripts/mtclog
+@@ -22,7 +22,7 @@
+ . /etc/init.d/functions
+ DAEMON_NAME="mtclogd"
+-DAEMON="/usr/local/bin/${DAEMON_NAME}"
++DAEMON="/usr/bin/${DAEMON_NAME}"
+ PIDFILE="/var/run/${DAEMON_NAME}.pid"
+ PLATFORM_CONF="/etc/platform/platform.conf"
diff --git a/meta-stx/recipes-core/stx-metal/inventory.inc b/meta-stx/recipes-core/stx-metal/inventory.inc
new file mode 100644 (file)
index 0000000..f615e27
--- /dev/null
@@ -0,0 +1,143 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " inventory"
+
+#DEPENDS = " \
+#      python \
+#      python-pbr-native \
+#      "
+
+#python-futurist >= 0.11.0
+#python-keystoneauth1 >= 3.1.0
+#python-keystonemiddleware >= 4.12.0
+#python-neutronclient >= 6.3.0
+#python-oslo-concurrency >= 3.8.0
+#python-oslo-config >= 2:4.0.0
+#python-oslo-context >= 2.14.0
+#python-oslo-db >= 4.24.0
+#python-oslo-i18n >= 2.1.0
+#python-oslo-log >= 3.22.0
+#python-oslo-messaging >= 5.24.2
+#python-oslo-middleware >= 3.27.0
+#python-oslo-policy >= 1.23.0
+#python-oslo-rootwrap >= 5.0.0
+#python-oslo-serialization >= 1.10.0
+#python-oslo-service >= 1.10.0
+#python-oslo-utils >= 3.20.0
+#python-oslo-versionedobjects >= 1.17.0
+#python-osprofiler >= 1.4.0
+#python-stevedore >= 1.20.0
+#python-webob >= 1.7.1
+
+RDEPENDS_inventory += " \
+               bash \
+               python-anyjson \
+               python-amqplib \
+               python-pyudev \
+               python-pyparted \
+               python-ipaddr \
+               python-paste \
+               python-eventlet \
+               python-futurist \
+               python-jsonpatch \
+               python-keystoneauth1 \
+               python-keystonemiddleware \
+               python-neutronclient \
+               python-oslo.concurrency \
+               python-oslo.config \
+               python-oslo.context \
+               python-oslo.db \
+               python-oslo.i18n \
+               python-oslo.log \
+               python-oslo.messaging \
+               python-oslo.middleware \
+               python-oslo.policy \
+               python-oslo.rootwrap \
+               python-oslo.serialization \
+               python-oslo.service \
+               python-oslo.utils \
+               python-oslo.versionedobjects \
+               python-osprofiler \
+               python-pbr \
+               python-pecan \
+               python-psutil \
+               python-requests \
+               python-retrying \
+               python-six \
+               python-sqlalchemy \
+               python-stevedore \
+               python-webob \
+               python-wsme \
+               "
+
+
+
+do_configure_prepend () {
+       cd ${S}/inventory/inventory
+       distutils_do_configure
+} 
+
+do_compile_prepend () {
+       cd ${S}/inventory/inventory
+       distutils_do_compile
+}
+
+do_install_prepend () {
+       cd ${S}/inventory/inventory
+       distutils_do_install
+       
+       install -d -m 755 ${D}/${sysconfdir}/goenabled.d
+       install -p -D -m 755 etc/inventory/inventory_goenabled_check.sh ${D}/${sysconfdir}/goenabled.d/inventory_goenabled_check.sh
+
+       install -d -m 755 ${D}/${sysconfdir}/inventory
+       install -p -D -m 755 etc/inventory/policy.json ${D}/${sysconfdir}/inventory/policy.json
+
+       install -d -m 755 ${D}/${sysconfdir}/motd.d
+       install -p -D -m 755 etc/inventory/motd-system ${D}/${sysconfdir}/motd.d/10-system-config
+
+       install -m 755 -p -D scripts/inventory-api ${D}/${libdir}/ocf/resource.d/platform/inventory-api
+       install -m 755 -p -D scripts/inventory-conductor ${D}/${libdir}/ocf/resource.d/platform/inventory-conductor
+
+       install -d -m 0755 ${D}/${systemd_system_unitdir}/
+       install -m 644 -p -D scripts/inventory-api.service ${D}/${systemd_system_unitdir}/
+       install -m 644 -p -D scripts/inventory-conductor.service ${D}/${systemd_system_unitdir}/
+
+       # Install sql migration
+       # install -m 644 inventory/db/sqlalchemy/migrate_repo/migrate.cfg ${D}/${libdir}/inventory/db/sqlalchemy/migrate_repo/migrate.cfg
+
+}
+
+#pkg_postinst_ontarget-inventory () {
+# install default config files
+#cd ${_builddir}/${name}-${version} && oslo-config-generator --config-file inventory/config-generator.conf --output-file ${_builddir}/${name}-${version}/inventory.conf.sample
+#}
+
+FILES_inventory = " \
+       ${systemd_system_unitdir}/inventory-api.service \
+       ${systemd_system_unitdir}/inventory-conductor.service \
+       ${bindir}/inventory-api \
+       ${bindir}/inventory-conductor \
+       ${bindir}/inventory-dnsmasq-lease-update \
+       ${bindir}/inventory-agent \
+       ${bindir}/inventory-dbsync \
+       ${libdir}/ocf/resource.d/platform/inventory-api \
+       ${libdir}/ocf/resource.d/platform/inventory-conductor \
+       ${libdir}/python2.7/site-packages/inventory*.egg-info/ \
+       ${libdir}/python2.7/site-packages/inventory/ \
+       ${sysconfdir}/goenabled.d/inventory_goenabled_check.sh \
+       ${sysconfdir}/motd.d/10-system-config \
+       ${sysconfdir}/inventory/policy.json \
+       "
diff --git a/meta-stx/recipes-core/stx-metal/mtce-common.inc b/meta-stx/recipes-core/stx-metal/mtce-common.inc
new file mode 100644 (file)
index 0000000..eaa9ae9
--- /dev/null
@@ -0,0 +1,100 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " mtce-common"
+
+
+
+RDEPENDS_mtce-common = " \
+       util-linux \
+       bash \
+       systemd \
+       dpkg \
+       time \
+       libevent \
+       expect \
+       json-c \
+       python-rtslib-fb \
+       fm-common \
+       "
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       cd ${S}/mtce-common/src/
+       oe_runmake clean
+       oe_runmake -e build VER=0 VER_MJR=1 \
+               CCFLAGS="${CXXFLAGS} -DBUILDINFO=\"\\\"\$\$(date)\\\"\"" 
+}
+
+do_install_prepend () {
+
+       cd ${S}/mtce-common/src
+
+       install -m 755 -d ${D}/${libdir}
+
+       install -m 644 -p -D daemon/libdaemon.a ${D}/${libdir}
+       install -m 644 -p -D common/libcommon.a ${D}/${libdir}
+       install -m 644 -p -D common/libthreadUtil.a ${D}/${libdir}
+       install -m 644 -p -D common/libbmcUtils.a ${D}/${libdir}
+       install -m 644 -p -D common/libpingUtil.a ${D}/${libdir}
+       install -m 644 -p -D common/libnodeBase.a ${D}/${libdir}
+       install -m 644 -p -D common/libregexUtil.a ${D}/${libdir}
+       install -m 644 -p -D common/libhostUtil.a ${D}/${libdir}
+
+       # mtce-common headers required to bring in nodeBase.h
+       install -m 755 -d ${D}/${includedir}
+       install -m 755 -d ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/fitCodes.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/logMacros.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/returnCodes.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/nodeTimers.h ${D}/${includedir}/mtce-common
+
+       # mtce-common headers required to build mtce-guest
+       install -m 644 -p -D common/hostClass.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/httpUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/jsonUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/msgClass.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/nodeBase.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/nodeEvent.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/nodeMacro.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/nodeUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/timeUtil.h ${D}/${includedir}/mtce-common
+
+       # mtce-daemon headers required to build mtce-guest
+       install -m 755 -d ${D}/${includedir}/mtce-daemon
+       install -m 644 -p -D daemon/daemon_ini.h ${D}/${includedir}/mtce-daemon
+       install -m 644 -p -D daemon/daemon_common.h ${D}/${includedir}/mtce-daemon
+       install -m 644 -p -D daemon/daemon_option.h ${D}/${includedir}/mtce-daemon
+
+       # remaining mtce-common headers required to build mtce
+       install -m 644 -p -D common/alarmUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/hostUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/ipmiUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/redfishUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/bmcUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/nlEvent.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/pingUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/regexUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/threadUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/tokenUtil.h ${D}/${includedir}/mtce-common
+       install -m 644 -p -D common/secretUtil.h ${D}/${includedir}/mtce-common
+}
+
+# Headers and static libraries go into the stx-mtce-dev
+# and stx-mtce-staticdev packages respectively
+FILES_mtce-common = " "
diff --git a/meta-stx/recipes-core/stx-metal/mtce-compute.inc b/meta-stx/recipes-core/stx-metal/mtce-compute.inc
new file mode 100644 (file)
index 0000000..3c0aea7
--- /dev/null
@@ -0,0 +1,48 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " mtce-compute"
+
+RDEPENDS_mtce-compute += " \
+       bash \
+       systemd \
+       qemu \
+       "
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       :
+}
+
+do_install_prepend () {
+       cd ${S}/mtce-compute/src/
+       oe_runmake buildroot=${D} \
+               _sysconfdir=${sysconfdir} _unitdir=${systemd_system_unitdir} _datarootdir=${datadir} \
+               install
+}
+
+#pkg_postinst_ontarget_mtce-compute() { 
+#      /usr/bin/systemctl enable goenabled-worker.service
+#}
+
+FILES_mtce-compute = " \
+       ${datadir}/licenses/mtce-compute-1.0/LICENSE \
+       ${systemd_system_unitdir}/goenabled-worker.service \
+       ${sysconfdir}/goenabled.d/virt-support-goenabled.sh \
+       ${sysconfdir}/init.d/goenabledWorker \
+       "
diff --git a/meta-stx/recipes-core/stx-metal/mtce-control.inc b/meta-stx/recipes-core/stx-metal/mtce-control.inc
new file mode 100644 (file)
index 0000000..fe18a68
--- /dev/null
@@ -0,0 +1,53 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " mtce-control"
+
+RDEPENDS_mtce-control += " \
+       bash \
+       systemd \
+       lighttpd \
+       qemu \
+       "
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       :
+}
+
+do_install_prepend () {
+       cd ${S}/mtce-control/src/
+       oe_runmake buildroot=${D} \
+               _sysconfdir=${sysconfdir} _unitdir=${systemd_system_unitdir} _datarootdir=${datadir} \
+               install
+
+}
+
+#pkg_postinst_ontarget_mtce-control () {
+#      # /usr/bin/systemctl enable lighttpd.service
+#      # /usr/bin/systemctl enable qemu_clean.service
+#      /usr/bin/systemctl enable hbsAgent.service
+#}
+
+FILES_mtce-control = " \
+       ${datadir}/licenses/mtce-control-1.0/LICENSE \
+       ${systemd_system_unitdir}/hbsAgent.service \
+       ${sysconfdir}/pmon.d/hbsAgent.conf \
+       ${sysconfdir}/init.d/hbsAgent \
+       ${sysconfdir}/init.d/goenabledControl \
+       "
diff --git a/meta-stx/recipes-core/stx-metal/mtce-storage.inc b/meta-stx/recipes-core/stx-metal/mtce-storage.inc
new file mode 100644 (file)
index 0000000..f1aeb49
--- /dev/null
@@ -0,0 +1,47 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " mtce-storage"
+
+RDEPENDS_mtce-storage_append = " \
+       bash \
+       systemd \
+       "
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       :
+}
+
+do_install_prepend () {
+       cd ${S}/mtce-storage/src/
+       oe_runmake buildroot=${D} \
+               _sysconfdir=${sysconfdir} _unitdir=${systemd_system_unitdir} _datarootdir=${datadir} \
+               install
+
+}
+
+pkg_postinst_ontarget_mtce-storage() { 
+       ${base_bindir}/systemctl enable goenabled-storage.service
+}
+
+FILES_mtce-storage = " \
+       ${datadir}/licenses/mtce-storage-1.0/LICENSE \
+       ${systemd_system_unitdir}/goenabled-storage.service \
+       ${sysconfdir}/init.d/goenabledStorage \
+       "
diff --git a/meta-stx/recipes-core/stx-metal/mtce.inc b/meta-stx/recipes-core/stx-metal/mtce.inc
new file mode 100644 (file)
index 0000000..e2d4358
--- /dev/null
@@ -0,0 +1,295 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " mtce"
+PACKAGES += " mtce-pmon"
+PACKAGES += " mtce-hwmon"
+PACKAGES += " mtce-hostw"
+PACKAGES += " mtce-lmon"
+
+RDEPENDS_mtce-pmon_append = " \
+       bash \
+       systemd \
+       dpkg \
+       fm-common \
+       libevent \
+       json-c \
+       python-rtslib-fb \
+       expect \
+       util-linux \
+       ipmitool \
+       "
+
+RDEPENDS_mtce_append = " \
+       mtce-pmon \
+       python-redfishtool \
+       "
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       cd ${S}/mtce/src/
+       oe_runmake -e VER=1 VER_MJR=1 INCLUDES=" -I. -I../alarm -I../heartbeat -I../maintenance \
+                               -I../hostw -I../public -I../smash -I../common -I../hwmon \
+                               -I${S}/mtce-common/src/common \
+                               -I${S}/mtce-common/src/daemon " \
+               CCFLAGS="${CXXFLAGS}" LDFLAGS="${LDFLAGS} -L${S}/mtce-common/src/common \
+               -L${S}/mtce-common/src/daemon " build
+}
+
+do_install_prepend () {
+# TODO:
+# Really need to fix the package's Makefile
+
+	cd ${S}/mtce/src/
+	install -m 755 -d ${D}/${bindir}
+	install -m 755 -d ${D}/${sbindir}
+	install -m 755 -d ${D}/${libdir}
+	install -m 755 -d ${D}/${libdir}/ocf/resource.d/platform
+	install -m 755 -d ${D}/${systemd_system_unitdir}
+	
+	install -m 755 -d ${D}/${sysconfdir}
+	install -m 755 -d ${D}/${sysconfdir}/mtc/tmp
+	install -m 755 -d ${D}/${sysconfdir}/bmc/server_profiles.d
+	install -m 755 -d ${D}/${sysconfdir}/init.d
+	install -m 755 -d ${D}/${sysconfdir}/pmon.d
+	install -m 755 -d ${D}/${sysconfdir}/logrotate.d
+	
+	install -m 755 -d ${D}/${sysconfdir}/services.d
+	install -m 755 -d ${D}/${sysconfdir}/services.d/controller
+	install -m 755 -d ${D}/${sysconfdir}/services.d/worker
+	install -m 755 -d ${D}/${sysconfdir}/services.d/storage
+	
+	cd ${S}/mtce/src/
+	install -m 755 -p -D scripts/mtcAgent ${D}/${libdir}/ocf/resource.d/platform
+	install -m 755 -p -D hwmon/scripts/ocf/hwmon ${D}/${libdir}/ocf/resource.d/platform
+	
+	# Config files
+	install -m 644 -p -D scripts/mtc.ini ${D}/${sysconfdir}/mtc.ini
+	install -m 644 -p -D scripts/mtc.conf ${D}/${sysconfdir}/mtc.conf
+	install -m 644 -p -D fsmon/scripts/fsmond.conf ${D}/${sysconfdir}/mtc/fsmond.conf
+	install -m 644 -p -D hwmon/scripts/hwmond.conf ${D}/${sysconfdir}/mtc/hwmond.conf
+	install -m 644 -p -D pmon/scripts/pmond.conf ${D}/${sysconfdir}/mtc/pmond.conf
+	install -m 644 -p -D lmon/scripts/lmond.conf ${D}/${sysconfdir}/mtc/lmond.conf
+	install -m 644 -p -D hostw/scripts/hostwd.conf ${D}/${sysconfdir}/mtc/hostwd.conf
+	
+	install -m 644 -p -D scripts/sensor_hp360_v1_ilo_v4.profile ${D}/${sysconfdir}/bmc/server_profiles.d/
+	install -m 644 -p -D scripts/sensor_hp380_v1_ilo_v4.profile ${D}/${sysconfdir}/bmc/server_profiles.d/
+	install -m 644 -p -D scripts/sensor_quanta_v1_ilo_v4.profile ${D}/${sysconfdir}/bmc/server_profiles.d/
+	
+	
+	# binaries
+	install -m 755 -p -D maintenance/mtcAgent ${D}/${bindir}/mtcAgent
+	install -m 755 -p -D maintenance/mtcClient ${D}/${bindir}/mtcClient
+	install -m 755 -p -D heartbeat/hbsAgent ${D}/${bindir}/hbsAgent
+	install -m 755 -p -D heartbeat/hbsClient ${D}/${bindir}/hbsClient
+	install -m 755 -p -D pmon/pmond ${D}/${bindir}/pmond
+	install -m 755 -p -D lmon/lmond ${D}/${bindir}/lmond
+	# (dropped duplicate install of pmon/pmond -- already installed above)
+	# (dropped duplicate install of lmon/lmond -- already installed above)
+	install -m 755 -p -D hostw/hostwd ${D}/${bindir}/hostwd
+	install -m 755 -p -D fsmon/fsmond ${D}/${bindir}/fsmond
+	install -m 755 -p -D hwmon/hwmond ${D}/${bindir}/hwmond
+	install -m 755 -p -D mtclog/mtclogd ${D}/${bindir}/mtclogd
+	install -m 755 -p -D alarm/mtcalarmd ${D}/${bindir}/mtcalarmd
+	install -m 755 -p -D scripts/wipedisk ${D}/${bindir}/wipedisk
+	install -m 755 -p -D fsync/fsync ${D}/${sbindir}/fsync
+	install -m 700 -p -D pmon/scripts/pmon-restart ${D}/${sbindir}/pmon-restart
+	install -m 700 -p -D pmon/scripts/pmon-start ${D}/${sbindir}/pmon-start
+	install -m 700 -p -D pmon/scripts/pmon-stop ${D}/${sbindir}/pmon-stop
+	
+	# init script files
+	install -m 755 -p -D scripts/mtcClient ${D}/${sysconfdir}/init.d/mtcClient
+	install -m 755 -p -D scripts/hbsClient ${D}/${sysconfdir}/init.d/hbsClient
+	install -m 755 -p -D hwmon/scripts/lsb/hwmon ${D}/${sysconfdir}/init.d/hwmon
+	install -m 755 -p -D fsmon/scripts/fsmon ${D}/${sysconfdir}/init.d/fsmon
+	install -m 755 -p -D scripts/mtclog ${D}/${sysconfdir}/init.d/mtclog
+	install -m 755 -p -D pmon/scripts/pmon ${D}/${sysconfdir}/init.d/pmon
+	install -m 755 -p -D lmon/scripts/lmon ${D}/${sysconfdir}/init.d/lmon
+	install -m 755 -p -D hostw/scripts/hostw ${D}/${sysconfdir}/init.d/hostw
+	install -m 755 -p -D alarm/scripts/mtcalarm.init ${D}/${sysconfdir}/init.d/mtcalarm
+	# install -m 755 -p -D scripts/config ${D}/${sysconfdir}/init.d/config
+	
+	# TODO: Init hack. Should move to proper module
+	install -m 755 -p -D scripts/hwclock.sh ${D}/${sysconfdir}/init.d/hwclock.sh
+	install -m 644 -p -D scripts/hwclock.service ${D}/${systemd_system_unitdir}/hwclock.service
+	
+	
+	# systemd service files
+	install -m 644 -p -D fsmon/scripts/fsmon.service ${D}/${systemd_system_unitdir}/fsmon.service
+	install -m 644 -p -D hwmon/scripts/hwmon.service ${D}/${systemd_system_unitdir}/hwmon.service
+	install -m 644 -p -D pmon/scripts/pmon.service ${D}/${systemd_system_unitdir}/pmon.service
+	install -m 644 -p -D hostw/scripts/hostw.service ${D}/${systemd_system_unitdir}/hostw.service
+	install -m 644 -p -D scripts/mtcClient.service ${D}/${systemd_system_unitdir}/mtcClient.service
+	install -m 644 -p -D scripts/hbsClient.service ${D}/${systemd_system_unitdir}/hbsClient.service
+	install -m 644 -p -D scripts/mtclog.service ${D}/${systemd_system_unitdir}/mtclog.service
+	# (dropped duplicate install of hbsClient.service -- already installed above)
+	# (dropped duplicate install of mtclog.service -- already installed above)
+	install -m 644 -p -D scripts/goenabled.service ${D}/${systemd_system_unitdir}/goenabled.service
+	install -m 644 -p -D scripts/runservices.service ${D}/${systemd_system_unitdir}/runservices.service
+	install -m 644 -p -D alarm/scripts/mtcalarm.service ${D}/${systemd_system_unitdir}/mtcalarm.service
+	install -m 644 -p -D lmon/scripts/lmon.service ${D}/${systemd_system_unitdir}/lmon.service
+	
+	# fix the path for init scripts
+	sed -i -e 's|rc.d/||' ${D}/${systemd_system_unitdir}/*.service
+	
+	# go enabled stuff
+	install -m 755 -p -D scripts/goenabled ${D}/${sysconfdir}/init.d/goenabled
+	
+	# start or stop services test script
+	install -m 755 -p -D scripts/mtcTest ${D}/${sysconfdir}/services.d/worker
+	install -m 755 -p -D scripts/mtcTest ${D}/${sysconfdir}/services.d/controller
+	install -m 755 -p -D scripts/mtcTest ${D}/${sysconfdir}/services.d/storage
+	install -m 755 -p -D scripts/runservices ${D}/${sysconfdir}/init.d/runservices
+	
+	
+	# test tools
+	install -m 755 -p -D scripts/dmemchk.sh ${D}/${sbindir}
+	
+	# process monitor config files
+	install -m 644 -p -D scripts/mtcClient.conf ${D}/${sysconfdir}/pmon.d/mtcClient.conf
+	install -m 644 -p -D scripts/hbsClient.conf ${D}/${sysconfdir}/pmon.d/hbsClient.conf
+	install -m 644 -p -D pmon/scripts/acpid.conf ${D}/${sysconfdir}/pmon.d/acpid.conf
+	install -m 644 -p -D pmon/scripts/sshd.conf ${D}/${sysconfdir}/pmon.d/sshd.conf
+	install -m 644 -p -D pmon/scripts/syslog-ng.conf ${D}/${sysconfdir}/pmon.d/syslog-ng.conf
+	install -m 644 -p -D pmon/scripts/nslcd.conf ${D}/${sysconfdir}/pmon.d/nslcd.conf
+	# (dropped duplicate install of syslog-ng.conf -- already installed above)
+	# (dropped duplicate install of nslcd.conf -- already installed above)
+	install -m 644 -p -D fsmon/scripts/fsmon.conf ${D}/${sysconfdir}/pmon.d/fsmon.conf
+	install -m 644 -p -D scripts/mtclogd.conf ${D}/${sysconfdir}/pmon.d/mtclogd.conf
+	install -m 644 -p -D alarm/scripts/mtcalarm.pmon.conf ${D}/${sysconfdir}/pmon.d/mtcalarm.conf
+	install -m 644 -p -D lmon/scripts/lmon.pmon.conf ${D}/${sysconfdir}/pmon.d/lmon.conf
+	
+	# log rotation
+	install -m 644 -p -D scripts/mtce.logrotate ${D}/${sysconfdir}/logrotate.d/mtce.logrotate
+	install -m 644 -p -D hostw/scripts/hostw.logrotate ${D}/${sysconfdir}/logrotate.d/hostw.logrotate
+	install -m 644 -p -D pmon/scripts/pmon.logrotate ${D}/${sysconfdir}/logrotate.d/pmon.logrotate
+	install -m 644 -p -D lmon/scripts/lmon.logrotate ${D}/${sysconfdir}/logrotate.d/lmon.logrotate
+	install -m 644 -p -D fsmon/scripts/fsmon.logrotate ${D}/${sysconfdir}/logrotate.d/fsmon.logrotate
+	install -m 644 -p -D hwmon/scripts/hwmon.logrotate ${D}/${sysconfdir}/logrotate.d/hwmon.logrotate
+	install -m 644 -p -D alarm/scripts/mtcalarm.logrotate ${D}/${sysconfdir}/logrotate.d/mtcalarm.logrotate
+	
+	# software development files
+	install -m 644 -p -D heartbeat/mtceHbsCluster.h ${D}/${includedir}/mtceHbsCluster.h
+	install -m 755 -p -D public/libamon.so.1 ${D}/${libdir}/
+	#cd ${D}/%{_libdir} ; ln -s libamon.so.$MAJOR libamon.so.$MAJOR.$MINOR
+	#cd ${D}/%{_libdir} ; ln -s libamon.so.$MAJOR libamon.so
+}
+
+#pkg_postinst_ontarget-mtce () {
+#	/usr/bin/systemctl enable hbsClient.service
+#	/usr/bin/systemctl enable mtcalarm.service
+#	/usr/bin/systemctl enable mtclog.service
+#	/usr/bin/systemctl enable mtcClient.service
+#	/usr/bin/systemctl enable goenabled.service
+#	/usr/bin/systemctl enable lmon.service
+#	/usr/bin/systemctl enable hostw.service
+#	/usr/bin/systemctl enable fsmon.service
+#	/usr/bin/systemctl enable pmon.service
+#	# /usr/bin/systemctl enable hwclock.service
+#	# /usr/bin/systemctl enable runservices.service
+#}
+
+FILES_mtce-pmon = " \
+	${sbindir}/pmon-restart \
+	${sbindir}/pmon-start \
+	${sbindir}/pmon-stop \
+	${bindir}/pmond \
+	${systemd_system_unitdir}/pmon.service \
+	${sysconfdir}/mtc/pmond.conf \
+	${sysconfdir}/init.d/pmon \
+	${sysconfdir}/logrotate.d/pmon.logrotate \
+	"
+
+FILES_mtce-hwmon = " \
+	${bindir}/hwmond \
+	${sysconfdir}/init.d/hwmon \
+	${libdir}/ocf/resource.d/platform/hwmon \
+	${sysconfdir}/logrotate.d/hwmon.logrotate \
+	${systemd_system_unitdir}/hwmon.service \
+	${sysconfdir}/mtc/hwmond.conf \
+	"
+
+FILES_mtce-hostw = " \
+	${sysconfdir}/mtc/hostwd.conf \
+	${sysconfdir}/logrotate.d/hostw.logrotate \
+	${systemd_system_unitdir}/hostw.service \
+	${sysconfdir}/init.d/hostw \
+	${bindir}/hostwd \
+	"
+
+FILES_mtce-lmon = " \
+	${bindir}/lmond \
+	${sysconfdir}/mtc/lmond.conf \
+	${sysconfdir}/logrotate.d/lmon.logrotate \
+	${sysconfdir}/pmon.d/lmon.conf \
+	${sysconfdir}/init.d/lmon \
+	"
+
+FILES_mtce = " \
+	${bindir}/mtcAgent \
+	${bindir}/mtcClient \
+	${bindir}/fsmond \
+	${bindir}/hbsAgent \
+	${bindir}/wipedisk \
+	${bindir}/hbsClient \
+	${bindir}/mtcalarmd \
+	${bindir}/mtclogd \
+	${sbindir}/fsync \
+	${sbindir}/dmemchk.sh \
+	${libdir}/ocf/resource.d/platform/mtcAgent \
+	${libdir}/libamon.so.1 \
+	${systemd_system_unitdir}/mtcalarm.service \
+	${systemd_system_unitdir}/goenabled.service \
+	${systemd_system_unitdir}/mtclog.service \
+	${systemd_system_unitdir}/mtcClient.service \
+	${systemd_system_unitdir}/fsmon.service \
+	${systemd_system_unitdir}/hbsClient.service \
+	${systemd_system_unitdir}/hwclock.service \
+	${systemd_system_unitdir}/runservices.service \
+	${systemd_system_unitdir}/lmon.service \
+	${sysconfdir}/pmon.d/nslcd.conf \
+	${sysconfdir}/pmon.d/mtclogd.conf \
+	${sysconfdir}/pmon.d/mtcalarm.conf \
+	${sysconfdir}/pmon.d/syslog-ng.conf \
+	${sysconfdir}/pmon.d/acpid.conf \
+	${sysconfdir}/pmon.d/sshd.conf \
+	${sysconfdir}/pmon.d/fsmon.conf \
+	${sysconfdir}/pmon.d/hbsClient.conf \
+	${sysconfdir}/pmon.d/mtcClient.conf \
+	${sysconfdir}/init.d/runservices \
+	${sysconfdir}/init.d/goenabled \
+	${sysconfdir}/init.d/mtcClient \
+	${sysconfdir}/init.d/hwclock.sh \
+	${sysconfdir}/init.d/mtclog \
+	${sysconfdir}/init.d/mtcalarm \
+	${sysconfdir}/init.d/hbsClient \
+	${sysconfdir}/init.d/fsmon \
+	${sysconfdir}/mtc.conf \
+	${sysconfdir}/bmc/server_profiles.d/sensor_hp380_v1_ilo_v4.profile \
+	${sysconfdir}/bmc/server_profiles.d/sensor_hp360_v1_ilo_v4.profile \
+	${sysconfdir}/bmc/server_profiles.d/sensor_quanta_v1_ilo_v4.profile \
+	${sysconfdir}/services.d/worker/mtcTest \
+	${sysconfdir}/services.d/controller/mtcTest \
+	${sysconfdir}/services.d/storage/mtcTest \
+	${sysconfdir}/mtc/fsmond.conf \
+	${sysconfdir}/mtc/tmp/ \
+	${sysconfdir}/mtc.ini \
+	${sysconfdir}/logrotate.d/fsmon.logrotate \
+	${sysconfdir}/logrotate.d/mtcalarm.logrotate \
+	${sysconfdir}/logrotate.d/mtce.logrotate \
+	"
diff --git a/meta-stx/recipes-core/stx-metal/pxe-network-installer.inc b/meta-stx/recipes-core/stx-metal/pxe-network-installer.inc
new file mode 100644 (file)
index 0000000..524c0d6
--- /dev/null
@@ -0,0 +1,104 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " pxe-network-installer"
+
+DEPENDS += " syslinux"
+RDEPENDS_pxe-network-installer += " \
+               syslinux \
+               bash \
+               "
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       :
+}
+
+do_install_prepend () {
+	cd ${S}/installer/pxe-network-installer/pxe-network-installer
+	install -d -m 0755 ${D}/pxeboot
+	install -d -m 0755 ${D}/pxeboot/pxelinux.cfg.files
+	install -d -m 0755 ${D}/pxeboot/rel-${STX_REL}
+	install -d -m 0755 ${D}/pxeboot/rel-${STX_REL}/LiveOS/
+	install -d -m 0755 ${D}/pxeboot/EFI
+	install -d -m 0755 ${D}/pxeboot/EFI/stx_thud
+	install -d -m 0755 ${D}/${sbindir}
+	ln -fs /pxeboot/stx_thud ${D}/pxeboot/centos
+	ln -fs ${libdir}/grub/x86_64-efi ${D}/pxeboot/EFI/stx_thud/
+
+	# Install the installer images into the rel-${STX_REL} tree created above
+	if [ ! -z "${INSTALLER_IMG_DIR}" ]; then
+		install -m 0644 ${INSTALLER_IMG_DIR}/vmlinuz ${D}/pxeboot/rel-${STX_REL}/installer-bzImage_1.0
+		install -m 0644 ${INSTALLER_IMG_DIR}/initrd.img ${D}/pxeboot/rel-${STX_REL}/installer-intel-x86-64-initrd_1.0
+		install -m 0644 ${INSTALLER_IMG_DIR}/squashfs.img ${D}/pxeboot/rel-${STX_REL}/LiveOS/squashfs.img
+		ln -fs installer-bzImage_1.0 ${D}/pxeboot/rel-${STX_REL}/installer-bzImage
+		ln -fs installer-intel-x86-64-initrd_1.0 ${D}/pxeboot/rel-${STX_REL}/installer-initrd
+	fi
+
+	install -m 755 pxeboot-update.sh ${D}/${sbindir}/pxeboot-update-${STX_REL}.sh
+	install -m 644 ${S}/bsp-files/kickstarts/post_clone_iso_ks.cfg ${D}/pxeboot/post_clone_iso_ks.cfg
+	install -m 644 default ${D}/pxeboot/pxelinux.cfg.files/default
+	install -m 644 default.static ${D}/pxeboot/pxelinux.cfg.files/default.static
+	install -m 644 centos-pxe-controller-install ${D}/pxeboot/pxelinux.cfg.files/pxe-controller-install-${STX_REL}
+	install -m 644 centos-pxe-worker-install ${D}/pxeboot/pxelinux.cfg.files/pxe-worker-install-${STX_REL}
+	install -m 644 centos-pxe-smallsystem-install ${D}/pxeboot/pxelinux.cfg.files/pxe-smallsystem-install-${STX_REL}
+	install -m 644 centos-pxe-storage-install ${D}/pxeboot/pxelinux.cfg.files/pxe-storage-install-${STX_REL}
+	install -m 644 centos-pxe-worker_lowlatency-install ${D}/pxeboot/pxelinux.cfg.files/pxe-worker_lowlatency-install-${STX_REL}
+	install -m 644 centos-pxe-smallsystem_lowlatency-install ${D}/pxeboot/pxelinux.cfg.files/pxe-smallsystem_lowlatency-install-${STX_REL}
+
+	# UEFI support
+	install -m 644 pxe-grub.cfg ${D}/pxeboot/pxelinux.cfg.files/grub.cfg
+	install -m 644 pxe-grub.cfg.static ${D}/pxeboot/pxelinux.cfg.files/grub.cfg.static
+
+	# Copy EFI boot image. It will be used to create ISO on the Controller.
+	if [ ! -z "${INSTALLER_IMG_DIR}" ]; then
+		install -m 644 efiboot.img ${D}/pxeboot/rel-${STX_REL}/
+	fi
+	install -m 644 efi-centos-pxe-controller-install ${D}/pxeboot/pxelinux.cfg.files/efi-pxe-controller-install-${STX_REL}
+	install -m 644 efi-centos-pxe-worker-install ${D}/pxeboot/pxelinux.cfg.files/efi-pxe-worker-install-${STX_REL}
+	install -m 644 efi-centos-pxe-smallsystem-install ${D}/pxeboot/pxelinux.cfg.files/efi-pxe-smallsystem-install-${STX_REL}
+	install -m 644 efi-centos-pxe-storage-install ${D}/pxeboot/pxelinux.cfg.files/efi-pxe-storage-install-${STX_REL}
+	install -m 644 efi-centos-pxe-worker_lowlatency-install ${D}/pxeboot/pxelinux.cfg.files/efi-pxe-worker_lowlatency-install-${STX_REL}
+	install -m 644 efi-centos-pxe-smallsystem_lowlatency-install ${D}/pxeboot/pxelinux.cfg.files/efi-pxe-smallsystem_lowlatency-install-${STX_REL}
+
+	sed -i "s/xxxSW_VERSIONxxx/${STX_REL}/g" ${D}/pxeboot/pxelinux.cfg.files/pxe-* ${D}/pxeboot/pxelinux.cfg.files/efi-pxe-*
+
+	# Copy Titanium grub.cfg. It will be used to create ISO on the Controller.
+	install -m 0644 ${S}/bsp-files/grub.cfg ${D}/pxeboot/EFI/
+	# UEFI bootloader expects the grub.cfg file to be in /pxeboot/ so create a symlink for it
+	ln -fs pxelinux.cfg/grub.cfg ${D}/pxeboot/grub.cfg
+}
+
+pkg_postinst_ontarget_pxe-network-installer() {
+	install -m 0644 /usr/share/syslinux/menu.c32 /pxeboot
+	install -m 0644 /usr/share/syslinux/vesamenu.c32 /pxeboot
+	install -m 0644 /usr/share/syslinux/chain.c32 /pxeboot
+	install -m 0644 /usr/share/syslinux/linux.c32 /pxeboot
+	install -m 0644 /usr/share/syslinux/reboot.c32 /pxeboot
+	install -m 0644 /usr/share/syslinux/pxechn.c32 /pxeboot
+	install -m 0644 /usr/share/syslinux/pxelinux.0 /pxeboot
+	install -m 0644 /usr/share/syslinux/gpxelinux.0 /pxeboot
+}
+
+FILES_pxe-network-installer = " \
+       /pxeboot \
+       ${sbindir}/pxeboot-update-${STX_REL}.sh \
+       "
+       
+
+
diff --git a/meta-stx/recipes-core/stx-metal/python-inventoryclient.inc b/meta-stx/recipes-core/stx-metal/python-inventoryclient.inc
new file mode 100644 (file)
index 0000000..227ef66
--- /dev/null
@@ -0,0 +1,62 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " python-inventoryclient"
+
+
+# python-keystoneauth1 >= 3.1.0
+# python-pbr >= 2.0.0
+# python-six >= 1.9.0
+# python-oslo-i18n >= 2.1.0
+# python-oslo-utils >= 3.20.0
+# python-requests
+# bash-completion
+#
+RDEPENDS_python-inventoryclient_append = " \
+       python \
+       python-pbr \
+       python-six \
+       python-oslo.i18n \
+       python-oslo.utils \
+       python-requests \
+       bash \
+       bash-completion \
+       "
+
+do_configure_prepend () {
+       cd ${S}/python-inventoryclient/inventoryclient
+       distutils_do_configure
+} 
+
+do_compile_prepend () {
+       cd ${S}/python-inventoryclient/inventoryclient
+       distutils_do_compile
+}
+
+do_install_prepend () {
+       cd ${S}/python-inventoryclient/inventoryclient
+       distutils_do_install
+       
+       install -d -m 755 ${D}/${sysconfdir}/bash_completion.d
+       install -p -D -m 664 tools/inventory.bash_completion ${D}/${sysconfdir}/bash_completion.d
+
+}
+
+FILES_python-inventoryclient = " \
+       ${bindir}/inventory \
+       ${sysconfdir}/bash_completion.d/inventory.bash_completion \
+       ${libdir}/python2.7/site-packages/inventoryclient-*.egg-info/ \
+       ${libdir}/python2.7/site-packages/inventoryclient/ \
+       "
diff --git a/meta-stx/recipes-core/stx-metal/stx-metal.bb b/meta-stx/recipes-core/stx-metal/stx-metal.bb
new file mode 100644 (file)
index 0000000..b3b9029
--- /dev/null
@@ -0,0 +1,92 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-metal"
+
+# STABLE = "starlingx/master"
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "be3cf4eeb50eef55910cf9c73ea47c168005ad64"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = " \
+       file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://installer/pxe-network-installer/pxe-network-installer/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://kickstart/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://mtce-common/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://mtce-compute/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://mtce-control/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://mtce-storage/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://mtce/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://inventory/inventory/LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2 \
+       file://python-inventoryclient/inventoryclient/LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2 \
+       "
+
+
+SRC_URI = "git://opendev.org/starlingx/metal.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+               file://0001-mtce-compute-dont-install-empty-directory-unless-nee.patch \
+               file://0001-mtce-control-dont-install-empty-directory-unless-nee.patch \
+               file://0001-mtce-storage-dont-install-empty-directory-unless-nee.patch \
+               file://0001-Use-snprintf-to-avoid-overflowing-amon.tx_buf.patch \
+               file://0001-Use-LDFLAGS-when-linking-and-pass-flags-down-to-subm.patch \
+               file://0001-stx-metal-remove-argparse-requirement-from-inventory.patch \
+               file://stx-warrior-adjust-paths.patch \
+               "
+
+inherit setuptools
+
+
+DEPENDS = " \
+       python \
+       python-pbr-native \
+       stx-fault \
+       openssl \
+       libevent \
+       json-c \
+       "
+RDEPENDS_${PN}_append = " bash"
+
+require mtce.inc
+require inventory.inc
+require mtce-common.inc
+require mtce-compute.inc
+require mtce-control.inc
+require mtce-storage.inc
+require python-inventoryclient.inc
+require pxe-network-installer.inc
+# require kickstart.inc
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install () {
+       :
+}
+
+pkg_postinst_ontarget_${PN} () {
+# install default config files
+}
+
+FILES_${PN} = " "
+FILES_${PN}-dbg_append += " "
+FILES_${PN}-staticdev_append = " "
+FILES_${PN}-dev_append = " "
diff --git a/meta-stx/recipes-core/stx-monitor-armada-app/monitor-helm-elastic_1.0.bb b/meta-stx/recipes-core/stx-monitor-armada-app/monitor-helm-elastic_1.0.bb
new file mode 100644 (file)
index 0000000..7cc8dab
--- /dev/null
@@ -0,0 +1,108 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Monitor Helm Elastic charts"
+DESCRIPTION = "Monitor Helm Elastic charts"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+DEPENDS += " \
+    helm-native \
+    stx-openstack-helm \
+"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV_helm-charts-elastic = "2bd7616ceddbdf2eee88965e2028ee37d304c79c"
+SRCREV_monitor-armada-app = "e5ee6b3a07b74479b93fe90eff0662cf81890f73"
+
+SRC_URI = " \
+    git://github.com/elastic/helm-charts;protocol=${PROTOCOL};name=helm-charts-elastic \
+    git://opendev.org/starlingx/monitor-armada-app.git;protocol=${PROTOCOL};branch=${BRANCH};name=monitor-armada-app;destsuffix=monitor-armada-app \
+"
+
+S = "${WORKDIR}/git"
+
+inherit allarch
+
+patch_folder = "${WORKDIR}/monitor-armada-app/monitor-helm-elastic/files"
+helm_folder = "${nonarch_libdir}/helm"
+helmchart_version = "0.1.0"
+
+do_patch () {
+       cd ${S}
+       git am ${patch_folder}/0001-add-makefile.patch
+       git am ${patch_folder}/0002-Add-compatibility-for-k8s-1.16.patch
+       git am ${patch_folder}/0003-use-oss-image.patch
+       git am ${patch_folder}/0004-Update-to-Elastic-7.4.0-Release.patch
+       git am ${patch_folder}/0005-set-initial-masters-to-master-0.patch
+}
+
+do_configure () {
+       :
+}
+
+do_compile () {
+       # initialize helm and build the toolkit
+       # helm init --client-only does not work if there is no networking
+       # The following commands do essentially the same as: helm init
+       export HOME="${B}/${USER}"
+       export helm_home="${B}/${USER}/.helm"
+       rm -rf ${helm_home}
+
+       mkdir -p ${helm_home}
+       mkdir ${helm_home}/repository
+       mkdir ${helm_home}/repository/cache
+       mkdir ${helm_home}/repository/local
+       mkdir ${helm_home}/plugins
+       mkdir ${helm_home}/starters
+       mkdir ${helm_home}/cache
+       mkdir ${helm_home}/cache/archive
+
+       # Stage a repository file that only has a local repo
+       install -m 0644 ${patch_folder}/repositories.yaml \
+               ${helm_home}/repository/repositories.yaml
+
+       # Stage a local repo index that can be updated by the build
+       install -m 0644 ${patch_folder}/index.yaml ${helm_home}/repository/local/index.yaml
+
+       # Host a server for the charts
+       helm serve --repo-path . &
+       helm repo rm local
+       helm repo add local http://localhost:8879/charts
+
+       # Create the tgz files
+       rm -rf elasticsearch/Makefile
+       make elasticsearch
+
+       # terminate helm server
+       pid=`/bin/pidof helm`
+       kill ${pid}
+       rm -rf ${helm_home}
+}
+
+do_install () {
+       install -d -m 755 ${D}${helm_folder}
+       install -p -D -m 755 ${B}/*.tgz ${D}${helm_folder}
+}
+
+FILES_${PN} = "${helm_folder}"
+
+RDEPENDS_${PN} = " \
+    helm \
+    stx-platform-helm \
+    stx-openstack-helm \
+"
diff --git a/meta-stx/recipes-core/stx-monitor-armada-app/monitor-helm_1.0.bb b/meta-stx/recipes-core/stx-monitor-armada-app/monitor-helm_1.0.bb
new file mode 100644 (file)
index 0000000..78503ac
--- /dev/null
@@ -0,0 +1,120 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Monitor Helm charts"
+DESCRIPTION = "Monitor Helm charts"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+DEPENDS += " \
+    helm-native \
+    monitor-helm-elastic \
+"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV_helm-charts = "92b6289ae93816717a8453cfe62bad51cbdb8ad0"
+SRCREV_monitor-armada-app = "e5ee6b3a07b74479b93fe90eff0662cf81890f73"
+
+SRC_URI = " \
+    git://github.com/helm/charts;protocol=${PROTOCOL};name=helm-charts \
+    git://opendev.org/starlingx/monitor-armada-app.git;protocol=${PROTOCOL};branch=${BRANCH};name=monitor-armada-app;destsuffix=monitor-armada-app \
+"
+
+S = "${WORKDIR}/git"
+
+inherit allarch
+
+patch_folder = "${WORKDIR}/monitor-armada-app/monitor-helm/files"
+helm_folder = "${nonarch_libdir}/helm"
+helmchart_version = "0.1.0"
+
+do_patch () {
+       cd ${S}
+       git am ${patch_folder}/0001-Add-Makefile-for-helm-charts.patch
+       git am ${patch_folder}/0002-kibana-workaround-checksum-for-configmap.yaml.patch
+       git am ${patch_folder}/0003-helm-chart-changes-for-stx-monitor.patch
+       git am ${patch_folder}/0004-ipv6-helm-chart-changes.patch
+       git am ${patch_folder}/0005-decouple-config.patch
+       git am ${patch_folder}/0006-add-system-info.patch
+       git am ${patch_folder}/0007-three-masters.patch
+       git am ${patch_folder}/0008-Update-stx-monitor-for-kubernetes-API-1.16.patch
+       git am ${patch_folder}/0009-add-curator-as-of-2019-10-10.patch
+       git am ${patch_folder}/0010-Update-kube-state-metrics-1.8.0-to-commit-09daf19.patch
+       git am ${patch_folder}/0011-update-init-container-env-to-include-node-name.patch
+       git am ${patch_folder}/0012-Add-imagePullSecrets.patch
+       git am ${patch_folder}/0013-removed-unused-images.patch
+}
+
+do_configure () {
+       :
+}
+
+do_compile () {
+       # initialize helm and build the toolkit
+       # helm init --client-only does not work if there is no networking
+       # The following commands do essentially the same as: helm init
+       export HOME="${B}/${USER}"
+       export helm_home="${B}/${USER}/.helm"
+       rm -rf ${helm_home}
+
+       mkdir -p ${helm_home}
+       mkdir ${helm_home}/repository
+       mkdir ${helm_home}/repository/cache
+       mkdir ${helm_home}/repository/local
+       mkdir ${helm_home}/plugins
+       mkdir ${helm_home}/starters
+       mkdir ${helm_home}/cache
+       mkdir ${helm_home}/cache/archive
+
+       # Stage a repository file that only has a local repo
+       install -m 0644 ${patch_folder}/repositories.yaml ${helm_home}/repository/repositories.yaml
+
+       # Stage a local repo index that can be updated by the build
+       install -m 0644 ${patch_folder}/index.yaml ${helm_home}/repository/local/index.yaml
+
+       # Host a server for the charts
+       helm serve --repo-path . &
+       helm repo rm local
+       helm repo add local http://localhost:8879/charts
+
+       # Create the tgz files
+       cd stable
+       make filebeat
+       make metricbeat
+       make kube-state-metrics
+       make kibana
+       make nginx-ingress
+       make logstash
+       make elasticsearch-curator
+
+       # terminate helm server
+       pid=`/bin/pidof helm`
+       kill ${pid}
+       rm -rf ${helm_home}
+}
+
+do_install () {
+       install -d -m 755 ${D}${helm_folder}
+       install -p -D -m 755 ${S}/stable/*.tgz ${D}${helm_folder}
+}
+
+FILES_${PN} = "${helm_folder}"
+
+RDEPENDS_${PN} = " \
+    helm \
+    monitor-helm-elastic \
+"
diff --git a/meta-stx/recipes-core/stx-monitor-armada-app/stx-monitor-helm_1.0.bb b/meta-stx/recipes-core/stx-monitor-armada-app/stx-monitor-helm_1.0.bb
new file mode 100644 (file)
index 0000000..31af03f
--- /dev/null
@@ -0,0 +1,65 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "StarlingX Monitor Application Armada Helm Charts"
+DESCRIPTION = "StarlingX Monitor Application Armada Helm Charts"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+DEPENDS += " \
+    monitor-helm \
+    monitor-helm-elastic \
+"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "e5ee6b3a07b74479b93fe90eff0662cf81890f73"
+
+SRC_URI = "git://opendev.org/starlingx/monitor-armada-app.git;protocol=${PROTOCOL};branch=${BRANCH}"
+
+S = "${WORKDIR}/git/stx-monitor-helm/stx-monitor-helm"
+
+inherit allarch
+
+helm_folder = "${nonarch_libdir}/helm"
+armada_folder = "${nonarch_libdir}/armada"
+app_folder = "${nonarch_libdir}/application"
+
+do_configure () {
+       :
+}
+
+do_compile () {
+       :
+}
+
+do_install () {
+       install -d -m 755 ${D}${armada_folder}
+       install -p -D -m 755 ${S}/manifests/*.yaml ${D}${armada_folder}
+       install -d -m 755 ${D}${app_folder}
+       install -p -D -m 755 ${S}/files/metadata.yaml ${D}${app_folder}/monitor_metadata.yaml
+}
+
+FILES_${PN} = " \
+    ${app_folder} \
+    ${armada_folder} \
+"
+
+RDEPENDS_${PN} = " \
+    helm \
+    monitor-helm \
+    monitor-helm-elastic \
+"
diff --git a/meta-stx/recipes-core/stx-monitoring/collectd-extensions.inc b/meta-stx/recipes-core/stx-monitoring/collectd-extensions.inc
new file mode 100644 (file)
index 0000000..c9ab66e
--- /dev/null
@@ -0,0 +1,107 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " collectd-extensions"
+
+RDEPENDS_collectd-extensions += " \
+       systemd \
+       collectd \
+       fm-api \
+       python-influxdb \
+       python-oslo.concurrency \
+       python-httplib2 \
+       tsconfig \
+       "
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append() {
+       :
+}
+
+local_unit_dir = "${sysconfdir}/systemd/system"
+local_plugin_dir = "${sysconfdir}/collectd.d"
+local_python_extensions_dir = "/opt/collectd/extensions/python"
+local_config_extensions_dir = "/opt/collectd/extensions/config"
+
+
+do_install_append() {
+
+       cd ${S}/collectd-extensions/src
+       install -m 755 -d ${D}/${sysconfdir}
+       install -m 755 -d ${D}/${local_unit_dir}
+       install -m 755 -d ${D}/${local_plugin_dir}
+       install -m 755 -d ${D}/${local_config_extensions_dir}
+       install -m 755 -d ${D}/${local_python_extensions_dir}
+
+       # support files ; service and pmon conf
+       install -m 644 collectd.service  ${D}/${local_unit_dir}
+       install -m 600 collectd.conf.pmon  ${D}/${local_config_extensions_dir}
+
+       # collectd python plugin files - notifiers
+       install -m 700  fm_notifier.py ${D}/${local_python_extensions_dir}
+       install -m 700 plugin_common.py ${D}/${local_python_extensions_dir}
+
+       # collectd python plugin files - resource plugins
+       install -m 700 cpu.py  ${D}/${local_python_extensions_dir}
+       install -m 700 memory.py  ${D}/${local_python_extensions_dir}
+       install -m 700 example.py  ${D}/${local_python_extensions_dir}
+       install -m 700 ntpq.py  ${D}/${local_python_extensions_dir}
+       install -m 700 interface.py ${D}/${local_python_extensions_dir}
+       install -m 700 remotels.py  ${D}/${local_python_extensions_dir}
+       install -m 700 ptp.py  ${D}/${local_python_extensions_dir}
+       install -m 700 ovs_interface.py  ${D}/${local_python_extensions_dir}
+
+
+       # collectd plugin conf files into /etc/collectd.d
+       install -m 600 python_plugins.conf  ${D}/${local_plugin_dir}
+       install -m 600 cpu.conf  ${D}/${local_plugin_dir}
+       install -m 600 memory.conf  ${D}/${local_plugin_dir}
+       install -m 600 df.conf  ${D}/${local_plugin_dir}
+       install -m 600 example.conf  ${D}/${local_plugin_dir}
+       install -m 600 ntpq.conf  ${D}/${local_plugin_dir}
+       install -m 600 interface.conf  ${D}/${local_plugin_dir}
+       install -m 600 remotels.conf  ${D}/${local_plugin_dir}
+       install -m 600 ptp.conf  ${D}/${local_plugin_dir}
+       install -m 600 ovs_interface.conf  ${D}/${local_plugin_dir}
+
+}
+
+FILES_collectd-extensions = " \
+       ${local_unit_dir}/collectd.service \
+       ${local_config_extensions_dir}/collectd.conf.pmon \
+       ${local_python_extensions_dir}/fm_notifier.py \
+       ${local_python_extensions_dir}/plugin_common.py \
+       ${local_python_extensions_dir}/cpu.py \
+       ${local_python_extensions_dir}/memory.py \
+       ${local_python_extensions_dir}/example.py \
+       ${local_python_extensions_dir}/ntpq.py \
+       ${local_python_extensions_dir}/interface.py \
+       ${local_python_extensions_dir}/remotels.py \
+       ${local_python_extensions_dir}/ptp.py \
+       ${local_python_extensions_dir}/ovs_interface.py \
+       ${local_plugin_dir}/python_plugins.conf \
+       ${local_plugin_dir}/cpu.conf \
+       ${local_plugin_dir}/memory.conf \
+       ${local_plugin_dir}/df.conf \
+       ${local_plugin_dir}/example.conf \
+       ${local_plugin_dir}/ntpq.conf \
+       ${local_plugin_dir}/interface.conf \
+       ${local_plugin_dir}/remotels.conf \
+       ${local_plugin_dir}/ptp.conf \
+       ${local_plugin_dir}/ovs_interface.conf \
+       "
diff --git a/meta-stx/recipes-core/stx-monitoring/influxdb-extensions.inc b/meta-stx/recipes-core/stx-monitoring/influxdb-extensions.inc
new file mode 100644 (file)
index 0000000..c2bf92c
--- /dev/null
@@ -0,0 +1,48 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " influxdb-extensions"
+
+RDEPENDS_influxdb-extensions += " \
+       systemd \
+       python-influxdb \
+       "
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append() {
+       :
+}
+
+local_unit_dir = "${sysconfdir}/systemd/system"
+
+do_install_append() {
+
+       cd ${S}/influxdb-extensions/src
+       install -m 755 -d ${D}/${sysconfdir}
+       install -m 755 -d ${D}/${local_unit_dir}
+       install -m 755 -d ${D}/${sysconfdir}/influxdb
+
+       # support files ; service and pmon conf
+       install -m 644 influxdb.service  ${D}/${local_unit_dir}
+       install -m 600 influxdb.conf.pmon  ${D}/${sysconfdir}/influxdb
+}
+
+FILES_influxdb-extensions = " \
+       ${local_unit_dir}/influxdb.service \
+       ${sysconfdir}/influxdb \
+       "
diff --git a/meta-stx/recipes-core/stx-monitoring/monitor-tools.inc b/meta-stx/recipes-core/stx-monitoring/monitor-tools.inc
new file mode 100644 (file)
index 0000000..64e5f84
--- /dev/null
@@ -0,0 +1,42 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " monitor-tools"
+
+# RDEPENDS_monitor-tools += " initscripts-config"
+
+do_configure_append () {
+       :
+} 
+
+do_compile_append() {
+       :
+}
+
+do_install_append() {
+
+       cd ${S}/monitor-tools/scripts
+       install -m 755 -d ${D}/${bindir}
+       # support files ; service and pmon conf
+       install -m 755 memtop  ${D}/${bindir}
+       install -m 755 schedtop ${D}/${bindir}
+       install -m 755 occtop  ${D}/${bindir}
+}
+
+FILES_monitor-tools  = " \
+       ${bindir}/memtop \
+       ${bindir}/schedtop \
+       ${bindir}/occtop \
+       "
diff --git a/meta-stx/recipes-core/stx-monitoring/stx-monitoring.bb b/meta-stx/recipes-core/stx-monitoring/stx-monitoring.bb
new file mode 100644 (file)
index 0000000..8414d5f
--- /dev/null
@@ -0,0 +1,69 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-monitoring"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCNAME = "monitoring"
+SRCREV = "8befe1720b02c5e1e3ddf637947643b9b0a0f96f"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+# TODO:
+#3b83ef96387f14655fc854ddc3c6bd57  ./collectd-extensions/src/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./influxdb-extensions/src/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./monitor-tools/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./monitor-tools/scripts/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./vm-topology/vm-topology/LICENSE
+
+LIC_FILES_CHKSUM = "file://collectd-extensions/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = "git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+DEPENDS += " \
+       python \
+       python-pbr-native \
+       stx-metal \
+       stx-fault \
+       json-c \
+       openssl \
+       libevent \
+       libgcc \
+       "
+
+require collectd-extensions.inc
+require influxdb-extensions.inc
+require monitor-tools.inc
+require vm-topology.inc
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install () {
+       :
+}
+
+pkg_postinst_ontarget_${PN} () {
+}
+
+FILES_${PN} = " "
diff --git a/meta-stx/recipes-core/stx-monitoring/vm-topology.inc b/meta-stx/recipes-core/stx-monitoring/vm-topology.inc
new file mode 100644 (file)
index 0000000..a66bf5c
--- /dev/null
@@ -0,0 +1,49 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " vm-topology"
+
+RDEPENDS_vm-topology += " \
+       python \
+       libvirt \
+       python-keyring \
+       "
+
+DEPENDS += " \
+       python-keyring \
+       libvirt \
+       "
+
+inherit setuptools distutils python-dir
+
+do_configure_append () {
+       cd ${S}/vm-topology/vm-topology
+       distutils_do_configure
+} 
+
+do_compile_append() {
+       cd ${S}/vm-topology/vm-topology
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/vm-topology/vm-topology
+       distutils_do_install
+}
+
+FILES_vm-topology  = " \
+       ${bindir}/vm-topology \
+       ${PYTHON_SITEPACKAGES_DIR}/ \
+       "
diff --git a/meta-stx/recipes-core/stx-nfv/files/use-ldflags-mtce-guest.patch b/meta-stx/recipes-core/stx-nfv/files/use-ldflags-mtce-guest.patch
new file mode 100644 (file)
index 0000000..eab5e65
--- /dev/null
@@ -0,0 +1,15 @@
+diff --git a/mtce-guest/src/Makefile b/mtce-guest/src/Makefile
+index 40dd933..ba6e029 100644
+--- a/mtce-guest/src/Makefile
++++ b/mtce-guest/src/Makefile
+@@ -31,8 +31,8 @@ LDLIBS = $(EXTRALDFLAGS) -lstdc++ -ldaemon -lcommon -lfmcommon -ljson-c -levent
+ INCLUDES = -I. -I/usr/include/mtce-common -I/usr/include/mtce-daemon
+ build: $(OBJS)
+-      $(CXX) $(CCPFLAGS) $(AGENT_OBJS)  $(LDLIBS) -L. -o guestAgent
+-      $(CXX) $(CCPFLAGS) $(SERVER_OBJS) $(LDLIBS) -L. -o guestServer
++      $(CXX) $(CCPFLAGS) $(LDFLAGS) $(AGENT_OBJS)  $(LDLIBS) -L. -o guestAgent
++      $(CXX) $(CCPFLAGS) $(LDFLAGS) $(SERVER_OBJS) $(LDLIBS) -L. -o guestServer
+ .cpp.o:
+       $(CXX) $(INCLUDES) $(CCPFLAGS) $(EXTRACCFLAGS) -c $< -o $@
diff --git a/meta-stx/recipes-core/stx-nfv/mtce-guest.inc b/meta-stx/recipes-core/stx-nfv/mtce-guest.inc
new file mode 100644 (file)
index 0000000..268f538
--- /dev/null
@@ -0,0 +1,60 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " mtce-guestagent"
+PACKAGES += " mtce-guestserver"
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       cd ${S}/mtce-guest/src/
+       oe_runmake -e MAJOR="1" MINOR="0" \
+               INCLUDES=" -I. -I${STAGING_INCDIR}/mtce-common/ -I${STAGING_INCDIR}/mtce-daemon/ " \
+               CPPFLAGS="${CXXFLAGS}" LDFLAGS="${LDFLAGS}" build
+}
+
+do_install_prepend () {
+
+       cd ${S}/mtce-guest/src/
+       oe_runmake -e install DESTDIR=${D} PREFIX=${D}/usr/ \
+                      SYSCONFDIR=${D}/${sysconfdir} \
+                           LOCALBINDIR=${D}/${bindir} \
+                           UNITDIR=${D}/${systemd_system_unitdir} 
+
+       rm -rf ${D}/var
+       rm -rf ${D}/var/run
+}
+
+FILES_mtce-guestserver = " \
+       ${sysconfdir}/mtc/tmp \
+       ${sysconfdir}/mtc/guestServer.ini \
+       ${sysconfdir}/pmon.d/guestServer.conf \
+       ${sysconfdir}/logrotate.d/guestServer.logrotate \
+       ${systemd_system_unitdir}/guestServer.service \
+       ${sysconfdir}/init.d/guestServer \
+       ${bindir}/guestServer \
+       "
+
+FILES_mtce-guestagent = " \
+       ${sysconfdir}/mtc/tmp \
+       ${sysconfdir}/mtc/guestAgent.ini \
+       ${systemd_system_unitdir}/guestAgent.service \
+       ${sysconfdir}/logrotate.d/guestAgent.logrotate \
+       ${sysconfdir}/init.d/guestAgent \
+       ${libdir}/ocf/resource.d/platform/guestAgent \
+       ${bindir}/guestAgent \
+" 
diff --git a/meta-stx/recipes-core/stx-nfv/nfv-client.inc b/meta-stx/recipes-core/stx-nfv/nfv-client.inc
new file mode 100644 (file)
index 0000000..a559914
--- /dev/null
@@ -0,0 +1,45 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfv-client"
+
+inherit setuptools
+
+
+do_configure_prepend () {
+       cd ${S}/nfv/nfv-client
+       distutils_do_configure
+} 
+
+do_compile_prepend () {
+       cd ${S}/nfv/nfv-client
+       distutils_do_compile
+}
+
+do_install_prepend () {
+       cd ${S}/nfv/nfv-client
+       distutils_do_install
+       
+       install -d -m 755 ${D}/${sysconfdir}/bash_completion.d
+       install -m 444 scripts/sw-manager.completion ${D}/${sysconfdir}/bash_completion.d/sw-manager
+
+}
+
+FILES_nfv-client = " \
+       ${bindir}/sw-manager \
+       ${sysconfdir}/bash_completion.d/sw-manager \
+       ${libdir}/python2.7/site-packages/nfv_client*egg-info \
+       ${libdir}/python2.7/site-packages/nfv_client \
+       "
diff --git a/meta-stx/recipes-core/stx-nfv/nfv-common.inc b/meta-stx/recipes-core/stx-nfv/nfv-common.inc
new file mode 100644 (file)
index 0000000..0d6b2c6
--- /dev/null
@@ -0,0 +1,43 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfv-common"
+
+inherit setuptools
+
+do_configure_prepend () {
+       cd ${S}/nfv/nfv-common
+       distutils_do_configure
+} 
+
+do_compile_prepend () {
+       cd ${S}/nfv/nfv-common
+       distutils_do_compile
+}
+
+do_install_prepend () {
+       cd ${S}/nfv/nfv-common
+       distutils_do_install
+       
+}
+
+pkg_postinst_ontarget_nfv-common () {
+	:
+}
+
+FILES_nfv-common_append = " \
+       ${libdir}/python2.7/site-packages/nfv_common \
+       ${libdir}/python2.7/site-packages/windriver_nfv_common_plugins-1.0.0-py2.7.egg-info \
+       "
diff --git a/meta-stx/recipes-core/stx-nfv/nfv-plugins.inc b/meta-stx/recipes-core/stx-nfv/nfv-plugins.inc
new file mode 100644 (file)
index 0000000..a166524
--- /dev/null
@@ -0,0 +1,66 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfv-plugins"
+
+inherit setuptools
+
+
+do_configure_append () {
+       cd ${S}/nfv/nfv-plugins
+       distutils_do_configure
+} 
+
+do_compile_append () {
+       cd ${S}/nfv/nfv-plugins
+       distutils_do_compile
+}
+
+do_install_append () {
+	cd ${S}/nfv/nfv-plugins
+	distutils_do_install
+	# Lay down the plugin config files and the logrotate policy
+	install -d -m 755 ${D}/${sysconfdir}/nfv/
+	install -d -m 755 ${D}/${sysconfdir}/nfv/nfv_plugins/
+	install -d -m 755 ${D}/${sysconfdir}/nfv/nfv_plugins/alarm_handlers/
+
+	install -p -D -m 600 nfv_plugins/alarm_handlers/config.ini \
+			${D}/${sysconfdir}/nfv/nfv_plugins/alarm_handlers/config.ini
+
+	install -d -m 755 ${D}/${sysconfdir}/nfv/nfv_plugins/event_log_handlers/
+
+	install -p -D -m 600 nfv_plugins/event_log_handlers/config.ini \
+			${D}/${sysconfdir}/nfv/nfv_plugins/event_log_handlers/config.ini
+
+	install -d -m 755 ${D}/${sysconfdir}/nfv/nfv_plugins/nfvi_plugins/
+
+	install -p -D -m 600 nfv_plugins/nfvi_plugins/config.ini \
+			${D}/${sysconfdir}/nfv/nfv_plugins/nfvi_plugins/config.ini
+
+	install -d -m 755 ${D}/${sysconfdir}/logrotate.d
+	install -p -D -m 644 scripts/nfvi-plugins.logrotate \
+			${D}/${sysconfdir}/logrotate.d/nfvi-plugins.logrotate
+
+}
+
+FILES_nfv-plugins += " \
+       ${bindir}/nfv-forensic \
+       ${bindir}/nfv-notify \
+       ${libdir}/python2.7/site-packages/windriver_nfv_plugins*egg-info \
+       ${libdir}/python2.7/site-packages/nfv_plugins \
+       ${sysconfdir}/nfv/nfv_plugins/ \
+       ${sysconfdir}/logrotate.d/nfvi-plugins.logrotate \
+       "
+
diff --git a/meta-stx/recipes-core/stx-nfv/nfv-tools.inc b/meta-stx/recipes-core/stx-nfv/nfv-tools.inc
new file mode 100644 (file)
index 0000000..a7f3958
--- /dev/null
@@ -0,0 +1,41 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfv-tools"
+inherit setuptools
+
+
+do_configure_append () {
+       cd ${S}/nfv/nfv-tools
+       distutils_do_configure
+} 
+
+do_compile_append () {
+       cd ${S}/nfv/nfv-tools
+       distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/nfv/nfv-tools
+       distutils_do_install
+
+}
+
+FILES_nfv-tools = " \
+       ${bindir}/nfv-forensic \
+       ${bindir}/nfv-notify \
+       ${libdir}/python2.7/site-packages/nfv_tools*egg-info \
+       ${libdir}/python2.7/site-packages/nfv_tools \
+       "
diff --git a/meta-stx/recipes-core/stx-nfv/nfv-vim.inc b/meta-stx/recipes-core/stx-nfv/nfv-vim.inc
new file mode 100644 (file)
index 0000000..553de70
--- /dev/null
@@ -0,0 +1,57 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfv-vim"
+
+inherit setuptools
+
+do_configure_prepend () {
+       cd ${S}/nfv/nfv-vim
+       distutils_do_configure
+} 
+
+do_compile_prepend () {
+       cd ${S}/nfv/nfv-vim
+       distutils_do_compile
+}
+
+do_install_prepend () {
+	cd ${S}/nfv/nfv-vim
+	distutils_do_install
+	# Use ${libdir} so the install paths match FILES_nfv-vim (${libdir}/ocf/...)
+	install -d -m 755 ${D}${libdir}/ocf/resource.d/nfv
+	install -p -D -m 755 scripts/vim ${D}${libdir}/ocf/resource.d/nfv/vim
+	install -p -D -m 755 scripts/vim-api ${D}${libdir}/ocf/resource.d/nfv/vim-api
+	install -p -D -m 755 scripts/vim-webserver ${D}${libdir}/ocf/resource.d/nfv/vim-webserver
+	install -d -m 755 ${D}/${sysconfdir}/nfv/
+	install -d -m 755 ${D}/${sysconfdir}/nfv/vim/
+	install -p -D -m 600 nfv_vim/config.ini ${D}/${sysconfdir}/nfv/vim/config.ini
+	install -p -D -m 600 nfv_vim/debug.ini ${D}/${sysconfdir}/nfv/vim/debug.ini
+}
+
+#pkg_postinst_ontarget_${PN} () {
+
+FILES_nfv-vim_append = " \
+       ${sysconfdir}/nfv/vim \
+       ${libdir}/ocf/resource.d/nfv/vim \
+       ${libdir}/ocf/resource.d/nfv/vim-api \
+       ${libdir}/ocf/resource.d/nfv/vim-webserver \
+       ${bindir}/nfv-vim-webserver \
+       ${bindir}/nfv-vim-api \
+       ${bindir}/nfv-vim-manage \
+       ${bindir}/nfv-vim \
+       ${libdir}/python2.7/site-packages/nfv_vim \
+       ${libdir}/python2.7/site-packages/nfv_vim*egg-info \
+       "
diff --git a/meta-stx/recipes-core/stx-nfv/nova-api-proxy.inc b/meta-stx/recipes-core/stx-nfv/nova-api-proxy.inc
new file mode 100644 (file)
index 0000000..8234b8a
--- /dev/null
@@ -0,0 +1,57 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nova-api-proxy"
+
+inherit setuptools
+
+
+do_configure_append () {
+       cd ${S}/nova-api-proxy/nova-api-proxy
+       distutils_do_configure
+} 
+
+do_compile_append () {
+	cd ${S}/nova-api-proxy/nova-api-proxy
+	distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/nova-api-proxy/nova-api-proxy
+       distutils_do_install
+
+       install -d -m 755 ${D}/${systemd_system_unitdir}
+       install -p -D -m 644 nova_api_proxy/scripts/api-proxy.service ${D}/${systemd_system_unitdir}/api-proxy.service
+       install -d -m 755 ${D}/${sysconfdir}/rc.d/init.d
+       install -p -D -m 755 nova_api_proxy/scripts/api-proxy ${D}/${sysconfdir}/rc.d/init.d/api-proxy
+
+       install -d -m 755 ${D}/${sysconfdir}/proxy
+       install -p -D -m 700 nova_api_proxy/nova-api-proxy.conf ${D}${sysconfdir}/proxy/nova-api-proxy.conf
+       install -p -D -m 700 nova_api_proxy/api-proxy-paste.ini ${D}${sysconfdir}/proxy/api-proxy-paste.ini
+       
+
+}
+
+
+# Files shipped in the nova-api-proxy subpackage (must match do_install_append above)
+FILES_nova-api-proxy = " \
+	${bindir}/nova-api-proxy \
+	${sysconfdir}/proxy/api-proxy-paste.ini \
+	${sysconfdir}/proxy/nova-api-proxy.conf \
+	${sysconfdir}/rc.d/init.d/api-proxy \
+	${systemd_system_unitdir}/api-proxy.service \
+	${libdir}/python2.7/site-packages/nova_api_proxy/ \
+	${libdir}/python2.7/site-packages/api_proxy*egg-info \
+	"
diff --git a/meta-stx/recipes-core/stx-nfv/stx-nfv.bb b/meta-stx/recipes-core/stx-nfv/stx-nfv.bb
new file mode 100644 (file)
index 0000000..8b4430a
--- /dev/null
@@ -0,0 +1,67 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-nfv"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "aaa932c00e028dcbaf0eed6843c4d3e51f09b2c1"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+
+SRC_URI = "git://opendev.org/starlingx/nfv.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://use-ldflags-mtce-guest.patch \
+       "
+
+DEPENDS += " \
+       python \
+       python-pbr-native \
+       stx-metal \
+       stx-fault \
+       json-c \
+       openssl \
+       libevent \
+       libgcc \
+       "
+
+require mtce-guest.inc
+require nfv-vim.inc
+require nfv-common.inc
+require nfv-client.inc
+require nfv-plugins.inc
+require nfv-tools.inc
+require nova-api-proxy.inc
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install () {
+       :
+}
+
+pkg_postinst_ontarget_${PN} () {
+	:
+}
+FILES_${PN} = " "
diff --git a/meta-stx/recipes-core/stx-openstack-armada-app/openstack-helm-infra_1.0.bb b/meta-stx/recipes-core/stx-openstack-armada-app/openstack-helm-infra_1.0.bb
new file mode 100644 (file)
index 0000000..92ad899
--- /dev/null
@@ -0,0 +1,108 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Openstack-Helm-Infra charts"
+DESCRIPTION = "Openstack-Helm-Infra charts"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+DEPENDS += "helm-native"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV_openstack-helm-infra = "c9d6676bf9a5aceb311dc31dadd07cba6a3d6392"
+SRCREV_openstack-armada-app = "863f4b9733d3d4f4fd490606a94b84cfdaf2df2c"
+
+SRC_URI = " \
+    git://github.com/openstack/openstack-helm-infra;protocol=${PROTOCOL};name=openstack-helm-infra \
+    git://opendev.org/starlingx/openstack-armada-app;protocol=${PROTOCOL};branch=${BRANCH};name=openstack-armada-app;destsuffix=openstack-armada-app \
+"
+
+S = "${WORKDIR}/git"
+
+inherit allarch
+
+patch_folder = "${WORKDIR}/openstack-armada-app/openstack-helm-infra/files"
+helm_folder = "${nonarch_libdir}/helm"
+
+do_patch () {
+       cd ${S}
+       git am ${patch_folder}/0001-Allow-multiple-containers-per-daemonset-pod.patch
+       git am ${patch_folder}/0002-Add-imagePullSecrets-in-service-account.patch
+       git am ${patch_folder}/0003-Set-Min-NGINX-handles.patch
+       git am ${patch_folder}/0004-Partial-revert-of-31e3469d28858d7b5eb6355e88b6f49fd6.patch
+       git am ${patch_folder}/0005-Add-TLS-support-for-Gnocchi-public-endpoint.patch
+       git am ${patch_folder}/0006-Fix-pod-restarts-on-all-workers-when-worker-added-re.patch
+       git am ${patch_folder}/0007-Add-io_thread_pool-for-rabbitmq.patch
+       git am ${patch_folder}/0008-Enable-override-of-rabbitmq-probe-parameters.patch
+}
+
+do_configure () {
+       :
+}
+
+do_compile () {
+       # initialize helm and build the toolkit
+       # helm init --client-only does not work if there is no networking
+       # The following commands do essentially the same as: helm init
+       export HOME="${B}/${USER}"
+       export helm_home="${B}/${USER}/.helm"
+       rm -rf ${helm_home}
+
+       mkdir -p ${helm_home}
+       mkdir ${helm_home}/repository
+       mkdir ${helm_home}/repository/cache
+       mkdir ${helm_home}/repository/local
+       mkdir ${helm_home}/plugins
+       mkdir ${helm_home}/starters
+       mkdir ${helm_home}/cache
+       mkdir ${helm_home}/cache/archive
+
+       # Stage a repository file that only has a local repo
+       install -m 0644 ${patch_folder}/repositories.yaml \
+               ${helm_home}/repository/repositories.yaml
+
+       # Host a server for the charts
+       tmpdir=`mktemp -d ${B}/charts-XXXXXX`
+       helm serve ${tmpdir} --address localhost:8879 --url http://localhost:8879/charts &
+       helm repo rm local
+       helm repo add local http://localhost:8879/charts
+
+       # Make the charts. These produce tgz files
+       make helm-toolkit
+       make gnocchi
+       make ingress
+       make libvirt
+       make mariadb
+       make memcached
+       make openvswitch
+       make rabbitmq
+       make ceph-rgw
+
+       # terminate helm server
+       pid=`/bin/pidof helm`
+       kill ${pid}
+       rm -rf ${helm_home}
+}
+
+do_install () {
+       install -d -m 755 ${D}${helm_folder}
+       install -p -D -m 755 ${B}/*.tgz ${D}${helm_folder}
+}
+
+FILES_${PN} = "${helm_folder}"
+
+RDEPENDS_${PN} = "helm"
diff --git a/meta-stx/recipes-core/stx-openstack-armada-app/openstack-helm_1.0.bb b/meta-stx/recipes-core/stx-openstack-armada-app/openstack-helm_1.0.bb
new file mode 100644 (file)
index 0000000..17b1fe0
--- /dev/null
@@ -0,0 +1,126 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Openstack Helm charts"
+DESCRIPTION = "Openstack Helm charts"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+DEPENDS += " \
+    helm-native \
+    openstack-helm-infra \
+"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV_openstack-helm = "82c72367c85ca94270f702661c7b984899c1ae38"
+SRCREV_openstack-armada-app = "863f4b9733d3d4f4fd490606a94b84cfdaf2df2c"
+
+SRC_URI = " \
+    git://github.com/openstack/openstack-helm;protocol=${PROTOCOL};name=openstack-helm \
+    git://opendev.org/starlingx/openstack-armada-app;protocol=${PROTOCOL};branch=${BRANCH};name=openstack-armada-app;destsuffix=openstack-armada-app \
+"
+
+S = "${WORKDIR}/git"
+
+inherit allarch
+
+patch_folder = "${WORKDIR}/openstack-armada-app/openstack-helm/files"
+helm_folder = "${nonarch_libdir}/helm"
+toolkit_version = "0.1.0"
+helmchart_version = "0.1.0"
+
+do_patch () {
+       cd ${S}
+       git am ${patch_folder}/0001-Ceilometer-chart-add-the-ability-to-publish-events-t.patch
+       git am ${patch_folder}/0002-Remove-stale-Apache2-service-pids-when-a-POD-starts.patch
+       git am ${patch_folder}/0003-Nova-console-ip-address-search-optionality.patch
+       git am ${patch_folder}/0004-Nova-chart-Support-ephemeral-pool-creation.patch
+       git am ${patch_folder}/0005-Nova-Add-support-for-disabling-Readiness-Liveness-pr.patch
+       git am ${patch_folder}/0006-Add-Placement-Chart.patch
+}
+
+do_configure () {
+       :
+}
+
+do_compile () {
+       # initialize helm and build the toolkit
+       # helm init --client-only does not work if there is no networking
+       # The following commands do essentially the same as: helm init
+       export HOME="${B}/${USER}"
+       export helm_home="${B}/${USER}/.helm"
+       rm -rf ${helm_home}
+
+       mkdir -p ${helm_home}
+       mkdir ${helm_home}/repository
+       mkdir ${helm_home}/repository/cache
+       mkdir ${helm_home}/repository/local
+       mkdir ${helm_home}/plugins
+       mkdir ${helm_home}/starters
+       mkdir ${helm_home}/cache
+       mkdir ${helm_home}/cache/archive
+
+       # Stage a repository file that only has a local repo
+       install -m 0644 ${patch_folder}/repositories.yaml ${helm_home}/repository/repositories.yaml
+
+       # Stage a local repo index that can be updated by the build
+       install -m 0644 ${patch_folder}/index.yaml ${helm_home}/repository/local/index.yaml
+
+       # Stage helm-toolkit in the local repo
+       cp ${RECIPE_SYSROOT}${helm_folder}/helm-toolkit-${toolkit_version}.tgz .
+
+       # Host a server for the charts
+       helm serve --repo-path . &
+       helm repo rm local
+       helm repo add local http://localhost:8879/charts
+
+       # Make the charts. These produce a tgz file
+       make aodh
+       make barbican
+       make ceilometer
+       make cinder
+       make glance
+       make heat
+       make horizon
+       make ironic
+       make keystone
+       make magnum
+       make neutron
+       make nova
+       make panko
+       make placement
+
+       # terminate helm server
+       pid=`/bin/pidof helm`
+       kill ${pid}
+       rm -rf ${helm_home}
+
+       # Remove the helm-toolkit tarball
+       rm helm-toolkit-${toolkit_version}.tgz
+}
+
+do_install () {
+       install -d -m 755 ${D}${helm_folder}
+       install -p -D -m 755 ${B}/*.tgz ${D}${helm_folder}
+}
+
+FILES_${PN} = "${helm_folder}"
+
+RDEPENDS_${PN} = " \
+    helm \
+    openstack-helm-infra \
+"
diff --git a/meta-stx/recipes-core/stx-openstack-armada-app/stx-openstack-helm_1.0.bb b/meta-stx/recipes-core/stx-openstack-armada-app/stx-openstack-helm_1.0.bb
new file mode 100644 (file)
index 0000000..0941ab7
--- /dev/null
@@ -0,0 +1,118 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "StarlingX Openstack Application Helm charts"
+DESCRIPTION = "StarlingX Openstack Application Helm charts"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+DEPENDS += " \
+    helm-native \
+    openstack-helm \
+    openstack-helm-infra \
+    stx-platform-helm \
+"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "863f4b9733d3d4f4fd490606a94b84cfdaf2df2c"
+
+SRC_URI = "git://opendev.org/starlingx/openstack-armada-app;protocol=${PROTOCOL};branch=${BRANCH}"
+
+S = "${WORKDIR}/git/stx-openstack-helm/stx-openstack-helm"
+
+inherit allarch
+
+helm_folder = "${nonarch_libdir}/helm"
+armada_folder = "${nonarch_libdir}/armada"
+app_folder = "${nonarch_libdir}/application"
+toolkit_version = "0.1.0"
+helmchart_version = "0.1.0"
+
+do_configure () {
+       :
+}
+
+do_compile () {
+       # initialize helm and build the toolkit
+       # helm init --client-only does not work if there is no networking
+       # The following commands do essentially the same as: helm init
+       export HOME="${B}/${USER}"
+       export helm_home="${B}/${USER}/.helm"
+       rm -rf ${helm_home}
+
+       mkdir -p ${helm_home}
+       mkdir ${helm_home}/repository
+       mkdir ${helm_home}/repository/cache
+       mkdir ${helm_home}/repository/local
+       mkdir ${helm_home}/plugins
+       mkdir ${helm_home}/starters
+       mkdir ${helm_home}/cache
+       mkdir ${helm_home}/cache/archive
+
+       # Stage a repository file that only has a local repo
+       cp ${S}/files/repositories.yaml ${helm_home}/repository/repositories.yaml
+
+       # Stage a local repo index that can be updated by the build
+       cp ${S}/files/index.yaml ${helm_home}/repository/local/index.yaml
+
+       # Stage helm-toolkit in the local repo
+       cp ${RECIPE_SYSROOT}${helm_folder}/helm-toolkit-${toolkit_version}.tgz .
+
+       # Host a server for the charts
+       helm serve --repo-path . &
+       helm repo rm local
+       helm repo add local http://localhost:8879/charts
+
+       # Make the charts. These produce a tgz file
+       cd ${S}/helm-charts
+       make nova-api-proxy
+       make garbd
+       make keystone-api-proxy
+       make fm-rest-api
+       make nginx-ports-control
+       make dcdbsync
+       cd -
+
+       # terminate helm server
+       pid=`/bin/pidof helm`
+       kill ${pid}
+       rm -rf ${helm_home}
+
+       # Remove the helm-toolkit tarball
+       rm helm-toolkit-${toolkit_version}.tgz
+}
+
+do_install () {
+       install -d -m 755 ${D}${app_folder}
+       install -p -D -m 755 ${S}/files/metadata.yaml ${D}${app_folder}
+       install -d -m 755 ${D}${helm_folder}
+       install -p -D -m 755 ${S}/helm-charts/*.tgz ${D}${helm_folder}
+       install -d -m 755 ${D}${armada_folder}
+       install -p -D -m 755 ${S}/manifests/*.yaml ${D}${armada_folder}
+}
+
+FILES_${PN} = " \
+    ${app_folder} \
+    ${helm_folder} \
+    ${armada_folder} \
+"
+
+RDEPENDS_${PN} = " \
+    helm \
+    openstack-helm \
+    openstack-helm-infra \
+"
diff --git a/meta-stx/recipes-core/stx-platform-armada-app/stx-platform-helm_1.0.bb b/meta-stx/recipes-core/stx-platform-armada-app/stx-platform-helm_1.0.bb
new file mode 100644 (file)
index 0000000..6f041e4
--- /dev/null
@@ -0,0 +1,139 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "StarlingX Platform Helm charts"
+DESCRIPTION = "StarlingX Platform Helm charts"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+DEPENDS += " \
+    helm-native \
+    openstack-helm \
+    openstack-helm-infra \
+"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV_platform-armada-app = "c67d1eeb605ea1da4ebb2a1219a6f54f05e3eb5e"
+SRCREV_helm-charts = "c01426a2500269fbf1a781214a361de0796297d1"
+
+SRC_URI = " \
+    git://opendev.org/starlingx/platform-armada-app.git;protocol=${PROTOCOL};branch=${BRANCH};name=platform-armada-app \
+    git://opendev.org/starlingx/helm-charts.git;protocol=${PROTOCOL};branch=${BRANCH};name=helm-charts;destsuffix=helm-charts \
+"
+
+S = "${WORKDIR}/git/stx-platform-helm/stx-platform-helm"
+
+inherit allarch
+
+toolkit_version = "0.1.0"
+helm_folder = "${RECIPE_SYSROOT}${nonarch_libdir}/helm"
+helm_repo = "stx-platform"
+
+app_name = "platform-integ-apps"
+app_staging = "${B}/staging"
+app_tarball = "${app_name}.tgz"
+app_folder = "/usr/local/share/applications/helm"
+
+do_configure () {
+       :
+}
+
+do_compile () {
+	# initialize helm and build the toolkit
+	# helm init --client-only does not work if there is no networking
+	# The following commands do essentially the same as: helm init
+	export HOME="${B}/${USER}"
+	export helm_home="${B}/${USER}/.helm"
+	rm -rf ${helm_home}
+
+	mkdir -p ${helm_home}
+	mkdir ${helm_home}/repository
+	mkdir ${helm_home}/repository/cache
+	mkdir ${helm_home}/repository/local
+	mkdir ${helm_home}/plugins
+	mkdir ${helm_home}/starters
+	mkdir ${helm_home}/cache
+	mkdir ${helm_home}/cache/archive
+
+	# Stage a repository file that only has a local repo
+	cp ${S}/files/repositories.yaml ${helm_home}/repository/repositories.yaml
+
+	# Stage a local repo index that can be updated by the build
+	cp ${S}/files/index.yaml ${helm_home}/repository/local/index.yaml
+
+	# Stage helm-toolkit in the local repo
+	cp ${helm_folder}/helm-toolkit-${toolkit_version}.tgz ${S}/helm-charts/
+
+	# Host a server for the charts
+	helm serve --repo-path . &
+	helm repo rm local
+	helm repo add local http://localhost:8879/charts
+
+	# Make the charts. These produce a tgz file
+	cp -rf ${WORKDIR}/helm-charts/node-feature-discovery/node-feature-discovery/helm-charts/node-feature-discovery/ \
+		${S}/helm-charts/
+	cd ${S}/helm-charts
+	make rbd-provisioner
+	make ceph-pools-audit
+	make node-feature-discovery
+	cd -
+
+	# Terminate helm server
+	pid=`/bin/pidof helm`
+	kill ${pid}
+	rm -rf ${helm_home}
+
+	# Create a chart tarball compliant with sysinv kube-app.py
+	# Setup staging
+	mkdir -p ${app_staging}
+	cp ${S}/files/metadata.yaml ${app_staging}
+	cp ${S}/manifests/manifest.yaml ${app_staging}
+
+	mkdir -p ${app_staging}/charts
+	cp ${S}/helm-charts/*.tgz ${app_staging}/charts
+	cd ${app_staging}
+	# NOTE(review): ${version} and ${tis_patch_ver} are not defined in this recipe, so @APP_VERSION@ renders as "-" -- confirm the intended version source (e.g. ${PV})
+	# Populate metadata
+	sed -i 's/@APP_NAME@/${app_name}/g' ${app_staging}/metadata.yaml
+	sed -i 's/@APP_VERSION@/${version}-${tis_patch_ver}/g' ${app_staging}/metadata.yaml
+	sed -i 's/@HELM_REPO@/${helm_repo}/g' ${app_staging}/metadata.yaml
+
+	# package it up
+	find . -type f ! -name '*.md5' -print0 | xargs -0 md5sum > checksum.md5
+	tar -zcf ${B}/${app_tarball} -C ${app_staging}/ .
+
+	# Cleanup staging
+	rm -fr ${app_staging}
+}
+
+do_install () {
+       install -d -m 755 ${D}/${app_folder}
+       install -p -D -m 755 ${B}/${app_tarball} ${D}/${app_folder}
+       install -d -m 755 ${D}/opt/extracharts
+       install -p -D -m 755 ${S}/helm-charts/node-feature-discovery-*.tgz ${D}/opt/extracharts
+}
+
+FILES_${PN} = " \
+    /opt/extracharts \
+    ${app_folder} \
+"
+
+RDEPENDS_${PN} = " \
+    helm \
+    openstack-helm \
+    openstack-helm-infra \
+"
diff --git a/meta-stx/recipes-core/stx-update/cgcs-patch.inc b/meta-stx/recipes-core/stx-update/cgcs-patch.inc
new file mode 100644 (file)
index 0000000..c506818
--- /dev/null
@@ -0,0 +1,136 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " cgcs-patch"
+PACKAGES += " cgcs-patch-agent"
+PACKAGES += " cgcs-patch-controller"
+
+inherit setuptools
+
+RDEPENDS_cgcs-patch_append = " \
+       bash \
+       "
+RDEPENDS_cgcs-patch-agent_append = " \
+       bash \
+       python \
+       "
+
+RDEPENDS_cgcs-patch-controller_append = " \
+       bash \
+       python-requests-toolbelt \
+       createrepo-c \
+       "
+
+do_configure_append () {
+       cd ${S}/cgcs-patch/cgcs-patch
+       distutils_do_configure
+} 
+
+do_compile_append () {
+       cd ${S}/cgcs-patch/cgcs-patch
+       distutils_do_compile
+}
+
+do_install_append () {
+	cd ${S}/cgcs-patch/cgcs-patch
+	distutils_do_install
+
+	cd ${S}/cgcs-patch/bin
+
+	install -m 755 -d ${D}/${sbindir}
+	install -m 755 -d ${D}/${sysconfdir}/bash_completion.d
+	install -m 755 -d ${D}/${sysconfdir}/goenabled.d
+	install -m 755 -d ${D}/${sysconfdir}/init.d
+	install -m 755 -d ${D}/${sysconfdir}/logrotate.d
+	install -m 755 -d ${D}/${sysconfdir}/patching
+	install -m 700 -d ${D}/${sysconfdir}/patching/patch-scripts
+	install -m 755 -d ${D}/${sysconfdir}/pmon.d
+	install -m 755 -d ${D}/${systemd_system_unitdir}
+
+	# Daemons and CLI tools
+	install -m 500 sw-patch-agent ${D}/${sbindir}/sw-patch-agent
+	install -m 500 sw-patch-controller-daemon ${D}/${sbindir}/sw-patch-controller-daemon
+	install -m 555 sw-patch ${D}/${sbindir}/sw-patch
+	install -m 555 rpm-audit ${D}/${sbindir}/rpm-audit
+
+	install -m 500 sw-patch-controller-daemon-init.sh ${D}/${sysconfdir}/init.d/sw-patch-controller-daemon
+	install -m 500 sw-patch-agent-init.sh ${D}/${sysconfdir}/init.d/sw-patch-agent
+
+	install -m 600 patching.conf ${D}/${sysconfdir}/patching/patching.conf
+	install -m 644 policy.json ${D}/${sysconfdir}/patching/policy.json
+
+	install -m 444 pmon-sw-patch-controller-daemon.conf ${D}/${sysconfdir}/pmon.d/sw-patch-controller-daemon.conf
+	install -m 444 pmon-sw-patch-agent.conf ${D}/${sysconfdir}/pmon.d/sw-patch-agent.conf
+	install -m 444 *.service ${D}/${systemd_system_unitdir}
+	install -m 444 sw-patch.completion ${D}/${sysconfdir}/bash_completion.d/sw-patch
+	install -m 400 patch-functions ${D}/${sysconfdir}/patching/patch-functions
+
+	# NOTE(review): "tempfiles.d" does not match the systemd "tmpfiles.d"
+	# convention; FILES_cgcs-patch uses the same path so packaging succeeds,
+	# but confirm the intended destination before renaming both.
+	install -D -m 444 patch-tmpdirs.conf ${D}/${sysconfdir}/tempfiles.d/patch-tmpdirs.conf
+
+	install -m 500 run-patch-scripts ${D}/${sbindir}/run-patch-scripts
+	install -m 500 sw-patch-controller-daemon-restart ${D}/${sbindir}/sw-patch-controller-daemon-restart
+	install -m 500 sw-patch-agent-restart ${D}/${sbindir}/sw-patch-agent-restart
+
+	# (a second, duplicate install of run-patch-scripts and the two
+	# *-restart helpers was removed here)
+	install -m 500 sw-patch-init.sh ${D}/${sysconfdir}/init.d/sw-patch
+	install -m 500 sw-patch-controller-init.sh ${D}/${sysconfdir}/init.d/sw-patch-controller
+	install -m 555 patch_check_goenabled.sh ${D}/${sysconfdir}/goenabled.d/patch_check_goenabled.sh
+	install -m 444 patching.logrotate ${D}/${sysconfdir}/logrotate.d/patching
+
+	install -m 500 upgrade-start-pkg-extract ${D}/${sbindir}/upgrade-start-pkg-extract
+}
+
+FILES_cgcs-patch = " \
+       ${libdir}/python2.7/site-packages/cgcs_patch \
+       ${libdir}/python2.7/site-packages/cgcs_patch-1.0-py2.7.egg-info \
+       ${libdir}/python2.7/site-packages/cgcs_make_patch \
+       ${libdir}/python2.7/site-packages/cgcs_patch-1.0-py2.7.egg-info/top_level.txt \
+       ${sbindir}/rpm-audit \
+       ${sysconfdir}/patching/policy.json \
+       ${sysconfdir}/patching/patching.conf \
+       ${sysconfdir}/patching/patch-scripts \
+       ${sysconfdir}/init.d/sw-patch \
+       ${systemd_system_unitdir}/sw-patch.service \
+       ${sysconfdir}/goenabled.d/patch_check_goenabled.sh \
+       ${sysconfdir}/logrotate.d/patching \
+       ${sysconfdir}/tempfiles.d/patch-tmpdirs.conf \
+       ${sysconfdir}/patching/patch-functions \
+"
+
+FILES_cgcs-patch-agent = " \
+       ${sbindir}/sw-patch-agent \
+       ${sbindir}/sw-patch-agent-restart \
+       ${sysconfdir}/pmon.d/sw-patch-agent.conf \
+       ${sbindir}/run-patch-scripts \
+       ${sysconfdir}/init.d/sw-patch-agent \
+       ${systemd_system_unitdir}/sw-patch-agent.service \
+       ${sysconfdir}/bash_completion.d/sw-patch \
+       "
+
+FILES_cgcs-patch-controller = " \
+       ${sbindir}/sw-patch-controller-daemon-restart \
+       ${sysconfdir}/init.d/sw-patch-controller-daemon \
+       ${sbindir}/sw-patch-controller-daemon \
+       ${sbindir}/upgrade-start-pkg-extract \
+       ${sysconfdir}/pmon.d/sw-patch-controller-daemon.conf \
+       ${systemd_system_unitdir}/sw-patch-controller-daemon.service \
+       ${sbindir}/sw-patch \
+       ${sysconfdir}/init.d/sw-patch-controller \
+       ${systemd_system_unitdir}/sw-patch-controller.service \
+       "
diff --git a/meta-stx/recipes-core/stx-update/enable-dev-patch.inc b/meta-stx/recipes-core/stx-update/enable-dev-patch.inc
new file mode 100644 (file)
index 0000000..f1e0036
--- /dev/null
@@ -0,0 +1,35 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " enable-dev-patch"
+
+do_configure_prepend () {
+       :
+} 
+
+do_compile_prepend () {
+       :
+}
+
+do_install_prepend () {
+       cd ${S}/enable-dev-patch/
+       install -m 755 -d ${D}/${sysconfdir}/pki/wrs
+       install -m 444 enable-dev-patch/dev_certificate_enable.bin ${D}/${sysconfdir}/pki/wrs
+
+}
+
+FILES_enable-dev-patch = " \
+       ${sysconfdir}/pki/wrs/dev_certificate_enable.bin \
+       "
diff --git a/meta-stx/recipes-core/stx-update/patch-alarm.inc b/meta-stx/recipes-core/stx-update/patch-alarm.inc
new file mode 100644 (file)
index 0000000..2b1008c
--- /dev/null
@@ -0,0 +1,57 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " patch-alarm"
+DESCRIPTION_patch-alarm = "TIS Platform Patching"
+SUMMARY_patch-alarm = "Patch alarm management"
+
+inherit setuptools
+
+RDEPENDS_patch-alarm_append = " \
+       bash \
+       python \
+       "
+
+do_configure_append () {
+       cd ${S}/patch-alarm/patch-alarm
+       distutils_do_configure
+} 
+
+do_compile_append () {
+       cd ${S}/patch-alarm/patch-alarm
+       distutils_do_compile
+}
+
+do_install_append () {
+       cd ${S}/patch-alarm/patch-alarm
+       distutils_do_install
+
+       cd ${S}/patch-alarm/
+
+       install -m 755 -d ${D}/${bindir}
+       install -m 755 -d ${D}/${sysconfdir}/init.d
+
+       install -m 700 scripts/bin/patch-alarm-manager ${D}/${bindir}/
+       install -m 700 scripts/bin/patch-alarm-manager ${D}/${sysconfdir}/init.d/
+       
+
+}
+
+FILES_patch-alarm = " \
+       ${libdir}/python2.7/site-packages/patch_alarm \
+       ${libdir}/python2.7/site-packages/patch_alarm*.egg-info \
+       ${bindir}/patch-alarm-manager \
+       ${sysconfdir}/init.d/patch-alarm-manager \
+       "
diff --git a/meta-stx/recipes-core/stx-update/stx-update.bb b/meta-stx/recipes-core/stx-update/stx-update.bb
new file mode 100644 (file)
index 0000000..2412e18
--- /dev/null
@@ -0,0 +1,56 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-update"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCREV = "2542c5539bab060830009d02cbb257cc8bf4a376"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = "git://opendev.org/starlingx/update.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+DEPENDS = " \
+       python \
+       python-pbr-native \
+       "
+
+RDEPENDS_${PN} += " python-requests-toolbelt"
+
+require cgcs-patch.inc
+require enable-dev-patch.inc
+require patch-alarm.inc
+
+do_configure () {
+       :
+} 
+
+do_compile() {
+       :
+}
+
+do_install () {
+       :
+}
+
+pkg_postinst_ontarget_${PN} () { 
+}
+
+FILES_${PN} = " "
diff --git a/meta-stx/recipes-core/stx-update/tsconfig.inc b/meta-stx/recipes-core/stx-update/tsconfig.inc
new file mode 100644 (file)
index 0000000..5277e95
--- /dev/null
@@ -0,0 +1,46 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " tsconfig"
+
+inherit setuptools
+
+RDEPENDS_tsconfig_append = " bash"
+
+do_configure_prepend () {
+       cd ${S}/tsconfig/tsconfig
+       distutils_do_configure
+} 
+
+do_compile_prepend () {
+       cd ${S}/tsconfig/tsconfig
+       distutils_do_compile
+}
+
+do_install_prepend () {
+       cd ${S}/tsconfig/tsconfig
+       distutils_do_install
+
+
+       install -m 755 -d ${D}/${bindir}
+       install -m 500 scripts/tsconfig ${D}/${bindir}/
+
+}
+
+FILES_tsconfig = " \
+       ${libdir}/python2.7/site-packages/tsconfig \
+       ${libdir}/python2.7/site-packages/tsconfig*.egg-info \
+       ${bindir}/tsconfig \
+       "
diff --git a/meta-stx/recipes-core/stx-upstream/openstack-ras_git.bb b/meta-stx/recipes-core/stx-upstream/openstack-ras_git.bb
new file mode 100644 (file)
index 0000000..94c49e3
--- /dev/null
@@ -0,0 +1,48 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Pacemaker High Availability resource agents for OpenStack"
+SUMMARY = "Openstack Resource Agents from Madkiss"
+
+PROTOCOL = "https"
+BRANCH = "stable-grizzly"
+SRCNAME = "openstack-resource-agents"
+SRCREV = "6db39a959438326ef16ae671f02ebbce22309e21"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://COPYING;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI = "git://github.com/madkiss/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+do_configure() {
+       :
+}
+
+
+do_compile() {
+       :
+}
+
+
+do_install() {
+       make  DESTDIR=${D} install
+       rm -rf ${D}/usr/lib/ocf/resource.d/openstack/ceilometer-agent-central
+       rm -rf ${D}/usr/lib/ocf/resource.d/openstack/ceilometer-alarm-evaluator
+       rm -rf ${D}/usr/lib/ocf/resource.d/openstack/ceilometer-alarm-notifier
+}
+
+FILES_${PN} += " ${libdir}"
diff --git a/meta-stx/recipes-core/stx-utilities/ceph/ceph-manager.inc b/meta-stx/recipes-core/stx-utilities/ceph/ceph-manager.inc
new file mode 100644 (file)
index 0000000..fcfbfe4
--- /dev/null
@@ -0,0 +1,57 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " ceph-manager"
+
+RDEPENDS_ceph-manager += " sysinv"
+DESCRIPTION_ceph-manager = " \
+Handle Ceph API calls and provide status updates via alarms. \
+Handle sysinv RPC calls for long running Ceph API operations: \
+       - cache tiering enable \
+       - cache tiering disable \
+"
+
+do_configure_append() {
+       cd ${S}/ceph/ceph-manager/ceph-manager
+       distutils_do_configure
+}
+do_compile_append() {
+       cd ${S}/ceph/ceph-manager/ceph-manager
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/ceph/ceph-manager/ceph-manager
+       distutils_do_install
+
+       install -d -m0755 ${D}/${bindir}
+       install -d -m0755 ${D}/${sysconfdir}/init.d
+       install -d -m0755 ${D}/${sysconfdir}/logrotate.d
+       install -d -m0755 ${D}/${systemd_system_unitdir}
+
+       install -p -m0700 ${S}/ceph/ceph-manager/scripts/bin/ceph-manager ${D}/${sysconfdir}/init.d
+       install -p -m0700 ${S}/ceph/ceph-manager/scripts/bin/ceph-manager ${D}/${bindir}
+       install -p -m0700 ${S}/ceph/ceph-manager/files/ceph-manager.logrotate ${D}/${sysconfdir}/logrotate.d
+       install -p -m0700 ${S}/ceph/ceph-manager/files/ceph-manager.service ${D}/${systemd_system_unitdir}
+}
+
+FILES_ceph-manager = "  \
+       ${bindir}/ceph-manager \
+       ${sysconfdir}/init.d/ceph-manager \
+       ${sysconfdir}/logrotate.d/ceph-manager.logrotate \
+       ${systemd_system_unitdir}/ceph-manager.service \
+       ${PYTHON_SITEPACKAGES_DIR}/ceph_manager \
+       ${PYTHON_SITEPACKAGES_DIR}/ceph_manager-1.0.0-py2.7.egg-info \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/ceph/python-cephclient.inc b/meta-stx/recipes-core/stx-utilities/ceph/python-cephclient.inc
new file mode 100644 (file)
index 0000000..1b204c5
--- /dev/null
@@ -0,0 +1,59 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " python-cephclient"
+
+# NOTE(review): these runtime dependencies belong to the python-cephclient
+# package produced by this .inc; the original assigned them to
+# RDEPENDS_ceph-manager (copy/paste from ceph-manager.inc), leaving
+# python-cephclient with no RDEPENDS at all.
+RDEPENDS_python-cephclient += " \
+	python \
+	python-ipaddress \
+	python-six \
+	python-requests \
+	"
+
+DESCRIPTION_python-cephclient = " \
+A client library in Python for Ceph Mgr RESTful plugin providing REST API \
+access to the cluster over an SSL-secured connection. Python API is compatible \
+with the old Python Ceph client at \
+https://github.com/dmsimard/python-cephclient that no longer works in Ceph \
+mimic because Ceph REST API component was removed. \
+"
+
+do_configure_append() {
+	cd ${S}/ceph/python-cephclient/python-cephclient
+	rm -rf .pytest_cache
+	rm -rf python_cephclient.egg-info
+	rm -f requirements.txt
+	distutils_do_configure
+}
+do_compile_append() {
+	cd ${S}/ceph/python-cephclient/python-cephclient
+	distutils_do_compile
+}
+
+do_install_append() {
+	cd ${S}/ceph/python-cephclient/python-cephclient
+	distutils_do_install
+}
+
+FILES_python-cephclient = "  \
+	${PYTHON_SITEPACKAGES_DIR}/cephclient \
+	${PYTHON_SITEPACKAGES_DIR}/python_cephclient-13.2.2.0-py2.7.egg-info \
+	"
+#	/usr/share/licenses/python-cephclient-13.2.2.0
+#	/usr/share/licenses/python-cephclient-13.2.2.0/LICENSE
+
diff --git a/meta-stx/recipes-core/stx-utilities/files/build.info b/meta-stx/recipes-core/stx-utilities/files/build.info
new file mode 100644 (file)
index 0000000..f76f009
--- /dev/null
@@ -0,0 +1,16 @@
+###
+### StarlingX
+###     Release @STX_RELEASE@
+###
+
+OS="@OS@"
+SW_VERSION="@STX_RELEASE@"
+BUILD_TARGET="Host Installer"
+BUILD_TYPE="Formal"
+BUILD_ID="r/stx.@STX_ID@"
+
+JOB="STX_BUILD_@STX_ID@"
+BUILD_BY="starlingx.build@cengn.ca"
+BUILD_NUMBER="40"
+BUILD_HOST="starlingx_mirror"
+BUILD_DATE="@BUILD_DATE@ +0000"
diff --git a/meta-stx/recipes-core/stx-utilities/security/stx-ssl.inc b/meta-stx/recipes-core/stx-utilities/security/stx-ssl.inc
new file mode 100644 (file)
index 0000000..de901b9
--- /dev/null
@@ -0,0 +1,42 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " stx-ssl"
+
+DESCRIPTION_stx-ssl = " Wind River Security"
+
+do_install_append() {
+
+       openssl req -new -x509 -sha256 \
+               -keyout ${S}/security/stx-ssl/self-signed-server-cert.pem \
+               -out ${S}/security/stx-ssl/self-signed-server-cert.pem \
+               -days 365 -nodes \
+               -config ${S}/security/stx-ssl/server-csr.conf
+
+       install -p -d -m0755 ${D}/${sysconfdir}/ssl/private/
+       install -m0400 ${S}/security/stx-ssl/self-signed-server-cert.pem \
+               ${D}/${sysconfdir}/ssl/private/self-signed-server-cert.pem 
+
+#      install -p -d -m0755 ${D}/${sbindir}
+#      install -m0700 ${S}/security/stx-ssl/files/tpmdevice-setup ${D}/${sbindir}/tpmdevice-setup
+
+       #install -d -m 0755 ${D}/${datadir}/stx-ssl-${PV}/
+       #install -m644 ${S}/security/stx-ssl/LICENSE ${D}/${datadir}/stx-ssl-${PV}/
+}
+
+FILES_stx-ssl = " \
+       ${sysconfdir}/ssl/private/self-signed-server-cert.pem  \
+       "
+#      ${sbindir}/tpmdevice-setup 
diff --git a/meta-stx/recipes-core/stx-utilities/stx-utilities_git.bb b/meta-stx/recipes-core/stx-utilities/stx-utilities_git.bb
new file mode 100644 (file)
index 0000000..cc301fc
--- /dev/null
@@ -0,0 +1,92 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-utilities"
+
+PROTOCOL = "https"
+SRCNAME = "utilities"
+BRANCH = "r/stx.3.0"
+SRCREV = "cbad6b792157d066dd601f0f9ce62dc177d4c848"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+#e7b3e2a120f5d4c0f6f562a52b6badf4  ./security/tpm2-openssl-engine/tpm2-openssl-engine/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/build-info/build-info-1.0/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/namespace-utils/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/namespace-utils/namespace-utils/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/nfscheck/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/nfscheck/files/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/pci-irq-affinity-agent/files/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/platform-util/platform-util/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/platform-util/scripts/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/stx-extensions/files/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/update-motd/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/update-motd/files/LICENSE
+#3b83ef96387f14655fc854ddc3c6bd57  ./utilities/worker-utils/worker-utils/LICENSE
+
+LIC_FILES_CHKSUM = " \
+       file://ceph/ceph-manager/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://ceph/python-cephclient/python-cephclient/LICENSE;md5=41687b590435621fc0676ac02c51154f \
+       file://security/stx-ssl/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://tools/collector/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://tools/collector/scripts/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://tools/engtools/hostdata-collectors/scripts/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://utilities/logmgmt/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://utilities/logmgmt/logmgmt/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       "
+
+
+SRC_URI = " \
+       git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       "
+
+inherit setuptools distutils python-dir
+DEPENDS = " \
+       python-pip \
+       python-pbr-native \
+       systemd \
+"
+
+require utilities/build-info.inc
+require utilities/logmgmt.inc
+require utilities/namespace-utils.inc
+require utilities/nfscheck.inc
+require utilities/pci-irq-affinity.inc
+require utilities/platform-util.inc
+require utilities/stx-extensions.inc
+require utilities/update-motd.inc
+require utilities/worker-utils.inc
+require ceph/ceph-manager.inc
+require ceph/python-cephclient.inc
+require security/stx-ssl.inc
+# Skip tpm2-openssl-engine2
+require tools/collector.inc
+require tools/collect-engtools.inc
+
+do_configure() {
+       :
+}
+
+do_compile() {
+       :
+}
+
+do_install() {
+       :
+}
+
+FILES_${PN} = " "
diff --git a/meta-stx/recipes-core/stx-utilities/tools/collect-engtools.inc b/meta-stx/recipes-core/stx-utilities/tools/collect-engtools.inc
new file mode 100644 (file)
index 0000000..66fd74a
--- /dev/null
@@ -0,0 +1,106 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " collect-engtools"
+
+RDEPENDS_collect-engtools += " \
+       iperf3 \
+       bash \
+       perl \
+       python \
+       "
+
+SUMMARY_collect-engtools= "Host performance data collection tools package"
+DESCRIPTION_collect-engtools= " \
+This package contains data collection tools to monitor host performance. \
+Tools are general purpose engineering and debugging related. Includes \
+overall memory, cpu occupancy, per-task cpu, per-task scheduling, per-task \
+io. \
+"
+
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       :
+}
+
+do_install_append() {
+       cd ${S}/tools/engtools/hostdata-collectors/scripts
+
+       install -d -m0755 ${D}/${bindir}
+       install -m 755 buddyinfo.py ${D}/${bindir}
+       install -m 755 chewmem ${D}/${bindir}
+       install -m 755 ceph.sh ${D}/${bindir}
+       install -m 755 cleanup-engtools.sh ${D}/${bindir}
+       install -m 755 collect-engtools.sh ${D}/${bindir}
+       install -m 755 diskstats.sh ${D}/${bindir}
+       install -m 755 engtools_util.sh ${D}/${bindir}
+       install -m 755 filestats.sh ${D}/${bindir}
+       install -m 755 iostat.sh ${D}/${bindir}
+       install -m 755 linux_benchmark.sh ${D}/${bindir}
+       install -m 755 memstats.sh ${D}/${bindir}
+       install -m 755 netstats.sh ${D}/${bindir}
+       install -m 755 postgres.sh ${D}/${bindir}
+       install -m 755 rabbitmq.sh ${D}/${bindir}
+       install -m 755 remote/rbzip2-engtools.sh ${D}/${bindir}
+       install -m 755 remote/rstart-engtools.sh ${D}/${bindir}
+       install -m 755 remote/rstop-engtools.sh ${D}/${bindir}
+       install -m 755 remote/rsync-engtools-data.sh ${D}/${bindir}
+       install -m 755 slab.sh ${D}/${bindir}
+       install -m 755 ticker.sh ${D}/${bindir}
+       install -m 755 top.sh ${D}/${bindir}
+       install -m 755 vswitch.sh ${D}/${bindir}
+       install -m 755 live_stream.py ${D}/${bindir}
+
+       install -p -d -m0755 ${D}/${sysconfdir}/engtools/
+       install -m0644 -p cfg/engtools.conf ${D}/${sysconfdir}/engtools
+       install -d -m0755 ${D}/${sysconfdir}/init.d
+       install -m0755 init.d/collect-engtools.sh ${D}/${sysconfdir}/init.d
+
+       install -d -m0755 ${D}/${systemd_system_unitdir}
+       install -m0644 -p -D collect-engtools.service ${D}/${systemd_system_unitdir}
+
+}
+
+FILES_collect-engtools = "  \
+       ${bindir}/buddyinfo.py \
+       ${bindir}/chewmem \
+       ${bindir}/ceph.sh \
+       ${bindir}/cleanup-engtools.sh \
+       ${bindir}/collect-engtools.sh \
+       ${bindir}/diskstats.sh \
+       ${bindir}/engtools_util.sh \
+       ${bindir}/filestats.sh \
+       ${bindir}/iostat.sh \
+       ${bindir}/linux_benchmark.sh \
+       ${bindir}/memstats.sh \
+       ${bindir}/netstats.sh \
+       ${bindir}/postgres.sh \
+       ${bindir}/rabbitmq.sh \
+       ${bindir}/rbzip2-engtools.sh \
+       ${bindir}/rstart-engtools.sh \
+       ${bindir}/rstop-engtools.sh \
+       ${bindir}/rsync-engtools-data.sh \
+       ${bindir}/slab.sh \
+       ${bindir}/ticker.sh \
+       ${bindir}/top.sh \
+       ${bindir}/vswitch.sh \
+       ${bindir}/live_stream.py \
+       ${sysconfdir}/engtools/engtools.conf \
+       ${sysconfdir}/init.d/collect-engtools.sh \
+       ${systemd_system_unitdir}/collect-engtools.service \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/tools/collector.inc b/meta-stx/recipes-core/stx-utilities/tools/collector.inc
new file mode 100644 (file)
index 0000000..4aa163d
--- /dev/null
@@ -0,0 +1,97 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " collector"
+
+RDEPENDS_collector += " bash"
+DESCRIPTION_collector= " \
+This packages scripts that implement data and log collection that field \
+support can execute to gather current state and runtime history for off \
+platform analysis and debug. \
+"
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       :
+}
+
+do_install_append() {
+       cd ${S}/tools/collector/scripts
+
+       install -m0755 -d ${D}/${sysconfdir}/collect.d
+       install -m0755 -d ${D}/${sysconfdir}/collect
+       install -m0755 -d ${D}/${sbindir}
+       install -m0755 -d ${D}/${bindir}
+       install -m0755 -d ${D}/${sbindir}
+
+       install -m 755 collect ${D}/${sbindir}/collect
+       install -m 755 collect_host ${D}/${sbindir}/collect_host
+       install -m 755 collect_date ${D}/${sbindir}/collect_date
+       install -m 755 collect_utils ${D}/${sbindir}/collect_utils
+       install -m 755 collect_parms ${D}/${sbindir}/collect_parms
+       install -m 755 collect_mask_passwords ${D}/${sbindir}/collect_mask_passwords
+       install -m 755 expect_done ${D}/${sbindir}/expect_done
+
+       install -m 755 collect_sysinv.sh ${D}/${sysconfdir}/collect.d/collect_sysinv
+       install -m 755 collect_psqldb.sh ${D}/${sysconfdir}/collect.d/collect_psqldb
+       install -m 755 collect_openstack.sh ${D}/${sysconfdir}/collect.d/collect_openstack
+       install -m 755 collect_networking.sh ${D}/${sysconfdir}/collect.d/collect_networking
+       install -m 755 collect_ceph.sh ${D}/${sysconfdir}/collect.d/collect_ceph
+       install -m 755 collect_sm.sh ${D}/${sysconfdir}/collect.d/collect_sm
+       install -m 755 collect_tc.sh ${D}/${sysconfdir}/collect.d/collect_tc
+       install -m 755 collect_nfv_vim.sh ${D}/${sysconfdir}/collect.d/collect_nfv_vim
+       install -m 755 collect_ovs.sh ${D}/${sysconfdir}/collect.d/collect_ovs
+       install -m 755 collect_patching.sh ${D}/${sysconfdir}/collect.d/collect_patching
+       install -m 755 collect_coredump.sh ${D}/${sysconfdir}/collect.d/collect_coredump
+       install -m 755 collect_crash.sh ${D}/${sysconfdir}/collect.d/collect_crash
+       install -m 755 collect_ima.sh ${D}/${sysconfdir}/collect.d/collect_ima
+       install -m 755 collect_fm.sh ${D}/${sysconfdir}/collect.d/collect_fm
+       install -m 755 collect_containerization.sh ${D}/${sysconfdir}/collect.d/collect_containerization
+
+       install -m 755 etc.exclude ${D}/${sysconfdir}/collect/etc.exclude
+       install -m 755 run.exclude ${D}/${sysconfdir}/collect/run.exclude
+
+       ln -sf ${sbindir}/collect ${D}/${bindir}/collect
+}
+
+FILES_collector = "  \
+       ${sbindir}/collect \
+       ${sbindir}/collect_host \
+       ${sbindir}/collect_date \
+       ${sbindir}/collect_utils \
+       ${sbindir}/collect_parms \
+       ${sbindir}/collect_mask_passwords \
+       ${sbindir}/expect_done \
+       ${sysconfdir}/collect.d/collect_sysinv \
+       ${sysconfdir}/collect.d/collect_psqldb \
+       ${sysconfdir}/collect.d/collect_openstack \
+       ${sysconfdir}/collect.d/collect_networking \
+       ${sysconfdir}/collect.d/collect_ceph \
+       ${sysconfdir}/collect.d/collect_sm \
+       ${sysconfdir}/collect.d/collect_tc \
+       ${sysconfdir}/collect.d/collect_nfv_vim \
+       ${sysconfdir}/collect.d/collect_ovs \
+       ${sysconfdir}/collect.d/collect_patching \
+       ${sysconfdir}/collect.d/collect_coredump \
+       ${sysconfdir}/collect.d/collect_crash \
+       ${sysconfdir}/collect.d/collect_ima \
+       ${sysconfdir}/collect.d/collect_fm \
+       ${sysconfdir}/collect.d/collect_containerization \
+       ${sysconfdir}/collect/etc.exclude \
+       ${sysconfdir}/collect/run.exclude \
+       ${bindir}/collect \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/build-info.inc b/meta-stx/recipes-core/stx-utilities/utilities/build-info.inc
new file mode 100644 (file)
index 0000000..38f2fc0
--- /dev/null
@@ -0,0 +1,38 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " build-info"
+
+DESCRIPTION_build-info  = "Build Info"
+# NOTE(review): was SUMMARY_update-motd — a copy/paste from update-motd.inc
+# that left the build-info package without a SUMMARY of its own.
+SUMMARY_build-info  = "Build Info"
+
+SRC_URI += "file://build.info"
+
+do_install_append() {
+	install -d ${D}/${sysconfdir}
+	install -m 644 ${WORKDIR}/build.info ${D}/${sysconfdir}
+	sed -i -e "s/@OS@/${DISTRO}/" \
+	       -e "s/@STX_RELEASE@/${STX_REL}/" \
+	       -e "s/@STX_ID@/${STX_ID}/" \
+	       -e "s/@BUILD_DATE@/${STX_BUILD_DATE}/" \
+	       ${D}/${sysconfdir}/build.info
+
+}
+
+do_install[vardepsexclude] += "STX_BUILD_DATE"
+
+FILES_build-info = "${sysconfdir}/build.info"
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/collect-engtools.inc b/meta-stx/recipes-core/stx-utilities/utilities/collect-engtools.inc
new file mode 100644 (file)
index 0000000..66fd74a
--- /dev/null
@@ -0,0 +1,106 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " collect-engtools"
+
+RDEPENDS_collect-engtools += " \
+       iperf3 \
+       bash \
+       perl \
+       python \
+       "
+
+SUMMARY_collect-engtools= "Host performance data collection tools package"
+DESCRIPTION_collect-engtools= " \
+This package contains data collection tools to monitor host performance. \
+Tools are general purpose engineering and debugging related. Includes \
+overall memory, cpu occupancy, per-task cpu, per-task scheduling, per-task \
+io. \
+"
+
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       :
+}
+
+do_install_append() {
+       cd ${S}/tools/engtools/hostdata-collectors/scripts
+
+       install -d -m0755 ${D}/${bindir}
+       install -m 755 buddyinfo.py ${D}/${bindir}
+       install -m 755 chewmem ${D}/${bindir}
+       install -m 755 ceph.sh ${D}/${bindir}
+       install -m 755 cleanup-engtools.sh ${D}/${bindir}
+       install -m 755 collect-engtools.sh ${D}/${bindir}
+       install -m 755 diskstats.sh ${D}/${bindir}
+       install -m 755 engtools_util.sh ${D}/${bindir}
+       install -m 755 filestats.sh ${D}/${bindir}
+       install -m 755 iostat.sh ${D}/${bindir}
+       install -m 755 linux_benchmark.sh ${D}/${bindir}
+       install -m 755 memstats.sh ${D}/${bindir}
+       install -m 755 netstats.sh ${D}/${bindir}
+       install -m 755 postgres.sh ${D}/${bindir}
+       install -m 755 rabbitmq.sh ${D}/${bindir}
+       install -m 755 remote/rbzip2-engtools.sh ${D}/${bindir}
+       install -m 755 remote/rstart-engtools.sh ${D}/${bindir}
+       install -m 755 remote/rstop-engtools.sh ${D}/${bindir}
+       install -m 755 remote/rsync-engtools-data.sh ${D}/${bindir}
+       install -m 755 slab.sh ${D}/${bindir}
+       install -m 755 ticker.sh ${D}/${bindir}
+       install -m 755 top.sh ${D}/${bindir}
+       install -m 755 vswitch.sh ${D}/${bindir}
+       install -m 755 live_stream.py ${D}/${bindir}
+
+       install -p -d -m0755 ${D}/${sysconfdir}/engtools/
+       install -m0644 -p cfg/engtools.conf ${D}/${sysconfdir}/engtools
+       install -d -m0755 ${D}/${sysconfdir}/init.d
+       install -m0755 init.d/collect-engtools.sh ${D}/${sysconfdir}/init.d
+
+       install -d -m0755 ${D}/${systemd_system_unitdir}
+       install -m0644 -p -D collect-engtools.service ${D}/${systemd_system_unitdir}
+
+}
+
+FILES_collect-engtools = "  \
+       ${bindir}/buddyinfo.py \
+       ${bindir}/chewmem \
+       ${bindir}/ceph.sh \
+       ${bindir}/cleanup-engtools.sh \
+       ${bindir}/collect-engtools.sh \
+       ${bindir}/diskstats.sh \
+       ${bindir}/engtools_util.sh \
+       ${bindir}/filestats.sh \
+       ${bindir}/iostat.sh \
+       ${bindir}/linux_benchmark.sh \
+       ${bindir}/memstats.sh \
+       ${bindir}/netstats.sh \
+       ${bindir}/postgres.sh \
+       ${bindir}/rabbitmq.sh \
+       ${bindir}/rbzip2-engtools.sh \
+       ${bindir}/rstart-engtools.sh \
+       ${bindir}/rstop-engtools.sh \
+       ${bindir}/rsync-engtools-data.sh \
+       ${bindir}/slab.sh \
+       ${bindir}/ticker.sh \
+       ${bindir}/top.sh \
+       ${bindir}/vswitch.sh \
+       ${bindir}/live_stream.py \
+       ${sysconfdir}/engtools/engtools.conf \
+       ${sysconfdir}/init.d/collect-engtools.sh \
+       ${systemd_system_unitdir}/collect-engtools.service \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/logmgmt.inc b/meta-stx/recipes-core/stx-utilities/utilities/logmgmt.inc
new file mode 100644 (file)
index 0000000..fa983b5
--- /dev/null
@@ -0,0 +1,62 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " logmgmt"
+
+RDEPENDS_logmgmt += " \
+       systemd \
+       python-daemon \
+       "
+DESCRIPTION_logmgmt = "Management of /var/log filesystem"
+
+do_configure_append() {
+       cd ${S}/utilities/logmgmt/logmgmt/
+       distutils_do_configure
+}
+do_compile_append() {
+       cd ${S}/utilities/logmgmt/logmgmt/
+       distutils_do_compile
+}
+
+do_install_append() {
+	cd ${S}/utilities/logmgmt/logmgmt/
+	distutils_do_install
+
+	cd ${S}/utilities/logmgmt/scripts
+	install -d -m0755 ${D}/${bindir}
+	install -m0700 bin/logmgmt ${D}/${bindir}
+	install -m0700 bin/logmgmt_postrotate ${D}/${bindir}
+	install -m0700 bin/logmgmt_prerotate ${D}/${bindir}
+
+	install -d -m0755 ${D}/${sysconfdir}/init.d
+	install -m0700 init.d/logmgmt ${D}/${sysconfdir}/init.d
+
+	install -d -m0755 ${D}/${sysconfdir}/pmon.d
+	install -m0700 pmon.d/logmgmt ${D}/${sysconfdir}/pmon.d
+
+	install -d -m0755 ${D}/${systemd_system_unitdir}
+	install -m0644 etc/systemd/system/logmgmt.service ${D}/${systemd_system_unitdir}
+}
+
+FILES_logmgmt = "  \
+       ${bindir}/logmgmt \
+       ${bindir}/logmgmt_postrotate \
+       ${bindir}/logmgmt_prerotate \
+       ${sysconfdir}/init.d/logmgmt \
+       ${sysconfdir}/pmon.d/logmgmt \
+       ${systemd_system_unitdir}/logmgmt.service \
+       ${PYTHON_SITEPACKAGES_DIR}/logmgmt/ \
+       ${PYTHON_SITEPACKAGES_DIR}/logmgmt-${PV}-py${PYTHON_BASEVERSION}.egg-info/ \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/namespace-utils.inc b/meta-stx/recipes-core/stx-utilities/utilities/namespace-utils.inc
new file mode 100644 (file)
index 0000000..30e4a54
--- /dev/null
@@ -0,0 +1,42 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " namespace-utils"
+
+RDEPENDS_namespace-utils += " bash"
+
+DESCRIPTION_namespace-utils = "Titanium Cloud namespace utilities"
+SUMMARY_namespace-utils = "namespace utils"
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       cd ${S}/utilities/namespace-utils/namespace-utils
+       $CC ${LDFLAGS} ${CFLAGS} -o bashns bashns.c
+}
+
+do_install_append() {
+       cd ${S}/utilities/namespace-utils/namespace-utils
+
+       install -d -m0755 ${D}/${sbindir}
+       install -m0500 bashns ${D}/${sbindir}
+       install -m0500 umount-in-namespace  ${D}/${sbindir}
+}
+
+FILES_namespace-utils = "  \
+       ${sbindir}/bashns \
+       ${sbindir}/umount-in-namespace \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/nfscheck.inc b/meta-stx/recipes-core/stx-utilities/utilities/nfscheck.inc
new file mode 100644 (file)
index 0000000..e0ddeb7
--- /dev/null
@@ -0,0 +1,42 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " nfscheck"
+
+RDEPENDS_nfscheck  += " bash systemd"
+DESCRIPTION_nfscheck = "NFS Audit"
+SUMMARY_nfscheck = "NFS Audit"
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       :
+}
+
+do_install_append() {
+       cd ${S}/utilities/nfscheck/files
+
+       install -d -m0755 ${D}/${bindir}
+       install -m0755 nfscheck.sh ${D}/${bindir}
+
+       install -d -m0755 ${D}/${systemd_system_unitdir}
+       install -m0644 nfscheck.service ${D}/${systemd_system_unitdir}
+}
+
+FILES_nfscheck = "  \
+       ${bindir}/nfscheck.sh \
+       ${systemd_system_unitdir}/nfscheck.service \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/pci-irq-affinity.inc b/meta-stx/recipes-core/stx-utilities/utilities/pci-irq-affinity.inc
new file mode 100644 (file)
index 0000000..aa1bebf
--- /dev/null
@@ -0,0 +1,60 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " pci-irq-affinity"
+
+RDEPENDS_pci-irq-affinity  += " python-novaclient"
+DESCRIPTION_pci-irq-affinity  = "StarlingX PCI Interrupt Affinity Agent Package"
+SUMMARY_pci-irq-affinity  = "StarlingX PCI Interrupt Affinity Agent Package"
+
+inherit setuptools distutils
+
+do_configure_append() {
+       cd ${S}/utilities/pci-irq-affinity-agent/pci_irq_affinity
+       distutils_do_configure
+}
+do_compile_append() {
+       cd ${S}/utilities/pci-irq-affinity-agent/pci_irq_affinity
+       distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/utilities/pci-irq-affinity-agent/pci_irq_affinity
+       distutils_do_install
+
+       cd ${S}/utilities/pci-irq-affinity-agent/files
+
+       install -p -d -m0755 ${D}/${sysconfdir}/init.d
+       install -p -d -m0755 ${D}/${sysconfdir}/pmon.d
+       install -p -d -m0755 ${D}/${sysconfdir}/pci_irq_affinity
+       install -p -d -m0755 ${D}/${systemd_system_unitdir}
+       install -p -d -m0755 ${D}/${bindir}
+
+       install -m0755 pci-irq-affinity-agent ${D}/${sysconfdir}/init.d/pci-irq-affinity-agent
+       install -m0644 pci-irq-affinity-agent.service ${D}/${systemd_system_unitdir}/pci-irq-affinity-agent.service
+
+       install -m0755 nova-sriov ${D}/${bindir}/nova-sriov
+       install -m0755 config.ini ${D}/${sysconfdir}/pci_irq_affinity/config.ini
+}
+
+FILES_pci-irq-affinity = "  \
+       ${bindir}/pci-irq-affinity-agent \
+       ${sysconfdir}/pci_irq_affinity/config.ini \
+       ${bindir}/nova-sriov \
+       ${sysconfdir}/init.d/pci-irq-affinity-agent \
+       ${systemd_system_unitdir}/pci-irq-affinity-agent.service \
+       ${PYTHON_SITEPACKAGES_DIR}/pci_irq_affinity/ \
+       ${PYTHON_SITEPACKAGES_DIR}/pci_irq_affinity_agent-${PV}-py${PYTHON_BASEVERSION}.egg-info/ \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/platform-util.inc b/meta-stx/recipes-core/stx-utilities/utilities/platform-util.inc
new file mode 100644 (file)
index 0000000..fe8d44c
--- /dev/null
@@ -0,0 +1,76 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " platform-util"
+
+RDEPENDS_platform-util  += " bash"
+DESCRIPTION_platform-util  = "platform-util"
+SUMMARY_platform-util  = "StarlingX Platform utilities installed only on controllers"
+
+inherit setuptools distutils
+
+do_configure_append() {
+       cd ${S}/utilities/platform-util/platform-util
+       distutils_do_configure
+}
+do_compile_append() {
+	cd ${S}/utilities/platform-util/platform-util
+	distutils_do_compile
+}
+
+do_install_append() {
+       cd ${S}/utilities/platform-util/platform-util
+       distutils_do_install
+
+       cd ${S}/utilities/platform-util/scripts
+
+       install -d -m0755 ${D}/${bindir}
+       install -m0755 tc_setup.sh ${D}/${bindir}/tc_setup.sh
+       install -m0755 remotelogging_tc_setup.sh ${D}/${bindir}/remotelogging_tc_setup.sh
+       install -m0755 connectivity_test  ${D}/${bindir}/connectivity_test
+       install -m0755 update-iso.sh ${D}/${bindir}/update-iso.sh
+
+       install -p -d -m0755 ${D}/${sysconfdir}/init.d
+       install -m0755 log_functions.sh ${D}/${sysconfdir}/init.d/log_functions.sh
+
+       install -p -d -m0755 ${D}/${sbindir}
+       install -m0755 patch-restart-mtce  ${D}/${sbindir}/patch-restart-mtce
+       install -m0755 patch-restart-processes ${D}/${sbindir}/patch-restart-processes
+       install -m0755 patch-restart-haproxy ${D}/${sbindir}/patch-restart-haproxy
+
+
+
+       install -p -d -m0755 ${D}/${systemd_system_unitdir}
+
+       install -m0644 opt-platform.mount  ${D}/${systemd_system_unitdir}/opt-platform.mount
+       install -m0644 opt-platform.service ${D}/${systemd_system_unitdir}/opt-platform.service
+
+}
+
+FILES_platform-util = "  \
+       ${bindir}/tc_setup.sh \
+       ${bindir}/verify-license \
+       ${bindir}/remotelogging_tc_setup.sh \
+       ${bindir}/connectivity_test \
+       ${bindir}/update-iso.sh \
+       ${sysconfdir}/init.d/log_functions.sh \
+       ${sbindir}/patch-restart-mtce \
+       ${sbindir}/patch-restart-processes \
+       ${sbindir}/patch-restart-haproxy \
+       ${systemd_system_unitdir}/opt-platform.mount \
+       ${systemd_system_unitdir}/opt-platform.service \
+       ${PYTHON_SITEPACKAGES_DIR}/platform_util/ \
+       ${PYTHON_SITEPACKAGES_DIR}/platform_util-${PV}-py${PYTHON_BASEVERSION}.egg-info/ \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/stx-extensions.inc b/meta-stx/recipes-core/stx-utilities/utilities/stx-extensions.inc
new file mode 100644 (file)
index 0000000..aba84f2
--- /dev/null
@@ -0,0 +1,50 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " stx-extensions"
+
+RDEPENDS_stx-extensions  += " systemd"
+DESCRIPTION_stx-extensions  = "TIS Extensions to thirdparty pkgs"
+SUMMARY_stx-extensions  = "TIS Extensions to thirdparty pkgs"
+
+inherit setuptools distutils
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       :
+}
+
+do_install_append() {
+       cd ${S}/utilities/stx-extensions/files
+
+
+       install -p -d -m0755 ${D}/${sysconfdir}/sysctl.d
+       install -m0755 coredump-sysctl.conf ${D}/${sysconfdir}/sysctl.d/50-coredump.conf
+
+       install -p -d -m0755 ${D}/${sysconfdir}/systemd/coredump.conf.d
+       install -m0755 coredump.conf ${D}/${sysconfdir}/systemd/coredump.conf.d/coredump.conf
+
+       install -p -d -m0755 ${D}/${sysconfdir}/modules-load.d
+       install -m0644 modules-load-vfio.conf ${D}/${sysconfdir}/modules-load.d/vfio.conf
+
+}
+
+FILES_stx-extensions = "  \
+       ${sysconfdir}/sysctl.d/50-coredump.conf \
+       ${sysconfdir}/systemd/coredump.conf.d/coredump.conf \
+       ${sysconfdir}/modules-load.d/vfio.conf \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/update-motd.inc b/meta-stx/recipes-core/stx-utilities/utilities/update-motd.inc
new file mode 100644 (file)
index 0000000..ef917a0
--- /dev/null
@@ -0,0 +1,62 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " update-motd"
+
+RDEPENDS_update-motd  += " cronie bash"
+DESCRIPTION_update-motd  = "dynamic MOTD generation"
+SUMMARY_update-motd  = "dynamic MOTD generation"
+
+inherit setuptools distutils
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       :
+}
+
+do_install_append() {
+       cd ${S}/utilities/update-motd/files
+
+
+       install -d ${D}/${sbindir}
+       install -m 700 motd-update ${D}/${sbindir}/motd-update
+
+       install -m0755 -d ${D}/${sysconfdir}
+       install -m0755 -d ${D}/${sysconfdir}/motd.d
+
+       install -m 755 motd-header ${D}/${sysconfdir}/motd.d/00-header
+       install -m 755 motd-footer ${D}/${sysconfdir}/motd.d/99-footer
+       install -m 644 motd.head ${D}/${sysconfdir}/motd.d/motd.head
+
+       install -m0755  -d ${D}/${sysconfdir}/cron.d
+       install -m 600 motd-update.cron ${D}/${sysconfdir}/cron.d/motd-update
+       install -m 700 customize-banner ${D}/${sbindir}/customize-banner
+       install -m 700 apply_banner_customization ${D}/${sbindir}/apply_banner_customization
+       install -m 700 install_banner_customization ${D}/${sbindir}/install_banner_customization
+
+}
+
+FILES_update-motd = "  \
+       ${sbindir}/motd-update \
+       ${sysconfdir}/motd.d/00-header \
+       ${sysconfdir}/motd.d/99-footer \
+       ${sysconfdir}/motd.d/motd.head \
+       ${sysconfdir}/cron.d/motd-update \
+       ${sbindir}/customize-banner \
+       ${sbindir}/apply_banner_customization \
+       ${sbindir}/install_banner_customization \
+       "
diff --git a/meta-stx/recipes-core/stx-utilities/utilities/worker-utils.inc b/meta-stx/recipes-core/stx-utilities/utilities/worker-utils.inc
new file mode 100644 (file)
index 0000000..16e7fb9
--- /dev/null
@@ -0,0 +1,58 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PACKAGES += " worker-utils"
+
+RDEPENDS_worker-utils  += " perl systemd python bash"
+DESCRIPTION_worker-utils  = "Initial worker node resource reservation and misc. utilities"
+SUMMARY_worker-utils  = "Worker node resource reservation and misc. utilities"
+
+inherit setuptools distutils python-dir
+
+do_configure_append() {
+       :
+}
+do_compile_append() {
+       cd ${S}/utilities/worker-utils/worker-utils
+       oe_runmake all
+}
+
+do_install_append() {
+       cd ${S}/utilities/worker-utils/worker-utils
+
+       oe_runmake install \
+               BINDIR=${D}/${bindir} \
+               INITDDIR=${D}/${sysconfdir}/init.d \
+               GOENABLEDDIR=${D}/${sysconfdir}/goenabled.d \
+               PLATFORMCONFDIR=${D}/${sysconfdir}/platform \
+               SYSTEMDDIR=${D}/${systemd_system_unitdir}
+}
+
+FILES_worker-utils = "  \
+       ${sysconfdir}/init.d/affine-platform.sh \
+       ${sysconfdir}/init.d/affine-tasks.sh \
+       ${sysconfdir}/init.d/cpumap_functions.sh \
+       ${sysconfdir}/init.d/task_affinity_functions.sh \
+       ${bindir}/ps-sched.sh \
+       ${bindir}/topology.py \
+       ${bindir}/topology.pyc \
+       ${bindir}/affine-interrupts.sh \
+       ${bindir}/set-cpu-wakeup-latency.sh \
+       ${bindir}/topology \
+       ${sysconfdir}/platform/worker_reserved.conf \
+       ${sysconfdir}/goenabled.d/worker-goenabled.sh \
+       ${systemd_system_unitdir}/affine-platform.sh.service \
+       ${systemd_system_unitdir}/affine-tasks.service \
+       "
diff --git a/meta-stx/recipes-core/systemd/files/0900-inject-milisec-in-syslog-date.patch b/meta-stx/recipes-core/systemd/files/0900-inject-milisec-in-syslog-date.patch
new file mode 100644 (file)
index 0000000..4a768fa
--- /dev/null
@@ -0,0 +1,76 @@
+From 5ef6dbb951246912ba021f9e2edacd0f9e7619e6 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Sat, 29 Feb 2020 12:48:57 -0800
+Subject: [PATCH] inject milisec in syslog date
+
+From stx.3.0: 0231aba5cdcb96b15106591acfff280159050366
+---
+ src/journal/journald-syslog.c | 45 +++++++++++++++++++++++++++++++----
+ 1 file changed, 40 insertions(+), 5 deletions(-)
+
+diff --git a/src/journal/journald-syslog.c b/src/journal/journald-syslog.c
+index a60a259bc4..0036750353 100644
+--- a/src/journal/journald-syslog.c
++++ b/src/journal/journald-syslog.c
+@@ -25,6 +25,44 @@
+ /* Warn once every 30s if we missed syslog message */
+ #define WARN_FORWARD_SYSLOG_MISSED_USEC (30 * USEC_PER_SEC)
++/*  internal function that builds a formatted time str of the
++ *  tv parameter into the passed buffer. (ie Nov  7 16:28:38.109)
++ *  If tv is NULL, then the clock function is used to build the formatted time
++ *  returns (same as snprintf) - number of characters written to buffer.
++ */
++static int formatSyslogDate(char * buffer, int bufLen, const struct timeval *tv) {
++  struct timeval tv_tmp;
++  long int millisec;
++  char tmpbuf[64];
++  struct tm *tm;
++  time_t t;
++
++  if (!tv) {
++      // no timeval input so get time data from clock
++      usec_t now_usec  = now(CLOCK_REALTIME);
++      time_t now_sec = ((time_t) now_usec / USEC_PER_SEC);
++      long int now_fraction_secs = now_usec % USEC_PER_SEC;
++      tv_tmp.tv_sec = now_sec;
++      tv_tmp.tv_usec = now_fraction_secs;
++      tv = &tv_tmp;
++  }
++
++  t = tv->tv_sec;
++  tm = localtime(&t);
++  if (!tm)
++     return 0;
++
++  // format time to the second granularity - ie Nov  7 16:28:38
++  if (strftime(tmpbuf,sizeof(tmpbuf),"%h %e %T", tm) <= 0)
++     return 0;
++
++  millisec = tv->tv_usec / 1000;
++  // now append millisecond granularity (ie Nov  7 16:28:38.109) to
++  // the formatted string.
++  return snprintf(buffer, bufLen, "%s.%03lu", tmpbuf, millisec);
++}
++
++
+ static void forward_syslog_iovec(Server *s, const struct iovec *iovec, unsigned n_iovec, const struct ucred *ucred, const struct timeval *tv) {
+         static const union sockaddr_union sa = {
+@@ -133,11 +171,8 @@ void server_forward_syslog(Server *s, int priority, const char *identifier, cons
+         iovec[n++] = IOVEC_MAKE_STRING(header_priority);
+         /* Second: timestamp */
+-        t = tv ? tv->tv_sec : ((time_t) (now(CLOCK_REALTIME) / USEC_PER_SEC));
+-        if (!localtime_r(&t, &tm))
+-                return;
+-        if (strftime(header_time, sizeof(header_time), "%h %e %T ", &tm) <= 0)
+-                return;
++      if (formatSyslogDate(header_time, sizeof(header_time), tv) <=0 )
++              return;
+         iovec[n++] = IOVEC_MAKE_STRING(header_time);
+         /* Third: identifier and PID */
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-core/systemd/systemd_241.bbappend b/meta-stx/recipes-core/systemd/systemd_241.bbappend
new file mode 100644 (file)
index 0000000..11ff015
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+       file://0900-inject-milisec-in-syslog-date.patch \
+       "
+
+STX_DEFAULT_LOCALE ?= "en_US.UTF-8"
+
+do_install_append () {
+       install -d ${D}${sysconfdir}
+       echo LANG=${STX_DEFAULT_LOCALE} >> ${D}${sysconfdir}/locale.conf
+}
+
+FILES_${PN} += "${sysconfdir}/locale.conf"
diff --git a/meta-stx/recipes-core/util-linux/util-linux_%.bbappend b/meta-stx/recipes-core/util-linux/util-linux_%.bbappend
new file mode 100644 (file)
index 0000000..625a957
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# Increase su.util-linux update-alternative priortiy
+
+ALTERNATIVE_PRIORITY[su] = "300"
diff --git a/meta-stx/recipes-daemons/lldpd/files/0001-lldpd-client-add-show-interfaces-cmd-from-upstream.patch b/meta-stx/recipes-daemons/lldpd/files/0001-lldpd-client-add-show-interfaces-cmd-from-upstream.patch
new file mode 100644 (file)
index 0000000..51a185f
--- /dev/null
@@ -0,0 +1,296 @@
+From 0e355867f211a922c5b21ddbbb073eb2c35430b9 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Mon, 24 Feb 2020 06:01:03 -0800
+Subject: [PATCH] lldpd client add show interfaces cmd from upstream
+
+commit a54f6012efff77c966f533b8ef35b8627e3c8212
+---
+ src/client/client.h     |  2 +
+ src/client/display.c    | 99 ++++++++++++++++++++++++++++++++++-------
+ src/client/lldpcli.8.in | 20 +++++++++
+ src/client/show.c       | 44 ++++++++++++++++++
+ src/daemon/lldpd.c      |  1 +
+ 5 files changed, 151 insertions(+), 15 deletions(-)
+
+diff --git a/src/client/client.h b/src/client/client.h
+index e3ee352..8da3e3f 100644
+--- a/src/client/client.h
++++ b/src/client/client.h
+@@ -131,6 +131,8 @@ void display_interfaces_stats(lldpctl_conn_t *, struct writer *,
+     struct cmd_env *);
+ void display_interface_stats(lldpctl_conn_t *, struct writer *,
+     lldpctl_atom_t *);
++void display_local_interfaces(lldpctl_conn_t *, struct writer *,
++    struct cmd_env *, int, int);
+diff --git a/src/client/display.c b/src/client/display.c
+index cbd0e31..2769890 100644
+--- a/src/client/display.c
++++ b/src/client/display.c
+@@ -349,7 +349,8 @@ display_port(struct writer *w, lldpctl_atom_t *port, int details)
+       tag_datatag(w, "descr", "PortDescr",
+           lldpctl_atom_get_str(port, lldpctl_k_port_descr));
+-      if (details)
++      if (details &&
++          lldpctl_atom_get_int(port, lldpctl_k_port_ttl) > 0)
+               tag_datatag(w, "ttl", "TTL",
+                   lldpctl_atom_get_str(port, lldpctl_k_port_ttl));
+@@ -473,6 +474,38 @@ display_port(struct writer *w, lldpctl_atom_t *port, int details)
+       tag_end(w);
+ }
++static void
++display_local_ttl(struct writer *w, lldpctl_conn_t *conn, int details)
++{
++      char *ttl;
++      long int tx_hold;
++      long int tx_interval;
++
++      lldpctl_atom_t *configuration;
++      configuration = lldpctl_get_configuration(conn);
++      if (!configuration) {
++              log_warnx("lldpctl", "not able to get configuration. %s",
++                  lldpctl_last_strerror(conn));
++              return;
++      }
++
++      tx_hold = lldpctl_atom_get_int(configuration, lldpctl_k_config_tx_hold);
++      tx_interval = lldpctl_atom_get_int(configuration, lldpctl_k_config_tx_interval);
++
++      if (asprintf(&ttl, "%lu", tx_hold*tx_interval) == -1) {
++              log_warnx("lldpctl", "not enough memory to build TTL.");
++              goto end;
++      }
++
++      tag_start(w, "ttl", "TTL");
++      tag_attr(w, "ttl", "", ttl);
++      tag_end(w);
++      free(ttl);
++end:
++      lldpctl_atom_dec_ref(configuration);
++      return;
++}
++
+ static void
+ display_vlans(struct writer *w, lldpctl_atom_t *port)
+ {
+@@ -582,43 +615,51 @@ display_local_chassis(lldpctl_conn_t *conn, struct writer *w,
+ void
+ display_interface(lldpctl_conn_t *conn, struct writer *w, int hidden,
+-    lldpctl_atom_t *iface, lldpctl_atom_t *neighbor, int details, int protocol)
++    lldpctl_atom_t *iface, lldpctl_atom_t *port, int details, int protocol)
+ {
++      int local = 0;
++
+       if (!hidden &&
+-          lldpctl_atom_get_int(neighbor, lldpctl_k_port_hidden))
++          lldpctl_atom_get_int(port, lldpctl_k_port_hidden))
+               return;
+       /* user might have specified protocol to filter on display */
+       if ((protocol != LLDPD_MODE_MAX) &&
+-          (protocol != lldpctl_atom_get_int(neighbor, lldpctl_k_port_protocol)))
++          (protocol != lldpctl_atom_get_int(port, lldpctl_k_port_protocol)))
+           return;
+-      lldpctl_atom_t *chassis = lldpctl_atom_get(neighbor, lldpctl_k_port_chassis);
++      /* Infer local / remote port from the port index (remote == 0) */
++      local = lldpctl_atom_get_int(port, lldpctl_k_port_index)>0?1:0;
++
++      lldpctl_atom_t *chassis = lldpctl_atom_get(port, lldpctl_k_port_chassis);
+       tag_start(w, "interface", "Interface");
+       tag_attr(w, "name", "",
+           lldpctl_atom_get_str(iface, lldpctl_k_interface_name));
+       tag_attr(w, "via" , "via",
+-          lldpctl_atom_get_str(neighbor, lldpctl_k_port_protocol));
++          lldpctl_atom_get_str(port, lldpctl_k_port_protocol));
+       if (details > DISPLAY_BRIEF) {
+-              tag_attr(w, "rid" , "RID",
+-                  lldpctl_atom_get_str(chassis, lldpctl_k_chassis_index));
++              if (!local)
++                      tag_attr(w, "rid" , "RID",
++                          lldpctl_atom_get_str(chassis, lldpctl_k_chassis_index));
+               tag_attr(w, "age" , "Time",
+-                  display_age(lldpctl_atom_get_int(neighbor, lldpctl_k_port_age)));
++                  display_age(lldpctl_atom_get_int(port, lldpctl_k_port_age)));
+       }
+       display_chassis(w, chassis, details);
+-      display_port(w, neighbor, details);
++      display_port(w, port, details);
++      if (details && local)
++              display_local_ttl(w, conn, details);
+       if (details == DISPLAY_DETAILS) {
+-              display_vlans(w, neighbor);
+-              display_ppvids(w, neighbor);
+-              display_pids(w, neighbor);
+-              display_med(w, neighbor, chassis);
++              display_vlans(w, port);
++              display_ppvids(w, port);
++              display_pids(w, port);
++              display_med(w, port, chassis);
+       }
+       lldpctl_atom_dec_ref(chassis);
+-      display_custom_tlvs(w, neighbor);
++      display_custom_tlvs(w, port);
+       tag_end(w);
+ }
+@@ -675,6 +716,34 @@ display_interfaces(lldpctl_conn_t *conn, struct writer *w,
+       tag_end(w);
+ }
++
++/**
++ * Display information about local interfaces.
++ *
++ * @param conn       Connection to lldpd.
++ * @param w          Writer.
++ * @param hidden     Whatever to show hidden ports.
++ * @param env        Environment from which we may find the list of ports.
++ * @param details    Level of details we need (DISPLAY_*).
++ */
++void
++display_local_interfaces(lldpctl_conn_t *conn, struct writer *w,
++    struct cmd_env *env,
++    int hidden, int details)
++{
++      lldpctl_atom_t *iface;
++      int protocol = LLDPD_MODE_MAX;
++
++      tag_start(w, "lldp", "LLDP interfaces");
++      while ((iface = cmd_iterate_on_interfaces(conn, env))) {
++              lldpctl_atom_t *port;
++              port      = lldpctl_get_port(iface);
++              display_interface(conn, w, hidden, iface, port, details, protocol);
++              lldpctl_atom_dec_ref(port);
++      }
++      tag_end(w);
++ }
++
+ void
+ display_stat(struct writer *w, const char *tag, const char *descr,
+       long unsigned int cnt)
+diff --git a/src/client/lldpcli.8.in b/src/client/lldpcli.8.in
+index 1a20fa8..8a4123e 100644
+--- a/src/client/lldpcli.8.in
++++ b/src/client/lldpcli.8.in
+@@ -134,6 +134,26 @@ one or several ports, the information displayed is limited to the
+ given list of ports.
+ .Ed
++.Cd show interfaces
++.Op ports Ar ethX Op ,...
++.Op Cd details | summary
++.Op Cd hidden
++.Bd -ragged -offset XXXXXX
++Display information about each local interface known by
++.Xr lldpd 8
++daemon. With
++.Cd summary ,
++only the name and the port description of each local interface will be
++displayed. On the other hand, with
++.Cd details ,
++all available information will be displayed, giving a verbose
++view. When using
++.Cd hidden ,
++also display local ports hidden by the smart filter. When specifying
++one or several ports, the information displayed is limited to the
++given list of ports.
++.Ed
++
+ .Cd show chassis
+ .Op Cd details | summary
+ .Bd -ragged -offset XXXXXX
+diff --git a/src/client/show.c b/src/client/show.c
+index fa704b8..8ba8acb 100644
+--- a/src/client/show.c
++++ b/src/client/show.c
+@@ -48,6 +48,35 @@ cmd_show_neighbors(struct lldpctl_conn_t *conn, struct writer *w,
+       return 1;
+ }
++/**
++ * Show interfaces.
++ *
++ * The environment will contain the following keys:
++ *  - C{ports} list of ports we want to restrict showing.
++ *  - C{hidden} if we should show hidden ports.
++ *  - C{summary} if we want to show only a summary
++ *  - C{detailed} for a detailed overview
++ */
++static int
++cmd_show_interfaces(struct lldpctl_conn_t *conn, struct writer *w,
++    struct cmd_env *env, void *arg)
++{
++      log_debug("lldpctl", "show interfaces data (%s) %s hidden interfaces",
++          cmdenv_get(env, "summary")?"summary":
++          cmdenv_get(env, "detailed")?"detailed":
++          "normal", cmdenv_get(env, "hidden")?"with":"without");
++      if (cmdenv_get(env, "ports"))
++              log_debug("lldpctl", "restrict to the following ports: %s",
++                  cmdenv_get(env, "ports"));
++
++      display_local_interfaces(conn, w, env, !!cmdenv_get(env, "hidden"),
++          cmdenv_get(env, "summary")?DISPLAY_BRIEF:
++          cmdenv_get(env, "detailed")?DISPLAY_DETAILS:
++          DISPLAY_NORMAL);
++
++      return 1;
++}
++
+ /**
+  * Show chassis.
+  *
+@@ -286,6 +315,12 @@ register_commands_show(struct cmd_node *root)
+               "Show neighbors data",
+               NULL, NULL, NULL);
++      struct cmd_node *interfaces = commands_new(
++              show,
++              "interfaces",
++              "Show interfaces data",
++              NULL, NULL, NULL);
++
+       struct cmd_node *chassis = commands_new(
+               show,
+               "chassis",
+@@ -306,6 +341,15 @@ register_commands_show(struct cmd_node *root)
+       register_common_commands(neighbors, 1);
++      /* Interfaces data */
++      commands_new(interfaces,
++          NEWLINE,
++          "Show interfaces data",
++          NULL, cmd_show_interfaces, NULL);
++
++      cmd_restrict_ports(interfaces);
++      register_common_commands(interfaces, 0);
++
+       /* Chassis data */
+       commands_new(chassis,
+           NEWLINE,
+diff --git a/src/daemon/lldpd.c b/src/daemon/lldpd.c
+index 97df38e..8ce38a9 100644
+--- a/src/daemon/lldpd.c
++++ b/src/daemon/lldpd.c
+@@ -1037,6 +1037,7 @@ lldpd_send(struct lldpd_hardware *hardware)
+                                   cfg->g_protocols[i].name);
+                               cfg->g_protocols[i].send(cfg,
+                                   hardware);
++                              hardware->h_lport.p_protocol = cfg->g_protocols[i].mode;
+                               sent++;
+                               break;
+                       }
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-daemons/lldpd/files/0002-Clear-station-bit-if-any-other-capability-is-enabled.patch b/meta-stx/recipes-daemons/lldpd/files/0002-Clear-station-bit-if-any-other-capability-is-enabled.patch
new file mode 100644 (file)
index 0000000..47b73da
--- /dev/null
@@ -0,0 +1,27 @@
+From 461e728e8bd71529c09a113f2610766be61aa50a Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Mon, 24 Feb 2020 05:06:57 -0800
+Subject: [PATCH 2/2] Clear station bit if any other capability is enabled
+
+From stx.3.0 b2ed14edc66c7876cd9239a346b92630403e996c
+---
+ src/daemon/interfaces.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/src/daemon/interfaces.c b/src/daemon/interfaces.c
+index d004c70..b870a46 100644
+--- a/src/daemon/interfaces.c
++++ b/src/daemon/interfaces.c
+@@ -328,6 +328,9 @@ interfaces_helper_chassis(struct lldpd *cfg,
+       if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_STATION) &&
+               (LOCAL_CHASSIS(cfg)->c_cap_enabled == 0))
+           LOCAL_CHASSIS(cfg)->c_cap_enabled = LLDP_CAP_STATION;
++      else if (LOCAL_CHASSIS(cfg)->c_cap_enabled != LLDP_CAP_STATION)
++          LOCAL_CHASSIS(cfg)->c_cap_enabled &= ~LLDP_CAP_STATION;
++
+       if (LOCAL_CHASSIS(cfg)->c_id != NULL &&
+           LOCAL_CHASSIS(cfg)->c_id_subtype == LLDP_CHASSISID_SUBTYPE_LLADDR)
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-daemons/lldpd/files/i40e-lldp-configure.sh b/meta-stx/recipes-daemons/lldpd/files/i40e-lldp-configure.sh
new file mode 100644 (file)
index 0000000..4a5e8a4
--- /dev/null
@@ -0,0 +1,141 @@
+#!/bin/bash
+################################################################################
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+################################################################################
+
+# Certain i40e network devices (XL710 Fortville) have an internal firmware LLDP
+# agent enabled by default. This can prevent LLDP PDUs from being processed by
+# the driver and any upper layer agents.
+#
+# This script allows a user to enable and disable the internal LLDP agent.
+#
+# Note: debugfs must be enabled in the kernel
+#
+# To enable:
+# ./i40e-lldp-configure.sh start
+#
+# To disable:
+# ./i40e-lldp-configure.sh stop
+
+PROGNAME=$(basename $0)
+DEBUGFS_PATH=/sys/kernel/debug
+DEBUGFS_I40_DEVICES_PATH=$DEBUGFS_PATH/i40e
+LLDP_COMMAND=lldp
+
+function log {
+    local MSG="${PROGNAME}: $1"
+    logger -p notice "${MSG}"
+}
+
+function err {
+    local MSG="${PROGNAME}: $1"
+    logger -p error "${MSG}"
+}
+
+function configure_device {
+    local DEVICE=$1
+    local ACTION=$2
+    local DEVICE_PATH=${DEBUGFS_I40_DEVICES}/${DEVICE}
+
+    if [ ! -d ${DEVICE_PATH} ]; then
+        return 1
+    fi
+
+    echo "${LLDP_COMMAND} ${ACTION}" > ${DEVICE_PATH}/command
+    RET=$?
+
+    if [ ${RET} -ne 0 ]; then
+        err "Failed to ${ACTION} internal LLDP agent for device ${DEVICE}"
+        return ${RET}
+    fi
+
+    log "${ACTION} internal LLDP agent for device ${DEVICE}"
+    return ${RET}
+}
+
+function is_debugfs_mounted {
+    if grep -qs "${DEBUGFS_PATH}" /proc/mounts; then
+    return 0
+    fi
+    return 1
+}
+
+function mount_debugfs {
+    mount -t debugfs none ${DEBUGFS_PATH}
+}
+
+function unmount_debugfs {
+    umount ${DEBUGFS_PATH}
+}
+
+function scan_devices {
+    local ACTION=$1
+    local DEBUGFS_MOUNTED="false"
+    local DEVICES=${DEBUGFS_I40_DEVICES_PATH}/*
+
+    if is_debugfs_mounted; then
+        DEBUGFS_MOUNTED="true"
+    fi
+
+    if [ ${DEBUGFS_MOUNTED} = "false" ]; then
+        mount_debugfs
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            err "Failed to mount debugfs"
+            return ${RET}
+        fi
+        log "Mounted debugfs"
+    fi
+
+    for DEVICE in $DEVICES; do
+        configure_device ${DEVICE} ${ACTION}
+    done
+
+    if [ ${DEBUGFS_MOUNTED} = "false" ]; then
+        unmount_debugfs
+        RET=$?
+        if [ ${RET} -ne 0 ]; then
+            err "Failed to unmount debugfs"
+            return ${RET}
+        fi
+        log "Unmounted debugfs"
+    fi
+
+    return 0
+}
+
+function start {
+    scan_devices start
+    return $?
+}
+
+function stop {
+    scan_devices stop
+    return $?
+}
+
+function status {
+    return 0
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|restart|status}"
+        exit 1
+esac
diff --git a/meta-stx/recipes-daemons/lldpd/files/lldpd-clear-station.patch b/meta-stx/recipes-daemons/lldpd/files/lldpd-clear-station.patch
new file mode 100644 (file)
index 0000000..6ab09f0
--- /dev/null
@@ -0,0 +1,39 @@
+From b2ed14edc66c7876cd9239a346b92630403e996c Mon Sep 17 00:00:00 2001
+From: Steven Webster <steven.webster@windriver.com>
+Date: Sun, 18 Jun 2017 22:23:49 -0400
+Subject: [PATCH 1/1] Clear station bit if any other capability is enabled
+
+---
+ src/daemon/interfaces.c | 2 ++
+ src/daemon/lldpd.c      | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/src/daemon/interfaces.c b/src/daemon/interfaces.c
+index ec81721..4923049 100644
+--- a/src/daemon/interfaces.c
++++ b/src/daemon/interfaces.c
+@@ -309,6 +309,8 @@ interfaces_helper_chassis(struct lldpd *cfg,
+       if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_STATION) &&
+               (LOCAL_CHASSIS(cfg)->c_cap_enabled == 0))
+           LOCAL_CHASSIS(cfg)->c_cap_enabled = LLDP_CAP_STATION;
++      else if (LOCAL_CHASSIS(cfg)->c_cap_enabled != LLDP_CAP_STATION)
++          LOCAL_CHASSIS(cfg)->c_cap_enabled &= ~LLDP_CAP_STATION;
+       if (LOCAL_CHASSIS(cfg)->c_id != NULL &&
+           LOCAL_CHASSIS(cfg)->c_id_subtype == LLDP_CHASSISID_SUBTYPE_LLADDR)
+diff --git a/src/daemon/lldpd.c b/src/daemon/lldpd.c
+index c815705..dac633f 100644
+--- a/src/daemon/lldpd.c
++++ b/src/daemon/lldpd.c
+@@ -1152,6 +1152,8 @@ lldpd_update_localchassis(struct lldpd *cfg)
+       if ((LOCAL_CHASSIS(cfg)->c_cap_available & LLDP_CAP_STATION) &&
+               (LOCAL_CHASSIS(cfg)->c_cap_enabled == 0))
+               LOCAL_CHASSIS(cfg)->c_cap_enabled = LLDP_CAP_STATION;
++      else if (LOCAL_CHASSIS(cfg)->c_cap_enabled != LLDP_CAP_STATION)
++              LOCAL_CHASSIS(cfg)->c_cap_enabled &= ~LLDP_CAP_STATION;
+       /* Set chassis ID if needed. This is only done if chassis ID
+          has not been set previously (with the MAC address of an
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-daemons/lldpd/files/lldpd-create-run-dir.patch b/meta-stx/recipes-daemons/lldpd/files/lldpd-create-run-dir.patch
new file mode 100644 (file)
index 0000000..a4e9d00
--- /dev/null
@@ -0,0 +1,12 @@
+Index: lldpd-0.9.0/src/daemon/lldpd.service.in
+===================================================================
+--- lldpd-0.9.0.orig/src/daemon/lldpd.service.in
++++ lldpd-0.9.0/src/daemon/lldpd.service.in
+@@ -9,6 +9,7 @@ Type=notify
+ NotifyAccess=main
+ EnvironmentFile=-/etc/default/lldpd
+ EnvironmentFile=-/etc/sysconfig/lldpd
++ExecStartPre=/bin/mkdir -p /var/run/lldpd
+ ExecStart=@sbindir@/lldpd $DAEMON_ARGS $LLDPD_OPTIONS
+ Restart=on-failure
diff --git a/meta-stx/recipes-daemons/lldpd/files/lldpd-i40e-disable.patch b/meta-stx/recipes-daemons/lldpd/files/lldpd-i40e-disable.patch
new file mode 100644 (file)
index 0000000..1262e94
--- /dev/null
@@ -0,0 +1,12 @@
+Index: lldpd-0.9.0/src/daemon/lldpd.service.in
+===================================================================
+--- lldpd-0.9.0.orig/src/daemon/lldpd.service.in
++++ lldpd-0.9.0/src/daemon/lldpd.service.in
+@@ -10,6 +10,7 @@
+ EnvironmentFile=-/etc/default/lldpd
+ EnvironmentFile=-/etc/sysconfig/lldpd
+ ExecStartPre=/bin/mkdir -p /var/run/lldpd
++ExecStartPre=/etc/init.d/i40e-lldp-configure.sh stop
+ ExecStart=@sbindir@/lldpd $DAEMON_ARGS $LLDPD_OPTIONS
+ Restart=on-failure
\ No newline at end of file
diff --git a/meta-stx/recipes-daemons/lldpd/files/lldpd-interface-show.patch b/meta-stx/recipes-daemons/lldpd/files/lldpd-interface-show.patch
new file mode 100644 (file)
index 0000000..8fb52a0
--- /dev/null
@@ -0,0 +1,206 @@
+---
+ src/client/client.h  |    2 +
+ src/client/display.c |   59 ++++++++++++++++++++++++++++++++++++++++++++++++++-
+ src/client/show.c    |   44 ++++++++++++++++++++++++++++++++++++++
+ src/lib/atoms/port.c |    7 ++++++
+ src/lib/lldpctl.h    |    1 
+ 5 files changed, 112 insertions(+), 1 deletion(-)
+
+--- a/src/client/client.h
++++ b/src/client/client.h
+@@ -115,6 +115,8 @@ char*  totag(const char *);
+ #define DISPLAY_DETAILS 3
+ void display_interfaces(lldpctl_conn_t *, struct writer *,
+     struct cmd_env *, int, int);
++void display_local_interfaces(lldpctl_conn_t *, struct writer *,
++    struct cmd_env *, int, int);
+ void display_interface(lldpctl_conn_t *, struct writer *, int,
+     lldpctl_atom_t *, lldpctl_atom_t *, int, int);
+ void display_local_chassis(lldpctl_conn_t *, struct writer *,
+--- a/src/client/display.c
++++ b/src/client/display.c
+@@ -344,12 +344,23 @@ display_port(struct writer *w, lldpctl_a
+       tag_datatag(w, "descr", "PortDescr",
+           lldpctl_atom_get_str(port, lldpctl_k_port_descr));
++      tag_datatag(w, "ttl", "Ttl",
++          lldpctl_atom_get_str(port, lldpctl_k_port_ttl));
++
+       /* Dot3 */
+       if (details == DISPLAY_DETAILS) {
+               tag_datatag(w, "mfs", "MFS",
+                   lldpctl_atom_get_str(port, lldpctl_k_port_dot3_mfs));
+-              tag_datatag(w, "aggregation", "Port is aggregated. PortAggregID",
++
++              long int lag_id = lldpctl_atom_get_int(port,
++                  lldpctl_k_port_dot3_aggregid);
++              tag_start(w, "link-aggregation", "LinkAgg");
++              tag_attr(w, "supported", "supported", "yes");
++              tag_attr(w, "enabled", "enabled",
++                      (lag_id > 0)?"yes":"no");
++              tag_datatag(w, "aggregation", "PortAggregID",
+                   lldpctl_atom_get_str(port, lldpctl_k_port_dot3_aggregid));
++              tag_end(w);
+               long int autoneg_support, autoneg_enabled, autoneg_advertised;
+               autoneg_support = lldpctl_atom_get_int(port,
+@@ -663,6 +674,52 @@ display_interfaces(lldpctl_conn_t *conn,
+               lldpctl_atom_dec_ref(port);
+       }
+       tag_end(w);
++}
++
++/**
++ * Display information about local interfaces.
++ *
++ * @param conn       Connection to lldpd.
++ * @param w          Writer.
++ * @param hidden     Whether to show hidden ports.
++ * @param env        Environment from which we may find the list of ports.
++ * @param details    Level of details we need (DISPLAY_*).
++ */
++void
++display_local_interfaces(lldpctl_conn_t *conn, struct writer *w,
++    struct cmd_env *env,
++    int hidden, int details)
++{
++      lldpctl_atom_t *iface;
++      int protocol = LLDPD_MODE_MAX;
++      const char *proto_str;
++
++      /* user might have specified protocol to filter display results */
++      proto_str = cmdenv_get(env, "protocol");
++
++      if (proto_str) {
++              log_debug("display", "filter protocol: %s ", proto_str);
++
++              protocol = 0;
++              for (lldpctl_map_t *protocol_map =
++                       lldpctl_key_get_map(lldpctl_k_port_protocol);
++                   protocol_map->string;
++                   protocol_map++) {
++                      if (!strcasecmp(proto_str, protocol_map->string)) {
++                              protocol = protocol_map->value;
++                              break;
++                      }
++              }
++      }
++
++      tag_start(w, "lldp", "LLDP interfaces");
++      while ((iface = cmd_iterate_on_interfaces(conn, env))) {
++              lldpctl_atom_t *port;
++              port      = lldpctl_get_port(iface);
++              display_interface(conn, w, hidden, iface, port, details, protocol);
++              lldpctl_atom_dec_ref(port);
++      }
++      tag_end(w);
+ }
+ void
+--- a/src/client/show.c
++++ b/src/client/show.c
+@@ -48,6 +48,35 @@ cmd_show_neighbors(struct lldpctl_conn_t
+ }
+ /**
++ * Show interfaces.
++ *
++ * The environment will contain the following keys:
++ *  - C{ports} list of ports we want to restrict showing.
++ *  - C{hidden} if we should show hidden ports.
++ *  - C{summary} if we want to show only a summary
++ *  - C{detailed} for a detailed overview
++ */
++static int
++cmd_show_interfaces(struct lldpctl_conn_t *conn, struct writer *w,
++    struct cmd_env *env, void *arg)
++{
++      log_debug("lldpctl", "show interfaces data (%s) %s hidden interfaces",
++          cmdenv_get(env, "summary")?"summary":
++          cmdenv_get(env, "detailed")?"detailed":
++          "normal", cmdenv_get(env, "hidden")?"with":"without");
++      if (cmdenv_get(env, "ports"))
++              log_debug("lldpctl", "restrict to the following ports: %s",
++                  cmdenv_get(env, "ports"));
++
++      display_local_interfaces(conn, w, env, !!cmdenv_get(env, "hidden"),
++          cmdenv_get(env, "summary")?DISPLAY_BRIEF:
++          cmdenv_get(env, "detailed")?DISPLAY_DETAILS:
++          DISPLAY_NORMAL);
++
++      return 1;
++}
++
++/**
+  * Show chassis.
+  *
+  * The environment will contain the following keys:
+@@ -269,6 +298,12 @@ register_commands_show(struct cmd_node *
+               "Show neighbors data",
+               NULL, NULL, NULL);
++      struct cmd_node *interfaces = commands_new(
++              show,
++              "interfaces",
++              "Show interfaces data",
++              NULL, NULL, NULL);
++
+       struct cmd_node *chassis = commands_new(
+               show,
+               "chassis",
+@@ -289,6 +324,15 @@ register_commands_show(struct cmd_node *
+       register_common_commands(neighbors, 1);
++      /* Interfaces data */
++      commands_new(interfaces,
++          NEWLINE,
++          "Show interfaces data",
++          NULL, cmd_show_interfaces, NULL);
++
++      cmd_restrict_ports(interfaces);
++      register_common_commands(interfaces, 0);
++
+       /* Chassis data */
+       commands_new(chassis,
+           NEWLINE,
+--- a/src/lib/atoms/port.c
++++ b/src/lib/atoms/port.c
+@@ -19,6 +19,7 @@
+ #include <stdarg.h>
+ #include <string.h>
+ #include <arpa/inet.h>
++#include <time.h>
+ #include "lldpctl.h"
+ #include "../log.h"
+@@ -544,6 +545,7 @@ _lldpctl_atom_get_int_port(lldpctl_atom_
+           (struct _lldpctl_atom_port_t *)atom;
+       struct lldpd_port     *port     = p->port;
+       struct lldpd_hardware *hardware = p->hardware;
++      time_t now = time(NULL);
+       /* Local port only */
+       if (hardware != NULL) {
+@@ -585,6 +587,11 @@ _lldpctl_atom_get_int_port(lldpctl_atom_
+               return port->p_id_subtype;
+       case lldpctl_k_port_hidden:
+               return port->p_hidden_in;
++      case lldpctl_k_port_ttl:
++              if (port->p_lastupdate > 0)
++                      return (port->p_chassis->c_ttl - (now - port->p_lastupdate));
++              else
++                      return port->p_chassis->c_ttl;
+ #ifdef ENABLE_DOT3
+       case lldpctl_k_port_dot3_mfs:
+               if (port->p_mfs > 0)
+--- a/src/lib/lldpctl.h
++++ b/src/lib/lldpctl.h
+@@ -674,6 +674,7 @@ typedef enum {
+       lldpctl_k_port_hidden,     /**< `(I)` Is this port hidden (or should it be displayed?)? */
+       lldpctl_k_port_status,     /**< `(IS,WO)` Operational status of this (local) port */
+       lldpctl_k_port_chassis,    /**< `(A)` Chassis associated to the port */
++      lldpctl_k_port_ttl,        /**< `(I)` The port ttl. */
+       lldpctl_k_port_dot3_mfs = 1300,    /**< `(I)` MFS */
+       lldpctl_k_port_dot3_aggregid,   /**< `(I)` Port aggregation ID */
diff --git a/meta-stx/recipes-daemons/lldpd/files/lldpd.default b/meta-stx/recipes-daemons/lldpd/files/lldpd.default
new file mode 100644 (file)
index 0000000..2364c02
--- /dev/null
@@ -0,0 +1,2 @@
+# Uncomment to start SNMP subagent and enable CDP, SONMP and EDP protocol
+#DAEMON_ARGS="-x -c -s -e"
\ No newline at end of file
diff --git a/meta-stx/recipes-daemons/lldpd/files/lldpd.init b/meta-stx/recipes-daemons/lldpd/files/lldpd.init
new file mode 100644 (file)
index 0000000..c910f0d
--- /dev/null
@@ -0,0 +1,117 @@
+#! /bin/sh
+### BEGIN INIT INFO
+# Provides:          lldpd
+# Required-Start:    $remote_fs $network $syslog
+# Required-Stop:     $network $remote_fs $syslog
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: LLDP daemon
+# Description:       lldpd is a 802.1AB implementation, a L2 network
+#                    discovery protocol. It also supports CDP, EDP and
+#                    various other protocols.
+### END INIT INFO
+
+# Do NOT "set -e"
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="LLDP daemon"
+NAME=lldpd
+DAEMON=/usr/sbin/$NAME
+DAEMON_ARGS=""
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+CHROOT=/var/run/$NAME
+
+# Exit if the package is not installed
+[ -x "$DAEMON" ] || exit 0
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Create the chroot directory if not present
+[ -d "$CHROOT" ] || mkdir -p $CHROOT
+
+# LSB log_* functions
+. /lib/lsb/init-functions
+
+if [ ! -d "$CHROOT" ]; then
+        mkdir -p $CHROOT
+fi
+
+
+do_start()
+{
+       start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
+               || return 1
+       start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
+               $DAEMON_ARGS \
+               || return 2
+}
+
+do_stop()
+{
+       start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+       RETVAL="$?"
+       [ "$RETVAL" = 2 ] && return 2
+       start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
+       [ "$?" = 2 ] && return 2
+       rm -f $PIDFILE
+       return "$RETVAL"
+}
+
+do_reload() {
+       start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
+       return 0
+}
+
+case "$1" in
+  start)
+       [ "$VERBOSE" != no ] && log_begin_msg "Starting $DESC" "$NAME"
+       do_start
+       case "$?" in
+               0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+               2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+       esac
+       ;;
+  stop)
+       [ "$VERBOSE" != no ] && log_begin_msg "Stopping $DESC" "$NAME"
+       do_stop
+       case "$?" in
+               0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
+               2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
+       esac
+       ;;
+  reload)
+       log_begin_msg "Reloading $DESC" "$NAME"
+       do_reload
+       log_end_msg $?
+       ;;
+  restart|force-reload)
+       log_begin_msg "Restarting $DESC" "$NAME"
+       do_stop
+       case "$?" in
+         0|1)
+               do_start
+               case "$?" in
+                       0) log_end_msg 0 ;;
+                       1) log_end_msg 1 ;; # Old process is still running
+                       *) log_end_msg 1 ;; # Failed to start
+               esac
+               ;;
+         *)
+               # Failed to stop
+               log_end_msg 1
+               ;;
+       esac
+       ;;
+  status)
+       status_of_proc $DAEMON $NAME -p $PIDFILE && exit 0 || exit $?
+       ;;
+  *)
+       echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload|status}" >&2
+       exit 3
+       ;;
+esac
+
+:
\ No newline at end of file
diff --git a/meta-stx/recipes-daemons/lldpd/lldpd_%.bbappend b/meta-stx/recipes-daemons/lldpd/lldpd_%.bbappend
new file mode 100644 (file)
index 0000000..dee132d
--- /dev/null
@@ -0,0 +1,43 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/patches:${THISDIR}/files:"
+
+DISTRO_FEATURES_BACKFILL_CONSIDERED_remove = "sysvinit"
+
+SRC_URI += " \
+       file://0001-lldpd-client-add-show-interfaces-cmd-from-upstream.patch \
+       file://0002-Clear-station-bit-if-any-other-capability-is-enabled.patch \
+       file://i40e-lldp-configure.sh \
+       "
+
+# TODO: 
+# Check Yocto handling of i40e firmware?
+# See i40e-lldp-configure.sh and lldpd-i40e-disable.patch
+
+# file://lldpd.init 
+# lldpd-create-run-dir.patch
+
+do_install_append() {
+       cd ${S}
+       install -d -m 0755 ${D}/${sysconfdir}/init.d
+       install -m 0755 ${WORKDIR}/i40e-lldp-configure.sh ${D}/${sysconfdir}/init.d/
+}
+
+FILES_${PN}_append = " \
+       ${sysconfdir}/init.d/i40e-lldp-configure.sh \
+       "
+
+RDEPENDS_${PN} += "bash"
diff --git a/meta-stx/recipes-dbs/mysql/mysql-python/0001-_mysql.c-fix-compilation-with-MariaDB-with-10.3.13.patch b/meta-stx/recipes-dbs/mysql/mysql-python/0001-_mysql.c-fix-compilation-with-MariaDB-with-10.3.13.patch
new file mode 100644 (file)
index 0000000..25d9d39
--- /dev/null
@@ -0,0 +1,34 @@
+From 45436592aa64308b2ab46f84c6107c6d7de0a3ec Mon Sep 17 00:00:00 2001
+From: Mingli Yu <mingli.yu@windriver.com>
+Date: Wed, 6 Mar 2019 00:16:17 -0800
+Subject: [PATCH] _mysql.c: fix compilation with MariaDB 10.3.13
+
+Use standard API function MYSQL_OPT_RECONNECT
+instead of direct modification of internal structures
+which does not work for MariaDB.
+
+Upstream-Status: Pending
+
+Signed-off-by: Mingli Yu <mingli.yu@windriver.com>
+---
+ _mysql.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/_mysql.c
++++ b/_mysql.c
+@@ -2002,7 +2002,14 @@ _mysql_ConnectionObject_ping(
+       int r, reconnect = -1;
+       if (!PyArg_ParseTuple(args, "|I", &reconnect)) return NULL;
+       check_connection(self);
+-      if ( reconnect != -1 ) self->connection.reconnect = reconnect;
++      if ( reconnect != -1 ) {
++#if MYSQL_VERSION_ID >= 50013
++              my_bool recon = reconnect;
++              mysql_options(&self->connection, MYSQL_OPT_RECONNECT, &recon);
++#else
++              self->connection.reconnect = reconnect;
++#endif
++        }
+       Py_BEGIN_ALLOW_THREADS
+       r = mysql_ping(&(self->connection));
+       Py_END_ALLOW_THREADS
diff --git a/meta-stx/recipes-dbs/mysql/mysql-python_1.2.5.bb b/meta-stx/recipes-dbs/mysql/mysql-python_1.2.5.bb
new file mode 100644 (file)
index 0000000..e3ea9c7
--- /dev/null
@@ -0,0 +1,34 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Python interface to MySQL"
+HOMEPAGE = "https://github.com/farcepest/MySQLdb1"
+SECTION = "devel/python"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://GPL-2.0;md5=b234ee4d69f5fce4486a80fdaf4a4263"
+
+DEPENDS = "mysql5"
+
+SRCNAME = "MySQL-python"
+
+SRC_URI = "https://pypi.python.org/packages/source/M/${SRCNAME}/${SRCNAME}-${PV}.zip \
+           file://0001-_mysql.c-fix-compilation-with-MariaDB-with-10.3.13.patch \
+"
+SRC_URI[md5sum] = "654f75b302db6ed8dc5a898c625e030c"
+SRC_URI[sha256sum] = "811040b647e5d5686f84db415efd697e6250008b112b6909ba77ac059e140c74"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
diff --git a/meta-stx/recipes-devtools/erlang/erlang-native_R16B03-1.bbappend b/meta-stx/recipes-devtools/erlang/erlang-native_R16B03-1.bbappend
new file mode 100644 (file)
index 0000000..b8b2ca5
--- /dev/null
@@ -0,0 +1,24 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+# erlang < 20.0 is not compatible with OpenSSL 1.1.x
+inherit openssl10
+DEPENDS_append = " openssl-native"
+
+SRC_URI += "file://erts-configure.in-avoid-RPATH-warning.patch"
+
+EXTRA_OECONF = '--with-ssl=${STAGING_DIR_NATIVE}/usr --without-krb5 --without-zlib'
diff --git a/meta-stx/recipes-devtools/erlang/erlang_R16B03-1.bbappend b/meta-stx/recipes-devtools/erlang/erlang_R16B03-1.bbappend
new file mode 100644 (file)
index 0000000..b3d75da
--- /dev/null
@@ -0,0 +1,39 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+# erlang < 20.0 is not compatible with OpenSSL 1.1.x
+inherit openssl10
+
+SRC_URI += " \
+       file://fix-install-ownership.patch \
+       "
+
+EXTRA_OECONF = '--with-ssl=${STAGING_DIR_TARGET}/usr --without-krb5 --without-zlib'
+
+do_configure_prepend () {
+    export erl_xcomp_sysroot="${STAGING_DIR_HOST}/usr"
+    export erl_xcomp_isysroot="${STAGING_DIR_NATIVE}"
+
+    sed -i -e 's/opensslconf.h/opensslconf-64.h/' \
+        ${STAGING_INCDIR}/openssl10/openssl/rc4.h \
+        ${STAGING_INCDIR}/openssl10/openssl/rc2.h
+}
+
+do_install_append () {
+    # Fix the do_package_qa issue
+    chown -R root:root ${D}
+}
diff --git a/meta-stx/recipes-devtools/erlang/files/erts-configure.in-avoid-RPATH-warning.patch b/meta-stx/recipes-devtools/erlang/files/erts-configure.in-avoid-RPATH-warning.patch
new file mode 100644 (file)
index 0000000..5fa15e0
--- /dev/null
@@ -0,0 +1,40 @@
+From fe07e40ed72021225fce05dc1557fd83200d4506 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Mon, 23 Mar 2020 13:00:10 +0800
+Subject: [PATCH] erts/configure.in: avoid RPATH warning
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ erts/configure    | 2 +-
+ erts/configure.in | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/erts/configure b/erts/configure
+index 6940143..a70d5a3 100755
+--- a/erts/configure
++++ b/erts/configure
+@@ -37534,7 +37534,7 @@ fi
+-std_ssl_locations="/usr/local /usr/sfw /usr /opt/local /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
++std_ssl_locations="/usr/local /usr/sfw /opt/local /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
+ # Check whether --with-ssl-zlib or --without-ssl-zlib was given.
+diff --git a/erts/configure.in b/erts/configure.in
+index da4bf65..82e7d54 100644
+--- a/erts/configure.in
++++ b/erts/configure.in
+@@ -3843,7 +3843,7 @@ AC_SUBST(STATIC_KERBEROS_LIBS)
+ AC_SUBST(SSL_LINK_WITH_ZLIB)
+ AC_SUBST(STATIC_ZLIB_LIBS)
+-std_ssl_locations="/usr/local /usr/sfw /usr /opt/local /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
++std_ssl_locations="/usr/local /usr/sfw /opt/local /usr/pkg /usr/local/openssl /usr/lib/openssl /usr/openssl /usr/local/ssl /usr/lib/ssl /usr/ssl"
+ AC_ARG_WITH(ssl-zlib,
+ AS_HELP_STRING([--with-ssl-zlib=PATH],
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-devtools/erlang/files/fix-install-ownership.patch b/meta-stx/recipes-devtools/erlang/files/fix-install-ownership.patch
new file mode 100644 (file)
index 0000000..f29f31f
--- /dev/null
@@ -0,0 +1,11 @@
+diff -Nurpd a/lib/kernel/examples/Makefile b/lib/kernel/examples/Makefile
+--- a/lib/kernel/examples/Makefile     2020-03-01 18:17:45.600815403 +0000
++++ b/lib/kernel/examples/Makefile     2020-03-01 18:18:31.148911639 +0000
+@@ -49,6 +49,6 @@ EXAMPLES  = uds_dist
+ release_spec:
+       $(INSTALL_DIR) "$(RELSYSDIR)"
+       tar cf - $(EXAMPLES) | \
+-      (cd "$(RELSYSDIR)"; tar xf - ; chmod -R ug+w $(EXAMPLES) )
++      (cd "$(RELSYSDIR)"; tar --no-same-owner -xf - ; chmod -R ug+w $(EXAMPLES) )
+ release_docs_spec:
diff --git a/meta-stx/recipes-devtools/go/go-phercloud_git.bb b/meta-stx/recipes-devtools/go/go-phercloud_git.bb
new file mode 100644 (file)
index 0000000..b7a64fb
--- /dev/null
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "an OpenStack SDK for Go"
+HOMEPAGE = "https://github.com/gophercloud/gophercloud"
+SECTION = "devel/go"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=dd19699707373c2ca31531a659130416"
+
+SRCNAME = "gophercloud"
+
+PKG_NAME = "github.com/gophercloud/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "aa00757ee3ab58e53520b6cb910ca0543116400a"
+PV = "0.3.0+git${SRCREV}"
+RDEPENDS_${PN} = "bash"
+S = "${WORKDIR}/git"
+
+do_install() {
+       install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+       cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_phercloud_sysroot_preprocess"
+
+go_phercloud_sysroot_preprocess () {
+    install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+    cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/meta-stx/recipes-devtools/grubby/files/1000-Generic-name-for-Titanium.patch b/meta-stx/recipes-devtools/grubby/files/1000-Generic-name-for-Titanium.patch
new file mode 100644 (file)
index 0000000..57662b9
--- /dev/null
@@ -0,0 +1,25 @@
+From 7d3bd33be8b2c9924a2987710f54f5c62853d86c Mon Sep 17 00:00:00 2001
+From: jmckenna <jason.mckenna@windriver.com>
+Date: Wed, 5 Apr 2017 09:35:06 -0400
+Subject: [PATCH] Use generic OS title for Titanium
+
+---
+ new-kernel-pkg | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/new-kernel-pkg b/new-kernel-pkg
+index 847e959..977ef2d 100755
+--- a/new-kernel-pkg
++++ b/new-kernel-pkg
+@@ -166,6 +166,8 @@ set_title() {
+       fi
+     elif [ $ARCH = 's390' -o $ARCH = 's390x' ]; then
+       title=$(echo $version | sed 's/ /_/g')
++    elif [ -f /etc/build.info ]; then
++      title="Linux ($version)"
+     elif [ -f /etc/os-release ]; then
+       . /etc/os-release
+       title="$NAME ($version) $VERSION"
+-- 
+1.9.1
+
diff --git a/meta-stx/recipes-devtools/grubby/files/1001-Add-support-for-updating-grub-cfg-with-multiboot-2.patch b/meta-stx/recipes-devtools/grubby/files/1001-Add-support-for-updating-grub-cfg-with-multiboot-2.patch
new file mode 100644 (file)
index 0000000..c91096d
--- /dev/null
@@ -0,0 +1,263 @@
+From b2fc58bcd1f18cbc3e0b3d303e9f2132d0e36cd8 Mon Sep 17 00:00:00 2001
+From: Bin Qian <bin.qian@windriver.com>
+Date: Tue, 13 Feb 2018 22:48:54 -0500
+Subject: [PATCH 1/1] Add support for updating grub.cfg with multiboot 2
+
+---
+ Makefile           |   5 +++
+ __init__.py        |   8 ++++
+ grub-cfg-update    |  17 ++++++++
+ grub_cfg_update.py | 126 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ new-kernel-pkg     |  33 ++++++++++----
+ 5 files changed, 181 insertions(+), 8 deletions(-)
+ create mode 100644 __init__.py
+ create mode 100644 grub-cfg-update
+ create mode 100644 grub_cfg_update.py
+
+diff --git a/Makefile b/Makefile
+index e021f35..93fa41b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -56,6 +56,11 @@ install: all
+               install -m 755 grubby $(DESTDIR)$(PREFIX)/sbin ; \
+               install -m 644 grubby.8 $(DESTDIR)/$(mandir)/man8 ; \
+       fi
++      mkdir -p $(DESTDIR)/usr/lib64/python2.7/site-packages/grubby
++      install -m 644 grub_cfg_update.py $(DESTDIR)/usr/lib64/python2.7/site-packages/grubby/grub_cfg_update.py
++      install -m 644 __init__.py $(DESTDIR)/usr/lib64/python2.7/site-packages/grubby/__init__.py
++      install -m 500 grub-cfg-update $(DESTDIR)$(PREFIX)/sbin/grub-cfg-update
++
+ grubby:: $(OBJECTS)
+       $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ $(grubby_LIBS)
+diff --git a/__init__.py b/__init__.py
+new file mode 100644
+index 0000000..5f30af6
+--- /dev/null
++++ b/__init__.py
+@@ -0,0 +1,8 @@
++#!/usr/bin/env python
++#
++# Copyright (c) 2018 Wind River Systems, Inc.
++# SPDX-License-Identifier: Apache-2.0
++#
++#
++#
++#
+\ No newline at end of file
+diff --git a/grub-cfg-update b/grub-cfg-update
+new file mode 100644
+index 0000000..5e457e9
+--- /dev/null
++++ b/grub-cfg-update
+@@ -0,0 +1,17 @@
++#!/usr/bin/env python
++
++"""
++Copyright (c) 2018 Wind River Systems, Inc.
++ SPDX-License-Identifier: Apache-2.0
++
++
++
++"""
++
++import sys
++
++from grubby.grub_cfg_update import main
++
++if __name__ == "__main__":
++    main()
++
+diff --git a/grub_cfg_update.py b/grub_cfg_update.py
+new file mode 100644
+index 0000000..f5cd174
+--- /dev/null
++++ b/grub_cfg_update.py
+@@ -0,0 +1,126 @@
++#!/usr/bin/env python
++#
++# Copyright (c) 2018 Wind River Systems, Inc.
++# SPDX-License-Identifier: Apache-2.0
++#
++#
++#
++#
++import sys
++import argparse
++import os.path
++import re
++import ntpath
++
++
++LINUX_KERNEL_RE = "^[ \t]*module2[ \t]{1,}/vmlinuz-[^ \n\t]*"
++INITRD_RE = "^[ \t]*module2[ \t]{1,}/initramfs-[^ \n\t]*"
++
++
++def is_title(line):
++    m = re.search('^[ ]*menuentry ', line)
++    if m:
++        return True
++    return False
++
++
++def update_title(line, ver):
++    m = re.search("Linux [^ \n\t']*", line)
++    if not m:
++        print "Title pattern not understandable, not updated"
++        return line
++    new_line = re.sub("Linux [^ \n\t']*", "Linux %s" % ver, line)
++    return new_line
++
++
++def is_kernel(line):
++    m = re.search(LINUX_KERNEL_RE, line)
++    if m:
++        return True
++    return False
++
++
++def update_kernel(line, kernel):
++    kernel_name = ntpath.basename(kernel)
++    new_line = re.sub(LINUX_KERNEL_RE,
++                      "        module2 /%s" % kernel_name,
++                      line)
++    return new_line
++
++
++def is_initrd(line):
++    m = re.search(INITRD_RE, line)
++    if m:
++        return True
++    return False
++
++
++def update_initrd(line, initrd):
++    initrd_name = ntpath.basename(initrd)
++    new_line = re.sub(INITRD_RE,
++                      "        module2 /%s" % initrd_name,
++                      line)
++    return new_line
++
++
++def convert_line(line, version):
++    pattern = "^[ \t]*echo[ \t]*['\"]Loading Linux [^ \n\t]*"
++    m = re.search(pattern, line)
++    if not m:
++        return line
++
++    return "        echo     'Loading Linux %s ...'\n" % version
++
++
++def update_cfg(cfg, kernel, initramfs, ver, cfg_out):
++    if not os.path.isfile(cfg):
++        print "grub config file %s not found\n" % cfg
++        sys.exit(-1)
++
++    if not os.path.isfile(kernel):
++        print "specified kernel file %s not found\n" % kernel
++        sys.exit(-1)
++
++    if not os.path.isfile(initramfs):
++        print "specified initrd file %s not found\n" % initramfs
++        sys.exit(-1)
++
++    new_file_content = []
++    with open(cfg) as f:
++        for line in f:
++            if is_title(line):
++                new_line = update_title(line, ver)
++                print new_line
++            elif is_kernel(line):
++                new_line = update_kernel(line, kernel)
++                print new_line
++            elif is_initrd(line):
++                new_line = update_initrd(line, initramfs)
++                print new_line
++            else:
++                new_line = convert_line(line, ver)
++                print new_line
++
++            new_file_content.append(new_line)
++    with open(cfg_out, 'w') as f:
++        for line in new_file_content:
++            f.write("%s" % line)
++
++
++def main():
++    try:
++        parser = argparse.ArgumentParser(description='Update tboot enabled grub config')
++        parser.add_argument('cfg', help='original grub.cfg file path')
++        parser.add_argument('kernel', help='kernel file path')
++        parser.add_argument('initramfs', help='initramfs file path')
++        parser.add_argument('version', help='new version of kernel')
++        parser.add_argument('--cfg-out', help='updated grub.cfg target file path')
++        args = parser.parse_args()
++        cfg_out = args.cfg_out
++        if cfg_out is None:
++            cfg_out = args.cfg
++
++        update_cfg(args.cfg, args.kernel, args.initramfs, args.version, cfg_out)
++    except Exception as e:
++        print e
++        sys.exit(-1)
+diff --git a/new-kernel-pkg b/new-kernel-pkg
+index 977ef2d..1bb0a64 100755
+--- a/new-kernel-pkg
++++ b/new-kernel-pkg
+@@ -185,6 +185,11 @@ install() {
+       return
+     fi
++    grep -q 'tboot=true' /proc/cmdline 2>/dev/null
++    if [ $? == 0 ] ; then
++        return
++    fi
++
+     INITRD=""
+     if [ -f $initrdfile ]; then
+       [ -n "$verbose" ] && echo "found $initrdfile and using it with grubby"
+@@ -334,6 +339,11 @@ remove() {
+       return
+     fi
++    grep -q 'tboot=true' /proc/cmdline 2>/dev/null
++    if [ $? == 0 ] ; then
++        return
++    fi
++
+     local files
+     local f
+     files="/etc/kernel/prerm.d/*[^~] /etc/kernel/prerm.d/$version/*[^~]"
+@@ -483,14 +493,21 @@ update() {
+     fi
+     if [ -n "$cfgGrub2Efi" ]; then
+-      [ -n "$verbose" ] && echo "updating $version from $grub2EfiConfig"
+-      ARGS="--grub2 -c $grub2EfiConfig --efi --update-kernel=$kernelImage \
+-              $INITRD ${kernargs:+--args=\"$kernargs\"} \
+-              ${removeargs:+--remove-args=\"$removeargs\"} \
+-              --title=\"$title\$debugtitle\""
+-
+-      rungrubby ${ARGS}
+-      rungrubby --debug ${ARGS}
++        grep -q 'tboot=true' /proc/cmdline 2>/dev/null
++        if [ $? == 0 ] ; then
++            [ -n "$verbose" ] && echo "calling grub-cfg-update $grub2EfiConfig $kernelImage $initrdfile $version"
++            grub-cfg-update $grub2EfiConfig $kernelImage $initrdfile $version
++            return
++        else
++            [ -n "$verbose" ] && echo "updating $version from $grub2EfiConfig"
++            ARGS="--grub2 -c $grub2EfiConfig --efi --update-kernel=$kernelImage \
++                $INITRD ${kernargs:+--args=\"$kernargs\"} \
++                ${removeargs:+--remove-args=\"$removeargs\"} \
++                --title=\"$title\$debugtitle\""
++
++            rungrubby ${ARGS}
++            rungrubby --debug ${ARGS}
++        fi
+     else
+       [ -n "$verbose" ] && echo "$grub2EfiConfig does not exist, not running grubby"
+     fi
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-devtools/grubby/files/1002-Install-into-libdir-instead-of-hard-coding.patch b/meta-stx/recipes-devtools/grubby/files/1002-Install-into-libdir-instead-of-hard-coding.patch
new file mode 100644 (file)
index 0000000..9181ada
--- /dev/null
@@ -0,0 +1,17 @@
+diff --git a/Makefile b/Makefile
+index 7436c1c..91a4fd1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -56,9 +56,9 @@ install: all
+               install -m 755 grubby $(DESTDIR)$(PREFIX)/sbin ; \
+               install -m 644 grubby.8 $(DESTDIR)/$(mandir)/man8 ; \
+       fi
+-      mkdir -p $(DESTDIR)/usr/lib64/python2.7/site-packages/grubby
+-      install -m 644 grub_cfg_update.py $(DESTDIR)/usr/lib64/python2.7/site-packages/grubby/grub_cfg_update.py
+-      install -m 644 __init__.py $(DESTDIR)/usr/lib64/python2.7/site-packages/grubby/__init__.py
++      mkdir -p $(DESTDIR)/$(libdir)/python2.7/site-packages/grubby
++      install -m 644 grub_cfg_update.py $(DESTDIR)/$(libdir)/python2.7/site-packages/grubby/grub_cfg_update.py
++      install -m 644 __init__.py $(DESTDIR)/$(libdir)/python2.7/site-packages/grubby/__init__.py
+       install -m 500 grub-cfg-update $(DESTDIR)$(PREFIX)/sbin/grub-cfg-update
diff --git a/meta-stx/recipes-devtools/grubby/grubby_%.bbappend b/meta-stx/recipes-devtools/grubby/grubby_%.bbappend
new file mode 100644 (file)
index 0000000..2ae3f7c
--- /dev/null
@@ -0,0 +1,24 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+       file://1000-Generic-name-for-Titanium.patch \
+       file://1001-Add-support-for-updating-grub-cfg-with-multiboot-2.patch \
+       file://1002-Install-into-libdir-instead-of-hard-coding.patch \
+       "
+
+FILES_${PN}_append = " ${libdir}"
diff --git a/meta-stx/recipes-devtools/perl/filter-perl_1.59.bb b/meta-stx/recipes-devtools/perl/filter-perl_1.59.bb
new file mode 100644 (file)
index 0000000..347c2d6
--- /dev/null
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "perlfilter - Source Filters"
+DESCRIPTION = "\
+  perlfilter - Source Filters \
+    Modules included: \
+    - Filter::Util::Call - Perl Source Filter Utility Module \
+    - Filter::Util::Exec - exec source filter \
+    - Filter::decrypt - template for a decrypt source filter \
+    - Filter::cpp - cpp source filter \
+    - Filter::exec - exec source filter \
+    - Filter::m4 - M4 source filter \
+    - Filter::sh - sh source filter \
+    - Filter::tee - tee source filter \
+"
+HOMEPAGE = "https://metacpan.org/release/Filter"
+
+SECTION = "libs"
+
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://META.yml;beginline=11;endline=11;md5=963ce28228347875ace682de56eef8e8"
+
+CPAN_PACKAGE = "Filter"
+
+SRC_URI = "${CPAN_MIRROR}/authors/id/R/RU/RURBAN/${CPAN_PACKAGE}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "54e08a158bff1f35e0a93b3993dcf52f"
+SRC_URI[sha256sum] = "b4babfad4e0566a9a61199735f6e622a60d3274122752304f18f623412bf4e5a"
+
+S = "${WORKDIR}/${CPAN_PACKAGE}-${PV}"
+
+inherit cpan
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-stx/recipes-devtools/perl/libhtml-tagset-perl_3.20.bb b/meta-stx/recipes-devtools/perl/libhtml-tagset-perl_3.20.bb
new file mode 100644 (file)
index 0000000..6ece5c6
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "HTML Tagset bits."
+HOMEPAGE = "http://search.cpan.org/dist/HTML-Tagset/"
+SECTION = "libs"
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://README;beginline=60;md5=16ddda2d845a5546f615e6b122d1dbad"
+
+SRC_URI = "http://search.cpan.org/CPAN/authors/id/P/PE/PETDANCE/HTML-Tagset-${PV}.tar.gz"
+
+S = "${WORKDIR}/HTML-Tagset-${PV}"
+
+inherit cpan
+
+BBCLASSEXTEND="native"
+
+
+SRC_URI[md5sum] = "d2bfa18fe1904df7f683e96611e87437"
+SRC_URI[sha256sum] = "adb17dac9e36cd011f5243881c9739417fd102fce760f8de4e9be4c7131108e2"
diff --git a/meta-stx/recipes-devtools/perl/libmailtools-perl_2.18.bb b/meta-stx/recipes-devtools/perl/libmailtools-perl_2.18.bb
new file mode 100644 (file)
index 0000000..ec5a963
--- /dev/null
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "MailTools is a set of Perl modules related to mail applications"
+HOMEPAGE = "http://search.cpan.org/dist/MailTools/"
+SECTION = "libs"
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://lib/Mail/Mailer.pod;beginline=144;md5=93c9027e72612b3555f857c4fc17b953"
+DEPENDS = " \
+       libtest-pod-perl-native \
+       libtimedate-perl-native \
+       "       
+RDEPENDS_${PN} += " \
+       libtest-pod-perl \
+       libtimedate-perl \
+       perl-module-io-handle \
+       perl-module-net-smtp \
+       perl-module-test-more \
+       "
+BBCLASSEXTEND = "native"
+
+SRC_URI = "http://search.cpan.org/CPAN/authors/id/M/MA/MARKOV/MailTools-${PV}.tar.gz"
+SRC_URI[md5sum] = "972468ab5207b90398d77bed4ffc361d"
+SRC_URI[sha256sum] = "dfee9e770257371112f20d978e637759e81bc4f19e97b083585c71ecab37b527"
+
+S = "${WORKDIR}/MailTools-${PV}"
+
+inherit cpan
+
diff --git a/meta-stx/recipes-devtools/perl/libmailtools-perl_2.20.bb b/meta-stx/recipes-devtools/perl/libmailtools-perl_2.20.bb
new file mode 100644 (file)
index 0000000..ac8e890
--- /dev/null
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "MailTools is a set of Perl modules related to mail applications"
+HOMEPAGE = "http://search.cpan.org/dist/MailTools/"
+SECTION = "libs"
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://lib/Mail/Mailer.pod;beginline=144;md5=641bd171b1aaabba1fc83ac0a98a2d30"
+DEPENDS = " \
+       libtest-pod-perl-native \
+       libtimedate-perl-native \
+       "       
+RDEPENDS_${PN} += " \
+       libtest-pod-perl \
+       libtimedate-perl \
+       perl-module-io-handle \
+       perl-module-net-smtp \
+       perl-module-test-more \
+       "
+BBCLASSEXTEND = "native"
+
+SRC_URI = "http://search.cpan.org/CPAN/authors/id/M/MA/MARKOV/MailTools-${PV}.tar.gz"
+SRC_URI[md5sum] = "53e9d35256c3fd7cef0e4a24b15e9512"
+SRC_URI[sha256sum] = "f55606f7a9cc342ee9d5f996e2b6a4c0047e2ee47cd88c3250ecf0d0c5fb3196"
+
+S = "${WORKDIR}/MailTools-${PV}"
+
+inherit cpan
diff --git a/meta-stx/recipes-devtools/perl/libsocket6-perl/0001-socket6-perl-fix-configure-error.patch b/meta-stx/recipes-devtools/perl/libsocket6-perl/0001-socket6-perl-fix-configure-error.patch
new file mode 100644 (file)
index 0000000..07745c8
--- /dev/null
@@ -0,0 +1,34 @@
+From b33a6a83687fc58cd5f662d44ba8819498cb80a4 Mon Sep 17 00:00:00 2001
+From: Changqing Li <changqing.li@windriver.com>
+Date: Thu, 9 Aug 2018 14:10:32 +0800
+Subject: [PATCH] socket6-perl: fix configure error
+
+only do IPv6_CHECK_INET_NTOP when not cross compile to fix below
+error: checking for working inet_ntop..., configure: error:
+cannot run test program while cross compiling
+
+Upstream-Status: Inappropriate [oe specific]
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+---
+ configure.in | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/configure.in b/configure.in
+index 1fe22e1..1a7cf94 100644
+--- a/configure.in
++++ b/configure.in
+@@ -63,7 +63,9 @@ if test $ac_cv_lib_inet6_getaddrinfo = yes; then
+       INET6LIBS="-L$ipv6_cv_dir/lib -linet6"
+ fi
+-IPv6_CHECK_INET_NTOP()
++if test "$cross_compiling" != yes; then
++      IPv6_CHECK_INET_NTOP()
++fi
+ IPv6_CHECK_SA_LEN()
+ IPv6_CHECK_SIN6_SCOPE_ID()
+ IPv6_CHECK_SOCKLEN_T()
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-devtools/perl/libsocket6-perl_0.23.bb b/meta-stx/recipes-devtools/perl/libsocket6-perl_0.23.bb
new file mode 100644 (file)
index 0000000..24459e0
--- /dev/null
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Perl extensions for IPv6"
+HOMEPAGE = "https://metacpan.org/release/Socket6"
+SECTION = "libs"
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://README;beginline=31;md5=ad207d410de6d8ca6b4655469baa1ab4"
+
+BBCLASSEXTEND = "native"
+
+CFLAGS += "-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE"
+BUILD_CFLAGS += "-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE"
+
+SRC_URI = "http://search.cpan.org/CPAN/authors/id/U/UM/UMEMOTO/Socket6-${PV}.tar.gz;name=socket6-perl-${PV}"
+SRC_URI[socket6-perl-0.23.md5sum] = "2c02adb13c449d48d232bb704ddbd492"
+SRC_URI[socket6-perl-0.23.sha256sum] = "eda753f0197e8c3c8d4ab20a634561ce84011fa51aa5ff40d4dbcb326ace0833"
+
+S = "${WORKDIR}/Socket6-${PV}"
+
+do_configure_prepend () {
+       mkdir -p m4
+       autoreconf -Wcross --verbose --install --force || oefatal "autoreconf execution failed."
+       sed -i 's:\./configure\(.[^-]\):./configure --build=${BUILD_SYS} --host=${HOST_SYS} --target=${TARGET_SYS} --prefix=${prefix} --exec_prefix=${exec_prefix} --bindir=${bindir} --sbindir=${sbindir} --libexecdir=${libexecdir} --datadir=${datadir} --sysconfdir=${sysconfdir} --sharedstatedir=${sharedstatedir} --localstatedir=${localstatedir} --libdir=${libdir} --includedir=${includedir} --oldincludedir=${oldincludedir} --infodir=${infodir} --mandir=${mandir}\1:' Makefile.PL
+}
+
+inherit cpan
diff --git a/meta-stx/recipes-devtools/perl/libsocket6-perl_0.28.bb b/meta-stx/recipes-devtools/perl/libsocket6-perl_0.28.bb
new file mode 100644 (file)
index 0000000..6c55f02
--- /dev/null
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Perl extensions for IPv6"
+HOMEPAGE = "https://metacpan.org/release/Socket6"
+SECTION = "libs"
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://README;beginline=31;md5=aa15b0e3744ac40eaada8738eccd24df"
+
+BBCLASSEXTEND = "native"
+
+CFLAGS += "-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE"
+BUILD_CFLAGS += "-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE"
+
+SRC_URI = "http://search.cpan.org/CPAN/authors/id/U/UM/UMEMOTO/Socket6-${PV}.tar.gz;name=socket6-perl-${PV} \
+           file://0001-socket6-perl-fix-configure-error.patch \
+"
+SRC_URI[socket6-perl-0.28.md5sum] = "aa8489135a3dbcec6233396e1aeb043b"
+SRC_URI[socket6-perl-0.28.sha256sum] = "bfd49ab99f3197c99285fed4683c4edc06277c1e4453f593e694d7bff0974586"
+
+S = "${WORKDIR}/Socket6-${PV}"
+
+do_configure_prepend () {
+       mkdir -p m4
+       autoreconf -Wcross --verbose --install --force || oefatal "autoreconf execution failed."
+       sed -i 's:\./configure\(.[^-]\):./configure --build=${BUILD_SYS} --host=${HOST_SYS} --target=${TARGET_SYS} --prefix=${prefix} --exec_prefix=${exec_prefix} --bindir=${bindir} --sbindir=${sbindir} --libexecdir=${libexecdir} --datadir=${datadir} --sysconfdir=${sysconfdir} --sharedstatedir=${sharedstatedir} --localstatedir=${localstatedir} --libdir=${libdir} --includedir=${includedir} --oldincludedir=${oldincludedir} --infodir=${infodir} --mandir=${mandir}\1:' Makefile.PL
+}
+
+inherit cpan
diff --git a/meta-stx/recipes-devtools/perl/libtest-pod-perl_1.51.bb b/meta-stx/recipes-devtools/perl/libtest-pod-perl_1.51.bb
new file mode 100644 (file)
index 0000000..6e801c6
--- /dev/null
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Test::Pod - check for POD errors in files"
+SECTION = "libs"
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://README;beginline=26;md5=ea81140c3f3b2937c6490fc732471cd0"
+HOMEPAGE = "https://github.com/perl-pod/test-pod/"
+BBCLASSEXTEND = "native"
+RDEPENDS_${PN} += " perl-module-test-more \
+                    perl-module-file-spec \
+                    perl-module-pod-simple \
+                    perl-module-test-builder-tester \
+                    "
+
+SRC_URI ="http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Test-Pod-${PV}.tar.gz;name=test-pod-perl-${PV}"
+SRC_URI[test-pod-perl-1.51.md5sum] = "f806aa84de2f0c0fba48b3a5a8a4aecf"
+SRC_URI[test-pod-perl-1.51.sha256sum] = "c1a1d3cedf4a579e3aad89c36f9878a8542b6656dbe71f1581420f49582d7efb"
+S = "${WORKDIR}/Test-Pod-${PV}"
+
+inherit cpan
+
diff --git a/meta-stx/recipes-devtools/perl/libtest-pod-perl_1.52.bb b/meta-stx/recipes-devtools/perl/libtest-pod-perl_1.52.bb
new file mode 100644 (file)
index 0000000..b6dcdb0
--- /dev/null
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Test::Pod - check for POD errors in files"
+SECTION = "libs"
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://README;beginline=26;md5=ea81140c3f3b2937c6490fc732471cd0"
+HOMEPAGE = "https://github.com/perl-pod/test-pod/"
+BBCLASSEXTEND = "native"
+RDEPENDS_${PN} += " perl-module-test-more \
+                    perl-module-file-spec \
+                    perl-module-pod-simple \
+                    perl-module-test-builder-tester \
+                    "
+
+SRC_URI ="http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Test-Pod-${PV}.tar.gz;name=test-pod-perl-${PV}"
+SRC_URI[test-pod-perl-1.52.md5sum] = "472dda77746d48e6465bf62e47aeca81"
+SRC_URI[test-pod-perl-1.52.sha256sum] = "60a8dbcc60168bf1daa5cc2350236df9343e9878f4ab9830970a5dde6fe8e5fc"
+S = "${WORKDIR}/Test-Pod-${PV}"
+
+inherit cpan
diff --git a/meta-stx/recipes-devtools/perl/libwww-perl_6.05.bb b/meta-stx/recipes-devtools/perl/libwww-perl_6.05.bb
new file mode 100644 (file)
index 0000000..a69b37b
--- /dev/null
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "libwww-perl provides a simple and consistent API to the World Wide Web"
+HOMEPAGE = "http://search.cpan.org/dist/libwww-perl/"
+SECTION = "libs"
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://README;beginline=92;endline=98;md5=3da13bc02f8f17ed35ac5d192cae6fe4"
+DEPENDS = "liburi-perl-native libhtml-parser-perl-native libhtml-tagset-perl-native"
+RDEPENDS_${PN} += " \
+       libhtml-parser-perl \
+       libhtml-tagset-perl \
+       liburi-perl \
+       perl-module-digest-md5 \
+       perl-module-net-ftp \
+       "
+BBCLASSEXTEND = "native"
+
+
+SRC_URI = "http://search.cpan.org/CPAN/authors/id/G/GA/GAAS/libwww-perl-${PV}.tar.gz;name=libwww-perl-${PV}"
+SRC_URI[libwww-perl-6.05.md5sum] = "637d5f1eb61336ca2caa6e026b382f87"
+SRC_URI[libwww-perl-6.05.sha256sum] = "7b25799ff7eec18e8e4e97dc0cad7b2a5b433b50b13feb59d9179173bee78f23"
+
+S = "${WORKDIR}/libwww-perl-${PV}"
+
+inherit cpan
+
diff --git a/meta-stx/recipes-devtools/perl/libwww-perl_6.35.bb b/meta-stx/recipes-devtools/perl/libwww-perl_6.35.bb
new file mode 100644 (file)
index 0000000..cf8df59
--- /dev/null
@@ -0,0 +1,38 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "libwww-perl provides a simple and consistent API to the World Wide Web"
+HOMEPAGE = "https://metacpan.org/release/libwww-perl"
+SECTION = "libs"
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=225d44a95fa3addb1da6d91187ab189f"
+DEPENDS = "liburi-perl-native libhtml-parser-perl-native libhtml-tagset-perl-native"
+RDEPENDS_${PN} += " \
+       libhtml-parser-perl \
+       libhtml-tagset-perl \
+       liburi-perl \
+       perl-module-digest-md5 \
+       perl-module-net-ftp \
+       "
+BBCLASSEXTEND = "native"
+
+
+SRC_URI = "https://cpan.metacpan.org/authors/id/E/ET/ETHER/libwww-perl-${PV}.tar.gz;name=libwww-perl-${PV}"
+SRC_URI[libwww-perl-6.35.md5sum] = "19cd0f55f61359c973caa9301ec405bd"
+SRC_URI[libwww-perl-6.35.sha256sum] = "dda2578d7b32152c4afce834761a61d117de286c705a9f7972c7ac6032ca5953"
+
+S = "${WORKDIR}/libwww-perl-${PV}"
+
+inherit cpan
diff --git a/meta-stx/recipes-devtools/perl/pathtools-perl_3.75.bb b/meta-stx/recipes-devtools/perl/pathtools-perl_3.75.bb
new file mode 100644 (file)
index 0000000..74d95ec
--- /dev/null
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Tools for working with directory and file names"
+DESCRIPTION = "\
+  Modules included: \
+    - Cwd - get pathname of current working directory \
+    - File::Spec - portably perform operations on file names \
+    - File::Spec::AmigaOS - File::Spec for AmigaOS \
+    - File::Spec::Cygwin - methods for Cygwin file specs \
+    - File::Spec::Epoc - methods for Epoc file specs \
+    - File::Spec::Functions - portably perform operations on file names \
+    - File::Spec::Mac - File::Spec for Mac OS (Classic) \
+    - File::Spec::OS2 - methods for OS/2 file specs \
+    - File::Spec::Unix - File::Spec for Unix, base for other File::Spec modules \
+    - File::Spec::VMS - methods for VMS file specs \
+    - File::Spec::Win32 - methods for Win32 file specs \
+"
+HOMEPAGE = "https://metacpan.org/release/PathTools"
+
+SECTION = "libs"
+
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://META.yml;beginline=11;endline=11;md5=963ce28228347875ace682de56eef8e8"
+
+CPAN_PACKAGE = "PathTools" 
+
+SRC_URI = "${CPAN_MIRROR}/authors/id/X/XS/XSAWYERX/${CPAN_PACKAGE}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "8f329058f74468a576442d841c62aa62"
+SRC_URI[sha256sum] = "a558503aa6b1f8c727c0073339081a77888606aa701ada1ad62dd9d8c3f945a2"
+
+S = "${WORKDIR}/${CPAN_PACKAGE}-${PV}"
+
+inherit cpan
+
+RDEPENDS_${PN} = " \
+    perl-module-carp \
+    perl-module-file-basename \
+    perl-module-scalar-util \
+    perl-module-test-more \
+"
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-stx/recipes-devtools/perl/podlators-perl_4.12.bb b/meta-stx/recipes-devtools/perl/podlators-perl_4.12.bb
new file mode 100644 (file)
index 0000000..2ab71af
--- /dev/null
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "format POD source into various output formats"
+DESCRIPTION = "\
+  podlators contains Pod::Man and Pod::Text modules which convert POD input \
+  to *roff source output, suitable for man pages, or plain text. It also \
+  includes several subclasses of Pod::Text for formatted output to terminals \
+  with various capabilities. It is the source package for the Pod::Man and \
+  Pod::Text modules included with Perl. \
+"
+HOMEPAGE = "https://www.eyrie.org/~eagle/software/podlators"
+
+SECTION = "libs"
+
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=a25b3c873fe34dc2909a5b1ca2de4048"
+
+CPAN_PACKAGE = "podlators"
+
+SRC_URI = "${CPAN_MIRROR}/authors/id/R/RR/RRA/${CPAN_PACKAGE}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "99d5b0c15d2f72c5218dce1a5a9448c5"
+SRC_URI[sha256sum] = "948717da19630a5f003da4406da90fe1cbdec9ae493671c90dfb6d8b3d63b7eb"
+
+S = "${WORKDIR}/${CPAN_PACKAGE}-${PV}"
+
+inherit cpan
+
+RDEPENDS_${PN} = " \
+    perl-module-encode \
+    perl-module-pod-simple \
+"
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-stx/recipes-devtools/perl/scalar-list-utils-perl_1.50.bb b/meta-stx/recipes-devtools/perl/scalar-list-utils-perl_1.50.bb
new file mode 100644 (file)
index 0000000..ec11961
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "A selection of general-utility list subroutines"
+DESCRIPTION = "\
+  List::Util contains a selection of subroutines that people have expressed \
+  would be nice to have in the perl core, but the usage would not really be \
+  high enough to warrant the use of a keyword, and the size so small such \
+  that being individual extensions would be wasteful.\
+"
+HOMEPAGE = "https://metacpan.org/release/Scalar-List-Utils"
+
+SECTION = "libs"
+
+LICENSE = "Artistic-1.0 | GPL-1.0+"
+
+LIC_FILES_CHKSUM = "file://META.yml;beginline=11;endline=11;md5=963ce28228347875ace682de56eef8e8"
+
+CPAN_PACKAGE = "Scalar-List-Utils"
+
+SRC_URI = "${CPAN_MIRROR}/authors/id/P/PE/PEVANS/${CPAN_PACKAGE}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "136313884d1afa2fb6840695a1034b2c"
+SRC_URI[sha256sum] = "06aab9c693380190e53be09be7daed20c5d6278f71956989c24cca7782013675"
+
+S = "${WORKDIR}/${CPAN_PACKAGE}-${PV}"
+
+inherit cpan
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-stx/recipes-devtools/python/files/eventlet/0001-CGTS-2869-close-connection-on-HTTP-413-Request-Entit.patch b/meta-stx/recipes-devtools/python/files/eventlet/0001-CGTS-2869-close-connection-on-HTTP-413-Request-Entit.patch
new file mode 100644 (file)
index 0000000..05a28b8
--- /dev/null
@@ -0,0 +1,28 @@
+From bdbcd8615e1720b4098296752a2f4273a0947a8d Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Tue, 6 Sep 2016 15:09:39 +0000
+Subject: [PATCH] CGTS-2869 close connection on HTTP 413 Request Entity Too
+ Large
+
+Discard request body in case it's too large: close connection
+while request is in progress.
+---
+ eventlet/wsgi.py | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/eventlet/wsgi.py b/eventlet/wsgi.py
+index 6af2b99..8eac966 100644
+--- a/eventlet/wsgi.py
++++ b/eventlet/wsgi.py
+@@ -525,6 +525,8 @@ class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
+         finally:
+             if hasattr(result, 'close'):
+                 result.close()
++            if str(status_code[0]) == '413':
++                self.close_connection = 1
+             request_input = self.environ['eventlet.input']
+             if (request_input.chunked_input or
+                     request_input.position < (request_input.content_length or 0)):
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-devtools/python/files/python-barbican/barbican-fix-path-to-find-configuration-files.patch b/meta-stx/recipes-devtools/python/files/python-barbican/barbican-fix-path-to-find-configuration-files.patch
new file mode 100644 (file)
index 0000000..3f0987e
--- /dev/null
@@ -0,0 +1,35 @@
+From 223013ada4792ab0d8f9b02c89aa139969f99a97 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Sun, 29 Dec 2019 21:41:38 -0800
+Subject: [PATCH] barbican: fix path to find configuration files
+
+From 3364915002aa2deaf63841e338375648e5dc8f24 Mon Sep 17 00:00:00 2001
+From: Keith Holman <Keith.Holman@windriver.com>
+Date: Fri, 13 Jun 2014 13:46:04 -0400
+
+Barbican attempts to find configuration files for tests based on where
+the files are located within the source tree.  On deployment,
+configuration files are installed to a directory specified by a build
+recipe.  This fix updates the location for the configuration files
+with an identifier that is replaced during deployment.
+---
+ barbican/tests/api/test_resources_policy.py | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/barbican/tests/api/test_resources_policy.py b/barbican/tests/api/test_resources_policy.py
+index 37a1630f..d9bd9d72 100644
+--- a/barbican/tests/api/test_resources_policy.py
++++ b/barbican/tests/api/test_resources_policy.py
+@@ -37,8 +37,7 @@ from barbican.tests import utils
+ # Point to the policy.json file located in source control.
+-TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+-                                            '../../../etc', 'barbican'))
++TEST_VAR_DIR = "%BARBICAN_CONF_DIR%"
+ CONF = config.new_config()
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-devtools/python/files/python-barbican/gunicorn-config.py b/meta-stx/recipes-devtools/python/files/python-barbican/gunicorn-config.py
new file mode 100644 (file)
index 0000000..c8c1e07
--- /dev/null
@@ -0,0 +1,16 @@
+import multiprocessing
+
+bind = '0.0.0.0:9311'
+user = 'barbican'
+group = 'barbican'
+
+timeout = 30
+backlog = 2048
+keepalive = 2
+
+workers = multiprocessing.cpu_count() * 2
+
+loglevel = 'info'
+errorlog = '-'
+accesslog = '-'
+
diff --git a/meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-api.service b/meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-api.service
new file mode 100644 (file)
index 0000000..197a281
--- /dev/null
@@ -0,0 +1,19 @@
+[Unit]
+Description=Openstack Barbican API server
+After=syslog.target network.target
+Before=httpd.service
+
+[Service]
+PIDFile=/run/barbican/pid
+User=barbican
+Group=barbican
+RuntimeDirectory=barbican
+RuntimeDirectoryMode=770
+ExecStart=/usr/bin/gunicorn --pid /run/barbican/pid -c /etc/barbican/gunicorn-config.py --paste /etc/barbican/barbican-api-paste.ini
+ExecReload=/usr/bin/kill -s HUP $MAINPID
+ExecStop=/usr/bin/kill -s TERM $MAINPID
+StandardError=syslog
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-keystone-listener.service b/meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-keystone-listener.service
new file mode 100644 (file)
index 0000000..595f2eb
--- /dev/null
@@ -0,0 +1,13 @@
+[Unit]
+Description=Openstack Barbican Keystone listener daemon
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/barbican-keystone-listener
+User=barbican
+Group=barbican
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-worker.service b/meta-stx/recipes-devtools/python/files/python-barbican/openstack-barbican-worker.service
new file mode 100644 (file)
index 0000000..2eb311e
--- /dev/null
@@ -0,0 +1,13 @@
+[Unit]
+Description=Openstack Barbican worker daemon
+After=syslog.target network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/barbican-worker
+User=barbican
+Group=barbican
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/guni_config.py b/meta-stx/recipes-devtools/python/files/python-django-horizon/guni_config.py
new file mode 100644 (file)
index 0000000..57a0727
--- /dev/null
@@ -0,0 +1,59 @@
+import datetime
+import fnmatch
+import os
+import resource
+import subprocess
+from django.conf import settings
+
+
+errorlog = "/var/log/horizon/gunicorn.log"
+capture_output = True
+
+# maxrss ceiling in kbytes
+MAXRSS_CEILING = 512000
+
+
+def worker_abort(worker):
+    path = ("/proc/%s/fd") % os.getpid()
+    contents = os.listdir(path)
+    upload_dir = getattr(settings, 'FILE_UPLOAD_TEMP_DIR', '/tmp')
+    pattern = os.path.join(upload_dir, '*.upload')
+
+    for i in contents:
+        f = os.path.join(path, i)
+        if os.path.exists(f):
+            try:
+                link = os.readlink(f)
+                if fnmatch.fnmatch(link, pattern):
+                    worker.log.info(link)
+                    os.remove(link)
+            except OSError:
+                pass
+
+
+def when_ready(server):
+    subprocess.check_call(["/usr/bin/horizon-assets-compress"])
+
+
+def post_worker_init(worker):
+    worker.nrq = 0
+    worker.restart = False
+
+
+def pre_request(worker, req):
+    worker.nrq += 1
+    if worker.restart:
+        worker.nr = worker.max_requests - 1
+        maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
+        msg = "%(date)s %(uri)s %(rss)u" % ({'date': datetime.datetime.now(),
+                                             'uri': getattr(req, "uri"),
+                                             'rss': maxrss})
+        worker.log.info(msg)
+
+
+def post_request(worker, req, environ, resp):
+    worker.nrq -= 1
+    if not worker.restart:
+        maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
+        if maxrss > MAXRSS_CEILING and worker.nrq == 0:
+            worker.restart = True
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-assets-compress b/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-assets-compress
new file mode 100644 (file)
index 0000000..8b17d31
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+PYTHON=`which python`
+MANAGE="/usr/share/openstack-dashboard/manage.py"
+STATICDIR="/www/pages/static"
+BRANDDIR="/opt/branding"
+APPLIEDDIR="/opt/branding/applied"
+
+# Handle custom horizon branding
+rm -rf ${APPLIEDDIR}
+if ls ${BRANDDIR}/*.tgz 1> /dev/null 2>&1; then
+    LATESTBRANDING=$(ls $BRANDDIR |grep '\.tgz$' | tail -n 1)
+    mkdir -p ${APPLIEDDIR}
+    tar zxf ${BRANDDIR}/${LATESTBRANDING} -C ${APPLIEDDIR} 2>/dev/null 1>/dev/null
+    RETVAL=$?
+    if [ $RETVAL -ne 0 ]; then
+        echo "Failed to extract ${BRANDDIR}/${LATESTBRANDING}"
+    fi
+fi
+
+echo "Dumping static assets"
+if [ -d ${STATICDIR} ]; then
+    COLLECTARGS=--clear
+fi
+${PYTHON} -- ${MANAGE} collectstatic -v0 --noinput ${COLLECTARGS}
+
+RETVAL=$?
+if [ $RETVAL -ne 0 ]; then
+    echo "Failed to dump static assets."
+    exit $RETVAL
+fi
+
+nice -n 20 ionice -c Idle ${PYTHON} -- ${MANAGE} compress -v0
+RETVAL=$?
+if [ $RETVAL -ne 0 ]; then
+    echo "Failed to compress assets."
+    exit $RETVAL
+fi
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-clearsessions b/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-clearsessions
new file mode 100644 (file)
index 0000000..33e0736
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+/usr/bin/nice -n 2 /usr/bin/python /usr/share/openstack-dashboard/manage.py clearsessions
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-patching-restart b/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon-patching-restart
new file mode 100644 (file)
index 0000000..9fc15df
--- /dev/null
@@ -0,0 +1,80 @@
+#!/bin/bash
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+#
+# The patching subsystem provides a patch-functions bash source file
+# with useful function and variable definitions.
+#
+. /etc/patching/patch-functions
+
+#
+# We can now check to see what type of node we're on, if it's locked, etc,
+# and act accordingly
+#
+
+#
+# Declare an overall script return code
+#
+declare -i GLOBAL_RC=$PATCH_STATUS_OK
+
+#
+# handle restarting horizon.
+#
+if is_controller
+then
+    # Horizon only runs on the controller
+
+    if [ ! -f $PATCH_FLAGDIR/horizon.restarted ]
+    then
+        # Check SM to see if Horizon is running
+        sm-query service horizon | grep -q 'enabled-active'
+        if [ $? -eq 0 ]
+        then
+            loginfo "$0: Logging out all horizon sessions"
+
+            # Remove sessions
+            rm -f /var/tmp/sessionid*
+
+            loginfo "$0: Restarting horizon"
+
+            # Ask SM to restart Horizon
+            sm-restart service horizon
+            touch $PATCH_FLAGDIR/horizon.restarted
+
+            # Wait up to 30 seconds for service to recover
+            let -i UNTIL=$SECONDS+30
+            while [ $UNTIL -ge $SECONDS ]
+            do
+                # Check to see if it's running
+                sm-query service horizon | grep -q 'enabled-active'
+                if [ $? -eq 0 ]
+                then
+                    break
+                fi
+
+                # Still not running? Let's wait 5 seconds and check again
+                sleep 5
+            done
+
+            sm-query service horizon | grep -q 'enabled-active'
+            if [ $? -ne 0 ]
+            then
+                # Still not running! Clear the flag and mark the RC as failed
+                loginfo "$0: Failed to restart horizon"
+                rm -f $PATCH_FLAGDIR/horizon.restarted
+                GLOBAL_RC=$PATCH_STATUS_FAILED
+                sm-query service horizon
+            fi
+        fi
+    fi
+fi
+
+#
+# Exit the script with the overall return code
+#
+exit $GLOBAL_RC
+
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon.init b/meta-stx/recipes-devtools/python/files/python-django-horizon/horizon.init
new file mode 100755 (executable)
index 0000000..a2b15bc
--- /dev/null
@@ -0,0 +1,152 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides:          OpenStack Dashboard
+# Required-Start:    networking
+# Required-Stop:     networking
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: OpenStack Dashboard
+# Description:       Web based user interface to OpenStack services including
+#                    Nova, Swift, Keystone, etc.
+### END INIT INFO
+
+RETVAL=0
+DESC="openstack-dashboard"
+PIDFILE="/var/run/$DESC.pid"
+PYTHON=`which python`
+# Centos packages openstack_dashboard under /usr/share
+#MANAGE="@PYTHON_SITEPACKAGES@/openstack_dashboard/manage.py"
+MANAGE="/usr/share/openstack-dashboard/manage.py"
+EXEC="/usr/bin/gunicorn"
+BIND="localhost"
+PORT="8008"
+WORKER="eventlet"
+WORKERS=`grep workers /etc/openstack-dashboard/horizon-config.ini | cut -f3 -d' '`
+# Increased timeout to facilitate large image uploads
+TIMEOUT="200"
+STATICDIR="/www/pages/static"
+BRANDDIR="/opt/branding"
+APPLIEDDIR="/opt/branding/applied"
+TMPUPLOADDIR="/scratch/horizon"
+
+source /usr/bin/tsconfig
+
+start()
+{
+    #  Change workers if combined controller/compute
+    . /etc/platform/platform.conf
+    if [ "${WORKERS}" -lt "2" ]; then
+        WORKERS=2
+    fi
+
+    if [ -e $PIDFILE ]; then
+        PIDDIR=/proc/$(cat $PIDFILE)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC already running."
+            return
+        else
+            echo "Removing stale PID file $PIDFILE"
+            rm -f $PIDFILE
+        fi
+    fi
+
+    # Clean up any possible orphaned worker threads
+    if lsof -t -i:${PORT} 1> /dev/null 2>&1; then
+        kill $(lsof -t -i:${PORT}) > /dev/null 2>&1
+    fi
+
+    rm -rf ${TMPUPLOADDIR}
+    mkdir -p ${TMPUPLOADDIR}
+
+    echo -n "Starting $DESC..."
+    
+    start-stop-daemon --start --quiet --background --pidfile ${PIDFILE} \
+        --make-pidfile --exec ${PYTHON} -- ${EXEC} --bind ${BIND}:${PORT} \
+        --worker-class ${WORKER} --workers ${WORKERS} --timeout ${TIMEOUT} \
+        --log-syslog  \
+        --config '/usr/share/openstack-dashboard/guni_config.py' \
+        --pythonpath '/usr/share/openstack-dashboard' \
+        openstack_dashboard.wsgi
+    RETVAL=$?
+    if [ $RETVAL -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+
+    # now copy customer branding file to CONFIG_PATH/branding if anything updated
+    sm-query service drbd-platform | grep enabled-active > /dev/null 2>&1
+    IS_ACTIVE=$?
+
+    if ls ${BRANDDIR}/*.tgz 1> /dev/null 2>&1; then
+        LATESTBRANDING=$(ls $BRANDDIR |grep '\.tgz$' | tail -n 1)
+        if [ $IS_ACTIVE -eq 0 ]; then
+            # Only do the copy if the tarball has changed
+            if ! cmp --silent ${BRANDDIR}/${LATESTBRANDING} ${CONFIG_PATH}/branding/${LATESTBRANDING} ; then
+                mkdir -p ${CONFIG_PATH}/branding
+                rm -rf ${CONFIG_PATH}/branding/*.tgz
+                cp -r ${BRANDDIR}/${LATESTBRANDING} ${CONFIG_PATH}/branding
+            fi
+        fi
+    fi
+
+    # As part of starting horizon we should kill containerized horizon so that it
+    # will pickup branding changes
+    kubectl --kubeconfig=/etc/kubernetes/admin.conf delete pods -n openstack -l application=horizon 1>/dev/null
+}
+
+stop()
+{
+    if [ ! -e $PIDFILE ]; then return; fi
+
+    echo -n "Stopping $DESC..."
+
+    start-stop-daemon --stop --quiet --pidfile $PIDFILE
+    RETVAL=$?
+    if [ $RETVAL -eq 0 ]; then
+        echo "done."
+    else
+        echo "failed."
+    fi
+    rm -rf ${TMPUPLOADDIR}
+    rm -f $PIDFILE
+}
+
+status()
+{
+    pid=`cat $PIDFILE 2>/dev/null`
+    if [ -n "$pid" ]; then
+        if ps -p $pid &> /dev/null ; then
+            echo "$DESC is running"
+            RETVAL=0
+            return
+        else
+            RETVAL=1
+        fi
+    fi
+    echo "$DESC is not running"
+    RETVAL=3
+}
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload)
+        stop
+        start
+        ;;
+    status)
+        status
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|force-reload|restart|reload|status}"
+        RETVAL=1
+        ;;
+esac
+
+exit $RETVAL
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/openstack-dashboard-httpd-2.4.conf b/meta-stx/recipes-devtools/python/files/python-django-horizon/openstack-dashboard-httpd-2.4.conf
new file mode 100644 (file)
index 0000000..daf600c
--- /dev/null
@@ -0,0 +1,34 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+WSGIDaemonProcess dashboard
+WSGIProcessGroup dashboard
+WSGISocketPrefix run/wsgi
+
+WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
+Alias /dashboard/static /usr/share/openstack-dashboard/static
+
+<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
+  Options All
+  AllowOverride All
+  Require all granted
+</Directory>
+
+<Directory /usr/share/openstack-dashboard/static>
+  Options All
+  AllowOverride All
+  Require all granted
+</Directory>
+
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/openstack-dashboard-httpd-logging.conf b/meta-stx/recipes-devtools/python/files/python-django-horizon/openstack-dashboard-httpd-logging.conf
new file mode 100644 (file)
index 0000000..639779e
--- /dev/null
@@ -0,0 +1,47 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# if you want logging to a separate file, please update your config 
+# according to the last 4 lines in this snippet, and also take care
+# to introduce a <VirtualHost > directive.
+#
+
+WSGISocketPrefix run/wsgi
+
+<VirtualHost *:80>
+    WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
+    Alias /static /usr/share/openstack-dashboard/static
+
+    WSGIDaemonProcess dashboard
+    WSGIProcessGroup dashboard
+
+    #DocumentRoot %HORIZON_DIR%/.blackhole/
+
+    <Directory />
+        Options FollowSymLinks
+        AllowOverride None
+    </Directory>
+
+    <Directory /usr/share/openstack-dashboard/>
+        Options Indexes FollowSymLinks MultiViews
+        AllowOverride None
+        Order allow,deny
+        allow from all
+    </Directory>
+
+    ErrorLog logs/openstack_dashboard_error.log
+    LogLevel warn
+    CustomLog logs/openstack_dashboard_access.log combined
+</VirtualHost>
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/python-django-horizon-logrotate.conf b/meta-stx/recipes-devtools/python/files/python-django-horizon/python-django-horizon-logrotate.conf
new file mode 100644 (file)
index 0000000..a792a15
--- /dev/null
@@ -0,0 +1,23 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+/var/log/horizon/*.log {
+    weekly
+    rotate 4
+    missingok
+    compress
+    minsize 100k
+}
+
diff --git a/meta-stx/recipes-devtools/python/files/python-django-horizon/python-django-horizon-systemd.conf b/meta-stx/recipes-devtools/python/files/python-django-horizon/python-django-horizon-systemd.conf
new file mode 100644 (file)
index 0000000..078d5d2
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+[Service]
+ExecStartPre=/usr/bin/python /usr/share/openstack-dashboard/manage.py collectstatic --noinput --clear
+ExecStartPre=/usr/bin/python /usr/share/openstack-dashboard/manage.py compress --force
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/chmod_keyringlock2.patch b/meta-stx/recipes-devtools/python/files/python-keyring/chmod_keyringlock2.patch
new file mode 100644 (file)
index 0000000..8d9f6ad
--- /dev/null
@@ -0,0 +1,37 @@
+Index: keyring-5.3/keyring/backends/file.py
+===================================================================
+--- keyring-5.3.orig/keyring/backends/file.py
++++ keyring-5.3/keyring/backends/file.py
+@@ -68,6 +68,9 @@ class BaseKeyring(FileBacked, KeyringBac
+         service = escape_for_ini(service)
+         username = escape_for_ini(username)
++        # ensure the file exists
++        self._ensure_file_path()
++
+         # load the passwords from the file
+         config = configparser.RawConfigParser()
+         if os.path.exists(self.file_path):
+@@ -146,12 +149,16 @@ class BaseKeyring(FileBacked, KeyringBac
+             user_read_write = 0o644
+             os.chmod(self.file_path, user_read_write)
+         if not os.path.isfile(lockdir + "/" + lockfile):
+-             import stat
+-             with open(lockdir + "/" + lockfile, 'w'):
+-                 pass
+-             # must have the lock file with the correct group permissisions g+rw
+-             os.chmod(lockdir + "/" + lockfile, stat.S_IRWXG | stat.S_IRWXU)
+-             os.chown(lockdir + "/" + lockfile,-1,345)
++            with open(lockdir + "/" + lockfile, 'w'):
++                pass
++        if os.path.isfile(lockdir + "/" + lockfile):
++            import stat
++            import grp
++            if oct(stat.S_IMODE(os.stat(lockdir + "/" + lockfile).st_mode)) != '0770':
++                # Must have the lock file with the correct group and permissisions g+rw
++                os.chmod(lockdir + "/" + lockfile, stat.S_IRWXG | stat.S_IRWXU)
++                groupinfo = grp.getgrnam('sys_protected')
++                os.chown(lockdir + "/" + lockfile,-1,groupinfo.gr_gid)
+     def delete_password(self, service, username):
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/chown_keyringlock_file.patch b/meta-stx/recipes-devtools/python/files/python-keyring/chown_keyringlock_file.patch
new file mode 100644 (file)
index 0000000..28c56bc
--- /dev/null
@@ -0,0 +1,12 @@
+Index: keyring-5.3/keyring/backends/file.py
+===================================================================
+--- keyring-5.3.orig/keyring/backends/file.py
++++ keyring-5.3/keyring/backends/file.py
+@@ -151,6 +151,7 @@ class BaseKeyring(FileBacked, KeyringBac
+                  pass
+              # must have the lock file with the correct group permissisions g+rw
+              os.chmod(lockdir + "/" + lockfile, stat.S_IRWXG | stat.S_IRWXU)
++             os.chown(lockdir + "/" + lockfile,-1,345)
+     def delete_password(self, service, username):
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/fix_keyring_lockfile_location.patch b/meta-stx/recipes-devtools/python/files/python-keyring/fix_keyring_lockfile_location.patch
new file mode 100644 (file)
index 0000000..8531a28
--- /dev/null
@@ -0,0 +1,113 @@
+Index: keyring-5.3/keyring/backends/file.py
+===================================================================
+--- keyring-5.3.orig/keyring/backends/file.py
++++ keyring-5.3/keyring/backends/file.py
+@@ -19,6 +19,8 @@ from ..util.escape import escape as esca
+ from oslo_concurrency import lockutils
++lockfile = "keyringlock"
++
+ class FileBacked(object):
+     @abc.abstractproperty
+     def filename(self):
+@@ -104,16 +106,18 @@ class BaseKeyring(FileBacked, KeyringBac
+         service = escape_for_ini(service)
+         username = escape_for_ini(username)
++        # ensure the file exists
++        self._ensure_file_path()
++
+         # encrypt the password
+         password_encrypted = self.encrypt(password.encode('utf-8'))
+         # encode with base64
+         password_base64 = base64.encodestring(password_encrypted).decode()
++        lockdir = os.path.dirname(self.file_path)
+-        with lockutils.lock("keyringlock",external=True,lock_path="/tmp"):
++        with lockutils.lock(lockfile,external=True,lock_path=lockdir):
+-            # ensure the file exists
+-            self._ensure_file_path()
+             config = None
+             try:
+@@ -159,14 +163,13 @@ class BaseKeyring(FileBacked, KeyringBac
+-
+-
+     def _ensure_file_path(self):
+         """
+         Ensure the storage path exists.
+         If it doesn't, create it with "go-rwx" permissions.
+         """
+         storage_root = os.path.dirname(self.file_path)
++        lockdir = storage_root
+         if storage_root and not os.path.isdir(storage_root):
+             os.makedirs(storage_root)
+         if not os.path.isfile(self.file_path):
+@@ -175,13 +178,22 @@ class BaseKeyring(FileBacked, KeyringBac
+                 pass
+             user_read_write = 0o644
+             os.chmod(self.file_path, user_read_write)
++        if not os.path.isfile(lockdir + "/" + lockfile):
++             import stat
++             with open(lockdir + "/" + lockfile, 'w'):
++                 pass
++             # must have the lock file with the correct group permissisions g+rw
++             os.chmod(lockdir + "/" + lockfile, stat.S_IRWXG | stat.S_IRWXU)
++
+     def delete_password(self, service, username):
+         """Delete the password for the username of the service.
+         """
+         service = escape_for_ini(service)
+         username = escape_for_ini(username)
+-        with lockutils.lock("keyringlock",external=True,lock_path="/tmp"):
++
++        lockdir = os.path.dirname(self.file_path)
++        with lockutils.lock(lockfile,external=True,lock_path=lockdir):
+             config = configparser.RawConfigParser()
+             if os.path.exists(self.file_path):
+                 config.read(self.file_path)
+@@ -290,17 +302,6 @@ class EncryptedKeyring(Encrypted, BaseKe
+         # set a reference password, used to check that the password provided
+         #  matches for subsequent checks.
+-        # try to pre-create the /tmp/keyringlock if it doesn't exist
+-        lockfile = "/tmp/keyringlock"
+-        if os.geteuid() == 0 and (not os.path.exists(lockfile)):
+-             from pwd import getpwnam
+-             import stat
+-             nonrootuser = "sysadmin"
+-             with open(lockfile, 'w'):
+-                 pass
+-             # must have the lock file with the correct group permissisions g+rw
+-             os.chmod(lockfile, stat.S_IRWXG | stat.S_IRWXU)
+-
+         self.set_password('keyring-setting', 'password reference',
+             'password reference value')
+@@ -313,9 +314,10 @@ class EncryptedKeyring(Encrypted, BaseKe
+             return False
+         self._migrate()
++        lockdir = os.path.dirname(self.file_path)
+         # lock access to the file_path here, make sure it's not being written
+         # to while while we're checking for keyring-setting
+-        with lockutils.lock("keyringlock",external=True,lock_path="/tmp"):
++        with lockutils.lock(lockfile,external=True,lock_path=lockdir):
+             config = configparser.RawConfigParser()
+             config.read(self.file_path)
+             try:
+@@ -325,7 +327,6 @@ class EncryptedKeyring(Encrypted, BaseKe
+                 )
+             except (configparser.NoSectionError, configparser.NoOptionError):
+                 # The current file doesn't have the keyring-setting, check the backup
+-                logging.warning("_check_file: The current file doesn't have the keyring-setting, check the backup")
+                 if os.path.exists(self.backup_file_path):
+                     config = configparser.RawConfigParser()
+                     config.read(self.backup_file_path)
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/keyring_path_change.patch b/meta-stx/recipes-devtools/python/files/python-keyring/keyring_path_change.patch
new file mode 100644 (file)
index 0000000..46aa235
--- /dev/null
@@ -0,0 +1,24 @@
+---
+ keyring/util/platform_.py |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/keyring/util/platform_.py
++++ b/keyring/util/platform_.py
+@@ -2,6 +2,7 @@ from __future__ import absolute_import
+ import os
+ import platform
++from tsconfig.tsconfig import SW_VERSION
+ def _settings_root_XP():
+       return os.path.join(os.environ['USERPROFILE'], 'Local Settings')
+@@ -19,7 +20,8 @@ def _data_root_Linux():
+       Use freedesktop.org Base Dir Specfication to determine storage
+       location.
+       """
+-      fallback = os.path.expanduser('/opt/platform/.keyring/')
++      keyring_dir = os.path.join('/opt/platform/.keyring', SW_VERSION)
++      fallback = os.path.expanduser(keyring_dir)
+       root = os.environ.get('XDG_DATA_HOME', None) or fallback
+       return os.path.join(root, 'python_keyring')
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/lock_keyring_file.patch b/meta-stx/recipes-devtools/python/files/python-keyring/lock_keyring_file.patch
new file mode 100644 (file)
index 0000000..dab7248
--- /dev/null
@@ -0,0 +1,45 @@
+Index: keyring-5.3/keyring/backends/file.py
+===================================================================
+--- keyring-5.3.orig/keyring/backends/file.py
++++ keyring-5.3/keyring/backends/file.py
+@@ -6,6 +6,7 @@ import base64
+ import sys
+ import json
+ import abc
++import time
+ from ..py27compat import configparser
+@@ -95,14 +96,29 @@ class BaseKeyring(FileBacked, KeyringBac
+         config = configparser.RawConfigParser()
+         config.read(self.file_path)
++        # obtain lock for the keyring file
++        lock = ''
++        i = 60
++        while i:
++            if not os.path.isfile('/tmp/.keyringlock'):
++                lock = open('/tmp/.keyringlock', 'w')
++                break
++            else:
++                time.sleep(0.500)
++                i=i-1
++
+         # update the keyring with the password
+         if not config.has_section(service):
+             config.add_section(service)
+         config.set(service, username, password_base64)
+-        # save the keyring back to the file
+-        with open(self.file_path, 'w') as config_file:
+-            config.write(config_file)
++        if i:
++            # save the keyring back to the file
++            with open(self.file_path, 'w') as config_file:
++                config.write(config_file)
++            lock.close()
++            os.remove('/tmp/.keyringlock')
++
+     def _ensure_file_path(self):
+         """
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/lock_keyring_file2.patch b/meta-stx/recipes-devtools/python/files/python-keyring/lock_keyring_file2.patch
new file mode 100644 (file)
index 0000000..7633b5e
--- /dev/null
@@ -0,0 +1,42 @@
+Index: keyring-5.3/keyring/backends/file.py
+===================================================================
+--- keyring-5.3.orig/keyring/backends/file.py
++++ keyring-5.3/keyring/backends/file.py
+@@ -92,10 +92,6 @@ class BaseKeyring(FileBacked, KeyringBac
+         # ensure the file exists
+         self._ensure_file_path()
+-        # load the keyring from the disk
+-        config = configparser.RawConfigParser()
+-        config.read(self.file_path)
+-
+         # obtain lock for the keyring file
+         lock = ''
+         i = 60
+@@ -107,15 +103,21 @@ class BaseKeyring(FileBacked, KeyringBac
+                 time.sleep(0.500)
+                 i=i-1
+-        # update the keyring with the password
+-        if not config.has_section(service):
+-            config.add_section(service)
+-        config.set(service, username, password_base64)
+         if i:
+-            # save the keyring back to the file
++            # Load the keyring from the disk
++            config = configparser.RawConfigParser()
++            config.read(self.file_path)
++
++            # Update the keyring with the password
++            if not config.has_section(service):
++                config.add_section(service)
++            config.set(service, username, password_base64)
++
++            # Save the keyring back to the file
+             with open(self.file_path, 'w') as config_file:
+                 config.write(config_file)
++
+             lock.close()
+             os.remove('/tmp/.keyringlock')
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/no_keyring_password.patch b/meta-stx/recipes-devtools/python/files/python-keyring/no_keyring_password.patch
new file mode 100644 (file)
index 0000000..798daec
--- /dev/null
@@ -0,0 +1,70 @@
+diff --git a/keyring/backends/file.py b/keyring/backends/file.py
+index f899880..ef6db1d 100644
+--- a/keyring/backends/file.py
++++ b/keyring/backends/file.py
+@@ -116,7 +116,7 @@ class BaseKeyring(FileBacked, KeyringBackend):
+             # create the file without group/world permissions
+             with open(self.file_path, 'w'):
+                 pass
+-            user_read_write = 0o600
++            user_read_write = 0o644
+             os.chmod(self.file_path, user_read_write)
+     def delete_password(self, service, username):
+@@ -172,12 +172,19 @@ class Encrypted(object):
+     def _get_new_password(self):
+         while True:
+-            password = getpass.getpass(
+-                "Please set a password for your new keyring: ")
+-            confirm = getpass.getpass('Please confirm the password: ')
+-            if password != confirm:
+-                sys.stderr.write("Error: Your passwords didn't match\n")
+-                continue
++#****************************************************************
++# Forging the Keyring password to allow automation and still keep
++# the password encoded. TODO to be revisited when Barbican keyring
++# Will be used with the complete PKI solution
++#****************************************************************
++#            password = getpass.getpass(
++#                "Please set a password for your new keyring: ")
++#            confirm = getpass.getpass('Please confirm the password: ')
++#            if password != confirm:
++#                sys.stderr.write("Error: Your passwords didn't match\n")
++#                continue
++            password =  "Please set a password for your new keyring: "
++
+             if '' == password.strip():
+                 # forbid the blank password
+                 sys.stderr.write("Error: blank passwords aren't allowed.\n")
+@@ -248,8 +255,15 @@ class EncryptedKeyring(Encrypted, BaseKeyring):
+         Unlock this keyring by getting the password for the keyring from the
+         user.
+         """
+-        self.keyring_key = getpass.getpass(
+-            'Please enter password for encrypted keyring: ')
++#****************************************************************
++# Forging the Keyring password to allow automation and still keep
++# the password encoded. TODO to be revisited when Barbican keyring
++# Will be used with the complete PKI solution
++#****************************************************************
++#        self.keyring_key = getpass.getpass(
++#            'Please enter password for encrypted keyring: ')
++        self.keyring_key = "Please set a password for your new keyring: "
++
+         try:
+             ref_pw = self.get_password('keyring-setting', 'password reference')
+             assert ref_pw == 'password reference value'
+diff --git a/keyring/util/platform_.py b/keyring/util/platform_.py
+index dcdffea..53b9eae 100644
+--- a/keyring/util/platform_.py
++++ b/keyring/util/platform_.py
+@@ -19,7 +19,7 @@ def _data_root_Linux():
+       Use freedesktop.org Base Dir Specfication to determine storage
+       location.
+       """
+-      fallback = os.path.expanduser('~/.local/share')
++      fallback = os.path.expanduser('/opt/platform/.keyring/')
+       root = os.environ.get('XDG_DATA_HOME', None) or fallback
+       return os.path.join(root, 'python_keyring')
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/remove-reader-lock.patch b/meta-stx/recipes-devtools/python/files/python-keyring/remove-reader-lock.patch
new file mode 100644 (file)
index 0000000..137805d
--- /dev/null
@@ -0,0 +1,136 @@
+---
+ keyring/backends/file.py |   85 ++++++++++++++++++++++-------------------------
+ 1 file changed, 41 insertions(+), 44 deletions(-)
+
+--- a/keyring/backends/file.py
++++ b/keyring/backends/file.py
+@@ -18,6 +18,7 @@ from ..backend import KeyringBackend
+ from ..util import platform_, properties
+ from ..util.escape import escape as escape_for_ini
+ from oslo_concurrency import lockutils
++from tempfile import mkstemp
+ lockfile = "keyringlock"
+@@ -102,11 +103,9 @@ class BaseKeyring(FileBacked, KeyringBac
+         # encode with base64
+         password_base64 = base64.encodestring(password_encrypted).decode()
+-        lockdir = os.path.dirname(self.file_path)
+-
+-        with lockutils.lock(lockfile,external=True,lock_path=lockdir):
+-
++        keyringdir = os.path.dirname(self.file_path)
++        with lockutils.lock(lockfile, external=True, lock_path=keyringdir):
+             config = None
+             try:
+                 # Load the keyring from the disk
+@@ -121,16 +120,20 @@ class BaseKeyring(FileBacked, KeyringBac
+                 config.add_section(service)
+             config.set(service, username, password_base64)
+-            # Save the keyring back to the file
+-            storage_root = os.path.dirname(self.file_path)
+-            tmpfile = "tmpfile.%s" % os.getpid()
+-            with open(storage_root + "/" + tmpfile, 'w') as config_file:
+-                config.write(config_file)
+-            # copy will overwrite but move will not
+-            shutil.copy(storage_root + "/" + tmpfile,self.file_path)
+-            # wipe out tmpfile here
+-            os.remove(storage_root + "/" + tmpfile)
++            # remove any residual temporary files here
++            try:
++                for tmpfile in glob.glob("%s/tmp*" % keyringdir):
++                    os.remove(tmpfile)
++            except:
++                logging.warning("_check_file: tmpfile removal failed")
++            # Write the keyring to a temp file, then move the new file
++            # to avoid overwriting the existing inode
++            (fd, fname) = mkstemp(dir=keyringdir)
++            with os.fdopen(fd, "w") as config_file:
++                config.write(config_file)
++            os.chmod(fname, os.stat(self.file_path).st_mode)
++            shutil.move(fname, self.file_path)
+     def _ensure_file_path(self):
+@@ -167,8 +170,8 @@ class BaseKeyring(FileBacked, KeyringBac
+         service = escape_for_ini(service)
+         username = escape_for_ini(username)
+-        lockdir = os.path.dirname(self.file_path)
+-        with lockutils.lock(lockfile,external=True,lock_path=lockdir):
++        keyringdir = os.path.dirname(self.file_path)
++        with lockutils.lock(lockfile, external=True, lock_path=keyringdir):
+             config = configparser.RawConfigParser()
+             if os.path.exists(self.file_path):
+                 config.read(self.file_path)
+@@ -177,15 +180,21 @@ class BaseKeyring(FileBacked, KeyringBac
+                     raise PasswordDeleteError("Password not found")
+             except configparser.NoSectionError:
+                 raise PasswordDeleteError("Password not found")
+-            # update the file
+-            storage_root = os.path.dirname(self.file_path)
+-            tmpfile = "tmpfile.%s" % os.getpid()
+-            with open(storage_root + "/" + tmpfile, 'w') as config_file:
++
++            # remove any residual temporary files here
++            try:
++                for tmpfile in glob.glob("%s/tmp*" % keyringdir):
++                    os.remove(tmpfile)
++            except:
++                logging.warning("_check_file: tmpfile removal failed")
++
++            # Write the keyring to a temp file, then move the new file
++            # to avoid overwriting the existing inode
++            (fd, fname) = mkstemp(dir=keyringdir)
++            with os.fdopen(fd, "w") as config_file:
+                 config.write(config_file)
+-            # copy will overwrite but move will not
+-            shutil.copy(storage_root + "/" + tmpfile,self.file_path)
+-            # wipe out tmpfile
+-            os.remove(storage_root + "/" + tmpfile)
++            os.chmod(fname, os.stat(self.file_path).st_mode)
++            shutil.move(fname, self.file_path)
+ class PlaintextKeyring(BaseKeyring):
+@@ -294,27 +303,15 @@ class EncryptedKeyring(Encrypted, BaseKe
+             return False
+         self._migrate()
+-        lockdir = os.path.dirname(self.file_path)
+-        # lock access to the file_path here, make sure it's not being written
+-        # to while while we're checking for keyring-setting
+-        with lockutils.lock(lockfile,external=True,lock_path=lockdir):
+-            config = configparser.RawConfigParser()
+-            config.read(self.file_path)
+-            try:
+-                config.get(
+-                    escape_for_ini('keyring-setting'),
+-                    escape_for_ini('password reference'),
+-                )
+-            except (configparser.NoSectionError, configparser.NoOptionError):
+-                return False
+-
+-            # remove any residual temporary files here
+-            try:
+-                for tmpfile in glob.glob(os.path.dirname(self.file_path) + "/" + "tmpfile.*"):
+-                    os.remove(tmpfile)
+-            except:
+-                logging.warning("_check_file: tmpfile removal failed")
+-
++        config = configparser.RawConfigParser()
++        config.read(self.file_path)
++        try:
++            config.get(
++                escape_for_ini('keyring-setting'),
++                escape_for_ini('password reference'),
++            )
++        except (configparser.NoSectionError, configparser.NoOptionError):
++            return False
+         return True
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/remove_others_perms_on_keyringcfg_file.patch b/meta-stx/recipes-devtools/python/files/python-keyring/remove_others_perms_on_keyringcfg_file.patch
new file mode 100644 (file)
index 0000000..dcc4c2e
--- /dev/null
@@ -0,0 +1,15 @@
+---
+ keyring/backends/file.py |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/keyring/backends/file.py
++++ b/keyring/backends/file.py
+@@ -149,7 +149,7 @@ class BaseKeyring(FileBacked, KeyringBac
+             # create the file without group/world permissions
+             with open(self.file_path, 'w'):
+                 pass
+-            user_read_write = 0o644
++            user_read_write = 0o640
+             os.chmod(self.file_path, user_read_write)
+         if not os.path.isfile(lockdir + "/" + lockfile):
+             with open(lockdir + "/" + lockfile, 'w'):
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/use_new_lock.patch b/meta-stx/recipes-devtools/python/files/python-keyring/use_new_lock.patch
new file mode 100644 (file)
index 0000000..d298c20
--- /dev/null
@@ -0,0 +1,243 @@
+Index: keyring-5.3/keyring/backends/file.py
+===================================================================
+--- keyring-5.3.orig/keyring/backends/file.py
++++ keyring-5.3/keyring/backends/file.py
+@@ -7,6 +7,8 @@ import sys
+ import json
+ import abc
+ import time
++import logging
++import shutil
+
+ from ..py27compat import configparser
+
+@@ -14,6 +16,7 @@ from ..errors import PasswordDeleteError
+ from ..backend import KeyringBackend
+ from ..util import platform_, properties
+ from ..util.escape import escape as escape_for_ini
++from oslo_concurrency import lockutils
+
+
+ class FileBacked(object):
+@@ -31,6 +34,13 @@ class FileBacked(object):
+         """
+         return os.path.join(platform_.data_root(), self.filename)
+
++    @properties.NonDataProperty
++    def backup_file_path(self):
++        """
++        The path to the file where passwords are stored. This property
++        may be overridden by the subclass or at the instance level.
++        """
++        return os.path.join(platform_.data_root(), self.backup_filename)
+
+ class BaseKeyring(FileBacked, KeyringBackend):
+     """
+@@ -78,6 +88,16 @@ class BaseKeyring(FileBacked, KeyringBac
+             password = None
+         return password
+
++
++    def filecopy(self,src,dest):
++        """copy file src to dest with default buffer size
++        """
++        with open(src, 'r') as f1:
++            with open(dest, 'w') as f2:
++                shutil.copyfileobj(f1,f2)
++                f2.flush()
++
++
+     def set_password(self, service, username, password):
+         """Write the password in the file.
+         """
+@@ -89,37 +109,56 @@ class BaseKeyring(FileBacked, KeyringBac
+         # encode with base64
+         password_base64 = base64.encodestring(password_encrypted).decode()
+
+-        # ensure the file exists
+-        self._ensure_file_path()
+
+-        # obtain lock for the keyring file
+-        lock = ''
+-        i = 60
+-        while i:
+-            if not os.path.isfile('/tmp/.keyringlock'):
+-                lock = open('/tmp/.keyringlock', 'w')
+-                break
+-            else:
+-                time.sleep(0.500)
+-                i=i-1
++        with lockutils.lock("keyringlock",external=True,lock_path="/tmp"):
+
++            # ensure the file exists
++            self._ensure_file_path()
++
++            config = None
++            try:
++                # Load the keyring from the disk
++                config = configparser.RawConfigParser()
++                config.read(self.file_path)
++            except configparser.ParsingError as e:
++                logging.warning("set_password: keyring file corrupted, Reverting to Backup")
++                # Revert to the backup file (copy backup over current file)
++                try:
++                    src = self.backup_file_path
++                    dest = self.file_path
++                    self.filecopy(src,dest)
++                except shutil.Error as e:
++                    logging.warning("set_password: Revert from Backup failed. Error: %s" % e)
++                    raise
++                # Load the keyring from the disk, if this fails exception is raised
++                try:
++                    config = configparser.RawConfigParser()
++                    config.read(self.file_path)
++                except:
++                    e = sys.exc_info()[0]
++                    logging.warning("set_password: Both keyring files are non useable. Error: %s" % e)
++                    raise
+
+-        if i:
+-            # Load the keyring from the disk
+-            config = configparser.RawConfigParser()
+-            config.read(self.file_path)
+
+             # Update the keyring with the password
+             if not config.has_section(service):
+                 config.add_section(service)
+             config.set(service, username, password_base64)
+
++            # Make a back up of the keyring file here
++            try:
++                src = self.file_path
++                dest = self.backup_file_path
++                self.filecopy(src,dest)
++            except shutil.Error as e:
++                logging.warning("set_password: Backup failed. Error: %s" % e)
++
+             # Save the keyring back to the file
+             with open(self.file_path, 'w') as config_file:
+                 config.write(config_file)
+
+-            lock.close()
+-            os.remove('/tmp/.keyringlock')
++
++
+
+
+     def _ensure_file_path(self):
+@@ -142,17 +181,18 @@ class BaseKeyring(FileBacked, KeyringBac
+         """
+         service = escape_for_ini(service)
+         username = escape_for_ini(username)
+-        config = configparser.RawConfigParser()
+-        if os.path.exists(self.file_path):
+-            config.read(self.file_path)
+-        try:
+-            if not config.remove_option(service, username):
++        with lockutils.lock("keyringlock",external=True,lock_path="/tmp"):
++            config = configparser.RawConfigParser()
++            if os.path.exists(self.file_path):
++                config.read(self.file_path)
++            try:
++                if not config.remove_option(service, username):
++                    raise PasswordDeleteError("Password not found")
++            except configparser.NoSectionError:
+                 raise PasswordDeleteError("Password not found")
+-        except configparser.NoSectionError:
+-            raise PasswordDeleteError("Password not found")
+-        # update the file
+-        with open(self.file_path, 'w') as config_file:
+-            config.write(config_file)
++            # update the file
++            with open(self.file_path, 'w') as config_file:
++                config.write(config_file)
+
+ class PlaintextKeyring(BaseKeyring):
+     """Simple File Keyring with no encryption"""
+@@ -161,6 +201,7 @@ class PlaintextKeyring(BaseKeyring):
+     "Applicable for all platforms, but not recommended"
+
+     filename = 'keyring_pass.cfg'
++    backup_filename = 'crypted_pass_backup.cfg'
+
+     def encrypt(self, password):
+         """Directly return the password itself.
+@@ -214,6 +255,7 @@ class EncryptedKeyring(Encrypted, BaseKe
+     """PyCrypto File Keyring"""
+
+     filename = 'crypted_pass.cfg'
++    backup_filename = 'crypted_pass_backup.cfg'
+     pw_prefix = 'pw:'.encode()
+
+     @properties.ClassProperty
+@@ -247,6 +289,19 @@ class EncryptedKeyring(Encrypted, BaseKe
+         self.keyring_key = self._get_new_password()
+         # set a reference password, used to check that the password provided
+         #  matches for subsequent checks.
++
++        # try to pre-create the /tmp/keyringlock if it doesn't exist
++        lockfile = "/tmp/keyringlock"
++        if os.geteuid() == 0 and (not os.path.exists(lockfile)):
++             from pwd import getpwnam
++             import stat
++             nonrootuser = "sysadmin"
++             with open(lockfile, 'w'):
++                 pass
++             # must have the lock file with the correct group permissisions g+rw
++             os.chmod(lockfile, stat.S_IRWXG | stat.S_IRWXU)
++
++
+         self.set_password('keyring-setting', 'password reference',
+             'password reference value')
+
+@@ -257,15 +312,41 @@ class EncryptedKeyring(Encrypted, BaseKe
+         if not os.path.exists(self.file_path):
+             return False
+         self._migrate()
+-        config = configparser.RawConfigParser()
+-        config.read(self.file_path)
+-        try:
+-            config.get(
+-                escape_for_ini('keyring-setting'),
+-                escape_for_ini('password reference'),
+-            )
+-        except (configparser.NoSectionError, configparser.NoOptionError):
+-            return False
++
++        # lock access to the file_path here, make sure it's not being written
++        # to while while we're checking for keyring-setting
++        with lockutils.lock("keyringlock",external=True,lock_path="/tmp"):
++            config = configparser.RawConfigParser()
++            config.read(self.file_path)
++            try:
++                config.get(
++                    escape_for_ini('keyring-setting'),
++                    escape_for_ini('password reference'),
++                )
++            except (configparser.NoSectionError, configparser.NoOptionError):
++                # The current file doesn't have the keyring-setting, check the backup
++                logging.warning("_check_file: The current file doesn't have the keyring-setting, check the backup")
++                if os.path.exists(self.backup_file_path):
++                    config = configparser.RawConfigParser()
++                    config.read(self.backup_file_path)
++                    try:
++                        config.get(
++                            escape_for_ini('keyring-setting'),
++                            escape_for_ini('password reference'),
++                        )
++                    except (configparser.NoSectionError, configparser.NoOptionError):
++                        return False
++                    # backup file has it, let's use it
++                    try:
++                        src = self.backup_file_path
++                        dest = self.file_path
++                        shutil.copy(src,dest)
++                    except shutil.Error as e:
++                        logging.warning("Revert from Backup failed. Error: %s" % e)
++                        return False
++                else:
++                    return False
++
+         return True
+
+     def _unlock(self):
diff --git a/meta-stx/recipes-devtools/python/files/python-keyring/use_temporary_file.patch b/meta-stx/recipes-devtools/python/files/python-keyring/use_temporary_file.patch
new file mode 100644 (file)
index 0000000..faa968d
--- /dev/null
@@ -0,0 +1,162 @@
+Index: keyring-5.3/keyring/backends/file.py
+===================================================================
+--- keyring-5.3.orig/keyring/backends/file.py
++++ keyring-5.3/keyring/backends/file.py
+@@ -9,6 +9,7 @@ import abc
+ import time
+ import logging
+ import shutil
++import glob
+ from ..py27compat import configparser
+@@ -36,13 +37,6 @@ class FileBacked(object):
+         """
+         return os.path.join(platform_.data_root(), self.filename)
+-    @properties.NonDataProperty
+-    def backup_file_path(self):
+-        """
+-        The path to the file where passwords are stored. This property
+-        may be overridden by the subclass or at the instance level.
+-        """
+-        return os.path.join(platform_.data_root(), self.backup_filename)
+ class BaseKeyring(FileBacked, KeyringBackend):
+     """
+@@ -91,15 +85,6 @@ class BaseKeyring(FileBacked, KeyringBac
+         return password
+-    def filecopy(self,src,dest):
+-        """copy file src to dest with default buffer size
+-        """
+-        with open(src, 'r') as f1:
+-            with open(dest, 'w') as f2:
+-                shutil.copyfileobj(f1,f2)
+-                f2.flush()
+-
+-
+     def set_password(self, service, username, password):
+         """Write the password in the file.
+         """
+@@ -125,23 +110,7 @@ class BaseKeyring(FileBacked, KeyringBac
+                 config = configparser.RawConfigParser()
+                 config.read(self.file_path)
+             except configparser.ParsingError as e:
+-                logging.warning("set_password: keyring file corrupted, Reverting to Backup")
+-                # Revert to the backup file (copy backup over current file)
+-                try:
+-                    src = self.backup_file_path
+-                    dest = self.file_path
+-                    self.filecopy(src,dest)
+-                except shutil.Error as e:
+-                    logging.warning("set_password: Revert from Backup failed. Error: %s" % e)
+-                    raise
+-                # Load the keyring from the disk, if this fails exception is raised
+-                try:
+-                    config = configparser.RawConfigParser()
+-                    config.read(self.file_path)
+-                except:
+-                    e = sys.exc_info()[0]
+-                    logging.warning("set_password: Both keyring files are non useable. Error: %s" % e)
+-                    raise
++                logging.warning("set_password: keyring file corrupted")
+             # Update the keyring with the password
+@@ -149,17 +118,15 @@ class BaseKeyring(FileBacked, KeyringBac
+                 config.add_section(service)
+             config.set(service, username, password_base64)
+-            # Make a back up of the keyring file here
+-            try:
+-                src = self.file_path
+-                dest = self.backup_file_path
+-                self.filecopy(src,dest)
+-            except shutil.Error as e:
+-                logging.warning("set_password: Backup failed. Error: %s" % e)
+-
+             # Save the keyring back to the file
+-            with open(self.file_path, 'w') as config_file:
++            storage_root = os.path.dirname(self.file_path)
++            tmpfile = "tmpfile.%s" % os.getpid()
++            with open(storage_root + "/" + tmpfile, 'w') as config_file:
+                 config.write(config_file)
++            # copy will overwrite but move will not
++            shutil.copy(storage_root + "/" + tmpfile,self.file_path)
++            # wipe out tmpfile here
++            os.remove(storage_root + "/" + tmpfile)
+@@ -203,8 +170,15 @@ class BaseKeyring(FileBacked, KeyringBac
+             except configparser.NoSectionError:
+                 raise PasswordDeleteError("Password not found")
+             # update the file
+-            with open(self.file_path, 'w') as config_file:
++            storage_root = os.path.dirname(self.file_path)
++            tmpfile = "tmpfile.%s" % os.getpid()
++            with open(storage_root + "/" + tmpfile, 'w') as config_file:
+                 config.write(config_file)
++            # copy will overwrite but move will not
++            shutil.copy(storage_root + "/" + tmpfile,self.file_path)
++            # wipe out tmpfile
++            os.remove(storage_root + "/" + tmpfile)
++
+ class PlaintextKeyring(BaseKeyring):
+     """Simple File Keyring with no encryption"""
+@@ -213,7 +187,6 @@ class PlaintextKeyring(BaseKeyring):
+     "Applicable for all platforms, but not recommended"
+     filename = 'keyring_pass.cfg'
+-    backup_filename = 'crypted_pass_backup.cfg'
+     def encrypt(self, password):
+         """Directly return the password itself.
+@@ -267,7 +240,6 @@ class EncryptedKeyring(Encrypted, BaseKe
+     """PyCrypto File Keyring"""
+     filename = 'crypted_pass.cfg'
+-    backup_filename = 'crypted_pass_backup.cfg'
+     pw_prefix = 'pw:'.encode()
+     @properties.ClassProperty
+@@ -326,27 +298,15 @@ class EncryptedKeyring(Encrypted, BaseKe
+                     escape_for_ini('password reference'),
+                 )
+             except (configparser.NoSectionError, configparser.NoOptionError):
+-                # The current file doesn't have the keyring-setting, check the backup
+-                if os.path.exists(self.backup_file_path):
+-                    config = configparser.RawConfigParser()
+-                    config.read(self.backup_file_path)
+-                    try:
+-                        config.get(
+-                            escape_for_ini('keyring-setting'),
+-                            escape_for_ini('password reference'),
+-                        )
+-                    except (configparser.NoSectionError, configparser.NoOptionError):
+-                        return False
+-                    # backup file has it, let's use it
+-                    try:
+-                        src = self.backup_file_path
+-                        dest = self.file_path
+-                        shutil.copy(src,dest)
+-                    except shutil.Error as e:
+-                        logging.warning("Revert from Backup failed. Error: %s" % e)
+-                        return False
+-                else:
+-                    return False
++                return False
++
++            # remove any residual temporary files here
++            try:
++                for tmpfile in glob.glob(os.path.dirname(self.file_path) + "/" + "tmpfile.*"):
++                    os.remove(tmpfile)
++            except:
++                logging.warning("_check_file: tmpfile removal failed")
++
+         return True
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/admin-openrc b/meta-stx/recipes-devtools/python/files/python-keystone/admin-openrc
new file mode 100644 (file)
index 0000000..4b459d7
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Matches bootstrap data in keystone-init
+#
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_NAME=admin
+export OS_USERNAME=%ADMIN_USER%
+export OS_PASSWORD=%ADMIN_PASSWORD%
+export OS_AUTH_URL=http://%CONTROLLER_IP%:35357/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
+export OS_REGION_NAME=RegionOne
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/convert_keystone_backend.py b/meta-stx/recipes-devtools/python/files/python-keystone/convert_keystone_backend.py
new file mode 100755 (executable)
index 0000000..eebd59d
--- /dev/null
@@ -0,0 +1,43 @@
+#!/usr/bin/python
+
+import sys
+import ConfigParser 
+import shutil
+
+path = "/etc/keystone/keystone.conf"
+
+if len(sys.argv) != 2:
+       sys.stderr.write("Usage: "+sys.argv[0]+" [sql|hybrid]\n")
+       sys.exit(1)
+
+backend = sys.argv[1]
+if backend == "hybrid":
+       identity_backend = 'keystone.identity.backends.hybrid_identity.Identity'
+       assignment_backend = 'keystone.assignment.backends.hybrid_assignment.Assignment'
+elif backend == "sql":
+       identity_backend = 'keystone.identity.backends.sql.Identity'
+       assignment_backend = 'keystone.assignment.backends.sql.Assignment'
+else:
+       sys.stderr.write("Usage: "+sys.argv[0]+" [sql|hybrid]\n")
+       sys.exit(1)
+
+shutil.copyfile(path, path + ".bak")
+
+cfg = ConfigParser.ConfigParser()
+c = cfg.read(path)
+
+if not cfg.has_section("identity"):
+       cfg.add_section("identity")
+
+cfg.set("identity", "driver", identity_backend)
+
+if not cfg.has_section("assignment"):
+       cfg.add_section("assignment")
+
+cfg.set("assignment", "driver", assignment_backend)
+
+fp = open(path, "w")
+cfg.write(fp)
+fp.close()
+
+exit(0)
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/hybrid-backend-setup b/meta-stx/recipes-devtools/python/files/python-keystone/hybrid-backend-setup
new file mode 100755 (executable)
index 0000000..d3f7eac
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+/etc/keystone/convert_keystone_backend.py hybrid
+
+/etc/init.d/openldap start
+/etc/init.d/keystone restart
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/identity.sh b/meta-stx/recipes-devtools/python/files/python-keystone/identity.sh
new file mode 100644 (file)
index 0000000..af99673
--- /dev/null
@@ -0,0 +1,226 @@
+#!/bin/bash
+
+#
+# Copyright (C) 2014 Wind River Systems, Inc.
+#
+# The identity.sh provides utilities for services to add tenant/role/users,
+# and service/endpoints into keystone database
+#
+
+# Use shared secret for authentication before any user created.
+export OS_SERVICE_TOKEN="password"
+export OS_SERVICE_ENDPOINT="http://localhost:35357/v2.0"
+
+declare -A PARAMS
+
+# Shortcut function to get a newly generated ID
+function get_field () {
+    while read data; do
+        if [ "$1" -lt 0 ]; then
+            field="(\$(NF$1))"
+        else
+            field="\$$(($1 + 1))"
+        fi
+        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
+    done
+}
+
+# Usage help
+help () {
+    if [ $# -eq 0 ]; then
+        echo "Usage: $0 <subcommand> ..."
+        echo ""
+        echo "Keystone CLI wrapper to create tenant/user/role, and service/endpoint."
+        echo "It uses the default tenant, user and password from environment variables"
+        echo "(OS_TENANT_NAME, OS_USERNAME, OS_PASSWORD) to authenticate with keystone."
+        echo ""
+        echo "Positional arguments:"
+        echo "  <subcommand>"
+        echo "    user-create"
+        echo "    service-create"
+        echo ""
+        echo "See \"identity.sh help COMMAND\" for help on a specific command."
+        exit 0
+    fi
+
+    case "$2" in
+        service-create)
+            echo "Usage: $0 $2 [--name=<name>] [--type=<type>] [--description=<description>] [--region=<region>] [--publicurl=<public url>] [--adminurl=<admin url>] [--internalurl=<internal url>]"
+            echo ""
+            echo "Create service and endpoint in keystone."
+            echo ""
+            echo "Arguments:"
+            echo "  --name=<name>"
+            echo "                  The name of the service"
+            echo "  --type=<type>"
+            echo "                  The type of the service"
+            echo "  --description=<description>"
+            echo "                  Description of the service"
+            echo "  --region=<region>"
+            echo "                  The region of the service"
+            echo "  --publicurl=<public url>"
+            echo "                  Public URL of the service endpoint"
+            echo "  --adminurl=<admin url>"
+            echo "                  Admin URL of the service endpoint"
+            echo "  --internalurl=<internal url>"
+            echo "                  Internal URL of the service endpoint"
+            ;;
+        user-create)
+            echo "Usage: $0 $2 [--name=<name>] [--pass=<password>] [--tenant=<tenant>] [--role=<role>] [--email=<email>]"
+            echo ""
+            echo "Arguments:"
+            echo "  --name=<name>"
+            echo "                  The name of the user"
+            echo "  --pass=<password>"
+            echo "                  The password of the user"
+            echo "  --tenant=<tenant>"
+            echo "                  The tenant of the user belongs to"
+            echo "  --role=<role>"
+            echo "                  The role of the user in the <tenant>"
+            echo "  --email=<email>"
+            echo "                  The email of the user"
+            ;;
+        *)
+            echo "Usage: $0 help <subcommand> ..."
+            echo ""
+            exit 0
+            ;;
+    esac
+}
+
+# Parse the command line parameters in an map
+parse_param () {
+    while [ $# -ne 0 ]; do
+    param=$1
+    shift
+
+    key=`echo $param | cut -d '=' -f 1`
+    key=`echo $key | tr -d '[-*2]'`
+    PARAMS[$key]=`echo $param | cut -d '=' -f 2`
+    done
+}
+
+# Create tenant/role/user, and add user to the tenant as role
+user-create () {
+    # validation checking
+    if [[ "$@" =~ ^--name=.*\ --pass=.*\ --tenant=.*\ --role=.*\ --email=.*$ ]]; then
+        params=`echo "$@" | sed -e 's%--name=\(.*\) --pass=\(.*\) --tenant=\(.*\) --role=\(.*\) --email=\(.*\)%--name=\1|--pass=\2|--tenant=\3|--role=\4|--email=\5%g'`
+    else
+        help
+        exit 1
+    fi
+
+    # parse the cmdline parameters
+    IFS="|"
+    parse_param $params
+    unset IFS
+
+    echo "Adding user in keystone ..."
+
+    if [ "x${PARAMS["tenant"]}" != "x" ]; then
+        # check if tenant exist, create it if not
+        TENANT_ID=$(keystone tenant-get ${PARAMS["tenant"]} | grep " id " | get_field 2)
+        if [ "x$TENANT_ID" == "x" ]; then
+            echo "Creating tenant ${PARAMS["tenant"]} in keystone ..."
+            TENANT_ID=$(keystone tenant-create --name=${PARAMS["tenant"]} | grep " id " | get_field 2)
+        fi
+        echo "Tenant list:"
+        keystone tenant-list
+    fi
+
+    if [ "x${PARAMS["role"]}" != "x" ]; then
+        # check if role exist, create it if not
+        ROLE_ID=$(keystone role-get ${PARAMS["role"]} | grep " id " | get_field 2)
+        if [ "x$ROLE_ID" == "x" ]; then
+            echo "Creating role ${PARAMS["role"]} in keystone ..."
+            ROLE_ID=$(keystone role-create --name=${PARAMS["role"]} | grep " id " | get_field 2)
+        fi
+        echo "Role list:"
+        keystone role-list
+    fi
+
+    if [ "x${PARAMS["name"]}" != "x" ]; then
+        # check if user exist, create it if not
+        USER_ID=$(keystone user-get ${PARAMS["name"]} | grep " id " | get_field 2)
+        if [ "x$USER_ID" == "x" ]; then
+            echo "Creating user ${PARAMS["name"]} in keystone ..."
+            USER_ID=$(keystone user-create --name=${PARAMS["name"]} --pass=${PARAMS["pass"]} --tenant-id $TENANT_ID --email=${PARAMS["email"]} | grep " id " | get_field 2)
+        fi
+        echo "User list:"
+        keystone user-list
+    fi
+
+    if [ "x$USER_ID" != "x" ] && [ "x$TENANT_ID" != "x" ] && [ "x$ROLE_ID" != "x" ]; then
+        # add the user to the tenant as role
+        keystone user-role-list --user-id $USER_ID --tenant-id $TENANT_ID | grep $ROLE_ID &> /dev/null
+        if [ $? -eq 1 ]; then
+            echo "Adding user ${PARAMS["name"]} in tenant ${PARAMS["tenant"]} as ${PARAMS["role"]} ..."
+            keystone user-role-add --tenant-id $TENANT_ID --user-id $USER_ID --role-id $ROLE_ID
+        fi
+    fi
+
+    if [ "x$USER_ID" != "x" ] && [ "x$TENANT_ID" != "x" ]; then
+        echo "User ${PARAMS["name"]} in Tenant ${PARAMS["tenant"]} role list:"
+        keystone user-role-list --user-id $USER_ID --tenant-id $TENANT_ID
+    fi
+}
+
+# Create service and its endpoint
+service-create () {
+    # validation checking
+    if [[ "$@" =~ ^--name=.*\ --type=.*\ --description=.*\ --region=.*\ --publicurl=.*\ --adminurl=.*\ --internalurl=.*$ ]]; then
+        params=`echo "$@" | sed -e 's%--name=\(.*\) --type=\(.*\) --description=\(.*\) --region=\(.*\) --publicurl=\(.*\) --adminurl=\(.*\) --internalurl=\(.*\)%--name=\1|--type=\2|--description=\3|--region=\4|--publicurl=\5|--adminurl=\6|--internalurl=\7%g'`
+    else
+        help
+        exit 1
+    fi
+
+    # parse the cmdline parameters
+    IFS=$"|"
+    parse_param $params
+    unset IFS
+
+    echo "Creating service in keystone ..."
+
+    if [ "x${PARAMS["name"]}" != "x" ]; then
+        # check if service already created, create it if not
+        SERVICE_ID=$(keystone service-get ${PARAMS["name"]} | grep " id " | get_field 2)
+        if [ "x$SERVICE_ID" == "x" ]; then
+            echo "Adding service ${PARAMS["name"]} in keystone ..."
+            SERVICE_ID=$(keystone service-create --name ${PARAMS["name"]} --type ${PARAMS["type"]} --description "${PARAMS["description"]}" | grep " id " | get_field 2)
+        fi
+        echo "Service list:"
+        keystone service-list
+    fi
+
+    if [ "x$SERVICE_ID" != "x" ]; then
+        # create its endpoint
+        keystone endpoint-list | grep $SERVICE_ID | grep ${PARAMS["region"]} | grep ${PARAMS["publicurl"]} | grep ${PARAMS["adminurl"]} | grep ${PARAMS["internalurl"]}
+        if [ $? -eq 1 ]; then
+            echo "Creating endpoint for ${PARAMS["name"]} in keystone ..."
+            keystone endpoint-create --region ${PARAMS["region"]} --service-id $SERVICE_ID --publicurl ${PARAMS["publicurl"]} --adminurl ${PARAMS["adminurl"]} --internalurl ${PARAMS["internalurl"]}
+        fi
+        echo "Endpoints list:"
+        keystone endpoint-list
+    fi
+}
+
+case "$1" in
+    service-create)
+        shift
+        service-create $@
+        ;;
+    user-create)
+        shift
+        user-create $@
+        ;;
+    help)
+        help $@
+        ;;
+    *)
+        help
+        exit 0
+        ;;
+esac
+
+exit 0
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/keystone-explicitly-import-localcontext-from-oslo.me.patch b/meta-stx/recipes-devtools/python/files/python-keystone/keystone-explicitly-import-localcontext-from-oslo.me.patch
new file mode 100644 (file)
index 0000000..5c152e0
--- /dev/null
@@ -0,0 +1,32 @@
+From 0d6b66b2d5314b454a421bd22fcc8173baf0bc95 Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@windriver.com>
+Date: Mon, 20 Oct 2014 15:59:33 -0400
+Subject: [PATCH] keystone: explicitly import localcontext from oslo.messaging
+
+When using apache as a front end to keystone, juno has a problem when
+authenticating clients due to a failure to import localcontext from
+oslo.
+
+We can work around this issue by doing the export explicitly in the
+entry routine versus in the library itself.
+
+Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+---
+ httpd/keystone.py | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/httpd/keystone.py b/httpd/keystone.py
+index f5ce498c5152..b2b9285ad2ab 100644
+--- a/httpd/keystone.py
++++ b/httpd/keystone.py
+@@ -32,6 +32,7 @@ from keystone.common import sql
+ from keystone import config
+ from keystone.openstack.common import log
+ from keystone import service
++from oslo.messaging import localcontext
+ CONF = config.CONF
+-- 
+1.9.1
+
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/keystone-fix-location-of-files-for-tests.patch b/meta-stx/recipes-devtools/python/files/python-keystone/keystone-fix-location-of-files-for-tests.patch
new file mode 100644 (file)
index 0000000..3e73696
--- /dev/null
@@ -0,0 +1,34 @@
+From 205ee3cfa4c7efd39f5fe991b53327c1bd771f97 Mon Sep 17 00:00:00 2001
+From: Keith Holman <Keith.Holman@windriver.com>
+Date: Tue, 3 Jun 2014 16:19:54 -0400
+Subject: [PATCH] keystone: fix location of files for tests
+
+Keystone tests define the location of certificate files
+as the location of the files in the source tree. However,
+when installed on the system these files are put in a
+different location.  This patch provides a symbol, which
+is replaced, for the base path of the location of the test
+files.
+
+Signed-off-by: Keith Holman <Keith.Holman@windriver.com>
+---
+ keystone/tests/test_overrides.conf | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/keystone/tests/test_overrides.conf b/keystone/tests/test_overrides.conf
+index 801b0d2..ba35343 100644
+--- a/keystone/tests/test_overrides.conf
++++ b/keystone/tests/test_overrides.conf
+@@ -21,6 +21,6 @@ debug_cache_backend = True
+ proxies = keystone.tests.test_cache.CacheIsolatingProxy
+ [signing]
+-certfile = ../../examples/pki/certs/signing_cert.pem
+-keyfile = ../../examples/pki/private/signing_key.pem
+-ca_certs = ../../examples/pki/certs/cacert.pem
++certfile = %KEYSTONE_PACKAGE_DIR%/examples/pki/certs/signing_cert.pem
++keyfile = %KEYSTONE_PACKAGE_DIR%/examples/pki/private/signing_key.pem
++ca_certs = %KEYSTONE_PACKAGE_DIR%/examples/pki/certs/cacert.pem
+-- 
+1.9.3
+
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/keystone-init b/meta-stx/recipes-devtools/python/files/python-keystone/keystone-init
new file mode 100644 (file)
index 0000000..db4b4fa
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/bash
+#
+# Basic keystone setup as described on:
+# https://docs.openstack.org/mitaka/install-guide-ubuntu/keystone-install.html
+# https://docs.openstack.org/keystone/pike/install/keystone-install-ubuntu.html
+#
+# Prerequisites: /etc/postgresql/postgresql-init must be run first to create the DB
+#
+# After complete you should be able to query keystone with something like the 
+# following (https://docs.openstack.org/keystone/latest/api_curl_examples.html)
+#
+#curl -i \
+#  -H "Content-Type: application/json" \
+#  -d '
+#{ "auth": {
+#    "identity": {
+#      "methods": ["password"],
+#      "password": {
+#        "user": {
+#          "name": "%ADMIN_USER%",
+#          "domain": { "id": "default" },
+#          "password": "%ADMIN_PASSWORD%"
+#        }
+#      }
+#    }
+#  }
+#}' \
+#  "http://localhost:5000/v3/auth/tokens" ; echo
+
+
+# Substitutions set up at do_install()
+DB_USER=%DB_USER%
+KEYSTONE_USER=%KEYSTONE_USER%
+KEYSTONE_GROUP=%KEYSTONE_GROUP%
+CONTROLLER_IP=%CONTROLLER_IP%
+ADMIN_USER=%ADMIN_USER%
+ADMIN_PASSWORD=%ADMIN_PASSWORD%
+ADMIN_ROLE=%ADMIN_ROLE%
+
+# Create the keystone DB and grant the necessary permissions
+sudo -u postgres psql -c "CREATE DATABASE keystone" 2> /dev/null
+sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE keystone TO ${DB_USER}" 2> /dev/null
+
+keystone-manage db_sync
+
+keystone-manage fernet_setup --keystone-user ${KEYSTONE_USER} --keystone-group ${KEYSTONE_GROUP}
+keystone-manage credential_setup --keystone-user ${KEYSTONE_USER} --keystone-group ${KEYSTONE_GROUP}
+
+keystone-manage bootstrap \
+  --bootstrap-password ${ADMIN_PASSWORD} \
+  --bootstrap-username ${ADMIN_USER} \
+  --bootstrap-project-name admin \
+  --bootstrap-role-name ${ADMIN_ROLE} \
+  --bootstrap-service-name keystone \
+  --bootstrap-region-id RegionOne \
+  --bootstrap-admin-url http://${CONTROLLER_IP}:35357 \
+  --bootstrap-internal-url http://${CONTROLLER_IP}:5000 \
+  --bootstrap-public-url http://${CONTROLLER_IP}:5000
+
+#keystone-manage pki_setup --keystone-user=root --keystone-group=daemon
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/keystone-init.service b/meta-stx/recipes-devtools/python/files/python-keystone/keystone-init.service
new file mode 100644 (file)
index 0000000..b114806
--- /dev/null
@@ -0,0 +1,12 @@
+[Unit]
+Description=Barebones OpenStack keystone initialization
+After=postgresql-init.service
+
+[Service]
+Type=oneshot
+ExecStart=%SYSCONFIGDIR%/keystone/keystone-init
+ExecStartPost=/bin/systemctl --no-reload disable keystone-init.service
+RemainAfterExit=No
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/keystone-remove-git-commands-in-tests.patch b/meta-stx/recipes-devtools/python/files/python-keystone/keystone-remove-git-commands-in-tests.patch
new file mode 100644 (file)
index 0000000..928c0fb
--- /dev/null
@@ -0,0 +1,39 @@
+From d2ee135a3c97f714e6da59adf45a91a2f1632057 Mon Sep 17 00:00:00 2001
+From: Keith Holman <Keith.Holman@windriver.com>
+Date: Wed, 4 Jun 2014 11:30:53 -0400
+Subject: [PATCH] keystone: remove git commands in tests
+
+Keystone tests are designed to be used during development
+and will use git to download the latest keystoneclient from
+source to test against.  However, on the system installation
+we install keystone-client as a separate package, and do not
+want to download an external version to test.  In order to
+test against the version installed as a separate package,
+remove the git calls that clone the keystone-client repository
+and just returned the desired directory.
+
+Signed-off-by: Keith Holman <Keith.Holman@windriver.com>
+---
+ keystone/tests/core.py | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: git/keystone/tests/core.py
+===================================================================
+--- git.orig/keystone/tests/core.py
++++ git/keystone/tests/core.py
+@@ -125,6 +125,7 @@
+     working_dir = os.getcwd()
+     revdir = os.path.join(VENDOR, '%s-%s' % (name, rev.replace('/', '_')))
++    """
+     modcheck = os.path.join(VENDOR, '.%s-%s' % (name, rev.replace('/', '_')))
+     try:
+         if os.path.exists(modcheck):
+@@ -145,6 +146,7 @@
+             fd.write('1')
+     except environment.subprocess.CalledProcessError:
+         LOG.warning(_('Failed to checkout %s'), repo)
++    """
+     os.chdir(working_dir)
+     return revdir
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/keystone-search-in-etc-directory-for-config-files.patch b/meta-stx/recipes-devtools/python/files/python-keystone/keystone-search-in-etc-directory-for-config-files.patch
new file mode 100644 (file)
index 0000000..6f88e17
--- /dev/null
@@ -0,0 +1,41 @@
+From ed3c1f7c8eb90506eda1aafbc6d4de3b9e2abe71 Mon Sep 17 00:00:00 2001
+From: Keith Holman <Keith.Holman@windriver.com>
+Date: Tue, 3 Jun 2014 11:28:23 -0400
+Subject: [PATCH] keystone: search in etc directory for config files
+
+The core.py file in the tests directory is setup to find
+the file as they exist in the source code tree.  When
+deployed some configuration files are moved to the /etc
+directory.  This modification changes the test code to
+find the files in the new location.
+
+Signed-off-by: Keith Holman <Keith.Holman@windriver.com>
+---
+ keystone/tests/core.py | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Index: git/keystone/tests/core.py
+===================================================================
+--- git.orig/keystone/tests/core.py
++++ git/keystone/tests/core.py
+@@ -68,9 +68,9 @@
+ PID = six.text_type(os.getpid())
+ TESTSDIR = os.path.dirname(os.path.abspath(__file__))
+ TESTCONF = os.path.join(TESTSDIR, 'config_files')
+-ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..'))
++ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..'))
+ VENDOR = os.path.join(ROOTDIR, 'vendor')
+-ETCDIR = os.path.join(ROOTDIR, 'etc')
++ETCDIR = "/etc/keystone"
+ def _calc_tmpdir():
+@@ -560,7 +560,7 @@
+     def _paste_config(self, config):
+         if not config.startswith('config:'):
+             test_path = os.path.join(TESTSDIR, config)
+-            etc_path = os.path.join(ROOTDIR, 'etc', config)
++            etc_path = os.path.join(ETCDIR, config)
+             for path in [test_path, etc_path]:
+                 if os.path.exists('%s-paste.ini' % path):
+                     return 'config:%s-paste.ini' % path
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/keystone.conf b/meta-stx/recipes-devtools/python/files/python-keystone/keystone.conf
new file mode 100644 (file)
index 0000000..9ae33f0
--- /dev/null
@@ -0,0 +1,3142 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+[DEFAULT]
+
+#
+# From keystone
+#
+
+# Using this feature is *NOT* recommended. Instead, use the `keystone-manage
+# bootstrap` command. The value of this option is treated as a "shared secret"
+# that can be used to bootstrap Keystone through the API. This "token" does not
+# represent a user (it has no identity), and carries no explicit authorization
+# (it effectively bypasses most authorization checks). If set to `None`, the
+# value is ignored and the `admin_token` middleware is effectively disabled.
+# (string value)
+#admin_token = <None>
+
+# The base public endpoint URL for Keystone that is advertised to clients
+# (NOTE: this does NOT affect how Keystone listens for connections). Defaults
+# to the base host URL of the request. For example, if keystone receives a
+# request to `http://server:5000/v3/users`, then this option will be
+# automatically treated as `http://server:5000`. You should only need to set
+# option if either the value of the base URL contains a path that keystone does
+# not automatically infer (`/prefix/v3`), or if the endpoint should be found on
+# a different host. (uri value)
+#public_endpoint = <None>
+
+# DEPRECATED: The base admin endpoint URL for Keystone that is advertised to
+# clients (NOTE: this does NOT affect how Keystone listens for connections).
+# Defaults to the base host URL of the request. For example, if keystone
+# receives a request to `http://server:35357/v3/users`, then this option
+# will be automatically treated as `http://server:35357`. You should only need
+# to set option if either the value of the base URL contains a path that
+# keystone does not automatically infer (`/prefix/v3`), or if the endpoint
+# should be found on a different host. (uri value)
+# This option is deprecated for removal since R.
+# Its value may be silently ignored in the future.
+# Reason: With the removal of the 2.0 API keystone does not distinguish between
+# admin and public endpoints.
+#admin_endpoint = <None>
+
+# Maximum depth of the project hierarchy, excluding the project acting as a
+# domain at the top of the hierarchy. WARNING: Setting it to a large value may
+# adversely impact performance. (integer value)
+#max_project_tree_depth = 5
+
+# Limit the sizes of user & project ID/names. (integer value)
+#max_param_size = 64
+
+# Similar to `[DEFAULT] max_param_size`, but provides an exception for token
+# values. With Fernet tokens, this can be set as low as 255. With UUID tokens,
+# this should be set to 32. (integer value)
+#max_token_size = 255
+
+# The maximum number of entities that will be returned in a collection. This
+# global limit may be then overridden for a specific driver, by specifying a
+# list_limit in the appropriate section (for example, `[assignment]`). No limit
+# is set by default. In larger deployments, it is recommended that you set this
+# to a reasonable number to prevent operations like listing all users and
+# projects from placing an unnecessary load on the system. (integer value)
+#list_limit = <None>
+
+# If set to true, strict password length checking is performed for password
+# manipulation. If a password exceeds the maximum length, the operation will
+# fail with an HTTP 403 Forbidden error. If set to false, passwords are
+# automatically truncated to the maximum length. (boolean value)
+#strict_password_check = false
+
+# If set to true, then the server will return information in HTTP responses
+# that may allow an unauthenticated or authenticated user to get more
+# information than normal, such as additional details about why authentication
+# failed. This may be useful for debugging but is insecure. (boolean value)
+#insecure_debug = false
+
+# Default `publisher_id` for outgoing notifications. If left undefined,
+# Keystone will default to using the server's host name. (string value)
+#default_publisher_id = <None>
+
+# Define the notification format for identity service events. A `basic`
+# notification only has information about the resource being operated on. A
+# `cadf` notification has the same information, as well as information about
+# the initiator of the event. The `cadf` option is entirely backwards
+# compatible with the `basic` option, but is fully CADF-compliant, and is
+# recommended for auditing use cases. (string value)
+# Possible values:
+# basic - <No description provided>
+# cadf - <No description provided>
+#notification_format = cadf
+
+# You can reduce the number of notifications keystone emits by explicitly
+# opting out. Keystone will not emit notifications that match the patterns
+# expressed in this list. Values are expected to be in the form of
+# `identity.<resource_type>.<operation>`. By default, all notifications related
+# to authentication are automatically suppressed. This field can be set
+# multiple times in order to opt-out of multiple notification topics. For
+# example, the following suppresses notifications describing user creation or
+# successful authentication events: notification_opt_out=identity.user.create
+# notification_opt_out=identity.authenticate.success (multi valued)
+#notification_opt_out = identity.authenticate.success
+#notification_opt_out = identity.authenticate.pending
+#notification_opt_out = identity.authenticate.failed
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+# Note: This option can be changed without restarting.
+#debug = false
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Note: This option can be changed without restarting.
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and
+# Linux platform is used. This option is ignored if log_config_append is set.
+# (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_syslog = false
+
+# Enable journald for logging. If running in a systemd environment you may wish
+# to enable journal support. Doing so will use the journal native protocol
+# which includes structured metadata in addition to log messages.This option is
+# ignored if log_config_append is set. (boolean value)
+#use_journal = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Use JSON formatting for logging. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_json = false
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_stderr = false
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message
+# is DEBUG. (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string
+# value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Interval, number of seconds, of log rate limiting. (integer value)
+#rate_limit_interval = 0
+
+# Maximum number of logged messages per rate_limit_interval. (integer value)
+#rate_limit_burst = 0
+
+# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
+# or empty string. Logs with level greater or equal to rate_limit_except_level
+# are not filtered. An empty string means that all levels are filtered. (string
+# value)
+#rate_limit_except_level = CRITICAL
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size = 30
+
+# The pool size limit for connections expiration policy (integer value)
+#conn_pool_min_size = 2
+
+# The time-to-live in sec of idle connections in the pool (integer value)
+#conn_pool_ttl = 1200
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Possible values:
+# redis - <No description provided>
+# sentinel - <No description provided>
+# dummy - <No description provided>
+#rpc_zmq_matchmaker = redis
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Number of seconds to wait before all pending messages will be sent after
+# closing a socket. The default value of -1 specifies an infinite linger
+# period. The value of 0 specifies no linger period. Pending messages shall be
+# discarded immediately when the socket is closed. Positive values specify an
+# upper bound for the linger period. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
+#zmq_linger = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 300
+
+# Update period in seconds of a name service record about existing target.
+# (integer value)
+#zmq_target_update = 180
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = false
+
+# Use ROUTER remote proxy. (boolean value)
+#use_router_proxy = false
+
+# This option makes direct connections dynamic or static. It makes sense only
+# with use_router_proxy=False which means to use direct connections for direct
+# message types (ignored otherwise). (boolean value)
+#use_dynamic_connections = false
+
+# How many additional connections to a host will be made for failover reasons.
+# This option is actual only in dynamic connections mode. (integer value)
+#zmq_failover_connections = 2
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49153
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#rpc_zmq_serialization = json
+
+# This option configures round-robin mode in zmq socket. True means not keeping
+# a queue when server side disconnects. False means to keep queue and messages
+# even if server is disconnected, when the server appears we send all
+# accumulated messages to it. (boolean value)
+#zmq_immediate = true
+
+# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any
+# other negative value) means to skip any overrides and leave it to OS default;
+# 0 and 1 (or any other positive value) mean to disable and enable the option
+# respectively. (integer value)
+#zmq_tcp_keepalive = -1
+
+# The duration between two keepalive transmissions in idle condition. The unit
+# is platform dependent, for example, seconds in Linux, milliseconds in Windows
+# etc. The default value of -1 (or any other negative value and 0) means to
+# skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_idle = -1
+
+# The number of retransmissions to be carried out before declaring that remote
+# end is not available. The default value of -1 (or any other negative value
+# and 0) means to skip any overrides and leave it to OS default. (integer
+# value)
+#zmq_tcp_keepalive_cnt = -1
+
+# The duration between two successive keepalive retransmissions, if
+# acknowledgement to the previous keepalive transmission is not received. The
+# unit is platform dependent, for example, seconds in Linux, milliseconds in
+# Windows etc. The default value of -1 (or any other negative value and 0)
+# means to skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_intvl = -1
+
+# Maximum number of (green) threads to work concurrently. (integer value)
+#rpc_thread_pool_size = 100
+
+# Expiration timeout in seconds of a sent/received message after which it is
+# not tracked anymore by a client/server. (integer value)
+#rpc_message_ttl = 300
+
+# Wait for message acknowledgements from receivers. This mechanism works only
+# via proxy without PUB/SUB. (boolean value)
+#rpc_use_acks = false
+
+# Number of seconds to wait for an ack from a cast/call. After each retry
+# attempt this timeout is multiplied by some specified multiplier. (integer
+# value)
+#rpc_ack_timeout_base = 15
+
+# Number to multiply base ack timeout by after each retry attempt. (integer
+# value)
+#rpc_ack_timeout_multiplier = 2
+
+# Default number of message sending attempts in case of any problems occurred:
+# positive value N means at most N retries, 0 means no retries, None or -1 (or
+# any other negative values) mean to retry forever. This option is used only if
+# acknowledgments are enabled. (integer value)
+#rpc_retry_attempts = 3
+
+# List of publisher hosts SubConsumer can subscribe on. This option has higher
+# priority than the default publishers list taken from the matchmaker. (list
+# value)
+#subscribe_on =
+
+# Size of executor thread pool when executor is threading or eventlet. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# A URL representing the messaging driver to use and its full configuration.
+# (string value)
+#transport_url = <None>
+
+# DEPRECATED: The messaging driver to use, defaults to rabbit. Other drivers
+# include amqp and zmq. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rpc_backend = rabbit
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = keystone
+
+
+[access_rules_config]
+
+#
+# From keystone
+#
+
+# Entry point for the access rules config backend driver in the
+# `keystone.access_rules_config` namespace.  Keystone only provides a `json`
+# driver, so there is no reason to change this unless you are providing a
+# custom entry point. (string value)
+#driver = json
+
+# Toggle for access rules caching. This has no effect unless global caching is
+# enabled. (boolean value)
+#caching = true
+
+# Time to cache access rule data in seconds. This has no effect unless global
+# caching is enabled. (integer value)
+#cache_time = <None>
+
+# Path to access rules configuration. If not present, no access rule
+# configuration will be loaded and application credential access rules will be
+# unavailable. (string value)
+#rules_file = /etc/keystone/access_rules.json
+
+# Toggles permissive mode for access rules. When enabled, application
+# credentials can be created with any access rules regardless of operator's
+# configuration. (boolean value)
+#permissive = false
+
+
+[application_credential]
+
+#
+# From keystone
+#
+
+# Entry point for the application credential backend driver in the
+# `keystone.application_credential` namespace.  Keystone only provides a `sql`
+# driver, so there is no reason to change this unless you are providing a
+# custom entry point. (string value)
+#driver = sql
+
+# Toggle for application credential caching. This has no effect unless global
+# caching is enabled. (boolean value)
+#caching = true
+
+# Time to cache application credential data in seconds. This has no effect
+# unless global caching is enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of application credentials a user is permitted to create. A
+# value of -1 means unlimited. If a limit is not set, users are permitted to
+# create application credentials at will, which could lead to bloat in the
+# keystone database or open keystone to a DoS attack. (integer value)
+#user_limit = -1
+
+
+[assignment]
+
+#
+# From keystone
+#
+
+# Entry point for the assignment backend driver (where role assignments are
+# stored) in the `keystone.assignment` namespace. Only a SQL driver is supplied
+# by keystone itself. Unless you are writing proprietary drivers for keystone,
+# you do not need to set this option. (string value)
+#driver = sql
+
+# A list of role names which are prohibited from being an implied role. (list
+# value)
+#prohibited_implied_role = admin
+
+
+[auth]
+
+#
+# From keystone
+#
+
+# Allowed authentication methods. Note: You should disable the `external` auth
+# method if you are currently using federation. External auth and federation
+# both use the REMOTE_USER variable. Since both the mapped and external plugin
+# are being invoked to validate attributes in the request environment, it can
+# cause conflicts. (list value)
+#methods = external,password,token,oauth1,mapped,application_credential
+
+# Entry point for the password auth plugin module in the
+# `keystone.auth.password` namespace. You do not need to set this unless you
+# are overriding keystone's own password authentication plugin. (string value)
+#password = <None>
+
+# Entry point for the token auth plugin module in the `keystone.auth.token`
+# namespace. You do not need to set this unless you are overriding keystone's
+# own token authentication plugin. (string value)
+#token = <None>
+
+# Entry point for the external (`REMOTE_USER`) auth plugin module in the
+# `keystone.auth.external` namespace. Supplied drivers are `DefaultDomain` and
+# `Domain`. The default driver is `DefaultDomain`, which assumes that all users
+# identified by the username specified to keystone in the `REMOTE_USER`
+# variable exist within the context of the default domain. The `Domain` option
+# expects an additional environment variable be presented to keystone,
+# `REMOTE_DOMAIN`, containing the domain name of the `REMOTE_USER` (if
+# `REMOTE_DOMAIN` is not set, then the default domain will be used instead).
+# You do not need to set this unless you are taking advantage of "external
+# authentication", where the application server (such as Apache) is handling
+# authentication instead of keystone. (string value)
+#external = <None>
+
+# Entry point for the OAuth 1.0a auth plugin module in the
+# `keystone.auth.oauth1` namespace. You do not need to set this unless you are
+# overriding keystone's own `oauth1` authentication plugin. (string value)
+#oauth1 = <None>
+
+# Entry point for the mapped auth plugin module in the `keystone.auth.mapped`
+# namespace. You do not need to set this unless you are overriding keystone's
+# own `mapped` authentication plugin. (string value)
+#mapped = <None>
+
+# Entry point for the application_credential auth plugin module in the
+# `keystone.auth.application_credential` namespace. You do not need to set this
+# unless you are overriding keystone's own `application_credential`
+# authentication plugin. (string value)
+#application_credential = <None>
+
+
+[cache]
+
+#
+# From oslo.cache
+#
+
+# Prefix for building the configuration dictionary for the cache region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = cache.oslo
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache region.
+# This applies to any cached method that doesn't have an explicit cache
+# expiration time defined for it. (integer value)
+#expiration_time = 600
+
+# Cache backend module. For eventlet-based or environments with hundreds of
+# threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is
+# recommended. For environments with less than 100 threaded servers, Memcached
+# (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is recommended. Test
+# environments with a single instance of the server can use the
+# dogpile.cache.memory backend. (string value)
+# Possible values:
+# oslo_cache.memcache_pool - <No description provided>
+# oslo_cache.dict - <No description provided>
+# dogpile.cache.memcached - <No description provided>
+# dogpile.cache.redis - <No description provided>
+# dogpile.cache.memory - <No description provided>
+# dogpile.cache.null - <No description provided>
+#backend = dogpile.cache.null
+
+# Arguments supplied to the backend module. Specify this option once per
+# argument to be passed to the dogpile.cache backend. Example format:
+# "<argname>:<value>". (multi valued)
+#backend_argument =
+
+# Proxy classes to import that will affect the way the dogpile.cache backend
+# functions. See the dogpile.cache documentation on changing-backend-behavior.
+# (list value)
+#proxies =
+
+# Global toggle for caching. (boolean value)
+#enabled = true
+
+# Extra debugging from the cache backend (cache keys, get/set/delete/etc
+# calls). This is only really useful if you need to see the specific cache-
+# backend get/set/delete calls with the keys/values.  Typically this should be
+# left set to false. (boolean value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port". (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (list value)
+#memcache_servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (integer value)
+#memcache_socket_timeout = 3
+
+# Max total number of open connections to every memcached server.
+# (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
+
+
+[catalog]
+
+#
+# From keystone
+#
+
+# Absolute path to the file used for the templated catalog backend. This option
+# is only used if the `[catalog] driver` is set to `templated`. (string value)
+#template_file = default_catalog.templates
+
+# Entry point for the catalog driver in the `keystone.catalog` namespace.
+# Keystone provides a `sql` option (which supports basic CRUD operations
+# through SQL), a `templated` option (which loads the catalog from a templated
+# catalog file on disk), and an `endpoint_filter.sql` option (which supports
+# arbitrary service catalogs per project). (string value)
+#driver = sql
+
+# Toggle for catalog caching. This has no effect unless global caching is
+# enabled. In a typical deployment, there is no reason to disable this.
+# (boolean value)
+#caching = true
+
+# Time to cache catalog data (in seconds). This has no effect unless global and
+# catalog caching are both enabled. Catalog data (services, endpoints, etc.)
+# typically does not change frequently, and so a longer duration than the
+# global default may be desirable. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a catalog collection.
+# There is typically no reason to set this, as it would be unusual for a
+# deployment to have enough services or endpoints to exceed a reasonable limit.
+# (integer value)
+#list_limit = <None>
+
+
+[cors]
+
+#
+# From oslo.middleware
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
+# slash. Example: https://horizon.example.com (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,Openstack-Auth-Receipt
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual request.
+# (list value)
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name,Openstack-Auth-Receipt
+
+
+[credential]
+
+#
+# From keystone
+#
+
+# Entry point for the credential backend driver in the `keystone.credential`
+# namespace. Keystone only provides a `sql` driver, so there's no reason to
+# change this unless you are providing a custom entry point. (string value)
+#driver = sql
+
+# Entry point for credential encryption and decryption operations in the
+# `keystone.credential.provider` namespace. Keystone only provides a `fernet`
+# driver, so there's no reason to change this unless you are providing a custom
+# entry point to encrypt and decrypt credentials. (string value)
+#provider = fernet
+
+# Directory containing Fernet keys used to encrypt and decrypt credentials
+# stored in the credential backend. Fernet keys used to encrypt credentials
+# have no relationship to Fernet keys used to encrypt Fernet tokens. Both sets
+# of keys should be managed separately and require different rotation policies.
+# Do not share this repository with the repository used to manage keys for
+# Fernet tokens. (string value)
+#key_repository = /etc/keystone/credential-keys/
+
+
+[database]
+
+#
+# From oslo.db
+#
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection = <None>
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set
+# by the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# If True, transparently enables support for handling MySQL Cluster (NDB).
+# (boolean value)
+#mysql_enable_ndb = false
+
+# Connections which have been present in the connection pool longer than this
+# number of seconds will be replaced with a new one the next time they are
+# checked out from the pool. (integer value)
+# Deprecated group/name - [DATABASE]/idle_timeout
+# Deprecated group/name - [database]/idle_timeout
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#connection_recycle_time = 3600
+
+# Minimum number of SQL connections to keep open in a pool. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size = 1
+
+# Maximum number of SQL connections to keep open in a pool. Setting a value of
+# 0 indicates no limit. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = 5
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Minimum value: 0
+# Maximum value: 100
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost.
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+
+[domain_config]
+
+#
+# From keystone
+#
+
+# Entry point for the domain-specific configuration driver in the
+# `keystone.resource.domain_config` namespace. Only a `sql` option is provided
+# by keystone, so there is no reason to set this unless you are providing a
+# custom entry point. (string value)
+#driver = sql
+
+# Toggle for caching of the domain-specific configuration backend. This has no
+# effect unless global caching is enabled. There is normally no reason to
+# disable this. (boolean value)
+#caching = true
+
+# Time-to-live (TTL, in seconds) to cache domain-specific configuration data.
+# This has no effect unless `[domain_config] caching` is enabled. (integer
+# value)
+#cache_time = 300
+
+
+[endpoint_filter]
+
+#
+# From keystone
+#
+
+# Entry point for the endpoint filter driver in the `keystone.endpoint_filter`
+# namespace. Only a `sql` option is provided by keystone, so there is no reason
+# to set this unless you are providing a custom entry point. (string value)
+#driver = sql
+
+# This controls keystone's behavior if the configured endpoint filters do not
+# result in any endpoints for a user + project pair (and therefore a
+# potentially empty service catalog). If set to true, keystone will return the
+# entire service catalog. If set to false, keystone will return an empty
+# service catalog. (boolean value)
+#return_all_endpoints_if_no_filter = true
+
+
+[endpoint_policy]
+
+#
+# From keystone
+#
+
+# Entry point for the endpoint policy driver in the `keystone.endpoint_policy`
+# namespace. Only a `sql` driver is provided by keystone, so there is no reason
+# to set this unless you are providing a custom entry point. (string value)
+#driver = sql
+
+
+[eventlet_server]
+
+#
+# From keystone
+#
+
+# DEPRECATED: The IP address of the network interface for the public service to
+# listen on. (unknown value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/public_bind_host
+# This option is deprecated for removal since K.
+# Its value may be silently ignored in the future.
+# Reason: Support for running keystone under eventlet has been removed in the
+# Newton release. These options remain for backwards compatibility because they
+# are used for URL substitutions.
+#public_bind_host = 0.0.0.0
+
+# DEPRECATED: The port number for the public service to listen on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/public_port
+# This option is deprecated for removal since K.
+# Its value may be silently ignored in the future.
+# Reason: Support for running keystone under eventlet has been removed in the
+# Newton release. These options remain for backwards compatibility because they
+# are used for URL substitutions.
+#public_port = 5000
+
+# DEPRECATED: The IP address of the network interface for the admin service to
+# listen on. (unknown value)
+# Deprecated group/name - [DEFAULT]/bind_host
+# Deprecated group/name - [DEFAULT]/admin_bind_host
+# This option is deprecated for removal since K.
+# Its value may be silently ignored in the future.
+# Reason: Support for running keystone under eventlet has been removed in the
+# Newton release. These options remain for backwards compatibility because they
+# are used for URL substitutions.
+#admin_bind_host = 0.0.0.0
+
+# DEPRECATED: The port number for the admin service to listen on. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# Deprecated group/name - [DEFAULT]/admin_port
+# This option is deprecated for removal since K.
+# Its value may be silently ignored in the future.
+# Reason: Support for running keystone under eventlet has been removed in the
+# Newton release. These options remain for backwards compatibility because they
+# are used for URL substitutions.
+#admin_port = 35357
+
+
+[federation]
+
+#
+# From keystone
+#
+
+# Entry point for the federation backend driver in the `keystone.federation`
+# namespace. Keystone only provides a `sql` driver, so there is no reason to
+# set this option unless you are providing a custom entry point. (string value)
+#driver = sql
+
+# Prefix to use when filtering environment variable names for federated
+# assertions. Matched variables are passed into the federated mapping engine.
+# (string value)
+#assertion_prefix =
+
+# Value to be used to obtain the entity ID of the Identity Provider from the
+# environment. For `mod_shib`, this would be `Shib-Identity-Provider`. For
+# `mod_auth_openidc`, this could be `HTTP_OIDC_ISS`. For `mod_auth_mellon`,
+# this could be `MELLON_IDP`. (string value)
+#remote_id_attribute = <None>
+
+# An arbitrary domain name that is reserved to allow federated ephemeral users
+# to have a domain concept. Note that an admin will not be able to create a
+# domain with this name or update an existing domain to this name. You are not
+# advised to change this value unless you really have to. (string value)
+#federated_domain_name = Federated
+
+# A list of trusted dashboard hosts. Before accepting a Single Sign-On request
+# to return a token, the origin host must be a member of this list. This
+# configuration option may be repeated for multiple values. You must set this
+# in order to use web-based SSO flows. For example:
+# trusted_dashboard=https://acme.example.com/auth/websso
+# trusted_dashboard=https://beta.example.com/auth/websso (multi valued)
+#trusted_dashboard =
+
+# Absolute path to an HTML file used as a Single Sign-On callback handler. This
+# page is expected to redirect the user from keystone back to a trusted
+# dashboard host, by form encoding a token in a POST request. Keystone's
+# default value should be sufficient for most deployments. (string value)
+#sso_callback_template = /etc/keystone/sso_callback_template.html
+
+# Toggle for federation caching. This has no effect unless global caching is
+# enabled. There is typically no reason to disable this. (boolean value)
+#caching = true
+
+
+[fernet_receipts]
+
+#
+# From keystone
+#
+
+# Directory containing Fernet receipt keys. This directory must exist before
+# using `keystone-manage fernet_setup` for the first time, must be writable by
+# the user running `keystone-manage fernet_setup` or `keystone-manage
+# fernet_rotate`, and of course must be readable by keystone's server process.
+# The repository may contain keys in one of three states: a single staged key
+# (always index 0) used for receipt validation, a single primary key (always
+# the highest index) used for receipt creation and validation, and any number
+# of secondary keys (all other index values) used for receipt validation. With
+# multiple keystone nodes, each node must share the same key repository
+# contents, with the exception of the staged key (index 0). It is safe to run
+# `keystone-manage fernet_rotate` once on any one node to promote a staged key
+# (index 0) to be the new primary (incremented from the previous highest
+# index), and produce a new staged key (a new key with index 0); the resulting
+# repository can then be atomically replicated to other nodes without any risk
+# of race conditions (for example, it is safe to run `keystone-manage
+# fernet_rotate` on host A, wait any amount of time, create a tarball of the
+# directory on host A, unpack it on host B to a temporary location, and
+# atomically move (`mv`) the directory into place on host B). Running
+# `keystone-manage fernet_rotate` *twice* on a key repository without syncing
+# other nodes will result in receipts that can not be validated by all nodes.
+# (string value)
+#key_repository = /etc/keystone/fernet-keys/
+
+# This controls how many keys are held in rotation by `keystone-manage
+# fernet_rotate` before they are discarded. The default value of 3 means that
+# keystone will maintain one staged key (always index 0), one primary key (the
+# highest numerical index), and one secondary key (every other index).
+# Increasing this value means that additional secondary keys will be kept in
+# the rotation. (integer value)
+# Minimum value: 1
+#max_active_keys = 3
+
+
+[fernet_tokens]
+
+#
+# From keystone
+#
+
+# Directory containing Fernet token keys. This directory must exist before
+# using `keystone-manage fernet_setup` for the first time, must be writable by
+# the user running `keystone-manage fernet_setup` or `keystone-manage
+# fernet_rotate`, and of course must be readable by keystone's server process.
+# The repository may contain keys in one of three states: a single staged key
+# (always index 0) used for token validation, a single primary key (always the
+# highest index) used for token creation and validation, and any number of
+# secondary keys (all other index values) used for token validation. With
+# multiple keystone nodes, each node must share the same key repository
+# contents, with the exception of the staged key (index 0). It is safe to run
+# `keystone-manage fernet_rotate` once on any one node to promote a staged key
+# (index 0) to be the new primary (incremented from the previous highest
+# index), and produce a new staged key (a new key with index 0); the resulting
+# repository can then be atomically replicated to other nodes without any risk
+# of race conditions (for example, it is safe to run `keystone-manage
+# fernet_rotate` on host A, wait any amount of time, create a tarball of the
+# directory on host A, unpack it on host B to a temporary location, and
+# atomically move (`mv`) the directory into place on host B). Running
+# `keystone-manage fernet_rotate` *twice* on a key repository without syncing
+# other nodes will result in tokens that can not be validated by all nodes.
+# (string value)
+#key_repository = /etc/keystone/fernet-keys/
+
+# This controls how many keys are held in rotation by `keystone-manage
+# fernet_rotate` before they are discarded. The default value of 3 means that
+# keystone will maintain one staged key (always index 0), one primary key (the
+# highest numerical index), and one secondary key (every other index).
+# Increasing this value means that additional secondary keys will be kept in
+# the rotation. (integer value)
+# Minimum value: 1
+#max_active_keys = 3
+
+
+[healthcheck]
+
+#
+# From oslo.middleware
+#
+
+# DEPRECATED: The path to respond to healthcheck requests on. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#path = /healthcheck
+
+# Show more detailed information as part of the response (boolean value)
+#detailed = false
+
+# Additional backends that can perform health checks and report that
+# information back as part of a request. (list value)
+#backends =
+
+# Check the presence of a file to determine if an application is running on a
+# port. Used by DisableByFileHealthcheck plugin. (string value)
+#disable_by_file_path = <None>
+
+# Check the presence of a file based on a port to determine if an application
+# is running on a port. Expects a "port:path" list of strings. Used by
+# DisableByFilesPortsHealthcheck plugin. (list value)
+#disable_by_file_paths =
+
+
+[identity]
+
+#
+# From keystone
+#
+
+# This references the domain to use for all Identity API v2 requests (which are
+# not aware of domains). A domain with this ID can optionally be created for
+# you by `keystone-manage bootstrap`. The domain referenced by this ID cannot
+# be deleted on the v3 API, to prevent accidentally breaking the v2 API. There
+# is nothing special about this domain, other than the fact that it must exist
+# in order to maintain support for your v2 clients. There is typically no
+# reason to change this value. (string value)
+#default_domain_id = default
+
+# A subset (or all) of domains can have their own identity driver, each with
+# their own partial configuration options, stored in either the resource
+# backend or in a file in a domain configuration directory (depending on the
+# setting of `[identity] domain_configurations_from_database`). Only values
+# specific to the domain need to be specified in this manner. This feature is
+# disabled by default, but may be enabled by default in a future release; set
+# to true to enable. (boolean value)
+#domain_specific_drivers_enabled = false
+
+# By default, domain-specific configuration data is read from files in the
+# directory identified by `[identity] domain_config_dir`. Enabling this
+# configuration option allows you to instead manage domain-specific
+# configurations through the API, which are then persisted in the backend
+# (typically, a SQL database), rather than using configuration files on disk.
+# (boolean value)
+#domain_configurations_from_database = false
+
+# Absolute path where keystone should locate domain-specific `[identity]`
+# configuration files. This option has no effect unless `[identity]
+# domain_specific_drivers_enabled` is set to true. There is typically no reason
+# to change this value. (string value)
+#domain_config_dir = /etc/keystone/domains
+
+# Entry point for the identity backend driver in the `keystone.identity`
+# namespace. Keystone provides a `sql` and `ldap` driver. This option is also
+# used as the default driver selection (along with the other configuration
+# variables in this section) in the event that `[identity]
+# domain_specific_drivers_enabled` is enabled, but no applicable domain-
+# specific configuration is defined for the domain in question. Unless your
+# deployment primarily relies on `ldap` AND is not using domain-specific
+# configuration, you should typically leave this set to `sql`. (string value)
+#driver = sql
+
+# Toggle for identity caching. This has no effect unless global caching is
+# enabled. There is typically no reason to disable this. (boolean value)
+#caching = true
+
+# Time to cache identity data (in seconds). This has no effect unless global
+# and identity caching are enabled. (integer value)
+#cache_time = 600
+
+# Maximum allowed length for user passwords. Decrease this value to improve
+# performance. Changing this value does not affect existing passwords. (integer
+# value)
+# Maximum value: 4096
+#max_password_length = 4096
+
+# Maximum number of entities that will be returned in an identity collection.
+# (integer value)
+#list_limit = <None>
+
+# The password hashing algorithm to use for passwords stored within keystone.
+# (string value)
+# Possible values:
+# bcrypt - <No description provided>
+# scrypt - <No description provided>
+# pbkdf2_sha512 - <No description provided>
+#password_hash_algorithm = bcrypt
+
+# This option represents a trade off between security and performance. Higher
+# values lead to slower performance, but higher security. Changing this option
+# will only affect newly created passwords as existing password hashes already
+# have a fixed number of rounds applied, so it is safe to tune this option in a
+# running cluster.  The default for bcrypt is 12, must be between 4 and 31,
+# inclusive.  The default for scrypt is 16, must be within `range(1,32)`.  The
+# default for pbkdf2_sha512 is 60000, must be within `range(1,1<<32)`  WARNING:
+# If using scrypt, increasing this value increases BOTH time AND memory
+# requirements to hash a password. (integer value)
+#password_hash_rounds = <None>
+
+# Optional block size to pass to scrypt hash function (the `r` parameter).
+# Useful for tuning scrypt to optimal performance for your CPU architecture.
+# This option is only used when the `password_hash_algorithm` option is set to
+# `scrypt`. Defaults to 8. (integer value)
+#scrypt_block_size = <None>
+
+# Optional parallelism to pass to scrypt hash function (the `p` parameter).
+# This option is only used when the `password_hash_algorithm` option is set to
+# `scrypt`. Defaults to 1. (integer value)
+#scrypt_parallelism = <None>
+
+# Number of bytes to use in scrypt and pbkdf2_sha512 hashing salt.  Default for
+# scrypt is 16 bytes. Default for pbkdf2_sha512 is 16 bytes.  Limited to a
+# maximum of 96 bytes due to the size of the column used to store password
+# hashes. (integer value)
+# Minimum value: 0
+# Maximum value: 96
+#salt_bytesize = <None>
+
+
+[identity_mapping]
+
+#
+# From keystone
+#
+
+# Entry point for the identity mapping backend driver in the
+# `keystone.identity.id_mapping` namespace. Keystone only provides a `sql`
+# driver, so there is no reason to change this unless you are providing a
+# custom entry point. (string value)
+#driver = sql
+
+# Entry point for the public ID generator for user and group entities in the
+# `keystone.identity.id_generator` namespace. The Keystone identity mapper only
+# supports generators that produce 64 bytes or less. Keystone only provides a
+# `sha256` entry point, so there is no reason to change this value unless
+# you're providing a custom entry point. (string value)
+#generator = sha256
+
+# The format of user and group IDs changed in Juno for backends that do not
+# generate UUIDs (for example, LDAP), with keystone providing a hash mapping to
+# the underlying attribute in LDAP. By default this mapping is disabled, which
+# ensures that existing IDs will not change. Even when the mapping is enabled
+# by using domain-specific drivers (`[identity]
+# domain_specific_drivers_enabled`), any users and groups from the default
+# domain being handled by LDAP will still not be mapped to ensure their IDs
+# remain backward compatible. Setting this value to false will enable the new
+# mapping for all backends, including the default LDAP driver. It is only
+# guaranteed to be safe to enable this option if you do not already have
+# assignments for users and groups from the default LDAP domain, and you
+# consider it to be acceptable for Keystone to provide the different IDs to
+# clients than it did previously (existing IDs in the API will suddenly
+# change). Typically this means that the only time you can set this value to
+# false is when configuring a fresh installation, although that is the
+# recommended value. (boolean value)
+#backward_compatible_ids = true
+
+
+[jwt_tokens]
+
+#
+# From keystone
+#
+
+# Directory containing public keys for validating JWS token signatures. This
+# directory must exist in order for keystone's server process to start. It must
+# also be readable by keystone's server process. It must contain at least one
+# public key that corresponds to a private key in `keystone.conf [jwt_tokens]
+# jws_private_key_repository`. This option is only applicable in deployments
+# issuing JWS tokens and setting `keystone.conf [tokens] provider = jws`.
+# (string value)
+#jws_public_key_repository = /etc/keystone/jws-keys/public
+
+# Directory containing private keys for signing JWS tokens. This directory must
+# exist in order for keystone's server process to start. It must also be
+# readable by keystone's server process. It must contain at least one private
+# key that corresponds to a public key in `keystone.conf [jwt_tokens]
+# jws_public_key_repository`. In the event there are multiple private keys in
+# this directory, keystone will use a key named `private.pem` to sign tokens.
+# In the future, keystone may support the ability to sign tokens with multiple
+# private keys. For now, only a key named `private.pem` within this directory
+# is required to issue JWS tokens. This option is only applicable in
+# deployments issuing JWS tokens and setting `keystone.conf [tokens] provider =
+# jws`. (string value)
+#jws_private_key_repository = /etc/keystone/jws-keys/private
+
+
+[ldap]
+
+#
+# From keystone
+#
+
+# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified
+# as a comma separated string. The first URL to successfully bind is used for
+# the connection. (string value)
+#url = ldap://localhost
+
+# The user name of the administrator bind DN to use when querying the LDAP
+# server, if your LDAP server requires it. (string value)
+#user = <None>
+
+# The password of the administrator bind DN to use when querying the LDAP
+# server, if your LDAP server requires it. (string value)
+#password = <None>
+
+# The default LDAP server suffix to use, if a DN is not defined via either
+# `[ldap] user_tree_dn` or `[ldap] group_tree_dn`. (string value)
+#suffix = cn=example,cn=com
+
+# The search scope which defines how deep to search within the search base. A
+# value of `one` (representing `oneLevel` or `singleLevel`) indicates a search
+# of objects immediately below the base object, but does not include the
+# base object itself. A value of `sub` (representing `subtree` or
+# `wholeSubtree`) indicates a search of both the base object itself and the
+# entire subtree below it. (string value)
+# Possible values:
+# one - <No description provided>
+# sub - <No description provided>
+#query_scope = one
+
+# Defines the maximum number of results per page that keystone should request
+# from the LDAP server when listing objects. A value of zero (`0`) disables
+# paging. (integer value)
+# Minimum value: 0
+#page_size = 0
+
+# The LDAP dereferencing option to use for queries involving aliases. A value
+# of `default` falls back to using default dereferencing behavior configured by
+# your `ldap.conf`. A value of `never` prevents aliases from being dereferenced
+# at all. A value of `searching` dereferences aliases only after name
+# resolution. A value of `finding` dereferences aliases only during name
+# resolution. A value of `always` dereferences aliases in all cases. (string
+# value)
+# Possible values:
+# never - <No description provided>
+# searching - <No description provided>
+# always - <No description provided>
+# finding - <No description provided>
+# default - <No description provided>
+#alias_dereferencing = default
+
+# Sets the LDAP debugging level for LDAP calls. A value of 0 means that
+# debugging is not enabled. This value is a bitmask, consult your LDAP
+# documentation for possible values. (integer value)
+# Minimum value: -1
+#debug_level = <None>
+
+# Sets keystone's referral chasing behavior across directory partitions. If
+# left unset, the system's default behavior will be used. (boolean value)
+#chase_referrals = <None>
+
+# The search base to use for users. Defaults to the `[ldap] suffix` value.
+# (string value)
+#user_tree_dn = <None>
+
+# The LDAP search filter to use for users. (string value)
+#user_filter = <None>
+
+# The LDAP object class to use for users. (string value)
+#user_objectclass = inetOrgPerson
+
+# The LDAP attribute mapped to user IDs in keystone. This must NOT be a
+# multivalued attribute. User IDs are expected to be globally unique across
+# keystone domains and URL-safe. (string value)
+#user_id_attribute = cn
+
+# The LDAP attribute mapped to user names in keystone. User names are expected
+# to be unique only within a keystone domain and are not expected to be URL-
+# safe. (string value)
+#user_name_attribute = sn
+
+# The LDAP attribute mapped to user descriptions in keystone. (string value)
+#user_description_attribute = description
+
+# The LDAP attribute mapped to user emails in keystone. (string value)
+#user_mail_attribute = mail
+
+# The LDAP attribute mapped to user passwords in keystone. (string value)
+#user_pass_attribute = userPassword
+
+# The LDAP attribute mapped to the user enabled attribute in keystone. If
+# setting this option to `userAccountControl`, then you may be interested in
+# setting `[ldap] user_enabled_mask` and `[ldap] user_enabled_default` as well.
+# (string value)
+#user_enabled_attribute = enabled
+
+# Logically negate the boolean value of the enabled attribute obtained from the
+# LDAP server. Some LDAP servers use a boolean lock attribute where "true"
+# means an account is disabled. Setting `[ldap] user_enabled_invert = true`
+# will allow these lock attributes to be used. This option will have no effect
+# if either the `[ldap] user_enabled_mask` or `[ldap] user_enabled_emulation`
+# options are in use. (boolean value)
+#user_enabled_invert = false
+
+# Bitmask integer to select which bit indicates the enabled value if the LDAP
+# server represents "enabled" as a bit on an integer rather than as a discrete
+# boolean. A value of `0` indicates that the mask is not used. If this is not
+# set to `0` the typical value is `2`. This is typically used when `[ldap]
+# user_enabled_attribute = userAccountControl`. Setting this option causes
+# keystone to ignore the value of `[ldap] user_enabled_invert`. (integer value)
+# Minimum value: 0
+#user_enabled_mask = 0
+
+# The default value to enable users. This should match an appropriate integer
+# value if the LDAP server uses non-boolean (bitmask) values to indicate if a
+# user is enabled or disabled. If this is not set to `True`, then the typical
+# value is `512`. This is typically used when `[ldap] user_enabled_attribute =
+# userAccountControl`. (string value)
+#user_enabled_default = True
+
+# List of user attributes to ignore on create and update, or whether a specific
+# user attribute should be filtered for list or show user. (list value)
+#user_attribute_ignore = default_project_id
+
+# The LDAP attribute mapped to a user's default_project_id in keystone. This is
+# most commonly used when keystone has write access to LDAP. (string value)
+#user_default_project_id_attribute = <None>
+
+# If enabled, keystone uses an alternative method to determine if a user is
+# enabled or not by checking if they are a member of the group defined by the
+# `[ldap] user_enabled_emulation_dn` option. Enabling this option causes
+# keystone to ignore the value of `[ldap] user_enabled_invert`. (boolean value)
+#user_enabled_emulation = false
+
+# DN of the group entry to hold enabled users when using enabled emulation.
+# Setting this option has no effect unless `[ldap] user_enabled_emulation` is
+# also enabled. (string value)
+#user_enabled_emulation_dn = <None>
+
+# Use the `[ldap] group_member_attribute` and `[ldap] group_objectclass`
+# settings to determine membership in the emulated enabled group. Enabling this
+# option has no effect unless `[ldap] user_enabled_emulation` is also enabled.
+# (boolean value)
+#user_enabled_emulation_use_group_config = false
+
+# A list of LDAP attribute to keystone user attribute pairs used for mapping
+# additional attributes to users in keystone. The expected format is
+# `<ldap_attr>:<user_attr>`, where `ldap_attr` is the attribute in the LDAP
+# object and `user_attr` is the attribute which should appear in the identity
+# API. (list value)
+#user_additional_attribute_mapping =
+
+# The search base to use for groups. Defaults to the `[ldap] suffix` value.
+# (string value)
+#group_tree_dn = <None>
+
+# The LDAP search filter to use for groups. (string value)
+#group_filter = <None>
+
+# The LDAP object class to use for groups. If setting this option to
+# `posixGroup`, you may also be interested in enabling the `[ldap]
+# group_members_are_ids` option. (string value)
+#group_objectclass = groupOfNames
+
+# The LDAP attribute mapped to group IDs in keystone. This must NOT be a
+# multivalued attribute. Group IDs are expected to be globally unique across
+# keystone domains and URL-safe. (string value)
+#group_id_attribute = cn
+
+# The LDAP attribute mapped to group names in keystone. Group names are
+# expected to be unique only within a keystone domain and are not expected to
+# be URL-safe. (string value)
+#group_name_attribute = ou
+
+# The LDAP attribute used to indicate that a user is a member of the group.
+# (string value)
+#group_member_attribute = member
+
+# Enable this option if the members of the group object class are keystone user
+# IDs rather than LDAP DNs. This is the case when using `posixGroup` as the
+# group object class in Open Directory. (boolean value)
+#group_members_are_ids = false
+
+# The LDAP attribute mapped to group descriptions in keystone. (string value)
+#group_desc_attribute = description
+
+# List of group attributes to ignore on create and update, or whether a
+# specific group attribute should be filtered for list or show group. (list
+# value)
+#group_attribute_ignore =
+
+# A list of LDAP attribute to keystone group attribute pairs used for mapping
+# additional attributes to groups in keystone. The expected format is
+# `<ldap_attr>:<group_attr>`, where `ldap_attr` is the attribute in the LDAP
+# object and `group_attr` is the attribute which should appear in the identity
+# API. (list value)
+#group_additional_attribute_mapping =
+
+# If enabled, group queries will use Active Directory specific filters for
+# nested groups. (boolean value)
+#group_ad_nesting = false
+
+# An absolute path to a CA certificate file to use when communicating with LDAP
+# servers. This option will take precedence over `[ldap] tls_cacertdir`, so
+# there is no reason to set both. (string value)
+#tls_cacertfile = <None>
+
+# An absolute path to a CA certificate directory to use when communicating with
+# LDAP servers. There is no reason to set this option if you've also set
+# `[ldap] tls_cacertfile`. (string value)
+#tls_cacertdir = <None>
+
+# Enable TLS when communicating with LDAP servers. You should also set the
+# `[ldap] tls_cacertfile` and `[ldap] tls_cacertdir` options when using this
+# option. Do not set this option if you are using LDAP over SSL (LDAPS) instead
+# of TLS. (boolean value)
+#use_tls = false
+
+# Specifies which checks to perform against client certificates on incoming TLS
+# sessions. If set to `demand`, then a certificate will always be requested and
+# required from the LDAP server. If set to `allow`, then a certificate will
+# always be requested but not required from the LDAP server. If set to `never`,
+# then a certificate will never be requested. (string value)
+# Possible values:
+# demand - <No description provided>
+# never - <No description provided>
+# allow - <No description provided>
+#tls_req_cert = demand
+
+# The connection timeout to use with the LDAP server. A value of `-1` means
+# that connections will never timeout. (integer value)
+# Minimum value: -1
+#connection_timeout = -1
+
+# Enable LDAP connection pooling for queries to the LDAP server. There is
+# typically no reason to disable this. (boolean value)
+#use_pool = true
+
+# The size of the LDAP connection pool. This option has no effect unless
+# `[ldap] use_pool` is also enabled. (integer value)
+# Minimum value: 1
+#pool_size = 10
+
+# The maximum number of times to attempt reconnecting to the LDAP server before
+# aborting. A value of zero prevents retries. This option has no effect unless
+# `[ldap] use_pool` is also enabled. (integer value)
+# Minimum value: 0
+#pool_retry_max = 3
+
+# The number of seconds to wait before attempting to reconnect to the LDAP
+# server. This option has no effect unless `[ldap] use_pool` is also enabled.
+# (floating point value)
+#pool_retry_delay = 0.1
+
+# The connection timeout to use when pooling LDAP connections. A value of `-1`
+# means that connections will never timeout. This option has no effect unless
+# `[ldap] use_pool` is also enabled. (integer value)
+# Minimum value: -1
+#pool_connection_timeout = -1
+
+# The maximum connection lifetime to the LDAP server in seconds. When this
+# lifetime is exceeded, the connection will be unbound and removed from the
+# connection pool. This option has no effect unless `[ldap] use_pool` is also
+# enabled. (integer value)
+# Minimum value: 1
+#pool_connection_lifetime = 600
+
+# Enable LDAP connection pooling for end user authentication. There is
+# typically no reason to disable this. (boolean value)
+#use_auth_pool = true
+
+# The size of the connection pool to use for end user authentication. This
+# option has no effect unless `[ldap] use_auth_pool` is also enabled. (integer
+# value)
+# Minimum value: 1
+#auth_pool_size = 100
+
+# The maximum end user authentication connection lifetime to the LDAP server in
+# seconds. When this lifetime is exceeded, the connection will be unbound and
+# removed from the connection pool. This option has no effect unless `[ldap]
+# use_auth_pool` is also enabled. (integer value)
+# Minimum value: 1
+#auth_pool_connection_lifetime = 60
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# DEPRECATED: Host to locate redis. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#host = 127.0.0.1
+
+# DEPRECATED: Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#port = 6379
+
+# DEPRECATED: Password for Redis server (optional). (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#password =
+
+# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g.,
+# [host:port, host1:port ... ] (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 2000
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations. (integer value)
+#socket_timeout = 10000
+
+
+[memcache]
+
+#
+# From keystone
+#
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. This is used by the key value store system. (integer value)
+#dead_retry = 300
+
+# Timeout in seconds for every call to a server. This is used by the key value
+# store system. (integer value)
+#socket_timeout = 3
+
+# Max total number of open connections to every memcached server. This is used
+# by the key value store system. (integer value)
+#pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. This is used by the key value store system. (integer value)
+#pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. This is used by the key value store system. (integer value)
+#pool_connection_get_timeout = 10
+
+
+[oauth1]
+
+#
+# From keystone
+#
+
+# Entry point for the OAuth backend driver in the `keystone.oauth1` namespace.
+# Typically, there is no reason to set this option unless you are providing a
+# custom entry point. (string value)
+#driver = sql
+
+# Number of seconds for the OAuth Request Token to remain valid after being
+# created. This is the amount of time the user has to authorize the token.
+# Setting this option to zero means that request tokens will last forever.
+# (integer value)
+# Minimum value: 0
+#request_token_duration = 28800
+
+# Number of seconds for the OAuth Access Token to remain valid after being
+# created. This is the amount of time the consumer has to interact with the
+# service provider (which is typically keystone). Setting this option to zero
+# means that access tokens will last forever. (integer value)
+# Minimum value: 0
+#access_token_duration = 86400
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# Name for the AMQP container. must be globally unique. Defaults to a generated
+# UUID (string value)
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+#trace = false
+
+# Attempt to connect via SSL. If no other ssl-related parameters are given, it
+# will use the system's CA-bundle to verify the server's certificate. (boolean
+# value)
+#ssl = false
+
+# CA certificate PEM file used to verify the server's certificate (string
+# value)
+#ssl_ca_file =
+
+# Self-identifying certificate PEM file for client authentication (string
+# value)
+#ssl_cert_file =
+
+# Private key PEM file used to sign ssl_cert_file certificate (optional)
+# (string value)
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+#ssl_key_password = <None>
+
+# By default SSL checks that the name in the server's certificate matches the
+# hostname in the transport_url. In some configurations it may be preferable to
+# use the virtual hostname instead, for example if the server uses the Server
+# Name Indication TLS extension (rfc6066) to provide a certificate per virtual
+# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the
+# virtual host name instead of the DNS name. (boolean value)
+#ssl_verify_vhost = false
+
+# DEPRECATED: Accept clients using either SSL or plain TCP (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Not applicable - not a SSL server
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+#sasl_config_name =
+
+# SASL realm to use if no realm present in username (string value)
+#sasl_default_realm =
+
+# DEPRECATED: User name for message broker authentication (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Should use configuration option transport_url to provide the
+# username.
+#username =
+
+# DEPRECATED: Password for message broker authentication (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Should use configuration option transport_url to provide the
+# password.
+#password =
+
+# Seconds to pause before attempting to re-connect. (integer value)
+# Minimum value: 1
+#connection_retry_interval = 1
+
+# Increase the connection_retry_interval by this many seconds after each
+# unsuccessful failover attempt. (integer value)
+# Minimum value: 0
+#connection_retry_backoff = 2
+
+# Maximum limit for connection_retry_interval + connection_retry_backoff
+# (integer value)
+# Minimum value: 1
+#connection_retry_interval_max = 30
+
+# Time to pause between re-connecting an AMQP 1.0 link that failed due to a
+# recoverable error. (integer value)
+# Minimum value: 1
+#link_retry_delay = 10
+
+# The maximum number of attempts to re-send a reply message which failed due to
+# a recoverable error. (integer value)
+# Minimum value: -1
+#default_reply_retry = 0
+
+# The deadline for an rpc reply message delivery. (integer value)
+# Minimum value: 5
+#default_reply_timeout = 30
+
+# The deadline for an rpc cast or call message delivery. Only used when caller
+# does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_send_timeout = 30
+
+# The deadline for a sent notification message delivery. Only used when caller
+# does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_notify_timeout = 30
+
+# The duration to schedule a purge of idle sender links. Detach link after
+# expiry. (integer value)
+# Minimum value: 1
+#default_sender_link_timeout = 600
+
+# Indicates the addressing mode used by the driver.
+# Permitted values:
+# 'legacy'   - use legacy non-routable addressing
+# 'routable' - use routable addresses
+# 'dynamic'  - use legacy addresses if the message bus does not support routing
+# otherwise use routable addressing (string value)
+#addressing_mode = dynamic
+
+# Enable virtual host support for those message buses that do not natively
+# support virtual hosting (such as qpidd). When set to true the virtual host
+# name will be added to all message bus addresses, effectively creating a
+# private 'subnet' per virtual host. Set to False if the message bus supports
+# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative
+# as the name of the virtual host. (boolean value)
+#pseudo_vhost = true
+
+# address prefix used when sending to a specific server (string value)
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+#group_request_prefix = unicast
+
+# Address prefix for all generated RPC addresses (string value)
+#rpc_address_prefix = openstack.org/om/rpc
+
+# Address prefix for all generated Notification addresses (string value)
+#notify_address_prefix = openstack.org/om/notify
+
+# Appended to the address prefix when sending a fanout message. Used by the
+# message bus to identify fanout messages. (string value)
+#multicast_address = multicast
+
+# Appended to the address prefix when sending to a particular RPC/Notification
+# server. Used by the message bus to identify messages sent to a single
+# destination. (string value)
+#unicast_address = unicast
+
+# Appended to the address prefix when sending to a group of consumers. Used by
+# the message bus to identify messages that should be delivered in a round-
+# robin fashion across consumers. (string value)
+#anycast_address = anycast
+
+# Exchange name used in notification addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_notification_exchange if set
+# else control_exchange if set
+# else 'notify' (string value)
+#default_notification_exchange = <None>
+
+# Exchange name used in RPC addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_rpc_exchange if set
+# else control_exchange if set
+# else 'rpc' (string value)
+#default_rpc_exchange = <None>
+
+# Window size for incoming RPC Reply messages. (integer value)
+# Minimum value: 1
+#reply_link_credit = 200
+
+# Window size for incoming RPC Request messages (integer value)
+# Minimum value: 1
+#rpc_server_credit = 100
+
+# Window size for incoming Notification messages (integer value)
+# Minimum value: 1
+#notify_server_credit = 100
+
+# Send messages of this type pre-settled.
+# Pre-settled messages will not receive acknowledgement
+# from the peer. Note well: pre-settled messages may be
+# silently discarded if the delivery fails.
+# Permitted values:
+# 'rpc-call' - send RPC Calls pre-settled
+# 'rpc-reply' - send RPC Replies pre-settled
+# 'rpc-cast' - Send RPC Casts pre-settled
+# 'notify'   - Send Notifications pre-settled
+#  (multi valued)
+#pre_settled = rpc-cast
+#pre_settled = rpc-reply
+
+
+[oslo_messaging_kafka]
+
+#
+# From oslo.messaging
+#
+
+# DEPRECATED: Default Kafka broker Host (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#kafka_default_host = localhost
+
+# DEPRECATED: Default Kafka broker Port (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#kafka_default_port = 9092
+
+# Max fetch bytes of Kafka consumer (integer value)
+#kafka_max_fetch_bytes = 1048576
+
+# Default timeout(s) for Kafka consumers (floating point value)
+#kafka_consumer_timeout = 1.0
+
+# Pool Size for Kafka Consumers (integer value)
+#pool_size = 10
+
+# The pool size limit for connections expiration policy (integer value)
+#conn_pool_min_size = 2
+
+# The time-to-live in sec of idle connections in the pool (integer value)
+#conn_pool_ttl = 1200
+
+# Group id for Kafka consumer. Consumers in one group will coordinate message
+# consumption (string value)
+#consumer_group = oslo_messaging_consumer
+
+# Upper bound on the delay for KafkaProducer batching in seconds (floating
+# point value)
+#producer_batch_timeout = 0.0
+
+# Size of batch for the producer async send (integer value)
+#producer_batch_size = 16384
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+# The maximum number of attempts to re-send a notification message which failed
+# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite
+# (integer value)
+#retry = -1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete = false
+
+# Enable SSL (boolean value)
+#ssl = <None>
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
+#ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
+#ssl_key_file =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
+#ssl_cert_file =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
+#ssl_ca_file =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not
+# be used. This option may not be available in future versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before giving up on sending it its
+# replies. This value should not be longer than rpc_response_timeout. (integer value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than
+# one RabbitMQ node is provided in config. (string value)
+# Possible values:
+# round-robin - <No description provided>
+# shuffle - <No description provided>
+#kombu_failover_strategy = round-robin
+
+# DEPRECATED: The RabbitMQ broker address where a single node is used. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_host = localhost
+
+# DEPRECATED: The RabbitMQ broker port where a single node is used. (port
+# value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_port = 5672
+
+# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# DEPRECATED: The RabbitMQ userid. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_userid = guest
+
+# DEPRECATED: The RabbitMQ password. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Possible values:
+# PLAIN - <No description provided>
+# AMQPLAIN - <No description provided>
+# RABBIT-CR-DEMO - <No description provided>
+#rabbit_login_method = AMQPLAIN
+
+# DEPRECATED: The RabbitMQ virtual host. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ. (integer
+# value)
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
+
+# DEPRECATED: Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue.
+# If you just want to make sure that all queues (except those with auto-
+# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
+# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically
+# deleted. The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer
+# value)
+#heartbeat_timeout_threshold = 60
+
+# How many times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
+#fake_rabbit = false
+
+# Maximum number of channels to allow (integer value)
+#channel_max = <None>
+
+# The maximum byte size for an AMQP frame (integer value)
+#frame_max = <None>
+
+# How often to send heartbeats for consumer's connections (integer value)
+#heartbeat_interval = 3
+
+# Arguments passed to ssl.wrap_socket (dict value)
+#ssl_options = <None>
+
+# Set socket timeout in seconds for connection's socket (floating point value)
+#socket_timeout = 0.25
+
+# Set TCP_USER_TIMEOUT in seconds for connection's socket (floating point
+# value)
+#tcp_user_timeout = 0.25
+
+# Set delay for reconnection to some host which has connection error (floating
+# point value)
+#host_connection_reconnect_delay = 0.25
+
+# Connection factory implementation (string value)
+# Possible values:
+# new - <No description provided>
+# single - <No description provided>
+# read_write - <No description provided>
+#connection_factory = single
+
+# Maximum number of connections to keep queued. (integer value)
+#pool_max_size = 30
+
+# Maximum number of connections to create above `pool_max_size`. (integer
+# value)
+#pool_max_overflow = 0
+
+# Default number of seconds to wait for a connections to available (integer
+# value)
+#pool_timeout = 30
+
+# Lifetime of a connection (since creation) in seconds or None for no
+# recycling. Expired connections are closed on acquire. (integer value)
+#pool_recycle = 600
+
+# Threshold at which inactive (since release) connections are considered stale
+# in seconds or None for no staleness. Stale connections are closed on acquire.
+# (integer value)
+#pool_stale = 60
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#default_serializer_type = json
+
+# Persist notification messages. (boolean value)
+#notification_persistence = false
+
+# Exchange name for sending notifications (string value)
+#default_notification_exchange = ${control_exchange}_notification
+
+# Max number of not acknowledged message which RabbitMQ can send to
+# notification listener. (integer value)
+#notification_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# notification, -1 means infinite retry. (integer value)
+#default_notification_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# notification message (floating point value)
+#notification_retry_delay = 0.25
+
+# Time to live for rpc queues without consumers in seconds. (integer value)
+#rpc_queue_expiration = 60
+
+# Exchange name for sending RPC messages (string value)
+#default_rpc_exchange = ${control_exchange}_rpc
+
+# Exchange name for receiving RPC replies (string value)
+#rpc_reply_exchange = ${control_exchange}_rpc_reply
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc
+# listener. (integer value)
+#rpc_listener_prefetch_count = 100
+
+# Max number of not acknowledged message which RabbitMQ can send to rpc reply
+# listener. (integer value)
+#rpc_reply_listener_prefetch_count = 100
+
+# Reconnecting retry count in case of connectivity problem during sending
+# reply. -1 means infinite retry during rpc_timeout (integer value)
+#rpc_reply_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending
+# reply. (floating point value)
+#rpc_reply_retry_delay = 0.25
+
+# Reconnecting retry count in case of connectivity problem during sending RPC
+# message, -1 means infinite retry. If actual retry attempts in not 0 the rpc
+# request could be processed more than one time (integer value)
+#default_rpc_retry_attempts = -1
+
+# Reconnecting retry delay in case of connectivity problem during sending RPC
+# message (floating point value)
+#rpc_retry_delay = 0.25
+
+
+[oslo_messaging_zmq]
+
+#
+# From oslo.messaging
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Possible values:
+# redis - <No description provided>
+# sentinel - <No description provided>
+# dummy - <No description provided>
+#rpc_zmq_matchmaker = redis
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Number of seconds to wait before all pending messages will be sent after
+# closing a socket. The default value of -1 specifies an infinite linger
+# period. The value of 0 specifies no linger period. Pending messages shall be
+# discarded immediately when the socket is closed. Positive values specify an
+# upper bound for the linger period. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
+#zmq_linger = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target
+# ( < 0 means no timeout). (integer value)
+#zmq_target_expire = 300
+
+# Update period in seconds of a name service record about existing target.
+# (integer value)
+#zmq_target_update = 180
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = false
+
+# Use ROUTER remote proxy. (boolean value)
+#use_router_proxy = false
+
+# This option makes direct connections dynamic or static. It makes sense only
+# with use_router_proxy=False which means to use direct connections for direct
+# message types (ignored otherwise). (boolean value)
+#use_dynamic_connections = false
+
+# How many additional connections to a host will be made for failover reasons.
+# This option is actual only in dynamic connections mode. (integer value)
+#zmq_failover_connections = 2
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49153
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#rpc_zmq_serialization = json
+
+# This option configures round-robin mode in zmq socket. True means not keeping
+# a queue when server side disconnects. False means to keep queue and messages
+# even if server is disconnected, when the server appears we send all
+# accumulated messages to it. (boolean value)
+#zmq_immediate = true
+
+# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any
+# other negative value) means to skip any overrides and leave it to OS default;
+# 0 and 1 (or any other positive value) mean to disable and enable the option
+# respectively. (integer value)
+#zmq_tcp_keepalive = -1
+
+# The duration between two keepalive transmissions in idle condition. The unit
+# is platform dependent, for example, seconds in Linux, milliseconds in Windows
+# etc. The default value of -1 (or any other negative value and 0) means to
+# skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_idle = -1
+
+# The number of retransmissions to be carried out before declaring that remote
+# end is not available. The default value of -1 (or any other negative value
+# and 0) means to skip any overrides and leave it to OS default. (integer
+# value)
+#zmq_tcp_keepalive_cnt = -1
+
+# The duration between two successive keepalive retransmissions, if
+# acknowledgement to the previous keepalive transmission is not received. The
+# unit is platform dependent, for example, seconds in Linux, milliseconds in
+# Windows etc. The default value of -1 (or any other negative value and 0)
+# means to skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_intvl = -1
+
+# Maximum number of (green) threads to work concurrently. (integer value)
+#rpc_thread_pool_size = 100
+
+# Expiration timeout in seconds of a sent/received message after which it is
+# not tracked anymore by a client/server. (integer value)
+#rpc_message_ttl = 300
+
+# Wait for message acknowledgements from receivers. This mechanism works only
+# via proxy without PUB/SUB. (boolean value)
+#rpc_use_acks = false
+
+# Number of seconds to wait for an ack from a cast/call. After each retry
+# attempt this timeout is multiplied by some specified multiplier. (integer
+# value)
+#rpc_ack_timeout_base = 15
+
+# Number to multiply base ack timeout by after each retry attempt. (integer
+# value)
+#rpc_ack_timeout_multiplier = 2
+
+# Default number of message sending attempts in case of any problems occurred:
+# positive value N means at most N retries, 0 means no retries, None or -1 (or
+# any other negative values) mean to retry forever. This option is used only if
+# acknowledgments are enabled. (integer value)
+#rpc_retry_attempts = 3
+
+# List of publisher hosts SubConsumer can subscribe on. This option has higher
+# priority then the default publishers list taken from the matchmaker. (list
+# value)
+#subscribe_on =
+
+
+[oslo_middleware]
+
+#
+# From oslo.middleware
+#
+
+# The maximum body size for each request, in bytes. (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+# Deprecated group/name - [DEFAULT]/max_request_body_size
+#max_request_body_size = 114688
+
+# DEPRECATED: The HTTP Header that will be used to determine what the original
+# request protocol scheme was, even if it was hidden by a SSL termination
+# proxy. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#secure_proxy_ssl_header = X-Forwarded-Proto
+
+# Whether the application is behind a proxy or not. This determines if the
+# middleware should parse the headers or not. (boolean value)
+#enable_proxy_headers_parsing = false
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# This option controls whether or not to enforce scope when evaluating
+# policies. If ``True``, the scope of the token used in the request is compared
+# to the ``scope_types`` of the policy being enforced. If the scopes do not
+# match, an ``InvalidScope`` exception will be raised. If ``False``, a message
+# will be logged informing operators that policies are being invoked with
+# mismatching scope. (boolean value)
+#enforce_scope = false
+
+# The file that defines policies. (string value)
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched.  Missing or empty directories are ignored. (multi
+# valued)
+#policy_dirs = policy.d
+
+# Content Type to send and receive data for REST based policy check (string
+# value)
+# Possible values:
+# application/x-www-form-urlencoded - <No description provided>
+# application/json - <No description provided>
+#remote_content_type = application/x-www-form-urlencoded
+
+# server identity verification for REST based policy check (boolean value)
+#remote_ssl_verify_server_crt = false
+
+# Absolute path to ca cert file for REST based policy check (string value)
+#remote_ssl_ca_crt_file = <None>
+
+# Absolute path to client cert for REST based policy check (string value)
+#remote_ssl_client_crt_file = <None>
+
+# Absolute path client key file REST based policy check (string value)
+#remote_ssl_client_key_file = <None>
+
+
+[policy]
+
+#
+# From keystone
+#
+
+# Entry point for the policy backend driver in the `keystone.policy` namespace.
+# Supplied drivers are `rules` (which does not support any CRUD operations for
+# the v3 policy API) and `sql`. Typically, there is no reason to set this
+# option unless you are providing a custom entry point. (string value)
+#driver = sql
+
+# Maximum number of entities that will be returned in a policy collection.
+# (integer value)
+#list_limit = <None>
+
+
+[profiler]
+
+#
+# From osprofiler
+#
+
+#
+# Enables the profiling for all services on this node. Default value is False
+# (fully disable the profiling feature).
+#
+# Possible values:
+#
+# * True: Enables the feature
+# * False: Disables the feature. The profiling cannot be started via this
+# project
+# operations. If the profiling is triggered by another project, this project
+# part
+# will be empty.
+#  (boolean value)
+# Deprecated group/name - [profiler]/profiler_enabled
+#enabled = false
+
+#
+# Enables SQL requests profiling in services. Default value is False (SQL
+# requests won't be traced).
+#
+# Possible values:
+#
+# * True: Enables SQL requests profiling. Each SQL query will be part of the
+# trace and can the be analyzed by how much time was spent for that.
+# * False: Disables SQL requests profiling. The spent time is only shown on a
+# higher level of operations. Single SQL queries cannot be analyzed this
+# way.
+#  (boolean value)
+#trace_sqlalchemy = false
+
+#
+# Secret key(s) to use for encrypting context data for performance profiling.
+# This string value should have the following format:
+# <key1>[,<key2>,...<keyn>],
+# where each key is some random string. A user who triggers the profiling via
+# the REST API has to set one of these keys in the headers of the REST API call
+# to include profiling results of this node for this particular project.
+#
+# Both "enabled" flag and "hmac_keys" config options should be set to enable
+# profiling. Also, to generate correct profiling information across all
+# services
+# at least one key needs to be consistent between OpenStack projects. This
+# ensures it can be used from client side to generate the trace, containing
+# information from all possible resources. (string value)
+#hmac_keys = SECRET_KEY
+
+#
+# Connection string for a notifier backend. Default value is messaging:// which
+# sets the notifier to oslo_messaging.
+#
+# Examples of possible values:
+#
+# * messaging://: use oslo_messaging driver for sending notifications.
+# * mongodb://127.0.0.1:27017 : use mongodb driver for sending notifications.
+# * elasticsearch://127.0.0.1:9200 : use elasticsearch driver for sending
+# notifications.
+#  (string value)
+#connection_string = messaging://
+
+#
+# Document type for notification indexing in elasticsearch.
+#  (string value)
+#es_doc_type = notification
+
+#
+# This parameter is a time value parameter (for example: es_scroll_time=2m),
+# indicating for how long the nodes that participate in the search will
+# maintain
+# relevant resources in order to continue and support it.
+#  (string value)
+#es_scroll_time = 2m
+
+#
+# Elasticsearch splits large requests in batches. This parameter defines
+# maximum size of each batch (for example: es_scroll_size=10000).
+#  (integer value)
+#es_scroll_size = 10000
+
+#
+# Redissentinel provides a timeout option on the connections.
+# This parameter defines that timeout (for example: socket_timeout=0.1).
+#  (floating point value)
+#socket_timeout = 0.1
+
+#
+# Redissentinel uses a service name to identify a master redis service.
+# This parameter defines the name (for example:
+# sentinel_service_name=mymaster).
+#  (string value)
+#sentinel_service_name = mymaster
+
+
+[receipt]
+
+#
+# From keystone
+#
+
+# The amount of time that a receipt should remain valid (in seconds). This
+# value should always be very short, as it represents how long a user has to
+# reattempt auth with the missing auth methods. (integer value)
+# Minimum value: 0
+# Maximum value: 86400
+#expiration = 300
+
+# Entry point for the receipt provider in the `keystone.receipt.provider`
+# namespace. The receipt provider controls the receipt construction and
+# validation operations. Keystone includes just the `fernet` receipt provider
+# for now. `fernet` receipts do not need to be persisted at all, but require
+# that you run `keystone-manage fernet_setup` (also see the `keystone-manage
+# fernet_rotate` command). (string value)
+#provider = fernet
+
+# Toggle for caching receipt creation and validation data. This has no effect
+# unless global caching is enabled, or if cache_on_issue is disabled as we only
+# cache receipts on issue. (boolean value)
+#caching = true
+
+# The number of seconds to cache receipt creation and validation data. This has
+# no effect unless both global and `[receipt] caching` are enabled. (integer
+# value)
+# Minimum value: 0
+#cache_time = 300
+
+# Enable storing issued receipt data to receipt validation cache so that first
+# receipt validation doesn't actually cause full validation cycle. This option
+# has no effect unless global caching and receipt caching are enabled. (boolean
+# value)
+#cache_on_issue = true
+
+
+[resource]
+
+#
+# From keystone
+#
+
+# DEPRECATED: Entry point for the resource driver in the `keystone.resource`
+# namespace. Only a `sql` driver is supplied by keystone. Unless you are
+# writing proprietary drivers for keystone, you do not need to set this option.
+# (string value)
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: Non-SQL resource cannot be used with SQL Identity and has been unable
+# to be used since Ocata. SQL Resource backend is a requirement as of Pike.
+# Setting this option no longer has an effect on how Keystone operates.
+#driver = sql
+
+# Toggle for resource caching. This has no effect unless global caching is
+# enabled. (boolean value)
+# Deprecated group/name - [assignment]/caching
+#caching = true
+
+# Time to cache resource data in seconds. This has no effect unless global
+# caching is enabled. (integer value)
+# Deprecated group/name - [assignment]/cache_time
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a resource collection.
+# (integer value)
+# Deprecated group/name - [assignment]/list_limit
+#list_limit = <None>
+
+# Name of the domain that owns the `admin_project_name`. If left unset, then
+# there is no admin project. `[resource] admin_project_name` must also be set
+# to use this option. (string value)
+#admin_project_domain_name = <None>
+
+# This is a special project which represents cloud-level administrator
+# privileges across services. Tokens scoped to this project will contain a true
+# `is_admin_project` attribute to indicate to policy systems that the role
+# assignments on that specific project should apply equally across every
+# project. If left unset, then there is no admin project, and thus no explicit
+# means of cross-project role assignments. `[resource]
+# admin_project_domain_name` must also be set to use this option. (string
+# value)
+#admin_project_name = <None>
+
+# This controls whether the names of projects are restricted from containing
+# URL-reserved characters. If set to `new`, attempts to create or update a
+# project with a URL-unsafe name will fail. If set to `strict`, attempts to
+# scope a token with a URL-unsafe project name will fail, thereby forcing all
+# project names to be updated to be URL-safe. (string value)
+# Possible values:
+# off - <No description provided>
+# new - <No description provided>
+# strict - <No description provided>
+#project_name_url_safe = off
+
+# This controls whether the names of domains are restricted from containing
+# URL-reserved characters. If set to `new`, attempts to create or update a
+# domain with a URL-unsafe name will fail. If set to `strict`, attempts to
+# scope a token with a URL-unsafe domain name will fail, thereby forcing all
+# domain names to be updated to be URL-safe. (string value)
+# Possible values:
+# off - <No description provided>
+# new - <No description provided>
+# strict - <No description provided>
+#domain_name_url_safe = off
+
+
+[revoke]
+
+#
+# From keystone
+#
+
+# Entry point for the token revocation backend driver in the `keystone.revoke`
+# namespace. Keystone only provides a `sql` driver, so there is no reason to
+# set this option unless you are providing a custom entry point. (string value)
+#driver = sql
+
+# The number of seconds after a token has expired before a corresponding
+# revocation event may be purged from the backend. (integer value)
+# Minimum value: 0
+#expiration_buffer = 1800
+
+# Toggle for revocation event caching. This has no effect unless global caching
+# is enabled. (boolean value)
+#caching = true
+
+# Time to cache the revocation list and the revocation events (in seconds).
+# This has no effect unless global and `[revoke] caching` are both enabled.
+# (integer value)
+# Deprecated group/name - [token]/revocation_cache_time
+#cache_time = 3600
+
+
+[role]
+
+#
+# From keystone
+#
+
+# Entry point for the role backend driver in the `keystone.role` namespace.
+# Keystone only provides a `sql` driver, so there's no reason to change this
+# unless you are providing a custom entry point. (string value)
+#driver = <None>
+
+# Toggle for role caching. This has no effect unless global caching is enabled.
+# In a typical deployment, there is no reason to disable this. (boolean value)
+#caching = true
+
+# Time to cache role data, in seconds. This has no effect unless both global
+# caching and `[role] caching` are enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a role collection. This
+# may be useful to tune if you have a large number of discrete roles in your
+# deployment. (integer value)
+#list_limit = <None>
+
+
+[saml]
+
+#
+# From keystone
+#
+
+# Determines the lifetime for any SAML assertions generated by keystone, using
+# `NotOnOrAfter` attributes. (integer value)
+#assertion_expiration_time = 3600
+
+# Name of, or absolute path to, the binary to be used for XML signing. Although
+# only the XML Security Library (`xmlsec1`) is supported, it may have a non-
+# standard name or path on your system. If keystone cannot find the binary
+# itself, you may need to install the appropriate package, use this option to
+# specify an absolute path, or adjust keystone's PATH environment variable.
+# (string value)
+#xmlsec1_binary = xmlsec1
+
+# Absolute path to the public certificate file to use for SAML signing. The
+# value cannot contain a comma (`,`). (string value)
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# Absolute path to the private key file to use for SAML signing. The value
+# cannot contain a comma (`,`). (string value)
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# This is the unique entity identifier of the identity provider (keystone) to
+# use when generating SAML assertions. This value is required to generate
+# identity provider metadata and must be a URI (a URL is recommended). For
+# example: `https://keystone.example.com/v3/OS-FEDERATION/saml2/idp`. (uri
+# value)
+#idp_entity_id = <None>
+
+# This is the single sign-on (SSO) service location of the identity provider
+# which accepts HTTP POST requests. A value is required to generate identity
+# provider metadata. For example: `https://keystone.example.com/v3/OS-
+# FEDERATION/saml2/sso`. (uri value)
+#idp_sso_endpoint = <None>
+
+# This is the language used by the identity provider's organization. (string
+# value)
+#idp_lang = en
+
+# This is the name of the identity provider's organization. (string value)
+#idp_organization_name = SAML Identity Provider
+
+# This is the name of the identity provider's organization to be displayed.
+# (string value)
+#idp_organization_display_name = OpenStack SAML Identity Provider
+
+# This is the URL of the identity provider's organization. The URL referenced
+# here should be useful to humans. (uri value)
+#idp_organization_url = https://example.com/
+
+# This is the company name of the identity provider's contact person. (string
+# value)
+#idp_contact_company = Example, Inc.
+
+# This is the given name of the identity provider's contact person. (string
+# value)
+#idp_contact_name = SAML Identity Provider Support
+
+# This is the surname of the identity provider's contact person. (string value)
+#idp_contact_surname = Support
+
+# This is the email address of the identity provider's contact person. (string
+# value)
+#idp_contact_email = support@example.com
+
+# This is the telephone number of the identity provider's contact person.
+# (string value)
+#idp_contact_telephone = +1 800 555 0100
+
+# This is the type of contact that best describes the identity provider's
+# contact person. (string value)
+# Possible values:
+# technical - <No description provided>
+# support - <No description provided>
+# administrative - <No description provided>
+# billing - <No description provided>
+# other - <No description provided>
+#idp_contact_type = other
+
+# Absolute path to the identity provider metadata file. This file should be
+# generated with the `keystone-manage saml_idp_metadata` command. There is
+# typically no reason to change this value. (string value)
+#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml
+
+# The prefix of the RelayState SAML attribute to use when generating enhanced
+# client and proxy (ECP) assertions. In a typical deployment, there is no
+# reason to change this value. (string value)
+#relay_state_prefix = ss:mem:
+
+
+[security_compliance]
+
+#
+# From keystone
+#
+
+# The maximum number of days a user can go without authenticating before being
+# considered "inactive" and automatically disabled (locked). This feature is
+# disabled by default; set any value to enable it. This feature depends on the
+# `sql` backend for the `[identity] driver`. When a user exceeds this threshold
+# and is considered "inactive", the user's `enabled` attribute in the HTTP API
+# may not match the value of the user's `enabled` column in the user table.
+# (integer value)
+# Minimum value: 1
+#disable_user_account_days_inactive = <None>
+
+# The maximum number of times that a user can fail to authenticate before the
+# user account is locked for the number of seconds specified by
+# `[security_compliance] lockout_duration`. This feature is disabled by
+# default. If this feature is enabled and `[security_compliance]
+# lockout_duration` is not set, then users may be locked out indefinitely until
+# the user is explicitly enabled via the API. This feature depends on the `sql`
+# backend for the `[identity] driver`. (integer value)
+# Minimum value: 1
+#lockout_failure_attempts = <None>
+
+# The number of seconds a user account will be locked when the maximum number
+# of failed authentication attempts (as specified by `[security_compliance]
+# lockout_failure_attempts`) is exceeded. Setting this option will have no
+# effect unless you also set `[security_compliance] lockout_failure_attempts`
+# to a non-zero value. This feature depends on the `sql` backend for the
+# `[identity] driver`. (integer value)
+# Minimum value: 1
+#lockout_duration = 1800
+
+# The number of days for which a password will be considered valid before
+# requiring it to be changed. This feature is disabled by default. If enabled,
+# new password changes will have an expiration date, however existing passwords
+# would not be impacted. This feature depends on the `sql` backend for the
+# `[identity] driver`. (integer value)
+# Minimum value: 1
+#password_expires_days = <None>
+
+# This controls the number of previous user password iterations to keep in
+# history, in order to enforce that newly created passwords are unique. The
+# total number which includes the new password should not be greater or equal
+# to this value. Setting the value to zero (the default) disables this feature.
+# Thus, to enable this feature, values must be greater than 0. This feature
+# depends on the `sql` backend for the `[identity] driver`. (integer value)
+# Minimum value: 0
+#unique_last_password_count = 0
+
+# The number of days that a password must be used before the user can change
+# it. This prevents users from changing their passwords immediately in order to
+# wipe out their password history and reuse an old password. This feature does
+# not prevent administrators from manually resetting passwords. It is disabled
+# by default and allows for immediate password changes. This feature depends on
+# the `sql` backend for the `[identity] driver`. Note: If
+# `[security_compliance] password_expires_days` is set, then the value for this
+# option should be less than the `password_expires_days`. (integer value)
+# Minimum value: 0
+#minimum_password_age = 0
+
+# The regular expression used to validate password strength requirements. By
+# default, the regular expression will match any password. The following is an
+# example of a pattern which requires at least 1 letter, 1 digit, and have a
+# minimum length of 7 characters: ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ This feature
+# depends on the `sql` backend for the `[identity] driver`. (string value)
+#password_regex = <None>
+
+# Describe your password regular expression here in language for humans. If a
+# password fails to match the regular expression, the contents of this
+# configuration variable will be returned to users to explain why their
+# requested password was insufficient. (string value)
+#password_regex_description = <None>
+
+# Enabling this option requires users to change their password when the user is
+# created, or upon administrative reset. Before accessing any services,
+# affected users will have to change their password. To ignore this requirement
+# for specific users, such as service users, set the `options` attribute
+# `ignore_change_password_upon_first_use` to `True` for the desired user via
+# the update user API. This feature is disabled by default. This feature is
+# only applicable with the `sql` backend for the `[identity] driver`. (boolean
+# value)
+#change_password_upon_first_use = false
+
+
+[shadow_users]
+
+#
+# From keystone
+#
+
+# Entry point for the shadow users backend driver in the
+# `keystone.identity.shadow_users` namespace. This driver is used for
+# persisting local user references to externally-managed identities (via
+# federation, LDAP, etc). Keystone only provides a `sql` driver, so there is no
+# reason to change this option unless you are providing a custom entry point.
+# (string value)
+#driver = sql
+
+
+[signing]
+
+#
+# From keystone
+#
+
+# DEPRECATED: Absolute path to the public certificate file to use for signing
+# responses to revocation lists requests. Set this together with `[signing]
+# keyfile`. For non-production environments, you may be interested in using
+# `keystone-manage pki_setup` to generate self-signed certificates. (string
+# value)
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in
+# Pike. These options remain for backwards compatibility.
+#certfile = /etc/keystone/ssl/certs/signing_cert.pem
+
+# DEPRECATED: Absolute path to the private key file to use for signing
+# responses to revocation lists requests. Set this together with `[signing]
+# certfile`. (string value)
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in
+# Pike. These options remain for backwards compatibility.
+#keyfile = /etc/keystone/ssl/private/signing_key.pem
+
+# DEPRECATED: Absolute path to the public certificate authority (CA) file to
+# use when creating self-signed certificates with `keystone-manage pki_setup`.
+# Set this together with `[signing] ca_key`. There is no reason to set this
+# option unless you are requesting revocation lists in a non-production
+# environment. Use a `[signing] certfile` issued from a trusted certificate
+# authority instead. (string value)
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in
+# Pike. These options remain for backwards compatibility.
+#ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# DEPRECATED: Absolute path to the private certificate authority (CA) key file
+# to use when creating self-signed certificates with `keystone-manage
+# pki_setup`. Set this together with `[signing] ca_certs`. There is no reason
+# to set this option unless you are requesting revocation lists in a non-
+# production environment. Use a `[signing] certfile` issued from a trusted
+# certificate authority instead. (string value)
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in
+# Pike. These options remain for backwards compatibility.
+#ca_key = /etc/keystone/ssl/private/cakey.pem
+
+# DEPRECATED: Key size (in bits) to use when generating a self-signed token
+# signing certificate. There is no reason to set this option unless you are
+# requesting revocation lists in a non-production environment. Use a `[signing]
+# certfile` issued from a trusted certificate authority instead. (integer
+# value)
+# Minimum value: 1024
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in
+# Pike. These options remain for backwards compatibility.
+#key_size = 2048
+
+# DEPRECATED: The validity period (in days) to use when generating a self-
+# signed token signing certificate. There is no reason to set this option
+# unless you are requesting revocation lists in a non-production environment.
+# Use a `[signing] certfile` issued from a trusted certificate authority
+# instead. (integer value)
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in
+# Pike. These options remain for backwards compatibility.
+#valid_days = 3650
+
+# DEPRECATED: The certificate subject to use when generating a self-signed
+# token signing certificate. There is no reason to set this option unless you
+# are requesting revocation lists in a non-production environment. Use a
+# `[signing] certfile` issued from a trusted certificate authority instead.
+# (string value)
+# This option is deprecated for removal since P.
+# Its value may be silently ignored in the future.
+# Reason: `keystone-manage pki_setup` was deprecated in Mitaka and removed in
+# Pike. These options remain for backwards compatibility.
+#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[token]
+
+#
+# From keystone
+#
+
+# The amount of time that a token should remain valid (in seconds). Drastically
+# reducing this value may break "long-running" operations that involve multiple
+# services to coordinate together, and will force users to authenticate with
+# keystone more frequently. Drastically increasing this value will increase the
+# number of tokens that will be simultaneously valid. Keystone tokens are also
+# bearer tokens, so a shorter duration will also reduce the potential security
+# impact of a compromised token. (integer value)
+# Minimum value: 0
+# Maximum value: 9223372036854775807
+#expiration = 3600
+
+# Entry point for the token provider in the `keystone.token.provider`
+# namespace. The token provider controls the token construction, validation,
+# and revocation operations. Supported upstream providers are `fernet` and
+# `jws`. Neither `fernet` or `jws` tokens require persistence and both require
+# additional setup. If using `fernet`, you're required to run `keystone-manage
+# fernet_setup`, which creates symmetric keys used to encrypt tokens. If using
+# `jws`, you're required to generate an ECDSA keypair using a SHA-256 hash
+# algorithm for signing and validating token, which can be done with `keystone-
+# manage create_jws_keypair`. Note that `fernet` tokens are encrypted and `jws`
+# tokens are only signed. Please be sure to consider this if your deployment
+# has security requirements regarding payload contents used to generate token
+# IDs. (string value)
+#provider = fernet
+
+# Toggle for caching token creation and validation data. This has no effect
+# unless global caching is enabled. (boolean value)
+#caching = true
+
+# The number of seconds to cache token creation and validation data. This has
+# no effect unless both global and `[token] caching` are enabled. (integer
+# value)
+# Minimum value: 0
+# Maximum value: 9223372036854775807
+#cache_time = <None>
+
+# This toggles support for revoking individual tokens by the token identifier
+# and thus various token enumeration operations (such as listing all tokens
+# issued to a specific user). These operations are used to determine the list
+# of tokens to consider revoked. Do not disable this option if you're using the
+# `kvs` `[revoke] driver`. (boolean value)
+#revoke_by_id = true
+
+# This toggles whether scoped tokens may be re-scoped to a new project or
+# domain, thereby preventing users from exchanging a scoped token (including
+# those with a default project scope) for any other token. This forces users to
+# either authenticate for unscoped tokens (and later exchange that unscoped
+# token for tokens with a more specific scope) or to provide their credentials
+# in every request for a scoped token to avoid re-scoping altogether. (boolean
+# value)
+#allow_rescope_scoped_token = true
+
+# DEPRECATED: This controls whether roles should be included with tokens that
+# are not directly assigned to the token's scope, but are instead linked
+# implicitly to other role assignments. (boolean value)
+# This option is deprecated for removal since R.
+# Its value may be silently ignored in the future.
+# Reason: Default roles depend on a chain of implied role assignments. Ex: an
+# admin user will also have the reader and member role. By ensuring that all
+# these roles will always appear on the token validation response, we can
+# improve the simplicity and readability of policy files.
+#infer_roles = true
+
+# DEPRECATED: Enable storing issued token data to token validation cache so
+# that first token validation doesn't actually cause full validation cycle.
+# This option has no effect unless global caching is enabled and will still
+# cache tokens even if `[token] caching = False`. (boolean value)
+# This option is deprecated for removal since S.
+# Its value may be silently ignored in the future.
+# Reason: Keystone already exposes a configuration option for caching tokens.
+# Having a separate configuration option to cache tokens when they are issued
+# is redundant, unnecessarily complicated, and is misleading if token caching
+# is disabled because tokens will still be pre-cached by default when they are
+# issued. The ability to pre-cache tokens when they are issued is going to rely
+# exclusively on the ``keystone.conf [token] caching`` option in the future.
+#cache_on_issue = true
+
+# This controls the number of seconds that a token can be retrieved for beyond
+# the built-in expiry time. This allows long running operations to succeed.
+# Defaults to two days. (integer value)
+#allow_expired_window = 172800
+
+
+[tokenless_auth]
+
+#
+# From keystone
+#
+
+# The list of distinguished names which identify trusted issuers of client
+# certificates allowed to use X.509 tokenless authorization. If the option is
+# absent then no certificates will be allowed. The format for the values of a
+# distinguished name (DN) must be separated by a comma and contain no spaces.
+# Furthermore, because an individual DN may contain commas, this configuration
+# option may be repeated multiple times to represent multiple values. For
+# example, keystone.conf would include two consecutive lines in order to trust
+# two different DNs, such as `trusted_issuer = CN=john,OU=keystone,O=openstack`
+# and `trusted_issuer = CN=mary,OU=eng,O=abc`. (multi valued)
+#trusted_issuer =
+
+# The federated protocol ID used to represent X.509 tokenless authorization.
+# This is used in combination with the value of `[tokenless_auth]
+# issuer_attribute` to find a corresponding federated mapping. In a typical
+# deployment, there is no reason to change this value. (string value)
+#protocol = x509
+
+# The name of the WSGI environment variable used to pass the issuer of the
+# client certificate to keystone. This attribute is used as an identity
+# provider ID for the X.509 tokenless authorization along with the protocol to
+# look up its corresponding mapping. In a typical deployment, there is no
+# reason to change this value. (string value)
+#issuer_attribute = SSL_CLIENT_I_DN
+
+
+[trust]
+
+#
+# From keystone
+#
+
+# Allows authorization to be redelegated from one user to another, effectively
+# chaining trusts together. When disabled, the `remaining_uses` attribute of a
+# trust is constrained to be zero. (boolean value)
+#allow_redelegation = false
+
+# Maximum number of times that authorization can be redelegated from one user
+# to another in a chain of trusts. This number may be reduced further for a
+# specific trust. (integer value)
+#max_redelegation_count = 3
+
+# Entry point for the trust backend driver in the `keystone.trust` namespace.
+# Keystone only provides a `sql` driver, so there is no reason to change this
+# unless you are providing a custom entry point. (string value)
+#driver = sql
+
+
+[unified_limit]
+
+#
+# From keystone
+#
+
+# Entry point for the unified limit backend driver in the
+# `keystone.unified_limit` namespace. Keystone only provides a `sql` driver, so
+# there's no reason to change this unless you are providing a custom entry
+# point. (string value)
+#driver = sql
+
+# Toggle for unified limit caching. This has no effect unless global caching is
+# enabled. In a typical deployment, there is no reason to disable this.
+# (boolean value)
+#caching = true
+
+# Time to cache unified limit data, in seconds. This has no effect unless both
+# global caching and `[unified_limit] caching` are enabled. (integer value)
+#cache_time = <None>
+
+# Maximum number of entities that will be returned in a role collection. This
+# may be useful to tune if you have a large number of unified limits in your
+# deployment. (integer value)
+#list_limit = <None>
+
+# The enforcement model to use when validating limits associated to projects.
+# Enforcement models will behave differently depending on the existing limits,
+# which may result in backwards incompatible changes if a model is switched in
+# a running deployment. (string value)
+# Possible values:
+# flat - <No description provided>
+# strict_two_level - <No description provided>
+#enforcement_model = flat
+
+
+[wsgi]
+
+#
+# From keystone
+#
+
+# If set to true, this enables the oslo debug middleware in Keystone. This
+# Middleware prints a lot of information about the request and the response. It
+# is useful for getting information about the data on the wire (decoded) and
+# passed to the WSGI application pipeline. This middleware has no effect on the
+# "debug" setting in the [DEFAULT] section of the config file or setting
+# Keystone's log-level to "DEBUG"; it is specific to debugging the WSGI data as
+# it enters and leaves Keystone (specific request-related data). This option is
+# used for introspection on the request and response data between the web
+# server (apache, nginx, etc) and Keystone.  This middleware is inserted as the
+# first element in the middleware chain and will show the data closest to the
+# wire.  WARNING: NOT INTENDED FOR USE IN PRODUCTION. THIS MIDDLEWARE CAN AND
+# WILL EMIT SENSITIVE/PRIVILEGED DATA. (boolean value)
+#debug_middleware = false
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/keystone-all b/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/keystone-all
new file mode 100644 (file)
index 0000000..bde324b
--- /dev/null
@@ -0,0 +1,156 @@
+#!/bin/sh
+# Copyright (c) 2013-2018 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+### BEGIN INIT INFO
+# Provides:          OpenStack Keystone-wsgi
+# Required-Start:    networking
+# Required-Stop:     networking
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: OpenStack Keystone
+# Description:       OpenStack Identity service running on WSGI compatible gunicorn web server
+#
+### END INIT INFO
+
+RETVAL=0
+#public 5000
+
+DESC_PUBLIC="openstack-keystone"
+
+PIDFILE_PUBLIC="/var/run/$DESC_PUBLIC.pid"
+
+PYTHON=`which python`
+
+source /etc/keystone/keystone-extra.conf
+source /etc/platform/platform.conf
+
+if [ -n ${@:2:1} ] ; then
+        if [ ${@:2:1}="--public-bind-addr" ] ; then
+                PUBLIC_BIND_ADDR_CMD=${@:3:1}
+        fi
+fi
+
+
+###
+EXEC="/usr/bin/gunicorn"
+
+WORKER="eventlet"
+# Increased timeout to facilitate large image uploads
+TIMEOUT="200"
+
+# Calculate the number of workers based on the worker count determined by
+# Platform Eng, which is retrieved from keystone-extra.conf
+
+if [ "$system_type" == "All-in-one" ]; then
+    TIS_WORKERS_FACTOR=1
+else
+    TIS_WORKERS_FACTOR=1.5
+fi
+TIS_WORKERS=$(echo "${TIS_WORKERS_FACTOR}*${TIS_PUBLIC_WORKERS}"|bc )
+TIS_WORKERS=${TIS_WORKERS%.*}
+
+#--max-requests , --max-requests-jitter Configuration
+#--max-requests = The max number of requests a worker will process before restarting
+#--max-requests-jitter = The maximum jitter to add to the max_requests setting.
+MAX_REQUESTS=100000
+MAX_REQ_JITTER_CAP_FACTOR=0.5
+MAX_REQ_JITTER_PUBLIC=$(echo "${TIS_WORKERS}*${MAX_REQ_JITTER_CAP_FACTOR}+1"|bc)
+MAX_REQ_JITTER_PUBLIC=${MAX_REQ_JITTER_PUBLIC%.*}
+
+
+start()
+{
+    # Got proper no of workers . Starting gunicorn now
+    echo -e "Initialising keystone service using gunicorn .. \n"
+
+    if [ -z "$PUBLIC_BIND_ADDR" ]; then
+        echo "Keystone floating ip not found . Cannot start services. Exiting .."
+        exit 1
+    fi
+    BIND_PUBLIC=$PUBLIC_BIND_ADDR:5000
+
+    if [ -e $PIDFILE_PUBLIC ]; then
+        PIDDIR=/proc/$(cat $PIDFILE_PUBLIC)
+        if [ -d ${PIDDIR} ]; then
+            echo "$DESC_PUBLIC already running."
+            exit 1
+        else
+            echo "Removing stale PID file $PIDFILE_PUBLIC"
+            rm -f $PIDFILE_PUBLIC
+        fi
+    fi
+
+    echo -e "Starting $DESC_PUBLIC...\n";
+    echo -e "Worker is ${WORKER} --workers ${TIS_WORKERS} --timeout ${TIMEOUT} --max_requests ${MAX_REQUESTS} --max_request_jitter public ${MAX_REQ_JITTER_PUBLIC}\n" ;
+
+    echo -e "Starting keystone process at port 5000 \n" ;
+
+    start-stop-daemon --start --quiet --background --pidfile ${PIDFILE_PUBLIC} \
+        --make-pidfile --exec ${PYTHON} -- ${EXEC} --bind ${BIND_PUBLIC} \
+        --worker-class ${WORKER} --workers ${TIS_WORKERS} --timeout ${TIMEOUT} \
+        --max-requests ${MAX_REQUESTS}  --max-requests-jitter ${MAX_REQ_JITTER_PUBLIC} \
+        --log-syslog  \
+        --pythonpath '/usr/share/keystone' public:application --name keystone-public
+
+    RETVAL=$?
+    if [ $RETVAL -eq 0 ]; then
+        echo -e "Keystone started at port 5000... \n"
+    else
+        echo -e "Failed to start Keystone .. \n"
+    fi
+}
+
+stop()
+{
+    if [  -e $PIDFILE_PUBLIC ]; then 
+       start-stop-daemon --stop --quiet --pidfile $PIDFILE_PUBLIC
+       RETVAL_PUBLIC=$?
+           if [ $RETVAL_PUBLIC -eq 0 ]; then
+               echo "Stopped $DESC_PUBLIC."
+           else
+               echo "Stopping failed - $PIDFILE_PUBLIC"
+           fi
+           rm -f $PIDFILE_PUBLIC
+    else 
+       echo "Already stopped - $PIDFILE_PUBLIC"
+    fi 
+}
+
+status()
+{
+    pid_public=`cat $PIDFILE_PUBLIC 2>/dev/null`
+
+    if [ -n "$pid_public" ]; then
+        echo -e "\033[32m $DESC_PUBLIC  is running..\033[0m"
+    else
+        echo -e "\033[31m $DESC_PUBLIC  is not running..\033[0m"
+    fi
+}
+
+
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    restart|force-reload|reload)
+        stop
+        start
+        ;;
+    status)
+       status
+        ;;
+    *)
+        #echo "Usage: $0 {start|stop|force-reload|restart|reload|status} OR {/usr/bin/keystone-all start --public-bind-addr xxx.xxx.xxx}"
+       start
+        #RETVAL=1
+        ;;
+esac
+
+exit $RETVAL
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/keystone-fernet-keys-rotate-active b/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/keystone-fernet-keys-rotate-active
new file mode 100644 (file)
index 0000000..8080ea0
--- /dev/null
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+#
+# Wrapper script to rotate keystone fernet keys on active controller only
+#
+KEYSTONE_KEYS_ROTATE_INFO="/var/run/keystone-keys-rotate.info"
+KEYSTONE_KEYS_ROTATE_CMD="/usr/bin/nice -n 2 /usr/bin/keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone"
+
+function is_active_pgserver()
+{
+    # Determine whether we're running on the same controller as the service.
+    local service=postgres
+    local enabledactive=$(/usr/bin/sm-query service $service| grep enabled-active)
+    if [ "x$enabledactive" == "x" ]
+    then
+        # enabled-active not found for that service on this controller
+        return 1
+    else
+        # enabled-active found for that resource
+        return 0
+    fi
+}
+
+if is_active_pgserver
+then
+    if [ ! -f ${KEYSTONE_KEYS_ROTATE_INFO} ]
+    then
+        echo delay_count=0 > ${KEYSTONE_KEYS_ROTATE_INFO}
+    fi
+
+    source ${KEYSTONE_KEYS_ROTATE_INFO}
+    sudo -u postgres psql -d sysinv -c "SELECT alarm_id, entity_instance_id from i_alarm;" | grep -P "^(?=.*100.101)(?=.*${HOSTNAME})" &>/dev/null
+    if [ $? -eq 0 ]
+    then
+        source /etc/platform/platform.conf
+        if [ "${system_type}" = "All-in-one" ]
+        then
+            source /etc/init.d/task_affinity_functions.sh
+            idle_core=$(get_most_idle_core)
+            if [ "$idle_core" -ne "0" ]
+            then
+                sh -c "exec taskset -c $idle_core ${KEYSTONE_KEYS_ROTATE_CMD}"
+                sed -i "/delay_count/s/=.*/=0/" ${KEYSTONE_KEYS_ROTATE_INFO}
+                exit 0
+            fi
+        fi
+
+        if [ "$delay_count" -lt "3" ]
+        then
+            newval=$(($delay_count+1))
+            sed -i "/delay_count/s/=.*/=$newval/" ${KEYSTONE_KEYS_ROTATE_INFO}
+            (sleep 3600; /usr/bin/keystone-fernet-keys-rotate-active) &
+            exit 0
+        fi
+
+    fi
+
+    eval ${KEYSTONE_KEYS_ROTATE_CMD}
+    sed -i "/delay_count/s/=.*/=0/" ${KEYSTONE_KEYS_ROTATE_INFO}
+
+fi
+
+exit 0
+
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/openstack-keystone.service b/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/openstack-keystone.service
new file mode 100644 (file)
index 0000000..a72aa84
--- /dev/null
@@ -0,0 +1,14 @@
+[Unit]
+Description=OpenStack Identity Service (code-named Keystone)
+After=syslog.target network.target
+
+[Service]
+Type=forking
+#RemainAfterExit is set to yes as we have 2 pids to monitor
+RemainAfterExit=yes
+ExecStart=/usr/bin/keystone-all start
+ExecStop=/usr/bin/keystone-all stop
+ExecReload=/usr/bin/keystone-all reload
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/password-rules.conf b/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/password-rules.conf
new file mode 100644 (file)
index 0000000..6a24880
--- /dev/null
@@ -0,0 +1,49 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# The password rules captures the [security_compliance]
+# section of the generic Keystone configuration (keystone.conf)
+# This configuration is used to statically define the password
+# rules for password validation in pre-Keystone environments
+#
+# N.B: Only set non-default keys here (default commented configuration
+# items not needed)
+
+[security_compliance]
+
+#
+# From keystone
+#
+
+# This controls the number of previous user password iterations to keep in
+# history, in order to enforce that newly created passwords are unique. Setting
+# the value to zero (the default) disables this feature. Thus, to enable this
+# feature, values must be greater than 0. This feature depends on the `sql`
+# backend for the `[identity] driver`. (integer value)
+# Minimum value: 0
+unique_last_password_count = 2
+
+# The regular expression used to validate password strength requirements. By
+# default, the regular expression will match any password. The following is an
+# example of a pattern which requires at least 1 letter, 1 digit, and have a
+# minimum length of 7 characters: ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ This feature
+# depends on the `sql` backend for the `[identity] driver`. (string value)
+password_regex = ^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*()<>{}+=_\\\[\]\-?|~`,.;:]).{7,}$
+
+# Describe your password regular expression here in language for humans. If a
+# password fails to match the regular expression, the contents of this
+# configuration variable will be returned to users to explain why their
+# requested password was insufficient. (string value)
+password_regex_description = Password must have a minimum length of 7 characters, and must contain at least 1 upper case, 1 lower case, 1 digit, and 1 special character
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/public.py b/meta-stx/recipes-devtools/python/files/python-keystone/stx-files/public.py
new file mode 100644 (file)
index 0000000..d3a29f3
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright (c) 2013-2017 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone.server import wsgi as wsgi_server
+
+import sys
+sys.argv = sys.argv[:1]
+
+application = wsgi_server.initialize_public_application()
diff --git a/meta-stx/recipes-devtools/python/files/python-keystone/wsgi-keystone.conf b/meta-stx/recipes-devtools/python/files/python-keystone/wsgi-keystone.conf
new file mode 100644 (file)
index 0000000..b3a06bb
--- /dev/null
@@ -0,0 +1,67 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+Listen 5000
+Listen 35357
+
+<VirtualHost *:5000>
+    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-public
+    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%{cu}t %M"
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+
+    <Directory /usr/bin>
+        Require all granted
+    </Directory>
+</VirtualHost>
+
+<VirtualHost *:35357>
+    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
+    WSGIProcessGroup keystone-admin
+    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    ErrorLogFormat "%{cu}t %M"
+    ErrorLog /var/log/apache2/keystone.log
+    CustomLog /var/log/apache2/keystone_access.log combined
+
+    <Directory /usr/bin>
+        Require all granted
+    </Directory>
+</VirtualHost>
+
+Alias /identity /usr/bin/keystone-wsgi-public
+<Location /identity>
+    SetHandler wsgi-script
+    Options +ExecCGI
+
+    WSGIProcessGroup keystone-public
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
+
+Alias /identity_admin /usr/bin/keystone-wsgi-admin
+<Location /identity_admin>
+    SetHandler wsgi-script
+    Options +ExecCGI
+
+    WSGIProcessGroup keystone-admin
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/meta-stx/recipes-devtools/python/files/python-pynacl/0001-Enable-cross-compile.patch b/meta-stx/recipes-devtools/python/files/python-pynacl/0001-Enable-cross-compile.patch
new file mode 100644 (file)
index 0000000..893d678
--- /dev/null
@@ -0,0 +1,35 @@
+From 97c9b5fdbffda65a7cacb2d0d7beac59c34f902c Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Thu, 10 Oct 2019 11:30:18 -0700
+Subject: [PATCH] Enable cross compile
+
+---
+ setup.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/setup.py b/setup.py
+index 747dc62..1eeb7bf 100644
+--- a/setup.py
++++ b/setup.py
+@@ -167,6 +167,9 @@ class build_clib(_build_clib):
+                 configure, "--disable-shared", "--enable-static",
+                 "--disable-debug", "--disable-dependency-tracking",
+                 "--with-pic", "--prefix", os.path.abspath(self.build_clib),
++              os.environ.get('PYNACL_CROSS_TARGET'),
++              os.environ.get('PYNACL_CROSS_HOST'),
++              os.environ.get('PYNACL_CROSS_BUILD'),
+             ],
+             cwd=build_temp,
+         )
+@@ -176,7 +179,7 @@ class build_clib(_build_clib):
+         subprocess.check_call(["make"] + make_args, cwd=build_temp)
+         # Check the build library
+-        subprocess.check_call(["make", "check"] + make_args, cwd=build_temp)
++        # subprocess.check_call(["make", "check"] + make_args, cwd=build_temp)
+         # Install the built library
+         subprocess.check_call(["make", "install"] + make_args, cwd=build_temp)
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-devtools/python/files/python-redfishtool/0001-Adapt-redfishtool-to-python2.patch b/meta-stx/recipes-devtools/python/files/python-redfishtool/0001-Adapt-redfishtool-to-python2.patch
new file mode 100644 (file)
index 0000000..5cad092
--- /dev/null
@@ -0,0 +1,640 @@
+From ecaf5c44da357e2ee5279a3f84a060f7af2c9dd1 Mon Sep 17 00:00:00 2001
+From: zhipengl <zhipengs.liu@intel.com>
+Date: Fri, 21 Jun 2019 01:50:14 +0800
+Subject: [PATCH] Adapt-redfishtool-to-python2
+
+Signed-off-by: zhipengl <zhipengs.liu@intel.com>
+---
+ redfishtool/AccountService.py       | 18 +++++++++---------
+ redfishtool/Chassis.py              | 22 +++++++++++-----------
+ redfishtool/Managers.py             | 29 ++++++++++++++---------------
+ redfishtool/ServiceRoot.py          |  2 +-
+ redfishtool/SessionService.py       | 10 +++++-----
+ redfishtool/Systems.py              | 36 ++++++++++++++++++------------------
+ redfishtool/raw.py                  |  2 +-
+ redfishtool/redfishtoolTransport.py | 29 ++++++++++++++---------------
+ setup.py                            |  1 +
+ 9 files changed, 74 insertions(+), 75 deletions(-)
+
+diff --git a/redfishtool/AccountService.py b/redfishtool/AccountService.py
+index e0ec106..bfb17f8 100644
+--- a/redfishtool/AccountService.py
++++ b/redfishtool/AccountService.py
+@@ -34,7 +34,7 @@ import getopt
+ import re
+ import sys
+ from    .ServiceRoot import RfServiceRoot
+-from   urllib.parse import urljoin
++from   urlparse import urljoin
+ class RfAccountServiceMain():
+     def __init__(self):
+@@ -259,13 +259,13 @@ class RfAccountServiceOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="UserName")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, UserName".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, UserName".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no account was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=accountsLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the session specific by -i or -m or -l
+         # search collection to find path using getPath2 
+@@ -287,14 +287,14 @@ class RfAccountServiceOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the Accounts members
+         else:
+             rft.printVerbose(4,"getting expanded Accounts Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=accountsLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+@@ -324,13 +324,13 @@ class RfAccountServiceOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="IsPredefined")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, IsPredefined".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, IsPredefined".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no account was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=rolesLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the session specific by -i or -m or -l
+         # search collection to find path using getPath2 
+@@ -352,14 +352,14 @@ class RfAccountServiceOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the Accounts members
+         else:
+             rft.printVerbose(4,"getting expanded Roles Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=rolesLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+diff --git a/redfishtool/Chassis.py b/redfishtool/Chassis.py
+index d8f0bf5..0494bd9 100644
+--- a/redfishtool/Chassis.py
++++ b/redfishtool/Chassis.py
+@@ -37,7 +37,7 @@ import getopt
+ import re
+ import sys
+ from    .ServiceRoot import RfServiceRoot
+-from   urllib.parse import urljoin
++from   urlparse import urljoin
+ class RfChassisMain():
+     def __init__(self):
+@@ -208,7 +208,7 @@ class RfChassisOperations():
+             rft.printVerbose(4,"Expand Chassis collection to return ALL Chassis collection members fully expanded in response")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=systemsLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         # otherwise, just return the collection
+         # now read the /Chassis collection
+@@ -217,7 +217,7 @@ class RfChassisOperations():
+             if cmdTop is True:   prop=rft.prop
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=systemsLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," Chassis Collection:",skip1=True, printV12=cmdTop)
++                rft.printVerbose(1," Chassis Collection:", skip1=True, printV12=cmdTop)
+                 
+         return(rc,r,j,d)
+@@ -262,7 +262,7 @@ class RfChassisOperations():
+         #loop through the members and create the list sub-operation response
+         rc,r,j,d=rft.listCollection(rft, r, d, prop="AssetTag")
+         if(rc==0):
+-            rft.printVerbose(1," list {} Collection member info: Id, URI, AssetTag".format(collName,skip1=True, printV12=cmdTop))
++            rft.printVerbose(1," list {} Collection member info: Id, URI, AssetTag".format(collName), skip1=True, printV12=cmdTop)
+         return(rc,r,j,d)
+@@ -439,7 +439,7 @@ class RfChassisOperations():
+         rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=resLink, prop=prop)
+         if(rc==0):
+-            rft.printVerbose(1," {} Resource ".format(resName,skip1=True, printV12=cmdTop))
++            rft.printVerbose(1," {} Resource ".format(resName), skip1=True, printV12=cmdTop)
+         return(rc,r,j,d)
+@@ -464,7 +464,7 @@ class RfChassisOperations():
+         
+         rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=resLink, prop=prop)
+         if(rc==0):
+-            rft.printVerbose(1," {} Resource ".format(resName,skip1=True, printV12=cmdTop))
++            rft.printVerbose(1," {} Resource ".format(resName), skip1=True, printV12=cmdTop)
+             
+         return(rc,r,j,d)
+@@ -521,13 +521,13 @@ class RfChassisOperations():
+             if( prop in powerControl[indx] ):
+                 respDataVal=powerControl[indx][prop]
+                 respData={prop: respDataVal}
+-                rft.printVerbose(1," Get Current Power consumption (PowerConsumedWatts) of PowerControl[{}] resource".format(indx,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get Current Power consumption (PowerConsumedWatts) of PowerControl[{}] resource".format(indx), skip1=True, printV12=cmdTop)
+             else:
+                 rft.printErr("Error: Property {} not not returned in PowerControl[{}] resource".format(prop,indx))
+                 return(4,r,j,d)
+         else:
+             respData=powerControl[indx]  #return the full powerControl array
+-            rft.printVerbose(1," Chassis PowerControl[{}] array:".format(indx,skip1=True, printV12=cmdTop))
++            rft.printVerbose(1," Chassis PowerControl[{}] array:".format(indx), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,respData)
+@@ -684,13 +684,13 @@ class RfChassisOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Name")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no Log was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=logLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -712,7 +712,7 @@ class RfChassisOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+             # If '--Entries' specified, get "Entries" nav link and read it
+             if rc == 0 and rft.gotEntriesOptn:
+diff --git a/redfishtool/Managers.py b/redfishtool/Managers.py
+index 586a871..400dad7 100644
+--- a/redfishtool/Managers.py
++++ b/redfishtool/Managers.py
+@@ -37,7 +37,7 @@ import getopt
+ import re
+ import sys
+ from    .ServiceRoot import RfServiceRoot
+-from   urllib.parse import urljoin
++from   urlparse import urljoin
+ class RfManagersMain():
+     def __init__(self):
+@@ -211,7 +211,7 @@ class RfManagersOperations():
+             rft.printVerbose(4,"Expand Managers collection to return ALL Managers collection members fully expanded in response")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=systemsLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         # otherwise, just return the collection
+         # now read the /Managers collection
+@@ -467,7 +467,7 @@ class RfManagersOperations():
+         rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=resLink, prop=prop)
+         if(rc==0):
+-            rft.printVerbose(1," {} Resource ".format(resName,skip1=True, printV12=cmdTop))
++            rft.printVerbose(1," {} Resource ".format(resName), skip1=True, printV12=cmdTop)
+         return(rc,r,j,d)
+@@ -503,13 +503,13 @@ class RfManagersOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Name")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no NIC was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=nicLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -531,14 +531,14 @@ class RfManagersOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the EthernetInterfaces members
+         else:
+             rft.printVerbose(4,"getting expanded EthernetInterfaces Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=nicLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+@@ -568,13 +568,13 @@ class RfManagersOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Name" )
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no SerialInterfaces controller was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=cntlrLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -596,14 +596,14 @@ class RfManagersOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the SerialInterfaces members
+         else:
+             rft.printVerbose(4,"getting expanded SerialInterfaces Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=cntlrLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+@@ -633,13 +633,12 @@ class RfManagersOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Name")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName,skip1=True, printV12=cmdTop))
+-
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no Log was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=logLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -661,7 +660,7 @@ class RfManagersOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+             # If '--Entries' specified, get "Entries" nav link and read it
+             if rc == 0 and rft.gotEntriesOptn:
+diff --git a/redfishtool/ServiceRoot.py b/redfishtool/ServiceRoot.py
+index 5d85b5d..9395b83 100644
+--- a/redfishtool/ServiceRoot.py
++++ b/redfishtool/ServiceRoot.py
+@@ -12,7 +12,7 @@
+ #
+ import requests
+ import json
+-from urllib.parse import urljoin, urlparse, urlunparse
++from urlparse import urljoin, urlparse, urlunparse
+ class RfServiceRoot:
+     def __init__(self):
+diff --git a/redfishtool/SessionService.py b/redfishtool/SessionService.py
+index 7a07811..c7a1624 100644
+--- a/redfishtool/SessionService.py
++++ b/redfishtool/SessionService.py
+@@ -30,7 +30,7 @@ import getopt
+ import re
+ import sys
+ from    .ServiceRoot import RfServiceRoot
+-from   urllib.parse import urljoin
++from   urlparse import urljoin
+ class RfSessionServiceMain():
+     def __init__(self):
+@@ -267,13 +267,13 @@ class RfSessionServiceOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="UserName")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Socket".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Socket".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no session was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=sessionsLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the session specific by -i or -m or -l
+         # search collection to find path using getPath2 
+@@ -295,14 +295,14 @@ class RfSessionServiceOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the Sessions members
+         else:
+             rft.printVerbose(4,"getting expanded Sessions Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=sessionsLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+diff --git a/redfishtool/Systems.py b/redfishtool/Systems.py
+index 9a7dfbe..9a9148a 100644
+--- a/redfishtool/Systems.py
++++ b/redfishtool/Systems.py
+@@ -39,7 +39,7 @@ import getopt
+ import re
+ import sys
+ from    .ServiceRoot import RfServiceRoot
+-from   urllib.parse import urljoin
++from   urlparse import urljoin
+ class RfSystemsMain():
+     def __init__(self):
+@@ -216,7 +216,7 @@ class RfSystemsOperations():
+             rft.printVerbose(4,"Expand Systems collection to return ALL Systems collection members fully expanded in response")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=systemsLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         # otherwise, just return the collection
+         # now read the /Systems collection
+@@ -419,7 +419,7 @@ class RfSystemsOperations():
+                                          reqData=reqPostData)
+                    
+         if(rc==0):
+-            rft.printVerbose(1," Systems reset: ", resetType, skip1=True, printV12=cmdTop)
++            rft.printVerbose(1,(" Systems reset: {}").format(resetType), skip1=True, printV12=cmdTop)
+             resetd=None
+             return(rc,r,False,resetd)
+         else: return(rc,r,False,None)
+@@ -623,13 +623,13 @@ class RfSystemsOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Socket")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Socket".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Socket".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no proc was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=procsLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -651,14 +651,14 @@ class RfSystemsOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the processor members
+         else:
+             rft.printVerbose(4,"getting expanded Processor Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=procsLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+@@ -688,13 +688,13 @@ class RfSystemsOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Name")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no NIC was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=nicLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -716,14 +716,14 @@ class RfSystemsOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the EthernetInterfaces members
+         else:
+             rft.printVerbose(4,"getting expanded EthernetInterfaces Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=nicLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+@@ -753,13 +753,13 @@ class RfSystemsOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Name" )
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no SimpleStorage controller was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=cntlrLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -781,14 +781,14 @@ class RfSystemsOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+         # else, return ALL of the SimpleStorage members
+         else:
+             rft.printVerbose(4,"getting expanded SimpleStorage Collection")
+             rc,r,j,d=rft.getAllCollectionMembers(rft, r.url, relPath=cntlrLink)
+             if(rc==0):
+-                rft.printVerbose(1," Get ALL {} Collection Members".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," Get ALL {} Collection Members".format(collName), skip1=True, printV12=cmdTop)
+         
+         return(rc,r,j,d)
+@@ -818,13 +818,13 @@ class RfSystemsOperations():
+             #loop through the members and create the list sub-operation response
+             rc,r,j,d=rft.listCollection(rft, r, d, prop="Name")
+             if(rc==0):
+-                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," list {} Collection member info: Id, URI, Name".format(collName), skip1=True, printV12=cmdTop)
+         # else: check if no Log was specified.  If not, return the collection
+         elif(rft.IdLevel2OptnCount==0):
+             rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', r.url, relPath=logLink, prop=prop)
+             if(rc==0):
+-                rft.printVerbose(1," {} Collection ".format(collName,skip1=True, printV12=cmdTop))
++                rft.printVerbose(1," {} Collection ".format(collName), skip1=True, printV12=cmdTop)
+         # else:  check if the -a (all) option is set. If not, return the proc specific by -i or -m
+         # search collection to find path using getPath2 
+@@ -846,7 +846,7 @@ class RfSystemsOperations():
+             elif( r is None ):
+                 rc,r,j,d=rft.rftSendRecvRequest(rft.AUTHENTICATED_API, 'GET', collUrl, relPath=path2, prop=prop)
+                 if(rc==0):
+-                    rft.printVerbose(1," {} Collection Member ".format(collName,skip1=True, printV12=cmdTop))
++                    rft.printVerbose(1," {} Collection Member ".format(collName), skip1=True, printV12=cmdTop)
+             # If '--Entries' specified, get "Entries" nav link and read it
+             if rc == 0 and rft.gotEntriesOptn:
+diff --git a/redfishtool/raw.py b/redfishtool/raw.py
+index bfb617c..1b32d0b 100644
+--- a/redfishtool/raw.py
++++ b/redfishtool/raw.py
+@@ -30,7 +30,7 @@ import getopt
+ import re
+ import sys
+ #from    .ServiceRoot import RfServiceRoot
+-from   urllib.parse import urljoin, urlparse, urlunparse
++from   urlparse import urljoin, urlparse, urlunparse
+ class RfRawMain():
+     def __init__(self):
+diff --git a/redfishtool/redfishtoolTransport.py b/redfishtool/redfishtoolTransport.py
+index 017fa11..f157eff 100644
+--- a/redfishtool/redfishtoolTransport.py
++++ b/redfishtool/redfishtoolTransport.py
+@@ -39,7 +39,7 @@ import json
+ import sys
+ import socket
+ import time
+-from urllib.parse import urljoin, urlparse, urlunparse
++from urlparse import urljoin, urlparse, urlunparse
+ from requests.auth import HTTPBasicAuth, AuthBase
+ from .ServiceRoot import RfServiceRoot
+@@ -730,31 +730,32 @@ class RfTransport():
+          return(0)
+-    def printVerbose(self,v,*argv, skip1=False, printV12=True,**kwargs): 
++    def printVerbose(self,v, argv, skip1=False, printV12=True):
+         if(self.quiet):
+             return(0)
+         if( (v==1 or v==2) and (printV12 is True) and (self.verbose >= v )):
+             if(skip1 is True):  print("#")
+-            print("#",*argv, **kwargs)
++            print("#", argv)
+         elif( (v==1 or v==2) and (self.verbose >4 )):
+             if(skip1 is True):  print("#")
+-            print("#",*argv, **kwargs)            
++            print("#", argv)
+         elif((v==3 ) and (printV12 is True) and (self.verbose >=v)):
+             if(skip1 is True):  print("#")
+-            print("#REQUEST:",*argv,file=sys.stdout,**kwargs)
++            sys.stdout.write("#REQUEST:",argv)
++            # print("#REQUEST:",argv,file=sys.stdout)
+         elif((v==4 or v==5) and (self.verbose >=v)):
+             if(skip1 is True):  print("#")
+-            print("#DB{}:".format(v),*argv,file=sys.stdout,**kwargs)
++            sys.stdout.write("#DB{}:".format(v),argv)
++            # print("#DB{}:".format(v),argv,file=sys.stdout)
+         elif( v==0):  #print no mater value of verbose, but not if quiet=1
+             if(skip1 is True):  print("")
+-            print(*argv, **kwargs)
++            print(argv)
+         else:
+             pass
+         sys.stdout.flush()
+         #if you set v= anything except 0,1,2,3,4,5 it is ignored
+-
+     def printStatus(self, s, r=None, hdrs=None, authMsg=None, addSessionLoginInfo=False): 
+         if(self.quiet):
+             return(0)
+@@ -785,22 +786,20 @@ class RfTransport():
+         sys.stdout.flush()
+         
+-
+-
+-    def printErr(self,*argv,noprog=False,prepend="",**kwargs):
++    def printErr(self,argv,noprog=False,prepend=""):
+         if( self.quiet == False):
+             if(noprog is True):
+-                print(prepend,*argv, file=sys.stderr, **kwargs)
++                sys.stderr.write("{}{}".format(prepend,argv))
+             else:
+-                print(prepend,"  {}:".format(self.program),*argv, file=sys.stderr, **kwargs)
++                sys.stderr.write("{}  {}:{}".format(prepend, self.program, argv))
+         else:
+             pass
+-        
++
+         sys.stderr.flush()
+         return(0)
+-    def printStatusErr4xx(self, status_code,*argv,noprog=False, prepend="",**kwargs):
++    def printStatusErr4xx(self, status_code):
+         if(self.quiet):
+             return(0)
+         if( status_code < 400 ):
+diff --git a/setup.py b/setup.py
+index d37d099..481f429 100644
+--- a/setup.py
++++ b/setup.py
+@@ -1,5 +1,6 @@
+ from setuptools import setup
+ from os import path
++from io import open
+ this_directory = path.abspath(path.dirname(__file__))
+ with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-devtools/python/files/python-unittest2/0001-port-unittest2-argparse-is-part-of-stdlib.patch b/meta-stx/recipes-devtools/python/files/python-unittest2/0001-port-unittest2-argparse-is-part-of-stdlib.patch
new file mode 100644 (file)
index 0000000..c25dc56
--- /dev/null
@@ -0,0 +1,25 @@
+From b21d5406cf8dc1e2ac9da81b04cf9f7b0af32015 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Tue, 25 Feb 2020 21:46:00 +0000
+Subject: [PATCH] port unittest2 argparse is part of stdlib
+
+---
+ setup.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/setup.py b/setup.py
+index 416afbe..00e51ca 100755
+--- a/setup.py
++++ b/setup.py
+@@ -57,7 +57,7 @@ KEYWORDS = "unittest testing tests".split(' ')
+ # Both install and setup requires - because we read VERSION from within the
+ # package, and the package also exports all the APIs.
+ # six for compat helpers
+-REQUIRES = ['argparse', 'six>=1.4', 'traceback2'],
++REQUIRES = ['six>=1.4', 'traceback2'],
+ params = dict(
+     name=NAME,
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-devtools/python/files/requests/0001-close-connection-on-HTTP-413-Request-Entit.patch b/meta-stx/recipes-devtools/python/files/requests/0001-close-connection-on-HTTP-413-Request-Entit.patch
new file mode 100644 (file)
index 0000000..5779d4b
--- /dev/null
@@ -0,0 +1,45 @@
+From 268a1f179e554027637bd2951b24ad44ecb4a1ee Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Wed, 7 Sep 2016 09:10:10 +0000
+Subject: [PATCH] close connection on HTTP 413 Request Entity Too
+ Large
+
+Allow low_conn to retrieve/handle unread response data buffers
+in case ProtocolError or socket.error are raised while sending
+request data.
+---
+ requests/adapters.py | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/requests/adapters.py b/requests/adapters.py
+index fd46325..087258a 100644
+--- a/requests/adapters.py
++++ b/requests/adapters.py
+@@ -466,12 +466,18 @@ class HTTPAdapter(BaseAdapter):
+
+                     low_conn.endheaders()
+
+-                    for i in request.body:
+-                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
+-                        low_conn.send(b'\r\n')
+-                        low_conn.send(i)
+-                        low_conn.send(b'\r\n')
+-                    low_conn.send(b'0\r\n\r\n')
++                    try:
++                        for i in request.body:
++                            low_conn.send(hex(len(i))[2:].encode('utf-8'))
++                            low_conn.send(b'\r\n')
++                            low_conn.send(i)
++                            low_conn.send(b'\r\n')
++                        low_conn.send(b'0\r\n\r\n')
++                    except (ProtocolError, socket.error) as err:
++                        # allow low_conn to retrieve/handle unread response
++                        # data buffers in case ProtocolError or socket.error
++                        # are raised while sending request data
++                        pass
+
+                     # Receive the response from the server
+                     try:
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-devtools/python/python-3parclient_4.2.3.bb b/meta-stx/recipes-devtools/python/python-3parclient_4.2.3.bb
new file mode 100644 (file)
index 0000000..547f44e
--- /dev/null
@@ -0,0 +1,26 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "HPE 3PAR HTTP REST Client"
+HOMEPAGE = "https://pythonhosted.org/python-3parclient/"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://PKG-INFO;md5=c6f4d3b3208673edc0228bbc3ad053cc"
+
+SRC_URI[md5sum] = "845e688b5607a71fc307e8371daf5d40"
+SRC_URI[sha256sum] = "fcd1c5c7d9356f4244a6c0b2b6dd6c64366399642c348b02999ea8fbf79e3a8d"
+
+PYPI_PACKAGE = "python-3parclient"
+inherit setuptools pypi
diff --git a/meta-stx/recipes-devtools/python/python-adal_1.0.2.bb b/meta-stx/recipes-devtools/python/python-adal_1.0.2.bb
new file mode 100644 (file)
index 0000000..c919ddc
--- /dev/null
@@ -0,0 +1,41 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Microsoft Azure Active Directory Authentication Library (ADAL) for Python"
+HOMEPAGE = "https://github.com/AzureAD/azure-activedirectory-library-for-python"
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://setup.py;beginline=2;endline=27;md5=43b81fae9d7baa1f0b1c9774a68ca33a"
+
+
+inherit pypi setuptools
+
+PYPI_PACKAGE = "adal"
+
+SRC_URI[md5sum] = "895791621c696fbbb00dee975260f890" 
+SRC_URI[sha256sum] = "4c020807b3f3cfd90f59203077dd5e1f59671833f8c3c5028ec029ed5072f9ce"
+
+RDEPENDS_${PN} += " \
+       ${PYTHON_PN}-requests \
+       ${PYTHON_PN}-dateutil \
+       ${PYTHON_PN}-pyjwt \
+       ${PYTHON_PN}-crypt \
+       ${PYTHON_PN}-datetime \
+       ${PYTHON_PN}-json \
+       ${PYTHON_PN}-logging \
+       ${PYTHON_PN}-netclient \
+       ${PYTHON_PN}-threading \
+       ${PYTHON_PN}-xml \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-alabaster_0.7.12.bb b/meta-stx/recipes-devtools/python/python-alabaster_0.7.12.bb
new file mode 100644 (file)
index 0000000..5b7f7eb
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+       Alabaster is a visually (c)lean, responsive, configurable theme for the \
+       Sphinx documentation system. It is Python 2+3 compatible. \
+       "
+HOMEPAGE = "https://alabaster.readthedocs.io/en/latest/"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=da053683d66d543813a727e8a30c96ca"
+
+SRC_URI[md5sum] = "3591827fde96d1dd23970fb05410ed04"
+SRC_URI[sha256sum] = "a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02"
+
+PYPI_PACKAGE = "alabaster"
+inherit setuptools pypi
diff --git a/meta-stx/recipes-devtools/python/python-amqp_2.5.2.bb b/meta-stx/recipes-devtools/python/python-amqp_2.5.2.bb
new file mode 100644 (file)
index 0000000..a479667
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Low-level AMQP client for Python"
+HOMEPAGE = "https://pypi.python.org/pypi/amqp/"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=9d6ba772ac59c08a25a12ce15bd5f27b"
+
+SRC_URI[md5sum] = "852ecff645c00f124c78915fcc8ea7c0"
+SRC_URI[sha256sum] = "77f1aef9410698d20eaeac5b73a87817365f457a507d82edf292e12cbb83b08d"
+
+PYPI_PACKAGE = "amqp"
+inherit setuptools pypi
+
+RDEPENDS_${PN} += " \
+       python-vine \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-aniso8601_git.bb b/meta-stx/recipes-devtools/python/python-aniso8601_git.bb
new file mode 100644 (file)
index 0000000..34e5e86
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Another ISO 8601 parser for Python"
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=bf2bdb115b4d685026985cc189ca5375"
+
+SRCREV = "b5aad0fbef5664e624986a10bd1362e0ac05214c"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://bitbucket.org/nielsenb/aniso8601.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-ansible_%.bbappend b/meta-stx/recipes-devtools/python/python-ansible_%.bbappend
new file mode 100644 (file)
index 0000000..5e5058a
--- /dev/null
@@ -0,0 +1,55 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+HOMEPAGE = "https://github.com/ansible/ansible/"
+SECTION = "devel/python"
+LICENSE = "GPLv3"
+LIC_FILES_CHKSUM = "file://COPYING;md5=8f0e2cd40e05189ec81232da84bd6e1a"
+
+PV = "2.8.5"
+
+SRCNAME = "ansible"
+
+SRC_URI = "http://releases.ansible.com/ansible/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "86f0c18250895338709243d997005de3"
+SRC_URI[sha256sum] = "8e9403e755ce8ef27b6066cdd7a4c567aa80ebe2fd90d0ff8efa0a725d246986"
+
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+CLEANBROKEN = "1"
+
+ANSIBLE_WHITELIST_MODULES = "  \
+       cloud \
+       clustering \
+       commands \
+       database \
+       files \
+       identity \
+       inventory \
+       messaging \
+       monitoring \
+       net_tools \
+       network \
+       notification \
+       packaging \
+       remote_management \
+       source_control \
+       storage \
+       system \
+       utilities \
+       web_infrastructure \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-aodhclient_git.bb b/meta-stx/recipes-devtools/python/python-aodhclient_git.bb
new file mode 100644 (file)
index 0000000..e10f005
--- /dev/null
@@ -0,0 +1,61 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "client library for Aodh built on the Aodh API"
+HOMEPAGE = "https://launchpad.net/python-aodhclient"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "6d03e61986418a3a95635405ae65ae9f28a5c61e"
+SRCNAME = "python-aodhclient"
+BRANCH = "stable/train"
+PROTOCOL = "https"
+PV = "1.2.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+       
+inherit setuptools monitor rmargparse
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native\
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-cliff \
+        python-oslo.i18n \
+        python-oslo.serialization \
+        python-oslo.utils \
+        python-keystoneauth1 \
+        python-six \
+        python-osc-lib \
+       python-pyparsing \
+       "
+
+
+do_install_append() {
+       :
+}
+
+FILES_${PN} += " \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-backports-functools-lru-cache_%.bbappend b/meta-stx/recipes-devtools/python/python-backports-functools-lru-cache_%.bbappend
new file mode 100644 (file)
index 0000000..2ed52d4
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit python-backports-init
diff --git a/meta-stx/recipes-devtools/python/python-backports-init/backports/__init__.py b/meta-stx/recipes-devtools/python/python-backports-init/backports/__init__.py
new file mode 100644 (file)
index 0000000..febdb2f
--- /dev/null
@@ -0,0 +1,5 @@
+# A Python "namespace package" http://www.python.org/dev/peps/pep-0382/
+# This always goes inside of a namespace package's __init__.py
+
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/meta-stx/recipes-devtools/python/python-backports-init_1.0.bb b/meta-stx/recipes-devtools/python/python-backports-init_1.0.bb
new file mode 100644 (file)
index 0000000..1848fad
--- /dev/null
@@ -0,0 +1,40 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Helper package to avoid backports/__init__.py conflicts"
+DETAIL = "backports packages in python2 suffer from a flaw in the namespace \
+implementation and can conflict with each other. For OE purposes, at least \
+fix the conflicting install of .../site-packages/backports/__init__.py"
+AUTHOR = "Tim Orling <ticotimo@gmail.com>"
+SECTION = "devel/python"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
+
+SRC_URI = "file://backports/__init__.py"
+
+inherit python-dir
+
+# provide to avoid warnings
+do_compile() {
+    :
+}
+
+do_install() {
+    install -d ${D}${PYTHON_SITEPACKAGES_DIR}/backports
+    install ${WORKDIR}/backports/__init__.py ${D}${PYTHON_SITEPACKAGES_DIR}/backports/
+}
+
+FILES_${PN} = "${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py"
diff --git a/meta-stx/recipes-devtools/python/python-backports-ssl_%.bbappend b/meta-stx/recipes-devtools/python/python-backports-ssl_%.bbappend
new file mode 100644 (file)
index 0000000..2ed52d4
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit python-backports-init
diff --git a/meta-stx/recipes-devtools/python/python-barbican_git.bb b/meta-stx/recipes-devtools/python/python-barbican_git.bb
new file mode 100644 (file)
index 0000000..80f3fdf
--- /dev/null
@@ -0,0 +1,169 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Barbican is a ReST API designed for the secure storage, provisioning and management of secrets."
+HOMEPAGE = "https://wiki.openstack.org/wiki/Barbican"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=fc8be9e7dffe97390d1216b01fd0be01"
+
+PR = "r0"
+SRCNAME = "barbican"
+BARBICAN_MAX_PACKET_SIZE ?= "65535"
+
+SRC_URI = " \
+       git://github.com/openstack/barbican.git;branch=${BRANCH} \
+       file://${BPN}/barbican-fix-path-to-find-configuration-files.patch \
+       file://${BPN}/openstack-barbican-api.service \
+       file://${BPN}/openstack-barbican-worker.service \
+       file://${BPN}/openstack-barbican-keystone-listener.service \
+       file://${BPN}/gunicorn-config.py \
+       "
+
+SRCREV = "4c0ddda941289fba8e5ec4341b5d02d155d46162"
+BRANCH = "stable/stein"
+PV = "8.0.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+inherit update-rc.d setuptools identity hosts useradd default_configs monitor systemd
+
+SYSTEMD_SERVICE_${SRCNAME} = " \
+       openstack-barbican-api.service \
+       openstack-barbican-worker.service \
+       openstack-barbican-keystone-listener.service \
+       "
+
+SYSTEMD_AUTO_ENABLE_${SRCNAME} = "disable"
+
+SERVICECREATE_PACKAGES = "${SRCNAME}-setup"
+KEYSTONE_HOST="${CONTROLLER_IP}"
+
+# USERCREATE_PARAM and SERVICECREATE_PARAM contain the list of parameters to be set.
+# If the flag for a parameter in the list is not set here, the default value will be given to that parameter.
+# Parameters not in the list will be set to empty.
+
+USERCREATE_PARAM_${SRCNAME}-setup = "name pass tenant role email"
+SERVICECREATE_PARAM_${SRCNAME}-setup = "name type description region publicurl adminurl internalurl"
+python () {
+    flags = {'type':'keystore',\
+             'description':'Barbican Key Management Service',\
+             'publicurl':"'http://${KEYSTONE_HOST}:9311/v1'",\
+             'adminurl':"'http://${KEYSTONE_HOST}:9312/v1'",\
+             'internalurl':"'http://${KEYSTONE_HOST}:9313/v1'"}
+    d.setVarFlags("SERVICECREATE_PARAM_%s-setup" % d.getVar('SRCNAME',True), flags)
+}
+SERVICECREATE_PACKAGES[vardeps] += "KEYSTONE_HOST"
+
+do_install_append() {
+    TEMPLATE_CONF_DIR=${S}${sysconfdir}/${SRCNAME}
+    BARBICAN_CONF_DIR=${D}${sysconfdir}/${SRCNAME}
+
+    install -d ${BARBICAN_CONF_DIR}
+    cp -r ${TEMPLATE_CONF_DIR}/* ${BARBICAN_CONF_DIR}
+
+    install -d ${D}${localstatedir}/lib/barbican
+
+    # Install the systemd service files
+    install -d ${D}${systemd_system_unitdir}/
+    install -m 644 ${WORKDIR}/${BPN}/*.service ${D}${systemd_system_unitdir}
+
+    # python-gunicorn and gunicorn-config.py are required by openstack-barbican-api.service
+    install -m 644 ${WORKDIR}/${BPN}/gunicorn-config.py ${BARBICAN_CONF_DIR}
+
+    # Modify barbican-api-paste.ini for gunicorn
+    echo '[server:main]' >> ${BARBICAN_CONF_DIR}/barbican-api-paste.ini
+    echo 'use = egg:gunicorn#main' >> ${BARBICAN_CONF_DIR}/barbican-api-paste.ini
+
+    sed -e "s:%BARBICAN_CONF_DIR%:${sysconfdir}/${SRCNAME}:g" \
+        -i ${D}/${PYTHON_SITEPACKAGES_DIR}/${SRCNAME}/tests/api/test_resources_policy.py
+}
+
+USERADD_PACKAGES = "${PN}"
+GROUPADD_PARAM_${PN} = "--system barbican"
+USERADD_PARAM_${PN}  = "--system --home /var/lib/barbican -g barbican \
+                        --no-create-home --shell /bin/false barbican"
+
+PACKAGES += "${SRCNAME} \
+             ${SRCNAME}-setup "
+
+FILES_${PN} = "${libdir}/* \
+"
+FILES_${SRCNAME} = "${sysconfdir}/${SRCNAME}/* \
+                    ${sysconfdir}/init.d/barbican-api \
+                   ${bindir} \
+                   ${bindir}/* \
+                    ${localstatedir}/* \
+                    ${systemd_system_unitdir} \
+"
+
+ALLOW_EMPTY_${SRCNAME}-setup = "1"
+pkg_postinst_${SRCNAME}-setup () {
+    if [ -z "$D" ]; then
+        chown -R barbican:barbican ${sysconfdir}/${SRCNAME}
+        chown -R barbican:barbican ${localstatedir}/lib/barbican
+    fi
+}
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+# Stx config files
+DEPENDS += " \
+       openstack-barbican-api \
+       "
+
+
+RDEPENDS_${SRCNAME} = "${PN} \
+                       ${SRCNAME}-setup \
+                       uwsgi \
+                       python-falcon \
+                       python-oslo.messaging"
+
+RDEPENDS_${PN} += " \
+        python-pip \
+        python-pbr \
+        python-alembic \
+        python-babel \
+        python-eventlet \
+        python-falcon \
+        python-iso8601 \
+        python-jsonschema \
+        python-kombu \
+        python-netaddr \
+        python-pastedeploy \
+        python-paste \
+        python-pycrypto \
+        python-pysqlite \
+        python-keystoneclient \
+        python-sqlalchemy \
+        python-stevedore \
+        python-webob \
+        python-wsgiref \
+        python-barbicanclient \
+        python-gunicorn \
+       python-castellan \
+       python-ldap3 \
+        "
+
+INITSCRIPT_PACKAGES = "${SRCNAME}"
+INITSCRIPT_NAME_${SRCNAME} = "barbican-api"
+INITSCRIPT_PARAMS_${SRCNAME} = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
+
+MONITOR_SERVICE_PACKAGES = "${SRCNAME}"
+MONITOR_SERVICE_${SRCNAME} = "barbican"
+
+
+FILES_${PN}_append = " ${datadir}/"
diff --git a/meta-stx/recipes-devtools/python/python-beaker_git.bb b/meta-stx/recipes-devtools/python/python-beaker_git.bb
new file mode 100644 (file)
index 0000000..96b104f
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Beaker is a web session and general caching library that includes WSGI middleware for use in web applications."
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5297e0e46f5be6c86a87e35afe958cc7"
+
+SRCREV = "4a2cb747c16dc58af39b1ae56e1cf14dfeb9c9a7"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/bbangert/beaker;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-castellan_git.bb b/meta-stx/recipes-devtools/python/python-castellan_git.bb
new file mode 100644 (file)
index 0000000..aacd868
--- /dev/null
@@ -0,0 +1,49 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Generic Key Manager interface for OpenStack"
+HOMEPAGE = "https://github.com/openstack/castellan"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "8e2929b8779eaa03f15a25da5cf64ef8539a026b"
+SRCNAME = "castellan"
+PROTOCOL = "https"
+BRANCH = "stable/stein"
+S = "${WORKDIR}/git"
+PV = "0.17.0+git${SRCPV}"
+
+SRC_URI = "git://opendev.org/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-babel \
+        python-cryptography \
+        python-barbicanclient \
+        python-oslo.config \
+        python-oslo.context \
+        python-oslo.i18n \
+        python-oslo.log \
+        python-oslo.utils \
+        python-keystoneauth1 \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-cffi_%.bbappend b/meta-stx/recipes-devtools/python/python-cffi_%.bbappend
new file mode 100644 (file)
index 0000000..835739e
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " python-pycparser"
diff --git a/meta-stx/recipes-devtools/python/python-cherrypy_git.bb b/meta-stx/recipes-devtools/python/python-cherrypy_git.bb
new file mode 100644 (file)
index 0000000..d586961
--- /dev/null
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+SUMMARY = "CherryPy is a pythonic, object-oriented HTTP framework"
+DESCRIPTION = "\
+       It allows building web applications in much the same way one would build any other object-oriented program. \
+       This design results in smaller, more readable code being developed faster. It's all just properties and methods. \
+       It is now more than ten years old and has proven fast and very stable. \
+       It is being used in production by many sites, from the simplest to the most demanding. \
+       "
+
+
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE.md;md5=a8cbc5da4e6892b15a972a0b18622b2b"
+
+SRCREV = "994803e4923e53b7079c79f4e9b502cc1b8d0aa6"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "18.2.0"
+
+SRC_URI = "git://github.com/cherrypy/cherrypy.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python python-setuptools-scm-native"
+inherit setuptools distutils pkgconfig
+
+RDEPENDS_${PN} += " \
+       python-compression \
+       python-crypt \
+       python-datetime \
+       python-email \
+       python-fcntl \
+       python-html \
+       python-io \
+       python-json \
+       python-logging \
+       python-netclient \
+       python-netserver \
+       python-profile \
+       python-pydoc \
+       python-xml \
+       python-unixadmin \
+       "
+       
+RDEPENDS_${PN} += " \
+       python-cheroot \
+       python-contextlib2 \
+       python-memcached \
+       python-portend \
+       python-pyopenssl \
+       python-routes \
+       python-simplejson \
+       python-six \
+       python-zc-lockfile \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-cinderclient/cinder-api-check.sh b/meta-stx/recipes-devtools/python/python-cinderclient/cinder-api-check.sh
new file mode 100644 (file)
index 0000000..9e64f8b
--- /dev/null
@@ -0,0 +1,14 @@
+#! /bin/bash
+
+CMD="cinder list"
+
+data=$($CMD 2>&1)
+res=$?
+if [ ${res} -eq 127 ]; then
+    exit 0
+elif [ ${res} -ne 0 ]; then
+    echo "OpenStack \"cinder api\" failed: "
+    echo $data
+    exit $res
+fi
+exit 0
diff --git a/meta-stx/recipes-devtools/python/python-cinderclient_git.bb b/meta-stx/recipes-devtools/python/python-cinderclient_git.bb
new file mode 100644 (file)
index 0000000..3d431d5
--- /dev/null
@@ -0,0 +1,63 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Client for the OpenStack Cinder API"
+HOMEPAGE = "https://opendev.org/openstack/python-cinderclient"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3572962e13e5e739b30b0864365e0795"
+
+SRCREV = "4e17e1d1912f1902a37e4db543e38cdbe3961358"
+SRCNAME = "python-cinderclient"
+BRANCH = "stable/train"
+PROTOCOL = "https"
+PV = "4.1.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native\
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+       bash \
+       python-pbr \
+       python-prettytable \
+       python-keystoneauth1 \
+       python-oslo.i18n \
+       python-oslo.utils \
+       python-six \
+       python-osc-lib \
+       python-babel \
+       python-requests \
+       python-simplejson \
+       "
+
+
+do_install_append() {
+       :
+}
+
+FILES_${PN} += " \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-cliff_2.14.1.bb b/meta-stx/recipes-devtools/python/python-cliff_2.14.1.bb
new file mode 100644 (file)
index 0000000..bfcd2b7
--- /dev/null
@@ -0,0 +1,47 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Command Line Interface Formulation Framework"
+HOMEPAGE = "https://github.com/dreamhost/cliff"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRC_URI[md5sum] = "4c5d43d98970c79b091e25676cce2b10"
+SRC_URI[sha256sum] = "b47387a344898ccb28ca7f386f017ade9eb66dc1713e5c642a0bc09ec606cc67"
+
+inherit setuptools pypi
+
+DEPENDS += "\
+    python-pbr \
+    "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+    python-pbr-native \
+    "
+
+RDEPENDS_${PN} += "python-prettytable \
+            python-cmd2 \
+            python-pbr \
+            python-pyparsing \
+            python-prettytable \
+            python-six \
+            python-stevedore \
+            python-unicodecsv \
+            python-pyyaml \
+"
+
+CLEANBROKEN = "1"
diff --git a/meta-stx/recipes-devtools/python/python-configobj_git.bb b/meta-stx/recipes-devtools/python/python-configobj_git.bb
new file mode 100644 (file)
index 0000000..2e88509
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "ConfigObj is a simple but powerful config file reader and writer: an ini file round tripper. Its main feature is that it is very easy to use, with a straightforward programmer’s interface and a simple syntax for config files."
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=9a851af36881461de16b8fecf59a9e17"
+
+SRCREV = "45fbf1b85b181853caea4d251e6d4c0232735e85"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/DiffSK/configobj;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-configshell_git.bb b/meta-stx/recipes-devtools/python/python-configshell_git.bb
new file mode 100644 (file)
index 0000000..e43ad0e
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+ConfigShell is a Python library that provides a framework for building simple but nice CLI-based applications running both as single-command tools and interactive shells providing a UNIX filesystem-like navigation interface, as well as full autocompletion support and interactive inline help. It is part of LIO. \
+       "
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://COPYING;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "020d540850ca36f31af68e9b545a520f1122ea69"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/Datera/configshell.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python python-pyparsing-native"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-construct_git.bb b/meta-stx/recipes-devtools/python/python-construct_git.bb
new file mode 100644 (file)
index 0000000..d0a46f0
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Construct is a powerful declarative and symmetrical parser and builder for binary data"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3fd0f2c25089e629957285e6bc402a20"
+
+SRCREV = "a6a45f09da003b278cc5955486ad17d9144c136b"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/construct/construct.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python python-pbr-native"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-d2to1_git.bb b/meta-stx/recipes-devtools/python/python-d2to1_git.bb
new file mode 100644 (file)
index 0000000..ec80926
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "d2to1 (the ‘d’ is for ‘distutils’) allows using distutils2-like setup.cfg files for a package’s metadata with a distribute/setuptools setup.py script. It works by providing a distutils2-formatted setup.cfg file containing all of a package’s metadata, and a very minimal setup.py which will slurp its arguments from the setup.cfg."
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=ec5c9882880dafb7f5a0b8d2642ea581"
+
+SRCREV = "d16d8fdcec77a6d3fcbd10ce23aa8810d8dd3bf2"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/embray/d2to1;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-dateutil.inc b/meta-stx/recipes-devtools/python/python-dateutil.inc
new file mode 100644 (file)
index 0000000..aafb569
--- /dev/null
@@ -0,0 +1,38 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Extensions to the standard Python datetime module"
+DESCRIPTION = "The dateutil module provides powerful extensions to the datetime module available in the Python standard library."
+HOMEPAGE = "https://dateutil.readthedocs.org"
+LICENSE = "BSD-3-Clause & Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=e3155c7bdc71f66e02678411d2abf996"
+
+SRC_URI[md5sum] = "f2a1d4b680b297b367a974664ca3a4f6"
+SRC_URI[sha256sum] = "73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"
+
+PYPI_PACKAGE = "python-dateutil"
+inherit pypi
+
+PACKAGES =+ "${PN}-zoneinfo"
+FILES_${PN}-zoneinfo = "${libdir}/${PYTHON_DIR}/site-packages/dateutil/zoneinfo"
+
+DEPENDS += "${PYTHON_PN}-setuptools-scm-native"
+
+RDEPENDS_${PN}_class-target = "\
+    ${PYTHON_PN}-datetime \
+    ${PYTHON_PN}-numbers \
+    ${PYTHON_PN}-six \
+    ${PYTHON_PN}-stringold \
+"
diff --git a/meta-stx/recipes-devtools/python/python-dateutil_2.8.1.bb b/meta-stx/recipes-devtools/python/python-dateutil_2.8.1.bb
new file mode 100644 (file)
index 0000000..dfb2f2b
--- /dev/null
@@ -0,0 +1,17 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit setuptools
+require python-dateutil.inc
diff --git a/meta-stx/recipes-devtools/python/python-defusedxml_0.6.0.bb b/meta-stx/recipes-devtools/python/python-defusedxml_0.6.0.bb
new file mode 100644 (file)
index 0000000..e642c1c
--- /dev/null
@@ -0,0 +1,26 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "defusedxml -- defusing XML bombs and other exploits"
+HOMEPAGE = "https://github.com/tiran/defusedxml"
+
+LICENSE = "Python-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=056fea6a4b395a24d0d278bf5c80249e"
+# TODO(review): stray checksum left here with no context (a5c8025e305fb49e6d405769358851f6) — remove or document its purpose
+
+SRC_URI[md5sum] = "a59741f675c4cba649de40a99f732897"
+SRC_URI[sha256sum] = "f684034d135af4c6cbb949b8a4d2ed61634515257a67299e5f940fbaa34377f5"
+
+inherit pypi setuptools
diff --git a/meta-stx/recipes-devtools/python/python-django-babel_git.bb b/meta-stx/recipes-devtools/python/python-django-babel_git.bb
new file mode 100644 (file)
index 0000000..02f2966
--- /dev/null
@@ -0,0 +1,39 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Tools for using Babel with Django"
+HOMEPAGE = "https://github.com/python-babel/django-babel"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://COPYING;md5=5ae97ab65116b8d7890c59de57577b46"
+
+SRCREV = "1da5c0ba7ef3b12810154d9e64b3e847ecbb06cc"
+SRCNAME = "django-babel"
+BRANCH = "master"
+PROTOCOL = "https"
+PV = "0.6.2+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/python-babel/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+       
+inherit setuptools
+
+DEPENDS += " python-pip"
+
+RDEPENDS_${PN}_append = " \
+       python-django \
+       python-babel \
+       "
+
diff --git a/meta-stx/recipes-devtools/python/python-django-debreach_git.bb b/meta-stx/recipes-devtools/python/python-django-debreach_git.bb
new file mode 100644 (file)
index 0000000..7ef43e3
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Basic and extra mitigation against the BREACH attack for Django projects."
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=04c6b8f6ee56b21d30ecb172b066902c"
+
+
+SRCREV = "b425bb719ea5de583fae7db5b7419e5fed569cb0"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/lpomfrey/django-debreach.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-django-horizon_15.1.0.bb b/meta-stx/recipes-devtools/python/python-django-horizon_15.1.0.bb
new file mode 100644 (file)
index 0000000..a1397e6
--- /dev/null
@@ -0,0 +1,102 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Horizon is the OpenStack Dashboard, a Django-based web user interface for OpenStack services."
+HOMEPAGE = "http://horizon.openstack.org/"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+inherit setuptools 
+
+SRC_URI[md5sum] = "21c32ba58806b351ede4eca6804e6d3e"
+SRC_URI[sha256sum] = "25cf663f8f9a0233edbd5ba322acc28805fca684df28290c3e62a79abf6724e9"
+
+S = "${WORKDIR}/horizon-${PV}"
+
+SRC_URI = "https://tarballs.openstack.org/horizon/horizon-${PV}.tar.gz"
+
+DEPENDS += " \
+       python-pip \
+       python-pbr-native \
+       "
+
+
+RDEPENDS_${PN} = " \
+       python-pbr \
+       python-babel \
+       python-django \
+       python-django \
+       python-django-babel \
+       python-django-compressor \
+       python-django-debreach \
+       python-django-pyscss \
+       python-futurist \
+       python-iso8601 \
+       python-keystoneauth1 \
+       python-netaddr \
+       python-oslo.concurrency \
+       python-oslo.config \
+       python-oslo.i18n \
+       python-oslo.policy \
+       python-oslo.serialization \
+       python-oslo.upgradecheck \
+       python-oslo.utils \
+       python-osprofiler \
+       python-pint \
+       python-pymongo \
+       python-pyscss \
+       python-cinderclient \
+       python-glanceclient \
+       python-keystoneclient \
+       python-neutronclient \
+       python-novaclient \
+       python-swiftclient \
+       python-pytz \
+       python-pyyaml \
+       python-requests \
+       python-semantic-version \
+       python-six \
+       python-xstatic \
+       python-xstatic-angular \
+       python-xstatic-angular-bootstrap \
+       python-xstatic-angular-fileupload \
+       python-xstatic-angular-gettext \
+       python-xstatic-angular-lrdragndrop \
+       python-xstatic-angular-schema-form \
+       python-xstatic-bootstrap-datepicker \
+       python-xstatic-bootstrap-scss \
+       python-xstatic-bootswatch \
+       python-xstatic-d3 \
+       python-xstatic-hogan \
+       python-xstatic-font-awesome \
+       python-xstatic-jasmine \
+       python-xstatic-jquery \
+       python-xstatic-jquery-migrate \
+       python-xstatic-jquery.quicksearch \
+       python-xstatic-jquery.tablesorter \
+       python-xstatic-jquery-ui \
+       python-xstatic-jsencrypt \
+       python-xstatic-mdi \
+       python-xstatic-objectpath \
+       python-xstatic-mdi \
+       python-xstatic-objectpath \
+       python-xstatic-rickshaw \
+       python-xstatic-roboto-fontface \
+       python-xstatic-smart-table \
+       python-xstatic-spin \
+       python-xstatic-term.js \
+       python-xstatic-tv4 \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-django-horizon_15.1.0.bbappend b/meta-stx/recipes-devtools/python/python-django-horizon_15.1.0.bbappend
new file mode 100644 (file)
index 0000000..ea77f18
--- /dev/null
@@ -0,0 +1,131 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/guni_config.py \
+       file://${BPN}/horizon-assets-compress \
+       file://${BPN}/horizon-clearsessions \
+       file://${BPN}/horizon.init \
+       file://${BPN}/horizon-patching-restart \
+       file://${BPN}/openstack-dashboard-httpd-2.4.conf \
+       file://${BPN}/openstack-dashboard-httpd-logging.conf \
+       file://${BPN}/python-django-horizon-logrotate.conf \
+       file://${BPN}/python-django-horizon-systemd.conf \
+       "
+
+do_configure_prepend () {
+       cd ${S}
+
+       # STX remove troublesome files introduced by tox
+       rm -f openstack_dashboard/test/.secret_key_store
+       rm -f openstack_dashboard/test/*.secret_key_store.lock
+       rm -f openstack_dashboard/local/.secret_key_store
+       rm -f openstack_dashboard/local/*.secret_key_store.lock
+       rm -rf horizon.egg-info
+
+       # drop config snippet
+       cp -p ${WORKDIR}/${BPN}/openstack-dashboard-httpd-logging.conf .
+       cp -p ${WORKDIR}/${BPN}/guni_config.py .
+
+       # customize default settings
+       # WAS [PATCH] disable debug, move web root
+       sed -i "/^DEBUG =.*/c\DEBUG = False" openstack_dashboard/local/local_settings.py.example
+       sed -i "/^WEBROOT =.*/c\WEBROOT = '/dashboard/'" openstack_dashboard/local/local_settings.py.example
+       sed -i "/^.*ALLOWED_HOSTS =.*/c\ALLOWED_HOSTS = ['horizon.example.com', 'localhost']" openstack_dashboard/local/local_settings.py.example
+       sed -i "/^.*LOCAL_PATH =.*/c\LOCAL_PATH = '/tmp'" openstack_dashboard/local/local_settings.py.example
+       sed -i "/^.*POLICY_FILES_PATH =.*/c\POLICY_FILES_PATH = '/etc/openstack-dashboard'" openstack_dashboard/local/local_settings.py.example
+
+       sed -i "/^BIN_DIR = .*/c\BIN_DIR = '/usr/bin'" openstack_dashboard/settings.py
+       sed -i "/^COMPRESS_PARSER = .*/a COMPRESS_OFFLINE = True" openstack_dashboard/settings.py
+
+       # set COMPRESS_OFFLINE=True
+       sed -i 's:COMPRESS_OFFLINE.=.False:COMPRESS_OFFLINE = True:' openstack_dashboard/settings.py
+
+       # STX: MANIFEST needs .eslintrc files for angular
+       echo "include .eslintrc"   >> MANIFEST.in
+
+       # MANIFEST needs to include json and pot files under openstack_dashboard
+       echo "recursive-include openstack_dashboard *.json *.pot .eslintrc"   >> MANIFEST.in
+
+       # MANIFEST needs to include pot files  under horizon
+       echo "recursive-include horizon *.pot .eslintrc"   >> MANIFEST.in
+}
+
+do_install_append () {
+       cd ${S}
+
+       # STX
+       install -d -m 755 ${D}/opt/branding
+       mkdir -p ${D}${sysconfdir}/rc.d/init.d
+       install -m 755 -D -p ${WORKDIR}/${BPN}/horizon.init ${D}${sysconfdir}/rc.d/init.d/horizon
+       install -m 755 -D -p ${WORKDIR}/${BPN}/horizon-clearsessions ${D}/${bindir}/horizon-clearsessions
+       install -m 755 -D -p ${WORKDIR}/${BPN}/horizon-patching-restart ${D}/${bindir}/horizon-patching-restart
+       install -m 755 -D -p ${WORKDIR}/${BPN}/horizon-assets-compress ${D}/${bindir}/horizon-assets-compress
+
+       # drop httpd-conf snippet
+       install -m 0644 -D -p ${WORKDIR}/${BPN}/openstack-dashboard-httpd-2.4.conf ${D}${sysconfdir}/httpd/conf.d/openstack-dashboard.conf
+       install -d -m 755 ${D}${datadir}/openstack-dashboard
+       install -d -m 755 ${D}${sysconfdir}/openstack-dashboard
+
+       # create directory for systemd snippet
+       mkdir -p ${D}${systemd_system_unitdir}/httpd.service.d/
+       cp ${WORKDIR}/${BPN}/python-django-horizon-systemd.conf ${D}${systemd_system_unitdir}/httpd.service.d/openstack-dashboard.conf
+
+       # Copy everything to /usr/share
+       mv ${D}${libdir}/python2.7/site-packages/openstack_dashboard \
+          ${D}${datadir}/openstack-dashboard
+       cp manage.py ${D}${datadir}/openstack-dashboard
+
+       # STX
+       cp guni_config.py ${D}${datadir}/openstack-dashboard
+       rm -rf ${D}${libdir}/python2.7/site-packages/openstack_dashboard
+
+       # remove unnecessary .po files
+       find ${D} -name django.po -exec rm '{}' \;
+       find ${D} -name djangojs.po -exec rm '{}' \;
+
+       # Move config to /etc, symlink it back to /usr/share
+       mv ${D}${datadir}/openstack-dashboard/openstack_dashboard/local/local_settings.py.example ${D}${sysconfdir}/openstack-dashboard/local_settings
+
+       mv ${D}${datadir}/openstack-dashboard/openstack_dashboard/conf/*.json ${D}${sysconfdir}/openstack-dashboard
+       cp -a  ${S}/openstack_dashboard/conf/cinder_policy.d ${D}${sysconfdir}/openstack-dashboard
+       cp -a  ${S}/openstack_dashboard/conf/nova_policy.d ${D}${sysconfdir}/openstack-dashboard
+
+       # copy static files to ${datadir}/openstack-dashboard/static
+       mkdir -p ${D}${datadir}/openstack-dashboard/static
+       cp -a openstack_dashboard/static/* ${D}${datadir}/openstack-dashboard/static
+       cp -a horizon/static/* ${D}${datadir}/openstack-dashboard/static
+
+       # create /var/run/openstack-dashboard/ and /var/log/horizon
+       install -m 0755 -d ${D}/${sysconfdir}/tmpfiles.d
+       echo "d ${localstatedir}/run/openstack-dashboard 0755 root root -" >> ${D}/${sysconfdir}/tmpfiles.d/openstack-dashboard.conf
+       echo "d ${localstatedir}/log/horizon 0755 root root -" >> ${D}/${sysconfdir}/tmpfiles.d/openstack-dashboard.conf
+
+       # place logrotate config:
+       mkdir -p ${D}${sysconfdir}/logrotate.d
+       cp -a ${WORKDIR}/${BPN}/python-django-horizon-logrotate.conf ${D}${sysconfdir}/logrotate.d/openstack-dashboard
+
+       chown -R root:root ${D}
+}
+
+FILES_${PN} += "\
+       ${datadir}/openstack-dashboard \
+       ${systemd_system_unitdir} \
+       ${localstatedir} \
+       /opt \
+       "
+
+RPROVIDES_${PN} = "openstack-dashboard"
diff --git a/meta-stx/recipes-devtools/python/python-django-openstack-auth_git.bbappend b/meta-stx/recipes-devtools/python/python-django-openstack-auth_git.bbappend
new file mode 100644 (file)
index 0000000..ef4bf7a
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DEPENDS += "\
+       python-pbr-native \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-django_git.bb b/meta-stx/recipes-devtools/python/python-django_git.bb
new file mode 100644 (file)
index 0000000..4346361
--- /dev/null
@@ -0,0 +1,36 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Django is a high-level Python Web framework that encourages rapid development and clean, pragmatic design"
+HOMEPAGE = "https://www.djangoproject.com/"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause & Python-2.0"
+LIC_FILES_CHKSUM = " \
+       file://LICENSE;md5=f09eb47206614a4954c51db8a94840fa\
+       file://LICENSE.python;md5=6b60258130e4ed10d3101517eb5b9385 \
+       "
+
+SRCREV = "1c9cb948d7b0c264d244763b6682ab790a6b90a0"
+SRCNAME = "django"
+BRANCH = "stable/1.11.x"
+PROTOCOL = "https"
+PV = "1.11.20+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/django/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+FILES_${PN} += "${datadir}/django/"
diff --git a/meta-stx/recipes-devtools/python/python-django_git.bbappend b/meta-stx/recipes-devtools/python/python-django_git.bbappend
new file mode 100644 (file)
index 0000000..1536c26
--- /dev/null
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append() {
+    install -d ${D}/${sysconfdir}/bash_completion.d
+    install -m 755 ${S}/extras/django_bash_completion ${D}/${sysconfdir}/bash_completion.d
+}
+
+PACKAGES =+ "${PN}-bash-completion"
+
+FILES_${PN}-bash-completion = "${sysconfdir}/bash_completion.d/*"
diff --git a/meta-stx/recipes-devtools/python/python-docker_3.3.0.bb b/meta-stx/recipes-devtools/python/python-docker_3.3.0.bb
new file mode 100644 (file)
index 0000000..820a685
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A Python library for the Docker Engine API"
+HOMEPAGE = "https://pypi.org/project/docker/3.3.0"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34f3846f940453127309b920eeb89660"
+
+SRC_URI[md5sum] = "660c3c0c3776cb88f34fc549d7c89ed2"
+SRC_URI[sha256sum] = "dc5cc0971a0d36fe94c5ce89bd4adb6c892713500af7b0818708229c3199911a"
+
+inherit setuptools pypi
+
+RDEPENDS_${PN}_append = " \
+       python-docker-pycreds \
+       python-backports-ssl \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-ethtool_git.bb b/meta-stx/recipes-devtools/python/python-ethtool_git.bb
new file mode 100644 (file)
index 0000000..39e743b
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Python bindings for the ethtool kernel interface"
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe"
+
+SRCREV = "b8b09b69ef6d4f8a6f12a6c441305790060dd829"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/fedora-python/python-ethtool.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " libnl python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-eventlet_%.bbappend b/meta-stx/recipes-devtools/python/python-eventlet_%.bbappend
new file mode 100644 (file)
index 0000000..e4f4cd7
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " file://eventlet/0001-CGTS-2869-close-connection-on-HTTP-413-Request-Entit.patch"
diff --git a/meta-stx/recipes-devtools/python/python-firewall_git.bb b/meta-stx/recipes-devtools/python/python-firewall_git.bb
new file mode 100644 (file)
index 0000000..a4f1121
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Temporarily allow access to your current IP address into security groups, and automatically closes the hole when quitting. Convenient for SSH-ing into a box from a home or from any off-site location without worrying about leaving SSH open to the world or dealing with VPN."
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=7f1e0f09645893c55fb92d53ccea0a57"
+
+SRCREV = "59b63088b8b791cc495d809240f94fddaa87dc55"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/mattrobenolt/firewall;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-futurist_1.8.1.bb b/meta-stx/recipes-devtools/python/python-futurist_1.8.1.bb
new file mode 100644 (file)
index 0000000..c7d671e
--- /dev/null
@@ -0,0 +1,40 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Useful additions to futures, from the future"
+HOMEPAGE = "https://pypi.python.org/pypi/futurist"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRC_URI[md5sum] = "3e5a4b8254ded1624ec807cb7ae04ba5"
+SRC_URI[sha256sum] = "499ee57728a987028725f836ac22aa18899702162fa0a0f1bbe2ecd5c8daf1eb"
+
+inherit setuptools pypi
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-six \
+        python-monotonic \
+        python-futures \
+        python-contextlib2 \
+        python-prettytable \
+        "
+
diff --git a/meta-stx/recipes-devtools/python/python-glanceclient_git.bb b/meta-stx/recipes-devtools/python/python-glanceclient_git.bb
new file mode 100644 (file)
index 0000000..39d09aa
--- /dev/null
@@ -0,0 +1,62 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Client library for Glance built on the OpenStack Images API."
+HOMEPAGE = "https://opendev.org/openstack/python-glanceclient"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+
+SRCREV = "44a4dbd6ce2642daeaca9f45ac99e2d1b39e805a"
+SRCNAME = "python-glanceclient"
+BRANCH = "stable/train"
+PROTOCOL = "https"
+PV = "2.16.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools monitor rmargparse
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native\
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+       bash \
+        python-pbr \
+        python-prettytable \
+        python-keystoneauth1 \
+       python-warlock \
+        python-six \
+        python-oslo.utils \
+        python-oslo.i18n \
+       python-wrapt \
+       python-pyopenssl \
+       "
+
+
+do_install_append() {
+       :
+}
+
+FILES_${PN} += " \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-gnocchiclient_7.0.4.bb b/meta-stx/recipes-devtools/python/python-gnocchiclient_7.0.4.bb
new file mode 100644 (file)
index 0000000..5e1e933
--- /dev/null
@@ -0,0 +1,36 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "python-gnocchiclient"
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "64814b9ace54e0151e9c28f4e57b87dafc984241"
+S = "${WORKDIR}/git"
+PV = "7.0.4"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRC_URI = "git://github.com/gnocchixyz/python-gnocchiclient.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+DEPENDS += " \
+       python \
+       python-pbr-native \
+       "
+
+RDEPENDS_${PN}_append = " python-ujson"
+
+inherit setuptools
diff --git a/meta-stx/recipes-devtools/python/python-google-auth_git.bb b/meta-stx/recipes-devtools/python/python-google-auth_git.bb
new file mode 100644 (file)
index 0000000..72789fd
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "This library simplifies using Google’s various server-to-server authentication mechanisms to access Google APIs."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=86d3f3a95c324c9479bd8986968f4327"
+
+SRCREV = "1322d896ba725b8d73fd7ac4793601d9f574a839"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/googleapis/google-auth-library-python.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+RDEPENDS_${PN}_append = " python-rsa python-pyasn1-modules"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-gunicorn_git.bb b/meta-stx/recipes-devtools/python/python-gunicorn_git.bb
new file mode 100644 (file)
index 0000000..65b2ffa
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Gunicorn 'Green Unicorn' is a Python WSGI HTTP Server for UNIX. It's a pre-fork worker model ported from Ruby's Unicorn project. The Gunicorn server is broadly compatible with various web frameworks, simply implemented, light on server resource usage, and fairly speedy."
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=f75f3fb94cdeab1d607e2adaa6077752"
+
+
+SRCREV = "67949dc71c54e435e62c1fcedc2315a9098d54ea"
+PROTOCOL = "https"
+BRANCH = "19.x"
+S = "${WORKDIR}/git"
+PV = "19.10.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/benoitc/gunicorn.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-hgtools_6.3.bbappend b/meta-stx/recipes-devtools/python/python-hgtools_6.3.bbappend
new file mode 100644 (file)
index 0000000..c2aa92d
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+BBCLASSEXTEND_append = " native nativesdk"
diff --git a/meta-stx/recipes-devtools/python/python-horizon_git.bbappend b/meta-stx/recipes-devtools/python/python-horizon_git.bbappend
new file mode 100644 (file)
index 0000000..ef4bf7a
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DEPENDS += "\
+       python-pbr-native \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-imagesize_1.2.0.bb b/meta-stx/recipes-devtools/python/python-imagesize_1.2.0.bb
new file mode 100644 (file)
index 0000000..ead33f5
--- /dev/null
@@ -0,0 +1,28 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+       This module analyzes JPEG/JPEG 2000/PNG/GIF/TIFF/SVG image headers and returns image size. \
+       "
+HOMEPAGE = "https://github.com/shibukawa/imagesize_py"
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE.rst;md5=0c128f0f7e8a02e1b83884c0b5a41cda"
+
+SRC_URI[md5sum] = "3a1e124594183778a8f87e4bcdb6dca9"
+SRC_URI[sha256sum] = "b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"
+
+PYPI_PACKAGE = "imagesize"
+inherit setuptools pypi
diff --git a/meta-stx/recipes-devtools/python/python-importlib-metadata_0.23.bb b/meta-stx/recipes-devtools/python/python-importlib-metadata_0.23.bb
new file mode 100644 (file)
index 0000000..da8b2ab
--- /dev/null
@@ -0,0 +1,42 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Read metadata from Python packages"
+DESCRIPTION = "Read metadata from Python packages"
+HOMEPAGE = "https://pypi.org/project/importlib-metadata/"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=e88ae122f3925d8bde8319060f2ddb8e"
+
+SRC_URI[md5sum] = "80d677d744995336c9c22d21a85ddeb8"
+SRC_URI[sha256sum] = "aa18d7378b00b40847790e7c27e11673d7fed219354109d0e7b9e5b25dc3ad26"
+
+DEPENDS += "${PYTHON_PN}-setuptools-scm-native"
+
+PYPI_PACKAGE = "importlib_metadata"
+
+inherit pypi setuptools
+
+S = "${WORKDIR}/importlib_metadata-${PV}"
+
+RDEPENDS_${PN} += "\
+    ${PYTHON_PN}-zipp \
+    ${PYTHON_PN}-pathlib2 \
+    python-compression \
+    python-configparser \
+    python-contextlib2 \
+    python-pathlib2 \
+"
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-stx/recipes-devtools/python/python-influxdb_git.bb b/meta-stx/recipes-devtools/python/python-influxdb_git.bb
new file mode 100644 (file)
index 0000000..ce49699
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "InfluxDB is an open-source distributed time series database, find more about InfluxDB at https://docs.influxdata.com/influxdb/latest"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=046523829184aac3703a4c60c0ae2104"
+
+SRCREV = "dc83fc6576b6463dcc77a0c101475a2a71ed655a"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/influxdata/influxdb-python.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-iniparse_git.bb b/meta-stx/recipes-devtools/python/python-iniparse_git.bb
new file mode 100644 (file)
index 0000000..d3323de
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Iniparse is a INI parser for Python"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=52f28065af11d69382693b45b5a8eb54"
+
+SRCREV = "0305d08121461776222e515ae21a8405ee68eef4"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/candlepin/python-iniparse.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+
+inherit setuptools distutils
diff --git a/meta-stx/recipes-devtools/python/python-ironicclient_2.7.0.bb b/meta-stx/recipes-devtools/python/python-ironicclient_2.7.0.bb
new file mode 100644 (file)
index 0000000..33d4683
--- /dev/null
@@ -0,0 +1,39 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "python-ironicclient"
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+PV = "2.7.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRC_URI = "https://tarballs.openstack.org/python-ironicclient/python-ironicclient-${PV}.tar.gz"
+SRC_URI[md5sum] = "6b13e133eb0c521a09c377f28fef139e"
+
+DEPENDS += " \
+       python \
+       python-pbr-native \
+       "
+
+inherit setuptools
+
+RDEPENDS_${PN}_append = " \
+       bash \
+       python-dogpile.cache \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-jmespath_git.bb b/meta-stx/recipes-devtools/python/python-jmespath_git.bb
new file mode 100644 (file)
index 0000000..a62033b
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "JMESPath (pronounced “james path”) allows you to declaratively specify how to extract elements from a JSON document."
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=2683790f5fabb41a3f75b70558799eb4"
+
+PV = "0.9.4"
+
+SRCREV = "4a4f6dbd98549b2b8fc71bbc19860d317a6abfdb"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/jmespath/jmespath.py;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-jwcrypto_git.bb b/meta-stx/recipes-devtools/python/python-jwcrypto_git.bb
new file mode 100644 (file)
index 0000000..9eb282f
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "An implementation of the JOSE Working Group documents: RFC 7515 - JSON Web Signature (JWS) RFC 7516 - JSON Web Encryption (JWE) RFC 7517 - JSON Web Key (JWK) RFC 7518 - JSON Web Algorithms (JWA) RFC 7519 - JSON Web Token (JWT) RFC 7520 - Examples of Protecting Content Using JSON Object Signing and Encryption (JOSE)"
+
+LICENSE = "LGPL-3.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=e6a600fd5e1d9cbde2d983680233ad02"
+
+SRCREV = "437ea86caef224cf769e30cafe30f1c0b4e0f3e2"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/latchset/jwcrypto.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+
+inherit setuptools distutils
diff --git a/meta-stx/recipes-devtools/python/python-jwt_git.bb b/meta-stx/recipes-devtools/python/python-jwt_git.bb
new file mode 100644 (file)
index 0000000..af74654
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A Python implementation of JSON Web Token (JWT) as described by RFC 7519"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=54830c88a42666c6835a0f834f93a521"
+
+SRCREV = "39f0ee6fd6d71ee335b1229b91ab0bb47bdd71f0"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/GehirnInc/python-jwt;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-keyring_5.3.bbappend b/meta-stx/recipes-devtools/python/python-keyring_5.3.bbappend
new file mode 100644 (file)
index 0000000..ac82982
--- /dev/null
@@ -0,0 +1,32 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+      file://python-keyring/no_keyring_password.patch \
+      file://python-keyring/lock_keyring_file.patch \
+      file://python-keyring/lock_keyring_file2.patch \
+      file://python-keyring/use_new_lock.patch \
+      file://python-keyring/fix_keyring_lockfile_location.patch \
+      file://python-keyring/use_temporary_file.patch \
+      file://python-keyring/chown_keyringlock_file.patch \
+      file://python-keyring/chmod_keyringlock2.patch \
+      file://python-keyring/keyring_path_change.patch \
+      file://python-keyring/remove-reader-lock.patch \
+      file://python-keyring/remove_others_perms_on_keyringcfg_file.patch \
+"
+
+DEPENDS += " python-hgtools-native"
diff --git a/meta-stx/recipes-devtools/python/python-keystone_git.bb b/meta-stx/recipes-devtools/python/python-keystone_git.bb
new file mode 100644 (file)
index 0000000..e6f3b6c
--- /dev/null
@@ -0,0 +1,317 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Authentication service for OpenStack"
+HOMEPAGE = "http://www.openstack.org"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "c78581b4608f3dc10e945d358963000f284f188a"
+SRCNAME = "keystone"
+PROTOCOL = "git"
+BRANCH = "stable/stein"
+S = "${WORKDIR}/git"
+PV = "15.0.0+git${SRCPV}"
+# PV 15.0.0 corresponds to the stable/stein BRANCH above; keep the two in sync when rebasing.
+SRC_URI = " \
+       git://opendev.org/openstack/${SRCNAME}.git;protocol=${PROTOCOL};branch=${BRANCH} \
+       file://${PN}/keystone.conf \
+       file://${PN}/identity.sh \
+       file://${PN}/convert_keystone_backend.py \
+       file://${PN}/wsgi-keystone.conf \
+       file://${PN}/admin-openrc \
+       file://${PN}/keystone-init.service \
+       file://${PN}/stx-files/openstack-keystone.service \
+       file://${PN}/stx-files/keystone-all \
+       file://${PN}/stx-files/keystone-fernet-keys-rotate-active \
+       file://${PN}/stx-files/public.py \
+       file://${PN}/stx-files/password-rules.conf \
+       "
+
+# NOTE(review): identity, hosts, default_configs and monitor are bbclasses that must be provided by this or another enabled layer.
+inherit setuptools identity hosts default_configs monitor useradd systemd
+
+SERVICE_TOKEN = "password"
+TOKEN_FORMAT ?= "PKI"
+
+USERADD_PACKAGES = "${PN}"
+USERADD_PARAM_${PN} = "--system -m -s /bin/false keystone"
+
+LDAP_DN ?= "dc=my-domain,dc=com"
+
+SERVICECREATE_PACKAGES = "${SRCNAME}-setup"
+KEYSTONE_HOST="${CONTROLLER_IP}"
+
+# USERCREATE_PARAM and SERVICECREATE_PARAM contain the list of parameters to be
+# set.  If the flag for a parameter in the list is not set here, the default
+# value will be given to that parameter. Parameters not in the list will be set
+# to empty.
+
+USERCREATE_PARAM_${SRCNAME}-setup = "name pass tenant role email"
+python () {
+    flags = {'name':'${ADMIN_USER}',\
+             'pass':'${ADMIN_PASSWORD}',\
+             'tenant':'${ADMIN_TENANT}',\
+             'role':'${ADMIN_ROLE}',\
+             'email':'${ADMIN_USER_EMAIL}',\
+            }
+    d.setVarFlags("USERCREATE_PARAM_%s-setup" % d.getVar('SRCNAME',True), flags)
+}
+# NOTE: BitBake runs every anonymous python function at parse time, in definition order, so both blocks take effect.
+SERVICECREATE_PARAM_${SRCNAME}-setup = "name type description region publicurl adminurl internalurl"
+python () {
+    flags = {'type':'identity',\
+             'description':'OpenStack Identity',\
+             'publicurl':"'http://${KEYSTONE_HOST}:8081/keystone/main/v2.0'",\
+             'adminurl':"'http://${KEYSTONE_HOST}:8081/keystone/admin/v2.0'",\
+             'internalurl':"'http://${KEYSTONE_HOST}:8081/keystone/main/v2.0'"}
+    d.setVarFlags("SERVICECREATE_PARAM_%s-setup" % d.getVar('SRCNAME',True), flags)
+}
+
+do_install_append() {
+    # Install keystone configuration, systemd units and StarlingX helper scripts into ${D}.
+    KEYSTONE_CONF_DIR=${D}${sysconfdir}/keystone
+    KEYSTONE_DATA_DIR=${D}${localstatedir}/lib/keystone
+    KEYSTONE_PACKAGE_DIR=${D}${PYTHON_SITEPACKAGES_DIR}/keystone
+    APACHE_CONF_DIR=${D}${sysconfdir}/apache2/conf.d/
+
+
+    # Create directories
+    install -m 755 -d ${KEYSTONE_CONF_DIR}
+    install -m 755 -d ${KEYSTONE_DATA_DIR}
+    install -m 755 -d ${APACHE_CONF_DIR}
+    install -d ${D}${localstatedir}/log/${SRCNAME}
+
+    # Setup the systemd service file
+    install -d ${D}${systemd_system_unitdir}/
+    install -m 644 ${WORKDIR}/${PN}/keystone-init.service ${D}${systemd_system_unitdir}/keystone-init.service
+
+    mv  ${D}/${datadir}/etc/keystone/sso_callback_template.html ${KEYSTONE_CONF_DIR}/
+    rm -rf ${D}/${datadir}
+
+    # Setup the admin-openrc file
+    KS_OPENRC_FILE=${KEYSTONE_CONF_DIR}/admin-openrc
+    install -m 600 ${WORKDIR}/${PN}/admin-openrc ${KS_OPENRC_FILE}
+    sed -e "s:%CONTROLLER_IP%:${CONTROLLER_IP}:g" -i ${KS_OPENRC_FILE}
+    sed -e "s:%ADMIN_USER%:${ADMIN_USER}:g" -i ${KS_OPENRC_FILE}
+    sed -e "s:%ADMIN_PASSWORD%:${ADMIN_PASSWORD}:g" -i ${KS_OPENRC_FILE}
+
+    # Install various configuration files. We have to select suitable
+    # permissions as packages such as Apache require read access.
+    #
+    # Apache needs to read the keystone.conf
+    install -m 644 ${WORKDIR}/${PN}/keystone.conf ${KEYSTONE_CONF_DIR}/
+    # Apache needs to read the wsgi-keystone.conf
+    install -m 644 ${WORKDIR}/${PN}/wsgi-keystone.conf ${APACHE_CONF_DIR}/keystone.conf
+    install -m 600 ${S}${sysconfdir}/logging.conf.sample  ${KEYSTONE_CONF_DIR}/logging.conf
+
+    # Copy examples from upstream
+    cp -r ${S}/examples ${KEYSTONE_PACKAGE_DIR}
+
+    # Edit the configuration to allow it to work out of the box
+    KEYSTONE_CONF_FILE=${KEYSTONE_CONF_DIR}/keystone.conf
+    sed "/# admin_endpoint = .*/a \
+        public_endpoint = http://%CONTROLLER_IP%:5000/ " \
+        -i ${KEYSTONE_CONF_FILE}
+
+    sed "/# admin_endpoint = .*/a \
+        admin_endpoint = http://%CONTROLLER_IP%:35357/ " \
+        -i ${KEYSTONE_CONF_FILE}
+
+    sed -e "s:%SERVICE_TOKEN%:${SERVICE_TOKEN}:g" -i ${KEYSTONE_CONF_FILE}
+    sed -e "s:%DB_USER%:${DB_USER}:g" -i ${KEYSTONE_CONF_FILE}
+    sed -e "s:%DB_PASSWORD%:${DB_PASSWORD}:g" -i ${KEYSTONE_CONF_FILE}
+    sed -e "s:%CONTROLLER_IP%:${CONTROLLER_IP}:g" -i ${KEYSTONE_CONF_FILE}
+    # NOTE(review): duplicate CONTROLLER_IP substitution removed; the sed above already replaces all occurrences (g flag).
+    sed -e "s:%TOKEN_FORMAT%:${TOKEN_FORMAT}:g" -i ${KEYSTONE_CONF_FILE}
+
+    install -d ${KEYSTONE_PACKAGE_DIR}/tests/tmp
+    if [ -e "${KEYSTONE_PACKAGE_DIR}/tests/test_overrides.conf" ];then
+        sed -e "s:%KEYSTONE_PACKAGE_DIR%:${PYTHON_SITEPACKAGES_DIR}/keystone:g" \
+            -i ${KEYSTONE_PACKAGE_DIR}/tests/test_overrides.conf
+    fi
+
+    if ${@bb.utils.contains('DISTRO_FEATURES', 'OpenLDAP', 'true', 'false', d)};
+    then
+        sed -i -e '/^\[identity\]/a \
+driver = keystone.identity.backends.hybrid_identity.Identity \
+\
+[assignment]\
+driver = keystone.assignment.backends.hybrid_assignment.Assignment\
+' ${D}${sysconfdir}/keystone/keystone.conf
+
+        sed -i -e '/^\[ldap\]/a \
+url = ldap://localhost \
+user = cn=Manager,${LDAP_DN} \
+password = secret \
+suffix = ${LDAP_DN} \
+use_dumb_member = True \
+\
+user_tree_dn = ou=Users,${LDAP_DN} \
+user_attribute_ignore = enabled,email,tenants,default_project_id \
+user_id_attribute = uid \
+user_name_attribute = uid \
+user_mail_attribute = email \
+user_pass_attribute = keystonePassword \
+\
+tenant_tree_dn = ou=Groups,${LDAP_DN} \
+tenant_desc_attribute = description \
+tenant_domain_id_attribute = businessCategory \
+tenant_attribute_ignore = enabled \
+tenant_objectclass = groupOfNames \
+tenant_id_attribute = cn \
+tenant_member_attribute = member \
+tenant_name_attribute = ou \
+\
+role_attribute_ignore = enabled \
+role_objectclass = groupOfNames \
+role_member_attribute = member \
+role_id_attribute = cn \
+role_name_attribute = ou \
+role_tree_dn = ou=Roles,${LDAP_DN} \
+' ${KEYSTONE_CONF_FILE}
+
+        install -m 0755 ${WORKDIR}/${PN}/convert_keystone_backend.py \
+            ${D}${sysconfdir}/keystone/convert_keystone_backend.py
+    fi
+
+
+    install -m 755 ${WORKDIR}/${PN}/stx-files/keystone-fernet-keys-rotate-active ${D}/${bindir}/keystone-fernet-keys-rotate-active
+    install -m 440 ${WORKDIR}/${PN}/stx-files/password-rules.conf ${KEYSTONE_CONF_DIR}/password-rules.conf
+    install -m 755 ${WORKDIR}/${PN}/stx-files/public.py ${KEYSTONE_DATA_DIR}/public.py
+    install -m 644 ${WORKDIR}/${PN}/stx-files/openstack-keystone.service ${D}${systemd_system_unitdir}/openstack-keystone.service
+    install -m 755 ${WORKDIR}/${PN}/stx-files/keystone-all ${D}${bindir}/keystone-all
+
+}
+
+# By default tokens are expired after 1 day so by default we can set
+# this token flush cronjob to run every 2 days
+KEYSTONE_TOKEN_FLUSH_TIME ??= "0 0 */2 * *"
+
+pkg_postinst_${SRCNAME}-cronjobs () {
+    if [ -z "$D" ]; then
+       # By default keystone expired tokens are not automatic removed out of the
+       # database.  So we create a cronjob for cleaning these expired tokens.
+       echo "${KEYSTONE_TOKEN_FLUSH_TIME} root /usr/bin/keystone-manage token_flush" >> /etc/crontab
+    fi
+}
+
+pkg_postinst_${SRCNAME} () {
+    # openstack-keystone is served by apache2 (the unit is aliased to apache2.service) instead of running standalone
+    ln -sf ${systemd_system_unitdir}/apache2.service $D${sysconfdir}/systemd/system/openstack-keystone.service
+}
+
+PACKAGES += " ${SRCNAME}-tests ${SRCNAME} ${SRCNAME}-setup ${SRCNAME}-cronjobs"
+
+SYSTEMD_PACKAGES += "${SRCNAME}-setup"
+SYSTEMD_SERVICE_${SRCNAME}-setup = "keystone-init.service"
+SYSTEMD_SERVICE_${SRCNAME} = "openstack-keystone.service"
+
+SYSTEMD_AUTO_ENABLE_${SRCNAME}-setup = "disable"
+SYSTEMD_AUTO_ENABLE_${SRCNAME} = "disable"
+
+FILES_${SRCNAME}-setup = " \
+    ${systemd_system_unitdir}/keystone-init.service \
+    "
+
+ALLOW_EMPTY_${SRCNAME}-cronjobs = "1"
+
+FILES_${PN} = "${libdir}/* \
+    "
+
+FILES_${SRCNAME}-tests = "${sysconfdir}/${SRCNAME}/run_tests.sh"
+
+FILES_${SRCNAME} = "${bindir}/* \
+    ${sysconfdir}/${SRCNAME}/* \
+    ${localstatedir}/* \
+    ${datadir}/openstack-dashboard/openstack_dashboard/api/keystone-httpd.py \
+    ${sysconfdir}/apache2/conf.d/keystone.conf \
+    ${systemd_system_unitdir}/openstack-keystone.service \
+    "
+
+# Build-time deps; python-pbr-native is listed once, in the setup_requires block below.
+DEPENDS += " \
+        python-pip \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-babel \
+        python-pbr \
+        python-webob \
+        python-pastedeploy \
+        python-paste \
+        python-routes \
+        python-cryptography \
+        python-six \
+        python-sqlalchemy \
+        python-sqlalchemy-migrate \
+        python-stevedore \
+        python-passlib \
+        python-keystoneclient \
+        python-keystonemiddleware \
+        python-bcrypt \
+        python-scrypt \
+        python-oslo.cache \
+        python-oslo.concurrency \
+        python-oslo.config \
+        python-oslo.context \
+        python-oslo.messaging \
+        python-oslo.db \
+        python-oslo.i18n \
+        python-oslo.log \
+        python-oslo.middleware \
+        python-oslo.policy \
+        python-oslo.serialization \
+        python-oslo.utils \
+        python-oauthlib \
+        python-pysaml2 \
+        python-dogpile.cache \
+        python-jsonschema \
+        python-pycadf \
+        python-msgpack \
+        python-osprofiler \
+        python-flask \
+        python-flask-restful \
+        python-pytz \
+        "
+
+RDEPENDS_${SRCNAME}-tests += " bash"
+
+PACKAGECONFIG ?= "${@bb.utils.contains('DISTRO_FEATURES', 'OpenLDAP', 'OpenLDAP', '', d)}"
+PACKAGECONFIG[OpenLDAP] = ",,,python-ldap python-keystone-hybrid-backend"
+
+# TODO:
+#    if DISTRO_FEATURE contains "tempest" then add *-tests to the main RDEPENDS
+
+RDEPENDS_${SRCNAME} = " \
+    ${PN} \
+    postgresql \
+    postgresql-client \
+    python-psycopg2 \
+    apache2 \
+    "
+
+RDEPENDS_${SRCNAME}-setup = "postgresql sudo ${SRCNAME}"
+RDEPENDS_${SRCNAME}-cronjobs = "cronie ${SRCNAME}"
+
+MONITOR_SERVICE_PACKAGES = "${SRCNAME}"
+MONITOR_SERVICE_${SRCNAME} = "keystone"
diff --git a/meta-stx/recipes-devtools/python/python-keystoneauth1.inc b/meta-stx/recipes-devtools/python/python-keystoneauth1.inc
new file mode 100644 (file)
index 0000000..ce1c0fd
--- /dev/null
@@ -0,0 +1,43 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Authentication Library for OpenStack Identity"
+HOMEPAGE = "https://pypi.python.org/pypi/keystoneauth1"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=96f840d59b245a1c817fbcb901afc574"
+
+SRC_URI[md5sum] = "0864f3485db8709d1dec0c8fa6073a31"
+SRC_URI[sha256sum] = "db91ccab5cc43dac4a25dc2c090642d7c31f9ceb28df2c685620d7c12335a8cd"
+
+inherit pypi
+
+DEPENDS += " \
+        ${PYTHON_PN}-pbr \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        ${PYTHON_PN}-pbr-native \
+        "
+
+# NOTE(review): python2-os-service-types below hardcodes the "python2" prefix unlike the ${PYTHON_PN}- deps — confirm the providing recipe's name.
+RDEPENDS_${PN} += " \
+        ${PYTHON_PN}-pbr \
+        ${PYTHON_PN}-iso8601 \
+        ${PYTHON_PN}-requests \
+        ${PYTHON_PN}-six \
+        ${PYTHON_PN}-stevedore \
+       python2-os-service-types \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-keystoneauth1_3.17.1.bb b/meta-stx/recipes-devtools/python/python-keystoneauth1_3.17.1.bb
new file mode 100644 (file)
index 0000000..2100f2d
--- /dev/null
@@ -0,0 +1,17 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+# All recipe metadata, checksums and dependencies come from the shared .inc file.
+inherit setuptools
+require python-keystoneauth1.inc
diff --git a/meta-stx/recipes-devtools/python/python-keystoneclient_git.bbappend b/meta-stx/recipes-devtools/python/python-keystoneclient_git.bbappend
new file mode 100644 (file)
index 0000000..fd8b2a2
--- /dev/null
@@ -0,0 +1,32 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SRC_URI = "\
+       git://github.com/openstack/python-keystoneclient.git;protocol=https;branch=stable/rocky \
+       "
+# NOTE(review): the stable/rocky client is paired with stable/stein keystone server recipes in this layer — confirm intended.
+PV = "3.17.0+git${SRCPV}"
+SRCREV = "234ea50d5dfa3c6b71c15d32223a2ddf84c1aa1e"
+DEPENDS += " \
+        python-pip \
+        python-pbr \
+        "
+
+RDEPENDS_${PN}_append = " \
+       python-keystone \
+       keystone-setup \
+       keystone-cronjobs \
+       keystone \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-keystonemiddleware_git.bb b/meta-stx/recipes-devtools/python/python-keystonemiddleware_git.bb
new file mode 100644 (file)
index 0000000..6161939
--- /dev/null
@@ -0,0 +1,60 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Middleware for Openstack identity API"
+HOMEPAGE = "https://launchpad.net/keystonemiddleware"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=4a4d0e932ffae1c0131528d30d419c55"
+
+SRCREV = "83d0612e03471f56be3be2b521cc21974118cebe"
+SRCNAME = "keystonemiddleware"
+BRANCH = "stable/train"
+PROTOCOL = "https"
+PV = "5.1.0"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://opendev.org/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+# NOTE(review): PV 5.1.0 looks older than stable/train releases — confirm PV matches the pinned SRCREV.
+inherit setuptools
+
+# Build-time deps; python-pbr-native is listed once, in the setup_requires block below.
+DEPENDS += " \
+        python-pip \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-keystoneauth1 \
+        python-oslo.cache \
+        python-oslo.config \
+        python-oslo.context \
+        python-oslo.i18n \
+        python-oslo.log \
+        python-oslo.serialization \
+        python-oslo.utils \
+        python-pbr \
+        python-positional \
+        python-pycadf \
+        python-keystoneclient \
+        python-requests \
+        python-six \
+        python-webob \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-kombu_4.6.7.bbappend b/meta-stx/recipes-devtools/python/python-kombu_4.6.7.bbappend
new file mode 100644 (file)
index 0000000..b0a5046
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+# NOTE(review): importlib-metadata appears to be a runtime requirement of kombu 4.6 on python2 — confirm.
+RDEPENDS_${PN} += "python-importlib-metadata"
diff --git a/meta-stx/recipes-devtools/python/python-kubernetes_8.0.0.bb b/meta-stx/recipes-devtools/python/python-kubernetes_8.0.0.bb
new file mode 100644 (file)
index 0000000..2063104
--- /dev/null
@@ -0,0 +1,31 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Kubernetes python client"
+HOMEPAGE = "https://pypi.org/project/kubernetes/8.0.0"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=ad09685d909e7a9f763d2bb62d4bd6fb"
+
+SRC_URI[md5sum] = "c1d6d0ac57a8a49c5fd383a39ee9ab4b"
+SRC_URI[sha256sum] = "54f8e7bb1dd9a55cf416dff76a63c4ae441764280942d9913f2243676f29d02c"
+
+inherit setuptools pypi
+# NOTE(review): adal/google-auth/requests-oauthlib are auth-related extras needed only at run time, hence RDEPENDS.
+RDEPENDS_${PN}_append = " \
+       ${PYTHON_PN}-adal \
+       ${PYTHON_PN}-google-auth \
+       ${PYTHON_PN}-requests-oauthlib \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-ldap3_git.bb b/meta-stx/recipes-devtools/python/python-ldap3_git.bb
new file mode 100644 (file)
index 0000000..c96e52d
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "ldap3 is a strictly RFC 4510 conforming LDAP V3 pure Python client library. The same codebase runs in Python 2, Python 3, PyPy and PyPy3."
+
+LICENSE = "LGPL-3.0"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=32be5282b8f7cafe30b89d2980fbc7d0"
+
+SRCREV = "08810e435392a1f1a96d5e65bb444b9cb9e96ae3"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/cannatag/ldap3;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+# NOTE(review): setuptools already inherits distutils in OE, and pkgconfig is unused for a pure-python build — redundant but harmless.
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-ldap_3.2.0.bb b/meta-stx/recipes-devtools/python/python-ldap_3.2.0.bb
new file mode 100644 (file)
index 0000000..e73b40e
--- /dev/null
@@ -0,0 +1,46 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+#
+# Copyright (C) 2012 Wind River Systems, Inc.
+#
+SUMMARY = "Provides a wrapper in Python to LDAP"
+DESCRIPTION = "This module provides access to the LDAP \
+(Lightweight Directory Access Protocol) through Python operations \
+instead of C API. The module mainly acts as a wrapper for the \
+OpenLDAP 2.x libraries. Errors will appear as exceptions."
+
+LICENSE = "PSF"
+HOMEPAGE = "http://www.python-ldap.org/"
+DEPENDS = "python openldap cyrus-sasl"
+
+PYPI_PACKAGE = "python-ldap"
+inherit pypi setuptools
+
+LIC_FILES_CHKSUM = "file://LICENCE;md5=36ce9d726d0321b73c1521704d07db1b"
+SRC_URI[md5sum] = "fe22522208dc9b06d16eb70f8553eaab"
+SRC_URI[sha256sum] = "7d1c4b15375a533564aad3d3deade789221e450052b21ebb9720fb822eccdb8e"
+
+# The leading "=" in "=/usr/include/sasl/" is intentional: GCC expands a "=" prefix in -I paths to the compiler sysroot, keeping the build cross-safe.
+do_configure_prepend() {
+    sed -i -e 's:^library_dirs =.*::' setup.cfg
+    sed -i -e 's:^include_dirs =.*:include_dirs = =/usr/include/sasl/:' setup.cfg
+}
+
+RDEPENDS_${PN} = " \
+    ${PYTHON_PN}-pprint \
+    ${PYTHON_PN}-threading \
+    ${PYTHON_PN}-pyasn1 \
+    ${PYTHON_PN}-pyasn1-modules \
+"
diff --git a/meta-stx/recipes-devtools/python/python-ldappool_git.bb b/meta-stx/recipes-devtools/python/python-ldappool_git.bb
new file mode 100644 (file)
index 0000000..c9f23a8
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A simple connector pool for python-ldap."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://setup.py;md5=9c24605289b49ad77a51ba7986425158"
+
+# Upstream ships no standalone LICENSE file; the checksum is taken from setup.py.
+SRCREV = "f75c1a8a34d4fbe6df7889205b5c6a4db6886c33"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://opendev.org/openstack/ldappool.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python python-pbr-native"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-lefthandclient_2.1.0.bb b/meta-stx/recipes-devtools/python/python-lefthandclient_2.1.0.bb
new file mode 100644 (file)
index 0000000..b4b4865
--- /dev/null
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "This is a Client library that can talk to the HPE LeftHand/StoreVirtual Storage array. The HPE LeftHand storage array has a REST web service interface as well as runs SSH. This client library implements a simple interface to talk with that REST interface using the python Requests http library and communicates via SSH using Pytohn’s paramiko library."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+# NOTE(review): stale pypi sha256 checksum removed — it is ignored (and misleading) with the git fetcher below.
+
+SRCREV = "db9773f0f97c5af04da89fcb7dca2a6ddf8d5e4a"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/hpe-storage/python-lefthandclient.git;protocol=https;rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools distutils
diff --git a/meta-stx/recipes-devtools/python/python-lefthandclient_2.1.0.bbappend b/meta-stx/recipes-devtools/python/python-lefthandclient_2.1.0.bbappend
new file mode 100644 (file)
index 0000000..b9a8642
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+# paramiko supplies the SSH transport the LeftHand client uses at runtime (see the recipe's DESCRIPTION).
+RDEPENDS_${PN}_append = " python-paramiko"
diff --git a/meta-stx/recipes-devtools/python/python-linecache2_1.0.0.bbappend b/meta-stx/recipes-devtools/python/python-linecache2_1.0.0.bbappend
new file mode 100644 (file)
index 0000000..f939587
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+# Also build a -native variant of this recipe for build-time consumers.
+BBCLASSEXTEND = "native"
diff --git a/meta-stx/recipes-devtools/python/python-magnumclient_git.bb b/meta-stx/recipes-devtools/python/python-magnumclient_git.bb
new file mode 100644 (file)
index 0000000..4e37b9b
--- /dev/null
@@ -0,0 +1,51 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Python client for containers service"
+HOMEPAGE = "https://github.com/openstack/python-magnumclient"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d2794c0df5b907fdace235a619d80314"
+
+SRCREV = "37e602d160632a386c2960ec8777bfc65642a9a9"
+SRCNAME = "python-magnumclient"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "2.12.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+# Build-time deps; pbr-native satisfies setup.py's setup_requires.
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} +=" \
+       python-pbr \
+       python-babel \
+       python-stevedore \
+       python-requests \
+       python-oslo.i18n \
+       python-oslo.serialization \
+       python-oslo.utils \
+       python-os-client-config \
+       python-osc-lib \
+       python-prettytable \
+       python-cryptography \
+       python-decorator \
+       "
+
+inherit setuptools
diff --git a/meta-stx/recipes-devtools/python/python-migrate_git.bb b/meta-stx/recipes-devtools/python/python-migrate_git.bb
new file mode 100644 (file)
index 0000000..5532d08
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A simple language agnostic database migration tool"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=123c58ff9658c9062dbe401889464492"
+
+SRCREV = "b53b7168f8ac27e4c557de6e62ad85fe00d99566"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/kofrasa/migrate;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+# NOTE(review): pinned to a fixed SRCREV on master; consider fetching a release tag for reproducibility.
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-munch_git.bb b/meta-stx/recipes-devtools/python/python-munch_git.bb
new file mode 100644 (file)
index 0000000..1109873
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Munch is a dictionary that supports attribute-style access, a la JavaScript."
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=f0291cd32b2d6a91d9b19970c94d0a46"
+
+
+SRCREV = "d0fbbce7b8205a243337faed0b61472dfe09706d"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/Infinidat/munch.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+# NOTE(review): pinned SRCREV on master; consider a release tag for reproducibility.
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-murano-pkg-check_git.bb b/meta-stx/recipes-devtools/python/python-murano-pkg-check_git.bb
new file mode 100644 (file)
index 0000000..9db8963
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Murano package validator tool"
+
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+
+SRCREV = "18119f5e9d5a9d706c13188e057cb5b242c51f89"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/murano-pkg-check.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python python-pbr-native"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-muranoclient_git.bb b/meta-stx/recipes-devtools/python/python-muranoclient_git.bb
new file mode 100644 (file)
index 0000000..81f4065
--- /dev/null
@@ -0,0 +1,56 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "python-muranoclient"
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "70b4392c7f8524ac25dbf3ab0feb3ac4127c1ecf"
+S = "${WORKDIR}/git"
+PV = "1.1.1"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRC_URI = "git://github.com/openstack/python-muranoclient.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+DEPENDS += " \
+       python \
+       python-pbr-native \
+       "
+
+inherit setuptools
+
+RDEPENDS_${PN}_append = " \
+       bash    \
+       python-pbr \
+       python-prettytable \
+       python-glanceclient \
+       python-keystoneclient \
+       python-iso8601 \
+       python-six \
+       python-babel \
+       python-pyopenssl \
+       python-requests \
+       python-pyyaml \
+       python-yaql \
+       python-osc-lib \
+       python-murano-pkg-check \
+       python-oslo.serialization \
+       python-oslo.utils \
+       python-oslo.log \
+       python-oslo.i18n \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-neutronclient_git.bb b/meta-stx/recipes-devtools/python/python-neutronclient_git.bb
new file mode 100644 (file)
index 0000000..91beb27
--- /dev/null
@@ -0,0 +1,76 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "CLI and python client library for OpenStack Neutron"
+HOMEPAGE = "https://launchpad.net/neutron"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "680b417111dbbda9e318700286c4efd9055f1af3"
+SRCNAME = "python-neutronclient"
+BRANCH = "stable/train"
+PROTOCOL = "https"
+PV = "6.12.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools monitor rmargparse
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native\
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-cliff \
+        python-debtcollector \
+        python-iso8601 \
+        python-netaddr \
+        python-osc-lib \
+        python-oslo.i18n \
+        python-oslo.serialization \
+        python-oslo.utils \
+        python-os-client-config \
+        python-keystoneauth1 \
+        python-keystoneclient \
+        python-requests \
+        python-simplejson \
+        python-six \
+        python-babel \
+        "
+
+
+PACKAGECONFIG ?= "bash-completion"
+PACKAGECONFIG[bash-completion] = ",,bash-completion,bash-completion ${BPN}-bash-completion"
+
+do_install_append() {
+       install -d ${D}/${sysconfdir}/bash_completion.d
+       install -m 664 ${S}/tools/neutron.bash_completion ${D}/${sysconfdir}/bash_completion.d
+}
+
+PACKAGES =+ "${BPN}-bash-completion"
+FILES_${BPN}-bash-completion = "${sysconfdir}/bash_completion.d/*"
+
+MONITOR_CHECKS_${PN} += "\
+       neutron-api-check.sh \
+"
diff --git a/meta-stx/recipes-devtools/python/python-novaclient/nova-api-check.sh b/meta-stx/recipes-devtools/python/python-novaclient/nova-api-check.sh
new file mode 100644 (file)
index 0000000..b9ba6bc
--- /dev/null
@@ -0,0 +1,14 @@
+#! /bin/bash
+
+CMD="nova list"
+
+data=$($CMD 2>&1)
+res=$?
+if [ ${res} -eq 127 ]; then
+    exit 0
+elif [ ${res} -ne 0 ]; then
+    echo "OpenStack \"nova api\" failed: "
+    echo $data
+    exit $res
+fi
+exit 0
diff --git a/meta-stx/recipes-devtools/python/python-novaclient_git.bb b/meta-stx/recipes-devtools/python/python-novaclient_git.bb
new file mode 100644 (file)
index 0000000..b9fa774
--- /dev/null
@@ -0,0 +1,77 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Client library for OpenStack Compute API"
+HOMEPAGE = "https://github.com/openstack/python-novaclient"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=7cdb54622cacc9bc9b2883091e6dd669"
+
+# NOTE: single SRC_URI only; the pinned stable/train fetch is defined below.
+
+SRCREV = "62bf8809c660ed0675f301c235b1d434caeaf580"
+SRCNAME = "python-novaclient"
+PROTOCOL = "https"
+BRANCH = "stable/train"
+S = "${WORKDIR}/git"
+PV = "13.0.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools python-dir
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-keystoneauth1 \
+        python-iso8601 \
+        python-oslo.i18n \
+        python-oslo.serialization \
+        python-oslo.utils \
+        python-prettytable \
+        python-simplejson \
+        python-six \
+        python-babel \
+        "
+
+PACKAGECONFIG ?= "bash-completion"
+PACKAGECONFIG[bash-completion] = ",,bash-completion,bash-completion ${BPN}-bash-completion"
+
+do_install_append() {
+       install -d ${D}/${sysconfdir}/bash_completion.d
+       install -m 664 ${S}/tools/nova.bash_completion ${D}/${sysconfdir}/bash_completion.d
+
+       mv ${D}/${bindir}/nova ${D}/${bindir}/nova-${PYTHON_BASEVERSION}
+       ln -s ./nova-${PYTHON_BASEVERSION}  ${D}/${bindir}/nova-2
+       ln -s ./nova-2  ${D}/${bindir}/nova
+
+       if [ -e "${D}/${PYTHON_SITEPACKAGES_DIR}/novaclient/tests/v1_1/test_servers.py" ]; then
+               sed -e "s:%PYTHON_SITEPACKAGES_DIR%:${PYTHON_SITEPACKAGES_DIR}:g" \
+                   -i ${D}/${PYTHON_SITEPACKAGES_DIR}/novaclient/tests/v1_1/test_servers.py
+       fi
+
+}
+
+PACKAGES =+ "${BPN}-bash-completion"
+FILES_${BPN}-bash-completion = "${sysconfdir}/bash_completion.d/*"
diff --git a/meta-stx/recipes-devtools/python/python-openstackclient_git.bbappend b/meta-stx/recipes-devtools/python/python-openstackclient_git.bbappend
new file mode 100644 (file)
index 0000000..43b226b
--- /dev/null
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+PV = "3.19.0+git${SRCPV}"
+SRCREV = "83359fbe4fd7e5850abd45a467bf197c284519b1"
+
+SRC_URI = " \
+        git://github.com/openstack/python-openstackclient.git;protocol=https;branch=stable/stein \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-openstacksdk_%.bbappend b/meta-stx/recipes-devtools/python/python-openstacksdk_%.bbappend
new file mode 100644 (file)
index 0000000..04b0d82
--- /dev/null
@@ -0,0 +1,17 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " python-munch"
+RDEPENDS_${PN}_append = " python2-os-service-types"
diff --git a/meta-stx/recipes-devtools/python/python-openstacksdk_git.bbappend b/meta-stx/recipes-devtools/python/python-openstacksdk_git.bbappend
new file mode 100644 (file)
index 0000000..4ace9d7
--- /dev/null
@@ -0,0 +1,19 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN} += " bash"
+PV = "0.25.0+git${SRCPV}"
+SRCREV="9cd0ef2ac5b8cc90df344f69edc8ac68224f292e"
+
diff --git a/meta-stx/recipes-devtools/python/python-osc-lib_1.12.1.bb b/meta-stx/recipes-devtools/python/python-osc-lib_1.12.1.bb
new file mode 100644 (file)
index 0000000..d6d9cbb
--- /dev/null
@@ -0,0 +1,45 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "OpenStackClient Library"
+HOMEPAGE = "https://docs.openstack.org/osc-lib/"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRC_URI[md5sum] = "73c8bd90b8325b2595b814f41d06fdb8"
+SRC_URI[sha256sum] = "26d3e32c8c4eff47240c458cddb6b75db52034d643f01de2841ad9e84904d7aa"
+
+inherit setuptools pypi
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-six \
+        python-babel \
+        python-cliff \
+        python-keystoneauth1 \
+        python-os-client-config \
+        python-oslo.i18n \
+        python-oslo.utils \
+        python-simplejson \
+        python-stevedore \
+        "
+
+CLEANBROKEN = "1"
diff --git a/meta-stx/recipes-devtools/python/python-oslo.cache_git.bb b/meta-stx/recipes-devtools/python/python-oslo.cache_git.bb
new file mode 100644 (file)
index 0000000..03e90f3
--- /dev/null
@@ -0,0 +1,50 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "oslo.config enabled dogpile cache"
+HOMEPAGE = "https://github.com/openstack/oslo.cache"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "3b8d9c3f6c87d62e5502cf4a9ae89e4067180c1f"
+SRCNAME = "oslo.cache"
+PROTOCOL = "https"
+BRANCH = "stable/train"
+S = "${WORKDIR}/git"
+PV = "1.26.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools rmargparse
+
+DEPENDS += " \
+        python-pbr \
+        python-pip \
+        "
+
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+# RDEPENDS_default:
+RDEPENDS_${PN} += " \
+        python-dogpile.cache \
+        python-six \
+        python-oslo.config \
+        python-oslo.i18n \
+        python-oslo.log \
+        python-oslo.utils \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.concurrency_git.bb b/meta-stx/recipes-devtools/python/python-oslo.concurrency_git.bb
new file mode 100644 (file)
index 0000000..f15de88
--- /dev/null
@@ -0,0 +1,51 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "oslo.concurrency library"
+HOMEPAGE = "https://github.com/openstack/oslo.concurrency"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+
+SRCREV = "5b42d276350666410a7d010a5152467ad509d3f9"
+SRCNAME = "oslo.concurrency"
+PROTOCOL = "https"
+BRANCH = "stable/train"
+S = "${WORKDIR}/git"
+PV = "3.26.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools rmargparse
+
+DEPENDS += " \
+        python-pbr \
+        python-pip \
+        "
+
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+# RDEPENDS_default:
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-fasteners \
+        python-oslo.config \
+        python-oslo.i18n \
+        python-oslo.utils \
+        python-six \
+        python-enum34 \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.config_git.bb b/meta-stx/recipes-devtools/python/python-oslo.config_git.bb
new file mode 100644 (file)
index 0000000..81199e5
--- /dev/null
@@ -0,0 +1,54 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "API supporting parsing command line arguments and .ini style configuration files."
+HOMEPAGE = "https://pypi.python.org/pypi/oslo.config/5.2.0"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=c46f31914956e4579f9b488e71415ac8"
+
+SRCREV = "31c11ab4289efa1a91835f3daa928fe927ac4276"
+SRCNAME = "oslo.config"
+PROTOCOL = "https"
+BRANCH = "stable/queens"
+S = "${WORKDIR}/git"
+PV = "5.2.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools rmargparse
+
+DEPENDS += " \
+        python-pbr \
+        python-pip \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+    python-pbr \
+    python-netaddr \
+    python-six \
+    python-stevedore \
+    python-debtcollector \
+    python-oslo.i18n \
+    python-rfc3986 \
+    python-pyyaml \
+    python-importlib-metadata \
+    "
+
diff --git a/meta-stx/recipes-devtools/python/python-oslo.context_git.bb b/meta-stx/recipes-devtools/python/python-oslo.context_git.bb
new file mode 100644 (file)
index 0000000..9b8ac3b
--- /dev/null
@@ -0,0 +1,49 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Oslo Context Library"
+HOMEPAGE = "https://launchpad.net/oslo"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "76a07f9022f0fa967707c9f6cb5a4a24aac6b3ef"
+SRCNAME = "oslo.context"
+PROTOCOL = "https"
+BRANCH = "stable/stein"
+S = "${WORKDIR}/git"
+PV = "2.22.1+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+# RDEPENDS_default: 
+RDEPENDS_${PN} += " \
+        bash \
+        python-pbr \
+        python-debtcollector \
+        python-positional \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.db_git.bb b/meta-stx/recipes-devtools/python/python-oslo.db_git.bb
new file mode 100644 (file)
index 0000000..665a359
--- /dev/null
@@ -0,0 +1,50 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Oslo db library"
+HOMEPAGE = "http://launchpad.net/oslo"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+
+SRCREV = "4de33ebd504a2c3dbddc2492bdb96ae7bca77d66"
+SRCNAME = "oslo.db"
+PROTOCOL = "https"
+BRANCH = "stable/stein"
+S = "${WORKDIR}/git"
+PV = "4.27.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-oslo.config \
+       python-oslo.i18n \
+       python-oslo.serialization \
+       python-oslo.utils \
+       python-six \
+        python-alembic \
+       python-sqlalchemy \
+       python-sqlalchemy-migrate \
+       python-stevedore \
+       python-pbr \
+       python-debtcollector \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.log_git.bb b/meta-stx/recipes-devtools/python/python-oslo.log_git.bb
new file mode 100644 (file)
index 0000000..0ec652e
--- /dev/null
@@ -0,0 +1,58 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Oslo Log Library"
+HOMEPAGE = "https://launchpad.net/oslo"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+
+SRCREV = "110191aa505cfefafee1c8579213b2f9c3397b6c"
+SRCNAME = "oslo.log"
+PROTOCOL = "https"
+BRANCH = "stable/train"
+S = "${WORKDIR}/git"
+PV = "3.38.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-babel \
+        python-pbr-native \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+# RDEPENDS_default: 
+RDEPENDS_${PN} += " \
+        bash \
+        python-pbr \
+        python-six \
+        python-oslo.config \
+        python-oslo.context \
+        python-oslo.i18n \
+        python-oslo.utils \
+        python-oslo.serialization \
+        python-pyinotify \
+        python-debtcollector \
+        python-dateutil \
+        python-monotonic \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.middleware_git.bb b/meta-stx/recipes-devtools/python/python-oslo.middleware_git.bb
new file mode 100644 (file)
index 0000000..ae89ed8
--- /dev/null
@@ -0,0 +1,56 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Oslo Middleware Library"
+HOMEPAGE = "https://launchpad.net/oslo"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+
+SRCREV = "8812bc3fc490f0db4977418eaedf58190a0df394"
+SRCNAME = "oslo.middleware"
+PROTOCOL = "https"
+BRANCH = "stable/train"
+S = "${WORKDIR}/git"
+PV = "3.31.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+        "
+
+# RDEPENDS_default:
+RDEPENDS_${PN} += " \
+        python-pbr \
+        python-jinja2 \
+        python-oslo.config \
+        python-oslo.context \
+        python-oslo.i18n \
+        python-oslo.utils \
+        python-six \
+        python-stevedore \
+        python-webob \
+        python-debtcollector \
+        python-statsd \
+        "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.policy_git.bb b/meta-stx/recipes-devtools/python/python-oslo.policy_git.bb
new file mode 100644 (file)
index 0000000..69fd13f
--- /dev/null
@@ -0,0 +1,46 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Oslo policy library"
+HOMEPAGE = "https://github.com/openstack/oslo.policy"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "b9fd10e2612f26c93d49c168a0408aba6d20e5bf"
+SRCNAME = "oslo.policy"
+PROTOCOL = "https"
+BRANCH = "stable/train"
+S = "${WORKDIR}/git"
+PV = "1.43.1+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-oslo.config \
+       python-oslo.i18n \
+       python-oslo.serialization \
+       python-oslo.utils \
+       python-six \
+       python-pyyaml \
+       python-requests \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.serialization_git.bb b/meta-stx/recipes-devtools/python/python-oslo.serialization_git.bb
new file mode 100644 (file)
index 0000000..370aa9d
--- /dev/null
@@ -0,0 +1,53 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Oslo Serialization API"
+HOMEPAGE = "https://launchpad.net/oslo"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+
+SRCREV = "576b13ec26baa671da05df56a8d14aba6fa3e826"
+SRCNAME = "oslo.serialization"
+PROTOCOL = "https"
+BRANCH = "stable/train"
+S = "${WORKDIR}/git"
+PV = "2.23.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+inherit setuptools
+
+# DEPENDS_default: python-pip
+
+DEPENDS += " \
+        python-pip \
+       python-pbr-native\
+       "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+       python-pbr-native \
+       "
+
+# RDEPENDS_default:
+RDEPENDS_${PN} += " \
+       python-pbr \
+       python-six \
+       python-oslo.utils \
+       python-pytz \
+       python-msgpack \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-oslo.upgradecheck_git.bb b/meta-stx/recipes-devtools/python/python-oslo.upgradecheck_git.bb
new file mode 100644 (file)
index 0000000..8f62fad
--- /dev/null
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Common code for writing OpenStack upgrade checks"
+DESCRIPTION = "\
+  This project contains the common code necessary for writing upgrade checks \
+  in OpenStack projects. It includes a module (oslo_upgradecheck.upgradecheck) \
+  for the common code as well as an example (oslo_upgradecheck.__main__) of \
+  integrating that code into a project. \
+"
+HOMEPAGE = "https://github.com/openstack/oslo.upgradecheck"
+SECTION = "devel/python"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+PV = "0.3.1+git${SRCPV}"
+SRCREV = "5f182fe19cdfe0bcf0d51bcf7be05e7a74f0a068"
+
+SRCNAME = "oslo.upgradecheck"
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=https;branch=master"
+
+S = "${WORKDIR}/git"
+
+inherit setuptools
+
+DEPENDS += " \
+    python-pip \
+    python-pbr-native \
+"
+
+RDEPENDS_${PN} += " \
+    python-oslo.config \
+    python-oslo.i18n \
+"
diff --git a/meta-stx/recipes-devtools/python/python-oslo.utils_git.bbappend b/meta-stx/recipes-devtools/python/python-oslo.utils_git.bbappend
new file mode 100644 (file)
index 0000000..ca53118
--- /dev/null
@@ -0,0 +1,19 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PV = "3.33.0+git${SRCPV}"
+SRCREV = "58fb709f58bd031c5114d9b6fe2e0926175b87b6"
+SRC_URI = "git://github.com/openstack/oslo.utils.git;protocol=https;branch=stable/queens"
+
diff --git a/meta-stx/recipes-devtools/python/python-oslo.versionedobjects_git.bb b/meta-stx/recipes-devtools/python/python-oslo.versionedobjects_git.bb
new file mode 100644 (file)
index 0000000..f71a190
--- /dev/null
@@ -0,0 +1,51 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Oslo versionedobjects library"
+HOMEPAGE = "https://wiki.openstack.org/wiki/Oslo"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRCREV = "c95f0c876840e36f37acb14d5eec5238d85e7dce"
+SRCNAME = "oslo.versionedobjects"
+PROTOCOL = "https"
+BRANCH = "stable/queens"
+S = "${WORKDIR}/git"
+PV = "1.31.2+git${SRCPV}"
+
+SRC_URI = "git://github.com/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-six \
+        python-oslo.concurrency \
+        python-oslo.config \
+        python-oslo.context \
+        python-oslo.messaging \
+        python-oslo.serialization \
+        python-oslo.utils \
+        python-oslo.log \
+        python-oslo.i18n \
+        python-webob \
+        python-iso8601 \
+        python-netaddr \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-osprofiler_git.bb b/meta-stx/recipes-devtools/python/python-osprofiler_git.bb
new file mode 100644 (file)
index 0000000..e98ff15
--- /dev/null
@@ -0,0 +1,45 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Library for cross-project profiling library"
+HOMEPAGE = "https://docs.openstack.org/osprofiler/latest/"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=19cbd64715b51267a47bf3750cc6a8a5"
+
+SRCREV = "6d68170f72ef303e6564e164aafb1ec53a8b8314"
+SRCNAME = "osprofiler"
+BRANCH = "master"
+PROTOCOL = "https"
+PV = "2.3.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://opendev.org/openstack/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " python-pbr-native"
+
+RDEPENDS_${PN}_append = " \
+       python-pbr \
+       python-six \
+       python-oslo.messaging \
+       python-oslo.log \
+       python-oslo.utils \
+       python-webob \
+       python-requests \
+       python-netaddr \
+       python-oslo.concurrency \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-packaging_20.1.bb b/meta-stx/recipes-devtools/python/python-packaging_20.1.bb
new file mode 100644 (file)
index 0000000..8a90606
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+       Core utilities for Python packages. \
+       The packaging project includes the following: version handling, specifiers, markers, requirements, tags, utilities. \
+       "
+HOMEPAGE = "https://github.com/pypa/packaging"
+SECTION = "devel/python"
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=faadaedca9251a90b205c9167578ce91"
+
+SRC_URI[md5sum] = "a02ce566f10c701b4c42e39a4ce59c93"
+SRC_URI[sha256sum] = "e665345f9eef0c621aa0bf2f8d78cf6d21904eef16a93f020240b704a57f1334"
+
+PYPI_PACKAGE = "packaging"
+inherit setuptools pypi
diff --git a/meta-stx/recipes-devtools/python/python-pankoclient_0.5.0.bb b/meta-stx/recipes-devtools/python/python-pankoclient_0.5.0.bb
new file mode 100644 (file)
index 0000000..ab16379
--- /dev/null
@@ -0,0 +1,35 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "python-pankoclient"
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "572aee9cf6ac618eb5d1ea325f9e59414d387dbf"
+S = "${WORKDIR}/git"
+PV = "0.5.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+SRC_URI = "git://github.com/openstack/python-pankoclient.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+DEPENDS += " \
+       python \
+       python-pbr-native \
+       "
+
+inherit setuptools
diff --git a/meta-stx/recipes-devtools/python/python-paramiko_%.bbappend b/meta-stx/recipes-devtools/python/python-paramiko_%.bbappend
new file mode 100644 (file)
index 0000000..af4cca2
--- /dev/null
@@ -0,0 +1,19 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " \
+       python-bcrypt \
+       python-pynacl \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-pecan_%.bbappend b/meta-stx/recipes-devtools/python/python-pecan_%.bbappend
new file mode 100644 (file)
index 0000000..896da67
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " \
+       ${PYTHON_PN}-singledispatch \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-pika_1.1.0.bb b/meta-stx/recipes-devtools/python/python-pika_1.1.0.bb
new file mode 100644 (file)
index 0000000..e3747a8
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Pika is a pure-Python implementation of the AMQP 0-9-1 protocol including RabbitMQ’s extensions."
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=535836bf0a5de515a7bfee026075701d"
+
+SRC_URI[sha256sum] = "9fa76ba4b65034b878b2b8de90ff8660a59d925b087c5bb88f8fdbb4b64a1dbf"
+
+
+PYPI_PACKAGE = "pika"
+inherit pypi setuptools
diff --git a/meta-stx/recipes-devtools/python/python-pycadf.bbappend_ b/meta-stx/recipes-devtools/python/python-pycadf.bbappend_
new file mode 100644 (file)
index 0000000..a15e846
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+#
+#extract and update etc with contents from python-pycadf-common rpm file
diff --git a/meta-stx/recipes-devtools/python/python-pycurl_7.43.0.3.bb b/meta-stx/recipes-devtools/python/python-pycurl_7.43.0.3.bb
new file mode 100644 (file)
index 0000000..4da1f3a
--- /dev/null
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "libcurl python bindings"
+LICENSE = "LGPLv2 | MIT"
+LIC_FILES_CHKSUM = " \
+       file://COPYING-LGPL;md5=4fbd65380cdd255951079008b364516c \
+       file://COPYING-MIT;md5=2df767ed35d8ea83de4a93feb55e7815 \
+       "
+
+SRC_URI[sha256sum] = "6f08330c5cf79fa8ef68b9912b9901db7ffd34b63e225dce74db56bb21deda8e"
+
+
+PYPI_PACKAGE = "pycurl"
+inherit pypi setuptools
+
+export BUILD_SYS
+export HOST_SYS
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+DEPENDS = " curl python"
+
+BBCLASSEXTEND = " native"
diff --git a/meta-stx/recipes-devtools/python/python-pyelftools_0.25.bb b/meta-stx/recipes-devtools/python/python-pyelftools_0.25.bb
new file mode 100644 (file)
index 0000000..423ee75
--- /dev/null
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "pyelftools library for parsing and analyzing elf files"
+LICENSE = "LGPLv2+"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5ce2a2b07fca326bc7c146d10105ccfc"
+
+SRC_URI[sha256sum] = "89c6da6f56280c37a5ff33468591ba9a124e17d71fe42de971818cbff46c1b24"
+
+PYPI_PACKAGE = "pyelftools"
+inherit pypi setuptools
diff --git a/meta-stx/recipes-devtools/python/python-pyghmi_1.5.7.bb b/meta-stx/recipes-devtools/python/python-pyghmi_1.5.7.bb
new file mode 100644 (file)
index 0000000..8b44780
--- /dev/null
@@ -0,0 +1,34 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "HPE 3PAR HTTP REST Client"
+HOMEPAGE = "https://pythonhosted.org/python-3parclient/"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://PKG-INFO;md5=d3dfac7b0d23cb44b097e35518879438"
+
+SRC_URI[md5sum] = "b49dd03782fd5eda09cd9210ae979f6a"
+SRC_URI[sha256sum] = "4c2b0be4e3b8a517c1718e39e1eeb3e62f73810bb9910278000716f6074e5a69"
+
+PYPI_PACKAGE = "pyghmi"
+inherit setuptools pypi
+
+DEPENDS += " \
+       python-pbr-native \
+       "
+
+RDEPENDS_${PN}_append = " \
+       python-dateutil \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-pymysql_0.9.3.bb b/meta-stx/recipes-devtools/python/python-pymysql_0.9.3.bb
new file mode 100644 (file)
index 0000000..56976f9
--- /dev/null
@@ -0,0 +1,29 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Python MySQL client library"
+DESCRIPTION = " This package contains a pure-Python MySQL client library, based on PEP 249." 
+
+SECTION = "devel/python"
+HOMEPAGE = "https://github.com/PyMySQL/PyMySQL"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=528175c84163bb800d23ad835c7fa0fc"
+
+inherit pypi setuptools
+
+PYPI_PACKAGE = "PyMySQL"
+
+SRC_URI[md5sum] = "e5d9183cc0a775ac29f9e0365cca6556"
+SRC_URI[sha256sum] = "d8c059dcd81dedb85a9f034d5e22dcb4442c0b201908bede99e306d65ea7c8e7"
diff --git a/meta-stx/recipes-devtools/python/python-pynacl_git.bb b/meta-stx/recipes-devtools/python/python-pynacl_git.bb
new file mode 100644 (file)
index 0000000..7ae73cd
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files/python-pynacl:"
+DESCRIPTION = "Python binding to libsodiom"
+
+HOMEPAGE = "https://pypi.org/project/PyNaCl/"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=8cc789b082b3d97e1ccc5261f8594d3f"
+
+SRCREV = "4881c878c9a33f4684337f650355bdf7f031d77d"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/pyca/pynacl.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/0001-Enable-cross-compile.patch \
+       "
+
+
+inherit setuptools
+
+DEPENDS += " libsodium python-cffi-native libsodium-native"
+RDEPENDS_${PN} = " libsodium"
+
+do_compile_prepend() {
+       export PYNACL_CROSS_BUILD="--build=${BUILD_SYS}"
+       export PYNACL_CROSS_HOST="--host=${HOST_SYS}"
+       export PYNACL_CROSS_TARGET="--target=${TARGET_SYS}"
+       export SODIUM_INSTALL="system"
+}
+
+do_install_prepend() {
+       export SODIUM_INSTALL="system"
+}
diff --git a/meta-stx/recipes-devtools/python/python-pyngus_git.bb b/meta-stx/recipes-devtools/python/python-pyngus_git.bb
new file mode 100644 (file)
index 0000000..4bbfc4a
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A messaging framework built on the QPID Proton engine. It provides a callback-based API for message passing. See the User Guide in the docs directory for more detail."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d2794c0df5b907fdace235a619d80314"
+
+SRCREV = "5392392046989f1bb84ba938c30e4d48311075f1"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/kgiusti/pyngus;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-pyperf_git.bb b/meta-stx/recipes-devtools/python/python-pyperf_git.bb
new file mode 100644 (file)
index 0000000..75588be
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "The Python perf module is a toolkit to write, run and analyze benchmarks."
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://COPYING;md5=78bc2e6e87c8c61272937b879e6dc2f8"
+
+SRCREV = "7febee0242ce8fd01f56bce2ec72ec536a41caed"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/vstinner/pyperf.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+# DEPENDS += " python python-pbr-native"
+DEPENDS += " python"
+inherit setuptools pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-pysaml2_git.bb b/meta-stx/recipes-devtools/python/python-pysaml2_git.bb
new file mode 100644 (file)
index 0000000..f4f0abc
--- /dev/null
@@ -0,0 +1,41 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Python implementation of SAML Version 2 to be used in a WSGI environment"
+HOMEPAGE = "https://github.com/rohe/pysaml2"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=861cc9087857b5bea2e11356c3de95d9"
+
+SRCREV = "c740a3a270037d6fcb42a12112db594705d3878f"
+SRCNAME = "pysaml2"
+PROTOCOL = "git"
+BRANCH = "v4.9.0"
+S = "${WORKDIR}/git"
+PV = "4.5.0+git${SRCPV}"
+
+SRC_URI = "git://github.com/rohe/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        "
+
+RDEPENDS_${PN} += " \
+       python-zopeinterface \
+       python-repoze.who \
+       python-defusedxml \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-redfishtool_git.bb b/meta-stx/recipes-devtools/python/python-redfishtool_git.bb
new file mode 100644 (file)
index 0000000..6d659ff
--- /dev/null
@@ -0,0 +1,46 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Client side API implementation of the Redfish RESTful API for Data Center Hardware Management."
+HOMEPAGE = "https://github.com/DMTF/Redfishtool"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE.md;md5=cee7a7694b5bf14bc9d3e0fbe78a64af"
+
+
+SRCREV = "2bdcd905e1ad227f40809ec298804d5401047612"
+SRCNAME = "Redfishtool"
+BRANCH = "master"
+PROTOCOL = "https"
+PV = "1.1.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/DMTF/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/0001-Adapt-redfishtool-to-python2.patch \
+       "
+
+inherit setuptools
+
+RDEPENDS_${PN} += " \
+       python-requests \
+"
+
+do_install_append() {
+       :
+}
+
+FILES_${PN} += " \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-requests-oauthlib_%.bbappend b/meta-stx/recipes-devtools/python/python-requests-oauthlib_%.bbappend
new file mode 100644 (file)
index 0000000..beb02a9
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " \
+       python-oauthlib \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-requests-oauthlib_git.bb b/meta-stx/recipes-devtools/python/python-requests-oauthlib_git.bb
new file mode 100644 (file)
index 0000000..5aba502
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "This project provides first-class OAuth library support for Requests."
+
+LICENSE = "ISC"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=22d117a849df10d047ed9b792838e863"
+
+SRCREV = "e19ac082682ecf3ed06cdcc5538e3c4bb2aa7762"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "1.2.0"
+
+SRC_URI = "git://github.com/requests/requests-oauthlib;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-requests-toolbelt_git.bb b/meta-stx/recipes-devtools/python/python-requests-toolbelt_git.bb
new file mode 100644 (file)
index 0000000..3e137d1
--- /dev/null
@@ -0,0 +1,38 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "requests-toolbelt"
+
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "1e384626476f7afbff0f649fe41886d0f27473d6"
+S = "${WORKDIR}/git"
+PV = "0.9.1+${SRCPV}"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=71760e0f1dda8cff91b0bc9246caf571"
+
+SRC_URI = "git://github.com/requests/toolbelt.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+DEPENDS = " \
+       python \
+       python-pbr-native \
+       "
+
+inherit setuptools
+
+RDEPENDS_${PN} += " bash"
diff --git a/meta-stx/recipes-devtools/python/python-requests_%.bbappend b/meta-stx/recipes-devtools/python/python-requests_%.bbappend
new file mode 100644 (file)
index 0000000..775a1c5
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " file://requests/0001-close-connection-on-HTTP-413-Request-Entit.patch"
diff --git a/meta-stx/recipes-devtools/python/python-ruamel.ordereddict_0.4.9.bb b/meta-stx/recipes-devtools/python/python-ruamel.ordereddict_0.4.9.bb
new file mode 100644 (file)
index 0000000..f9a1fbb
--- /dev/null
@@ -0,0 +1,25 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "The ordereddict module in short"
+HOMEPAGE = "https://pypi.org/project/ruamel.ordereddict/"
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://PKG-INFO;md5=0caf06c5d151e57e100341eb28dcb6f5"
+
+SRC_URI[md5sum] = "d160714193a0ec470cc26f614b1aa0e7"
+SRC_URI[sha256sum] = "7058c470f131487a3039fb9536dda9dd17004a7581bdeeafa836269a36a2b3f6"
+
+inherit setuptools pypi
diff --git a/meta-stx/recipes-devtools/python/python-ruamel.yaml_0.15.9.bb b/meta-stx/recipes-devtools/python/python-ruamel.yaml_0.15.9.bb
new file mode 100644 (file)
index 0000000..db6407d
--- /dev/null
@@ -0,0 +1,38 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "YAML 1.2 loader/dumper package for Python"
+HOMEPAGE = "https://pypi.org/project/ruamel.yaml/"
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=84e9d7d823d2abac052e70de2051ca1c"
+
+SRC_URI[md5sum] = "d53315f8ccb93748d00ccca39486ac78"
+SRC_URI[sha256sum] = "350496f6fdd8c2bb17a0fa3fd2ec98431280cf12d72dae498b19ac0119c2bbad"
+
+inherit setuptools pypi python-dir
+
+DEPENDS += " \
+       ${PYTHON_PN}-native \
+       ${PYTHON_PN}-cryptography-native \
+       "
+RDEPENDS_${PN}_append  = " \
+       ${PYTHON_PN}-ruamel.ordereddict \
+       "
+
+do_install_prepend() {
+       export RUAMEL_NO_PIP_INSTALL_CHECK=1
+}
+
diff --git a/meta-stx/recipes-devtools/python/python-ryu_git.bb b/meta-stx/recipes-devtools/python/python-ryu_git.bb
new file mode 100644 (file)
index 0000000..92c16bf
--- /dev/null
@@ -0,0 +1,54 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Ryu is a component-based software defined networking framework."
+HOMEPAGE = "https://github.com/osrg/ryu"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = " \
+       file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57\
+       "
+
+SRCREV = "1c008060fa3dab51c3a59c1485a7529b13cf0dd1"
+SRCNAME = "ryu"
+BRANCH = "master"
+PROTOCOL = "https"
+PV = "4.24+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/osrg/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools python-dir
+
+DEPENDS += " \
+       python-pip \
+       python-pbr-native \
+       "
+
+RDEPENDS_${PN}_append = " \
+       ${PYTHON_PN}-eventlet \
+       ${PYTHON_PN}-msgpack \
+       ${PYTHON_PN}-netaddr \
+       ${PYTHON_PN}-oslo.config \
+       ${PYTHON_PN}-ovs \
+       ${PYTHON_PN}-routes \
+       ${PYTHON_PN}-six \
+       ${PYTHON_PN}-tinyrpc \
+       ${PYTHON_PN}-webob \
+       "
+
+FILES_${PN}_append = " \
+       ${datadir}/etc/${SRCNAME} \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-saharaclient_git.bb b/meta-stx/recipes-devtools/python/python-saharaclient_git.bb
new file mode 100644 (file)
index 0000000..de24319
--- /dev/null
@@ -0,0 +1,45 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "CLI and python client library for OpenStack Sahara"
+HOMEPAGE = "https://launchpad.net/sahara"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+DEPENDS += " \
+        python-pip \
+        python-pbr \
+        "
+
+# Satisfy setup.py 'setup_requires'
+DEPENDS += " \
+        python-pbr-native \
+"
+
+RDEPENDS_${PN} += " \
+       python-pbr \
+       "
+
+SRCNAME = "saharaclient"
+
+SRC_URI = "git://github.com/openstack/python-saharaclient.git;branch=master"
+
+PV = "2.3.0+git${SRCPV}"
+SRCREV = "3107b452467537f4eef3d9ecfb5e35d110d19662"
+S = "${WORKDIR}/git"
+
+inherit setuptools
+
diff --git a/meta-stx/recipes-devtools/python/python-scss_git.bb b/meta-stx/recipes-devtools/python/python-scss_git.bb
new file mode 100644 (file)
index 0000000..29a705a
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "python-scss is a compiler for the SCSS (Sass) stylesheet language for Python"
+
+LICENSE = "LGPL-3.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=e6a600fd5e1d9cbde2d983680233ad02 "
+
+
+SRCREV = "34fe985e6b43caa9f9b9bcd0dc433be4b2a1fdec"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "0.8.73"
+
+SRC_URI = "git://github.com/klen/python-scss;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-snowballstemmer_2.0.0.bb b/meta-stx/recipes-devtools/python/python-snowballstemmer_2.0.0.bb
new file mode 100644 (file)
index 0000000..a68086c
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+       Snowball is a small string processing language designed for creating stemming algorithms \
+       for use in Information Retrieval. This site describes Snowball, and presents several useful \
+       stemmers which have been implemented using it. \
+       "
+HOMEPAGE = "https://github.com/snowballstem/snowball"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://COPYING;md5=2750797da77c1d784e7626b3f7d7ff3e"
+
+SRC_URI[md5sum] = "c05ec4a897be3c953c8b8b844c4241d4"
+SRC_URI[sha256sum] = "df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52"
+
+PYPI_PACKAGE = "snowballstemmer"
+inherit setuptools pypi
diff --git a/meta-stx/recipes-devtools/python/python-sphinx_%.bbappend b/meta-stx/recipes-devtools/python/python-sphinx_%.bbappend
new file mode 100644 (file)
index 0000000..b973fcd
--- /dev/null
@@ -0,0 +1,26 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit python-dir
+RDEPENDS_${PN}_append = " \
+       ${PYTHON_PN}-pygments \
+       ${PYTHON_PN}-typing \
+       ${PYTHON_PN}-sphinxcontrib-websupport \
+       ${PYTHON_PN}-alabaster \
+       ${PYTHON_PN}-imagesize \
+       ${PYTHON_PN}-snowballstemmer \
+       ${PYTHON_PN}-packaging \
+       "
+
diff --git a/meta-stx/recipes-devtools/python/python-sphinxcontrib-websupport_1.1.0.bb b/meta-stx/recipes-devtools/python/python-sphinxcontrib-websupport_1.1.0.bb
new file mode 100644 (file)
index 0000000..5447a80
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+       sphinxcontrib-websupport provides a Python API to easily integrate Sphinx\
+       documentation into your Web application. \
+       "
+HOMEPAGE = "https://www.sphinx-doc.org/en/master/"
+SECTION = "devel/python"
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=819a10ab58e77e03e61a584de6339f7c"
+
+SRC_URI[md5sum] = "ca6435e7b4eb9408df4f54972361e9d3"
+SRC_URI[sha256sum] = "9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9"
+
+PYPI_PACKAGE = "sphinxcontrib-websupport"
+inherit setuptools pypi
diff --git a/meta-stx/recipes-devtools/python/python-testtools_%.bbappend b/meta-stx/recipes-devtools/python/python-testtools_%.bbappend
new file mode 100644 (file)
index 0000000..5caea9a
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " \
+       python-mimeparse \
+       python-traceback2 \
+       python-unittest2 \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-traceback2_1.4.0.bbappend b/meta-stx/recipes-devtools/python/python-traceback2_1.4.0.bbappend
new file mode 100644 (file)
index 0000000..f939587
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+BBCLASSEXTEND = "native"
diff --git a/meta-stx/recipes-devtools/python/python-unittest2_%.bbappend b/meta-stx/recipes-devtools/python/python-unittest2_%.bbappend
new file mode 100644 (file)
index 0000000..0db35be
--- /dev/null
@@ -0,0 +1,25 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+DEPENDS_append = " \
+       ${PYTHON_PN}-traceback2-native \
+       ${PYTHON_PN}-six-native \
+       "
+
+SRC_URI += " \
+       file://python-unittest2/0001-port-unittest2-argparse-is-part-of-stdlib.patch \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-urlgrabber_4.0.0.bb b/meta-stx/recipes-devtools/python/python-urlgrabber_4.0.0.bb
new file mode 100644 (file)
index 0000000..4d679df
--- /dev/null
@@ -0,0 +1,26 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A high-level cross-protocol url-grabber."
+LICENSE = "LGPLv2+"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=68ad62c64cc6c620126241fd429e68fe"
+
+SRC_URI[sha256sum] = "79c5a01c5dd31906a7f38ef1f500030e137704804d585644693d3e474ed15f39"
+
+DEPENDS += "python-six python-six-native"
+PYPI_PACKAGE = "urlgrabber"
+inherit pypi setuptools
+
+FILES_${PN}_append = " ${datadir}"
diff --git a/meta-stx/recipes-devtools/python/python-urllib3_1.23.bbappend b/meta-stx/recipes-devtools/python/python-urllib3_1.23.bbappend
new file mode 100644 (file)
index 0000000..5ecad18
--- /dev/null
@@ -0,0 +1,21 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PV = "1.21.1"
+
+
+SRC_URI[md5sum] = "ef6c72c79b9c6bcd68c204f9b92abc86"
+SRC_URI[sha256sum] = "b14486978518ca0901a76ba973d7821047409d7f726f22156b24e83fd71382a5"
+
diff --git a/meta-stx/recipes-devtools/python/python-urwid_2.0.1.bb b/meta-stx/recipes-devtools/python/python-urwid_2.0.1.bb
new file mode 100644 (file)
index 0000000..de4a6ac
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Urwid is a console user interface library for Python."
+LICENSE = "LGPLv2+"
+LIC_FILES_CHKSUM = "file://COPYING;md5=243b725d71bb5df4a1e5920b344b86ad"
+
+SRC_URI[sha256sum] = "644d3e3900867161a2fc9287a9762753d66bd194754679adb26aede559bcccbc"
+
+
+PYPI_PACKAGE = "urwid"
+inherit pypi setuptools
diff --git a/meta-stx/recipes-devtools/python/python-versiontools.inc b/meta-stx/recipes-devtools/python/python-versiontools.inc
new file mode 100644 (file)
index 0000000..23cccd5
--- /dev/null
@@ -0,0 +1,25 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Smart replacement for plain tuple used in __version__"
+SECTION = "devel/python"
+HOMEPAGE = "https://launchpad.net/versiontools"
+LICENSE = "LGPLv3"
+LIC_FILES_CHKSUM = "file://setup.py;beginline=3;endline=20;md5=02193721a38fd8a05a4ddeb7df8e294d"
+
+inherit pypi
+
+SRC_URI[md5sum] = "602b7db8eea30dd29a1d451997adf251"
+SRC_URI[sha256sum] = "a969332887a18a9c98b0df0ea4d4ca75972f24ca94f06fb87d591377e83414f6"
diff --git a/meta-stx/recipes-devtools/python/python-versiontools_1.9.1.bb b/meta-stx/recipes-devtools/python/python-versiontools_1.9.1.bb
new file mode 100644 (file)
index 0000000..ab1bdd0
--- /dev/null
@@ -0,0 +1,17 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit setuptools
+require python-versiontools.inc
diff --git a/meta-stx/recipes-devtools/python/python-websocket-client_%.bbappend b/meta-stx/recipes-devtools/python/python-websocket-client_%.bbappend
new file mode 100644 (file)
index 0000000..e2401d6
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " \
+       ${PYTHON_PN}-backports-ssl \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-websockify_%.bbappend b/meta-stx/recipes-devtools/python/python-websockify_%.bbappend
new file mode 100644 (file)
index 0000000..3e390b4
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " \
+       python-numpy \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-webtest_%.bbappend b/meta-stx/recipes-devtools/python/python-webtest_%.bbappend
new file mode 100644 (file)
index 0000000..570ac5f
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " python-waitress"
diff --git a/meta-stx/recipes-devtools/python/python-wsme_%.bbappend b/meta-stx/recipes-devtools/python/python-wsme_%.bbappend
new file mode 100644 (file)
index 0000000..13d38c9
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+RDEPENDS_${PN}_append = " \
+       ${PYTHON_PN}-simplegeneric \
+       "
diff --git a/meta-stx/recipes-devtools/python/python-yaql_git.bb b/meta-stx/recipes-devtools/python/python-yaql_git.bb
new file mode 100644 (file)
index 0000000..d054f8b
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "YAQL (Yet Another Query Language) is an embeddable and extensible query language, that allows performing complex queries against arbitrary objects. It has a vast and comprehensive standard library of frequently used querying functions and can be extend even further with user-specified functions. YAQL is written in python and is distributed via PyPI."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=34400b68072d710fecd0a2940a0d1658"
+
+SRCREV = "7385a31b9dbfc777a514ba28fe507fbf904cc779"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/yaql.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python python-pbr-native"
+inherit setuptools distutils pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python-zipp_0.6.0.bb b/meta-stx/recipes-devtools/python/python-zipp_0.6.0.bb
new file mode 100644 (file)
index 0000000..082aa81
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Backport of pathlib-compatible object wrapper for zip files"
+HOMEPAGE = "https://github.com/jaraco/zipp"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=a33f38bbf47d48c70fe0d40e5f77498e"
+
+SRC_URI[md5sum] = "d4451a749d8a7c3c392a9edd1864a937"
+SRC_URI[sha256sum] = "3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e"
+
+DEPENDS += "${PYTHON_PN}-setuptools-scm-native"
+RDEPENDS_${PN} += "${PYTHON_PN}-more-itertools"
+
+inherit pypi setuptools
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-stx/recipes-devtools/python/python3-cheroot_git.bb b/meta-stx/recipes-devtools/python/python3-cheroot_git.bb
new file mode 100644 (file)
index 0000000..2e5695d
--- /dev/null
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Cheroot is the high-performance, pure-Python HTTP server used by CherryPy."
+
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE.md;md5=beeffd9dfcc746ed5a91921f1acc2746"
+
+SRCREV = "c7ca7ff0bcebb53e1bed783280a3bb5db35f900f"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "7.0.0"
+
+SRC_URI = "git://github.com/cherrypy/cheroot.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " \
+       python3 \
+       python3-setuptools-scm-native \
+       python3-setuptools-scm-git-archive-native \
+       "
+
+RDEPENDS_${PN} += " \
+       python3-email \
+       python3-fcntl \
+       python3-io \
+       python3-logging \
+       python3-unixadmin \
+       python3-pyopenssl \
+       python3-six \
+       "
+
+inherit setuptools3 distutils3 pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python3-cherrypy_git.bb b/meta-stx/recipes-devtools/python/python3-cherrypy_git.bb
new file mode 100644 (file)
index 0000000..32559b6
--- /dev/null
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+SUMMARY = " CherryPy is a pythonic, object-oriented HTTP framework"
+DESCRIPTION = "\
+       It allows building web applications in much the same way one would build any other object-oriented program. \
+       This design results in less and more readable code being developed faster. It's all just properties and methods. \
+       It is now more than ten years old and has proven fast and very stable. \
+       It is being used in production by many sites, from the simplest to the most demanding. \
+       "
+
+
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE.md;md5=a8cbc5da4e6892b15a972a0b18622b2b"
+
+SRCREV = "994803e4923e53b7079c79f4e9b502cc1b8d0aa6"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "18.2.0"
+
+SRC_URI = "git://github.com/cherrypy/cherrypy.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python3 python3-setuptools-scm-native"
+inherit setuptools3 distutils3 pkgconfig
+
+RDEPENDS_${PN} += " \
+       python3-compression \
+       python3-crypt \
+       python3-datetime \
+       python3-email \
+       python3-fcntl \
+       python3-html \
+       python3-io \
+       python3-json \
+       python3-logging \
+       python3-netclient \
+       python3-netserver \
+       python3-profile \
+       python3-pydoc \
+       python3-xml \
+       python3-unixadmin \
+       "
+
+RDEPENDS_${PN} += " \
+       python3-cheroot \
+       python3-contextlib2 \
+       python3-memcached \
+       python3-portend \
+       python3-pyopenssl \
+       python3-routes \
+       python3-simplejson \
+       python3-six \
+       python3-zc-lockfile \
+       "
diff --git a/meta-stx/recipes-devtools/python/python3-lang_git.bb b/meta-stx/recipes-devtools/python/python3-lang_git.bb
new file mode 100644 (file)
index 0000000..91604e2
--- /dev/null
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = " Lang - Python Language Constraints"
+DESCRIPTION = "\
+       Lang is a Python module that allows enforcing programming language constraints. Lang was \
+       built using a Java like mindset, so many of the constraints that are supported are mirrors \
+       of constraints in the Java programming language. \
+       "
+
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=fa818a259cbed7ce8bc2a22d35a464fc"
+
+SRCREV = "feb4c638ebc581d9913f440965e83558fd10018c"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git/src"
+
+SRC_URI = "git://github.com/amitassaraf/lang.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python3 python3-setuptools-scm-native"
+inherit setuptools3 distutils3 pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python3-linux-procfs_git.bb b/meta-stx/recipes-devtools/python/python3-linux-procfs_git.bb
new file mode 100644 (file)
index 0000000..46037ce
--- /dev/null
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Python classes to extract information from the Linux kernel /proc files"
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=8ca43cbc842c2336e835926c2166c28b"
+
+
+SRCREV = "d170839360edf3fbac4a31c7771c4ec5bb0b6121"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "0.6.1"
+
+SRC_URI = "git://git.kernel.org/pub/scm/libs/python/python-linux-procfs/python-linux-procfs.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python3"
+RDEPENDS_${PN} = "python3-core"
+
+inherit setuptools3 distutils3 pkgconfig
diff --git a/meta-stx/recipes-devtools/python/python3-logutils_0.3.5.bb b/meta-stx/recipes-devtools/python/python3-logutils_0.3.5.bb
new file mode 100644 (file)
index 0000000..d23f58e
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Set of handlers for the Python standard library's logging package"
+HOMEPAGE = "https://pypi.python.org/pypi/logutils"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=44c35f0b8e2a27a2f33a4e4a5c65d014"
+
+SRCREV = "66d55fae0cb3a94bc6a25af8e7c3dff2535a1b02"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "0.3.5"
+
+# SRC_URI = "hg://bitbucket.org/vinay.sajip/logutils;module=${PN}-${PV};proto=https;rev=8dcaf2a86257"
+SRC_URI = "git://bitbucket.org/vinay.sajip/logutils.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools3 distutils3
diff --git a/meta-stx/recipes-devtools/python/python3-mako_%.bbappend b/meta-stx/recipes-devtools/python/python3-mako_%.bbappend
new file mode 100644 (file)
index 0000000..3d16413
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append() {
+       mv ${D}/${bindir}/mako-render ${D}/${bindir}/mako3-render 
+}
diff --git a/meta-stx/recipes-devtools/python/python3-pecan_git.bb b/meta-stx/recipes-devtools/python/python3-pecan_git.bb
new file mode 100644 (file)
index 0000000..397888d
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "WSGI object-dispatching web framework"
+
+HOMEPAGE = "https://pypi.python.org/pypi/pecan/"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d846877d24bbb3d7a00a985c90378e8c"
+
+SRCREV = "da15e06d783e2cf569b39ba506e68e4e1e85568d"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/pecan/pecan.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+inherit setuptools3
+
+RDEPENDS_${PN} = "python3-mako \
+                 python3-six \
+                 python3-logutils \
+                 python3-webtest  \
+                 "
+
+do_install_append() {
+        mv ${D}/${bindir}/gunicorn_pecan ${D}/${bindir}/gunicorn_pecan3
+        mv ${D}/${bindir}/pecan ${D}/${bindir}/pecan3
+}
diff --git a/meta-stx/recipes-devtools/python/python3-prettytable_0.7.2.bb b/meta-stx/recipes-devtools/python/python3-prettytable_0.7.2.bb
new file mode 100644 (file)
index 0000000..c21bfd8
--- /dev/null
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# Update log with the message: adopted from upstream e1f2e1ae5f27d889e3d4bef1b23edcf8a6d804c0
+DESCRIPTION = " \
+       PrettyTable is a simple Python library designed to make it quick and easy to represent tabular data \
+       in visually appealing ASCII tables. It was inspired by the ASCII tables used in the PostgreSQL shell \
+       psql. PrettyTable allows for selection of which columns are to be printed, independent alignment of \
+       columns (left or right justified or centred) and printing of “sub-tables” by specifying a row range.\
+       "
+
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://COPYING;md5=3e73500ffa52de5071cff65990055282"
+
+SRC_URI[md5sum] = "0c1361104caff8b09f220748f9d69899"
+SRC_URI[sha256sum] = "a53da3b43d7a5c229b5e3ca2892ef982c46b7923b51e98f0db49956531211c4f"
+
+SRCNAME = "prettytable"
+
+SRC_URI = "https://pypi.python.org/packages/source/P/PrettyTable/${SRCNAME}-${PV}.zip"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools3
+do_install_append() {
+    find "${D}${PYTHON_SITEPACKAGES_DIR}/" \( -name "*.txt" -o -name "PKG-INFO" \) -exec chmod 644 {} +
+}
+
+BBCLASSEXTEND = "native nativesdk"
diff --git a/meta-stx/recipes-devtools/python/python3-pymysql_0.9.3.bb b/meta-stx/recipes-devtools/python/python3-pymysql_0.9.3.bb
new file mode 100644 (file)
index 0000000..dfbe060
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "A pure-Python MySQL client library"
+DESCRIPTION = " \
+This package contains a pure-Python MySQL client library, based on PEP 249 \
+Most public APIs are compatible with mysqlclient and MySQLdb.\
+"
+SECTION = "devel/python"
+HOMEPAGE = "https://github.com/PyMySQL/PyMySQL"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=528175c84163bb800d23ad835c7fa0fc"
+
+inherit pypi
+inherit setuptools3
+
+PYPI_PACKAGE = "PyMySQL"
+
+SRC_URI[md5sum] = "e5d9183cc0a775ac29f9e0365cca6556"
+SRC_URI[sha256sum] = "d8c059dcd81dedb85a9f034d5e22dcb4442c0b201908bede99e306d65ea7c8e7"
diff --git a/meta-stx/recipes-devtools/python/python3-redfishtool_git.bb b/meta-stx/recipes-devtools/python/python3-redfishtool_git.bb
new file mode 100644 (file)
index 0000000..f90f508
--- /dev/null
@@ -0,0 +1,43 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Client side API implementation of the Redfish RESTful API for Data Center Hardware Management."
+HOMEPAGE = "https://github.com/DMTF/Redfishtool"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE.md;md5=cee7a7694b5bf14bc9d3e0fbe78a64af"
+
+
+SRCREV = "2bdcd905e1ad227f40809ec298804d5401047612"
+SRCNAME = "Redfishtool"
+BRANCH = "master"
+PROTOCOL = "https"
+PV = "1.1.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/DMTF/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools3
+
+RDEPENDS_${PN} += " \
+       python3-requests \
+"
+
+do_install_append() {
+       :
+}
+
+FILES_${PN} += " \
+       "
diff --git a/meta-stx/recipes-devtools/python/python3-versiontools_1.9.1.bb b/meta-stx/recipes-devtools/python/python3-versiontools_1.9.1.bb
new file mode 100644 (file)
index 0000000..470f242
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit setuptools3
+require python-versiontools.inc
diff --git a/meta-stx/recipes-devtools/python/python3-webtest_2.0.33.bb b/meta-stx/recipes-devtools/python/python3-webtest_2.0.33.bb
new file mode 100644 (file)
index 0000000..91f465e
--- /dev/null
@@ -0,0 +1,28 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "This wraps any WSGI application and makes it easy to send test requests to that application, without starting up an HTTP server."
+HOMEPAGE = "https://pypi.python.org/pypi/WebTest/"
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://license.rst;md5=64f013a9d7a2a8ffc8d016a2d4214bcd"
+
+PYPI_PACKAGE = "WebTest"
+
+RDEPENDS_${PN} += "${PYTHON_PN}-beautifulsoup4"
+
+SRC_URI[md5sum] = "dd0385c725b85ac1e8079f38d2acd7b1"
+SRC_URI[sha256sum] = "41348efe4323a647a239c31cde84e5e440d726ca4f449859264e538d39037fd0"
+inherit setuptools3 pypi
diff --git a/meta-stx/recipes-devtools/python/python3-wsme_git.bb b/meta-stx/recipes-devtools/python/python3-wsme_git.bb
new file mode 100644 (file)
index 0000000..0c08e6d
--- /dev/null
@@ -0,0 +1,35 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Web Services Made Easy (WSME)"
+DESCRIPTION = "\
+       Web Services Made Easy (WSME) simplifies the writing of REST web services by providing simple yet \
+       powerful typing, removing the need to directly manipulate the request and the response objects. \
+       "
+
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5a9126e7f56a0cf3247050de7f10d0f4"
+
+SRCREV = "f36a607124355007d3a830ebe7e53efda777c58a"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git/"
+
+SRC_URI = "git://opendev.org/x/wsme.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+DEPENDS += " python3 python3-pbr-native"
+inherit distutils3 
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-Add-a-color-setting-for-mips64_n32-binaries.patch b/meta-stx/recipes-devtools/rpm/files/0001-Add-a-color-setting-for-mips64_n32-binaries.patch
new file mode 100644 (file)
index 0000000..ac6dcaf
--- /dev/null
@@ -0,0 +1,40 @@
+From e3eff024826550aec4a6a5baef7210a29faf299d Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Thu, 9 Mar 2017 18:54:02 +0200
+Subject: [PATCH] Add a color setting for mips64_n32 binaries
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ build/rpmfc.c | 1 +
+ rpmrc.in      | 2 ++
+ 2 files changed, 3 insertions(+)
+
+diff --git a/build/rpmfc.c b/build/rpmfc.c
+index d38a10916..c8e2f876a 100644
+--- a/build/rpmfc.c
++++ b/build/rpmfc.c
+@@ -622,6 +622,7 @@ exit:
+ static const struct rpmfcTokens_s rpmfcTokens[] = {
+   { "directory",              RPMFC_INCLUDE },
++  { "N32 MIPS64",             RPMFC_ELFMIPSN32|RPMFC_INCLUDE },
+   { "ELF 32-bit",             RPMFC_ELF32|RPMFC_INCLUDE },
+   { "ELF 64-bit",             RPMFC_ELF64|RPMFC_INCLUDE },
+diff --git a/rpmrc.in b/rpmrc.in
+index abc08fc31..f5bc820d8 100644
+--- a/rpmrc.in
++++ b/rpmrc.in
+@@ -133,6 +133,8 @@ archcolor: mipsr6el 1
+ archcolor: mips64r6 2
+ archcolor: mips64r6el 2
++archcolor: mips64_n32 4
++
+ archcolor: m68k 1
+ archcolor: m68kmint 1
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-Do-not-add-an-unsatisfiable-dependency-when-building.patch b/meta-stx/recipes-devtools/rpm/files/0001-Do-not-add-an-unsatisfiable-dependency-when-building.patch
new file mode 100644 (file)
index 0000000..80e2f0f
--- /dev/null
@@ -0,0 +1,33 @@
+From 87cfc0db1ed6fe381a5ed5f0016d8c3344a31a11 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Mon, 9 Jan 2017 18:52:11 +0200
+Subject: [PATCH] Do not add an unsatisfiable dependency when building rpms in
+ a short-circuited way.
+
+Upstream permits short-circuiting only for local testing; Yocto on the other
+hand produces rpms that way by design.
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ build/pack.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/build/pack.c b/build/pack.c
+index 1261cdbba..bb2d6f4f6 100644
+--- a/build/pack.c
++++ b/build/pack.c
+@@ -595,10 +595,6 @@ rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
+           headerPutBin(pkg->header, RPMTAG_SOURCEPKGID, spec->sourcePkgId,16);
+       }
+-      if (cheating) {
+-          (void) rpmlibNeedsFeature(pkg, "ShortCircuited", "4.9.0-1");
+-      }
+-      
+       {   char *binFormat = rpmGetPath("%{_rpmfilename}", NULL);
+           char *binRpm, *binDir;
+           binRpm = headerFormat(pkg->header, binFormat, &errorString);
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch b/meta-stx/recipes-devtools/rpm/files/0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch
new file mode 100644 (file)
index 0000000..82e7328
--- /dev/null
@@ -0,0 +1,58 @@
+From bd08eb0ae1312f347f49949481daa7c923752df2 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Mon, 27 Feb 2017 09:43:30 +0200
+Subject: [PATCH] Do not hardcode "lib/rpm" as the installation path for
+ default configuration and macros.
+
+Upstream-Status: Denied [https://github.com/rpm-software-management/rpm/pull/263]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+---
+ configure.ac | 2 +-
+ macros.in    | 2 +-
+ rpm.am       | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index 09af7c4..9bd6903 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1055,7 +1055,7 @@ else
+     usrprefix=$prefix
+ fi
+-RPMCONFIGDIR="`echo ${usrprefix}/lib/rpm`"
++RPMCONFIGDIR="`echo ${libdir}/rpm`"
+ AC_SUBST(RPMCONFIGDIR)
+ AC_SUBST(OBJDUMP)
+diff --git a/macros.in b/macros.in
+index a3aa7a9..62cee5c 100644
+--- a/macros.in
++++ b/macros.in
+@@ -970,7 +970,7 @@ package or when debugging this package.\
+ %_sharedstatedir      %{_prefix}/com
+ %_localstatedir               %{_prefix}/var
+ %_lib                 lib
+-%_libdir              %{_exec_prefix}/%{_lib}
++%_libdir              @libdir@
+ %_includedir          %{_prefix}/include
+ %_infodir             %{_datadir}/info
+ %_mandir              %{_datadir}/man
+diff --git a/rpm.am b/rpm.am
+index 82c2d7c..6341b51 100644
+--- a/rpm.am
++++ b/rpm.am
+@@ -1,10 +1,10 @@
+ # Internal binaries
+ ## HACK: It probably should be $(libexecdir)/rpm or $(libdir)/rpm
+-rpmlibexecdir = $(prefix)/lib/rpm
++rpmlibexecdir = $(libdir)/rpm
+ # Host independent config files
+ ## HACK: it probably should be $(datadir)/rpm
+-rpmconfigdir = $(prefix)/lib/rpm
++rpmconfigdir = $(libdir)/rpm
+ # Libtool version (current-revision-age) for all our libraries
+ rpm_version_info = 9:0:1
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-Do-not-read-config-files-from-HOME.patch b/meta-stx/recipes-devtools/rpm/files/0001-Do-not-read-config-files-from-HOME.patch
new file mode 100644 (file)
index 0000000..96eb418
--- /dev/null
@@ -0,0 +1,38 @@
+From 35381b6cd6c1b571bf7e6b0640de0f54dbf94386 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Tue, 10 Jan 2017 14:11:30 +0200
+Subject: [PATCH] Do not read config files from $HOME
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ lib/rpmrc.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/lib/rpmrc.c b/lib/rpmrc.c
+index 4ed991321..19fe80f98 100644
+--- a/lib/rpmrc.c
++++ b/lib/rpmrc.c
+@@ -458,8 +458,7 @@ static void setDefaults(void)
+     if (!defrcfiles) {
+       defrcfiles = rstrscat(NULL, confdir, "/rpmrc", ":",
+                               confdir, "/" RPMCANONVENDOR "/rpmrc", ":",
+-                              SYSCONFDIR "/rpmrc", ":",
+-                              "~/.rpmrc", NULL);
++                              SYSCONFDIR "/rpmrc", ":");
+     }
+ #ifndef MACROFILES
+@@ -471,8 +470,7 @@ static void setDefaults(void)
+                               confdir, "/" RPMCANONVENDOR "/macros", ":",
+                               SYSCONFDIR "/rpm/macros.*", ":",
+                               SYSCONFDIR "/rpm/macros", ":",
+-                              SYSCONFDIR "/rpm/%{_target}/macros", ":",
+-                              "~/.rpmmacros", NULL);
++                              SYSCONFDIR "/rpm/%{_target}/macros", ":");
+     }
+ #else
+     macrofiles = MACROFILES;
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-Do-not-reset-the-PATH-environment-variable-before-ru.patch b/meta-stx/recipes-devtools/rpm/files/0001-Do-not-reset-the-PATH-environment-variable-before-ru.patch
new file mode 100644 (file)
index 0000000..41cdf6e
--- /dev/null
@@ -0,0 +1,28 @@
+From a674b9cc7af448d7c6748bc163bf37dc14a57f09 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Fri, 20 Jan 2017 13:32:06 +0200
+Subject: [PATCH] Do not reset the PATH environment variable before running
+ scriptlets.
+
+We add lots of native stuff into it and scriptlets rely on that.
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+---
+ lib/rpmscript.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/rpmscript.c b/lib/rpmscript.c
+index 6a31e0d..2b0e438 100644
+--- a/lib/rpmscript.c
++++ b/lib/rpmscript.c
+@@ -184,7 +184,7 @@ static void doScriptExec(ARGV_const_t argv, ARGV_const_t prefixes,
+       if (ipath && ipath[5] != '%')
+           path = ipath;
+-      xx = setenv("PATH", path, 1);
++      //xx = setenv("PATH", path, 1);
+       free(ipath);
+     }
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-Fix-build-with-musl-C-library.patch b/meta-stx/recipes-devtools/rpm/files/0001-Fix-build-with-musl-C-library.patch
new file mode 100644 (file)
index 0000000..0b1d629
--- /dev/null
@@ -0,0 +1,48 @@
+From d076de030deb9cafd9b2e82be5d506cebdefad0b Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Mon, 27 Feb 2017 14:43:21 +0200
+Subject: [PATCH 1/9] Fix build with musl C library.
+
+Upstream-Status: Pending
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+---
+ configure.ac       | 3 ++-
+ rpmio/digest_nss.c | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index c04a2e8d1..c9d9ac16d 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -255,6 +255,7 @@ AC_SEARCH_LIBS(dlopen, [dl])
+ # Check for libelf library. Prefer external, otherwise none.
+ WITH_LIBELF_LIB=
+ AC_CHECK_HEADER([libelf.h])
++AC_CHECK_HEADERS([error.h], [WITH_ERROR_H=yes])
+ AC_CHECK_HEADERS([gelf.h], [
+       AC_CHECK_LIB(elf, gelf_getvernaux, [
+           AC_DEFINE(HAVE_LIBELF, 1, [Define to 1 if you have the 'elf' library (-lelf).])
+@@ -263,7 +264,7 @@ AC_CHECK_HEADERS([gelf.h], [
+       ])
+ ])
+ AC_SUBST(WITH_LIBELF_LIB)
+-AM_CONDITIONAL(LIBELF,[test "$WITH_LIBELF" = yes])
++AM_CONDITIONAL(LIBELF,[test "$WITH_LIBELF" = yes && test "$WITH_ERROR_H" = yes])
+ AC_CHECK_HEADERS([dwarf.h], [
+   WITH_LIBDWARF=yes
+diff --git a/rpmio/digest_nss.c b/rpmio/digest_nss.c
+index 992d9acf6..e11920e3e 100644
+--- a/rpmio/digest_nss.c
++++ b/rpmio/digest_nss.c
+@@ -1,5 +1,6 @@
+ #include "system.h"
++#include <signal.h>
+ #include <pthread.h>
+ #include <nss.h>
+ #include <sechash.h>
+-- 
+2.14.2
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-Split-binary-package-building-into-a-separate-functi.patch b/meta-stx/recipes-devtools/rpm/files/0001-Split-binary-package-building-into-a-separate-functi.patch
new file mode 100644 (file)
index 0000000..6e44f0b
--- /dev/null
@@ -0,0 +1,84 @@
+From 721a660a507d6d062e7aecafad886c643970a5d5 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Thu, 25 May 2017 18:15:27 +0300
+Subject: [PATCH 1/4] Split binary package building into a separate function
+
+So that it can be run as a thread pool task.
+
+Upstream-Status: Submitted [https://github.com/rpm-software-management/rpm/pull/226]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+---
+ build/pack.c | 33 +++++++++++++++++++++------------
+ 1 file changed, 21 insertions(+), 12 deletions(-)
+
+diff --git a/build/pack.c b/build/pack.c
+index 518f4e92a..ccfd614cc 100644
+--- a/build/pack.c
++++ b/build/pack.c
+@@ -546,18 +546,13 @@ static rpmRC checkPackages(char *pkgcheck)
+     return RPMRC_OK;
+ }
+-rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
++static rpmRC packageBinary(rpmSpec spec, Package pkg, const char *cookie, int cheating, char** filename)
+ {
+-    rpmRC rc;
+-    const char *errorString;
+-    Package pkg;
+-    char *pkglist = NULL;
+-
+-    for (pkg = spec->packages; pkg != NULL; pkg = pkg->next) {
+-      char *fn;
++      const char *errorString;
++      rpmRC rc = RPMRC_OK;
+       if (pkg->fileList == NULL)
+-          continue;
++          return rc;
+       if ((rc = processScriptFiles(spec, pkg)))
+           return rc;
+@@ -587,7 +582,7 @@ rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
+                    headerGetString(pkg->header, RPMTAG_NAME), errorString);
+               return RPMRC_FAIL;
+           }
+-          fn = rpmGetPath("%{_rpmdir}/", binRpm, NULL);
++          *filename = rpmGetPath("%{_rpmdir}/", binRpm, NULL);
+           if ((binDir = strchr(binRpm, '/')) != NULL) {
+               struct stat st;
+               char *dn;
+@@ -609,14 +604,28 @@ rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
+           free(binRpm);
+       }
+-      rc = writeRPM(pkg, NULL, fn, NULL);
++      rc = writeRPM(pkg, NULL, *filename, NULL);
+       if (rc == RPMRC_OK) {
+           /* Do check each written package if enabled */
+-          char *pkgcheck = rpmExpand("%{?_build_pkgcheck} ", fn, NULL);
++          char *pkgcheck = rpmExpand("%{?_build_pkgcheck} ", *filename, NULL);
+           if (pkgcheck[0] != ' ') {
+               rc = checkPackages(pkgcheck);
+           }
+           free(pkgcheck);
++      }
++      return rc;
++}
++
++rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
++{
++    rpmRC rc;
++    Package pkg;
++    char *pkglist = NULL;
++
++    for (pkg = spec->packages; pkg != NULL; pkg = pkg->next) {
++      char *fn = NULL;
++      rc = packageBinary(spec, pkg, cookie, cheating, &fn);
++      if (rc == RPMRC_OK) {
+           rstrcat(&pkglist, fn);
+           rstrcat(&pkglist, " ");
+       }
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch b/meta-stx/recipes-devtools/rpm/files/0001-When-cross-installing-execute-package-scriptlets-wit.patch
new file mode 100644 (file)
index 0000000..4020a31
--- /dev/null
@@ -0,0 +1,62 @@
+From a89daa75ac970d8e247edc762d1181e9a5b0c5d0 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Tue, 17 Jan 2017 14:07:17 +0200
+Subject: [PATCH] When cross-installing, execute package scriptlets without
+ chrooting into destination rootfs
+
+This is triggered only when RPM_NO_CHROOT_FOR_SCRIPTS environment variable is defined.
+Otherwise they will trigger an explosion of failures, obviously.
+
+Amended 2018-07-03 by Olof Johansson <olofjn@axis.com>:
+
+  Remove leaking temporary scriptlet files
+
+  Since we tell dnf to run rpm with debug output, this will result in rpm not
+  cleaning up written temporary scriptlet files (same flag controls both
+  behaviors). This wouldn't have been a problem since we normally would use the
+  target sysroot also for temporary files, but we need to chroot out to be able
+  to actually run the rpm scriptlets (purpose of this patch), so the temporary
+  files are written to the host's /var/tmp/ directory, causing a gradual
+  resource leakage on the host system for every RPM based do_rootfs task
+  executed.
+
+  Signed-off-by: Olof Johansson <olofjn@axis.com>
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ lib/rpmscript.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/lib/rpmscript.c b/lib/rpmscript.c
+index cc98c4885..f8bd3df04 100644
+--- a/lib/rpmscript.c
++++ b/lib/rpmscript.c
+@@ -394,8 +394,7 @@ exit:
+       Fclose(out);    /* XXX dup'd STDOUT_FILENO */
+     if (fn) {
+-      if (!rpmIsDebug())
+-          unlink(fn);
++      unlink(fn);
+       free(fn);
+     }
+     free(mline);
+@@ -428,7 +427,13 @@ rpmRC rpmScriptRun(rpmScript script, int arg1, int arg2, FD_t scriptFd,
+     if (rc != RPMRC_FAIL) {
+       if (script_type & RPMSCRIPTLET_EXEC) {
+-          rc = runExtScript(plugins, prefixes, script->descr, lvl, scriptFd, &args, script->body, arg1, arg2, &script->nextFileFunc);
++          if (getenv("RPM_NO_CHROOT_FOR_SCRIPTS") != NULL) {
++              rpmChrootOut();
++              rc = runExtScript(plugins, prefixes, script->descr, lvl, scriptFd, &args, script->body, arg1, arg2, &script->nextFileFunc);
++              rpmChrootIn();
++          } else {
++              rc = runExtScript(plugins, prefixes, script->descr, lvl, scriptFd, &args, script->body, arg1, arg2, &script->nextFileFunc);
++          }
+       } else {
+           rc = runLuaScript(plugins, prefixes, script->descr, lvl, scriptFd, &args, script->body, arg1, arg2, &script->nextFileFunc);
+       }
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-perl-disable-auto-reqs.patch b/meta-stx/recipes-devtools/rpm/files/0001-perl-disable-auto-reqs.patch
new file mode 100644 (file)
index 0000000..a6c5869
--- /dev/null
@@ -0,0 +1,32 @@
+perl: disable auto requires
+
+When generating automatic requirements, it's possible for perl scripts to
+declare 'optional' dependencies.  These seem to often be incorrect and will
+cause installation failures in OE.  Instead of fixing the perl scripts, it
+was decided it is better to simply disable the automatic dependency
+generation.  This matches the behavior from the previous RPM5 implementation.
+
+Upstream-Status: Inappropriate [OE specific configuration]
+
+Signed-off-by: Mark Hatle <mark.hatle@windriver.com>
+
+Index: git/fileattrs/perl.attr
+===================================================================
+--- git.orig/fileattrs/perl.attr
++++ git/fileattrs/perl.attr
+@@ -1,3 +1,3 @@
+-%__perl_requires      %{_rpmconfigdir}/perl.req
++#__perl_requires      %{_rpmconfigdir}/perl.req
+ %__perl_magic         ^.*[Pp]erl .*$
+ %__perl_flags         exeonly
+Index: git/fileattrs/perllib.attr
+===================================================================
+--- git.orig/fileattrs/perllib.attr
++++ git/fileattrs/perllib.attr
+@@ -1,5 +1,5 @@
+ %__perllib_provides   %{_rpmconfigdir}/perl.prov
+-%__perllib_requires   %{_rpmconfigdir}/perl.req
++#__perllib_requires   %{_rpmconfigdir}/perl.req
+ %__perllib_magic      ^Perl[[:digit:]] module source.*
+ %__perllib_path               \\.pm$
+ %__perllib_flags      magic_and_path
diff --git a/meta-stx/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch b/meta-stx/recipes-devtools/rpm/files/0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch
new file mode 100644 (file)
index 0000000..6454785
--- /dev/null
@@ -0,0 +1,65 @@
+From 0066b862bb3a09f39295abd5d972a53ac8dc1555 Mon Sep 17 00:00:00 2001
+From: Peter Bergin <peter@berginkonsult.se>
+Date: Wed, 19 Sep 2018 15:12:31 +0200
+Subject: [PATCH] rpm/rpmio.c: restrict virtual memory usage if limit set
+
+A solution to avoid OOM situation when the virtual memory is restricted
+for a user (ulimit -v). As the lzopen_internal function is run in parallel
+one instance per CPU thread the available virtual memory is limited per
+CPU thread.
+
+Upstream-Status: Pending [merge of multithreading patches to upstream]
+
+Signed-off-by: Peter Bergin <peter@berginkonsult.se>
+---
+ rpmio/rpmio.c | 34 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 34 insertions(+)
+
+diff --git a/rpmio/rpmio.c b/rpmio/rpmio.c
+index e051c98..b3c56b6 100644
+--- a/rpmio/rpmio.c
++++ b/rpmio/rpmio.c
+@@ -845,6 +845,40 @@ static LZFILE *lzopen_internal(const char *mode, int fd, int xz)
+               }
+ #endif
++              struct rlimit virtual_memory;
++              getrlimit(RLIMIT_AS, &virtual_memory);
++              if (virtual_memory.rlim_cur != RLIM_INFINITY) {
++                      const uint64_t virtual_memlimit = virtual_memory.rlim_cur;
++                      const uint64_t virtual_memlimit_per_cpu_thread =
++                              virtual_memlimit / lzma_cputhreads();
++                      uint64_t memory_usage_virt;
++                      rpmlog(RPMLOG_NOTICE, "XZ: virtual memory restricted to %lu and "
++                             "per CPU thread %lu\n", virtual_memlimit, virtual_memlimit_per_cpu_thread);
++                      /* keep reducing the number of compression threads until memory
++                         usage falls below the limit per CPU thread*/
++                      while ((memory_usage_virt = lzma_stream_encoder_mt_memusage(&mt_options)) >
++                             virtual_memlimit_per_cpu_thread) {
++                              /* If number of threads goes down to zero lzma_stream_encoder will
++                               * will return UINT64_MAX. We must check here to avoid an infinite loop.
++                               * If we get into situation that one thread requires more virtual memory
++                               * than available we set one thread, print error message and try anyway. */
++                              if (--mt_options.threads == 0) {
++                                      mt_options.threads = 1;
++                                      rpmlog(RPMLOG_WARNING,
++                                             "XZ: Could not adjust number of threads to get below "
++                                             "virtual memory limit %lu. usage %lu\n",
++                                             virtual_memlimit_per_cpu_thread, memory_usage_virt);
++                                      break;
++                              }
++                      }
++                      if (threads != (int)mt_options.threads)
++                              rpmlog(RPMLOG_NOTICE,
++                                     "XZ: Adjusted the number of threads from %d to %d to not "
++                                     "exceed the memory usage limit of %lu bytes\n",
++                                     threads, mt_options.threads, virtual_memlimit);
++
++              }
++
+               ret = lzma_stream_encoder_mt(&lzfile->strm, &mt_options);
+           }
+ #endif
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0002-Add-support-for-prefixing-etc-from-RPM_ETCCONFIGDIR-.patch b/meta-stx/recipes-devtools/rpm/files/0002-Add-support-for-prefixing-etc-from-RPM_ETCCONFIGDIR-.patch
new file mode 100644 (file)
index 0000000..b3dbc31
--- /dev/null
@@ -0,0 +1,72 @@
+From 383c0b097b7eba16801a9e3c4b8e36a4b6de74ab Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Fri, 20 Jan 2017 13:33:05 +0200
+Subject: [PATCH 2/2] Add support for prefixing /etc from RPM_ETCCONFIGDIR
+ environment variable
+
+This is needed so that rpm can pick up target-specific configuration
+from target rootfs instead of its own native sysroot.
+
+Upstream-Status: Inappropriate [oe-core specific]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+---
+ lib/rpmrc.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/lib/rpmrc.c b/lib/rpmrc.c
+index 19fe80f98..6b27b3941 100644
+--- a/lib/rpmrc.c
++++ b/lib/rpmrc.c
+@@ -455,10 +455,14 @@ const char * lookupInDefaultTable(const char * name,
+ static void setDefaults(void)
+ {
+     const char *confdir = rpmConfigDir();
++    const char *etcconfdir = getenv("RPM_ETCCONFIGDIR");
++    if (etcconfdir == NULL)
++        etcconfdir = "";
++
+     if (!defrcfiles) {
+       defrcfiles = rstrscat(NULL, confdir, "/rpmrc", ":",
+                               confdir, "/" RPMCANONVENDOR "/rpmrc", ":",
+-                              SYSCONFDIR "/rpmrc", ":");
++                              etcconfdir, SYSCONFDIR "/rpmrc", ":", NULL);
+     }
+ #ifndef MACROFILES
+@@ -468,9 +472,9 @@ static void setDefaults(void)
+                               confdir, "/platform/%{_target}/macros", ":",
+                               confdir, "/fileattrs/*.attr", ":",
+                               confdir, "/" RPMCANONVENDOR "/macros", ":",
+-                              SYSCONFDIR "/rpm/macros.*", ":",
+-                              SYSCONFDIR "/rpm/macros", ":",
+-                              SYSCONFDIR "/rpm/%{_target}/macros", ":");
++                              etcconfdir, SYSCONFDIR "/rpm/macros.*", ":",
++                              etcconfdir, SYSCONFDIR "/rpm/macros", ":",
++                              etcconfdir, SYSCONFDIR "/rpm/%{_target}/macros", ":", NULL);
+     }
+ #else
+     macrofiles = MACROFILES;
+@@ -989,7 +993,11 @@ static void read_auxv(void)
+  */
+ static void defaultMachine(rpmrcCtx ctx, const char ** arch, const char ** os)
+ {
+-    const char * const platform_path = SYSCONFDIR "/rpm/platform";
++    const char *etcconfdir = getenv("RPM_ETCCONFIGDIR");
++    if (etcconfdir == NULL)
++        etcconfdir = "";
++
++    const char * const platform_path = rstrscat(NULL, etcconfdir, SYSCONFDIR "/rpm/platform", NULL);
+     static struct utsname un;
+     char * chptr;
+     canonEntry canon;
+@@ -1286,6 +1294,7 @@ static void defaultMachine(rpmrcCtx ctx, const char ** arch, const char ** os)
+     if (arch) *arch = un.machine;
+     if (os) *os = un.sysname;
++    free(platform_path);
+ }
+ static
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0002-Run-binary-package-creation-via-thread-pools.patch b/meta-stx/recipes-devtools/rpm/files/0002-Run-binary-package-creation-via-thread-pools.patch
new file mode 100644 (file)
index 0000000..d10041c
--- /dev/null
@@ -0,0 +1,127 @@
+From 513200cf76758de4668312c628d6362bdabfaf4b Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Thu, 25 May 2017 19:30:20 +0300
+Subject: [PATCH 1/3] Run binary package creation via thread pools.
+
+Upstream-Status: Submitted [https://github.com/rpm-software-management/rpm/pull/226]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+---
+ build/pack.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++++-----------
+ configure.ac |  3 +++
+ 2 files changed, 70 insertions(+), 14 deletions(-)
+
+diff --git a/build/pack.c b/build/pack.c
+index ccfd614cc..ed5b9ab4e 100644
+--- a/build/pack.c
++++ b/build/pack.c
+@@ -616,25 +616,78 @@ static rpmRC packageBinary(rpmSpec spec, Package pkg, const char *cookie, int ch
+       return rc;
+ }
+-rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
++struct binaryPackageTaskData
+ {
+-    rpmRC rc;
+     Package pkg;
++    char *filename;
++    rpmRC result;
++    struct binaryPackageTaskData *next;
++};
++
++static struct binaryPackageTaskData* runBinaryPackageTasks(rpmSpec spec, const char *cookie, int cheating)
++{
++    struct binaryPackageTaskData *tasks = NULL;
++    struct binaryPackageTaskData *task = NULL;
++    struct binaryPackageTaskData *prev = NULL;
++
++    for (Package pkg = spec->packages; pkg != NULL; pkg = pkg->next) {
++        task = rcalloc(1, sizeof(*task));
++        task->pkg = pkg;
++        if (pkg == spec->packages) {
++            // the first package needs to be processed ahead of others, as they copy
++            // changelog data from it, and so otherwise data races would happen
++            task->result = packageBinary(spec, pkg, cookie, cheating, &(task->filename));
++            rpmlog(RPMLOG_NOTICE, _("Finished binary package job, result %d, filename %s\n"), task->result, task->filename);
++            tasks = task;
++        }
++        if (prev != NULL) {
++            prev->next = task;
++        }
++        prev = task;
++    }
++
++    #pragma omp parallel
++    #pragma omp single
++    // re-declaring task variable is necessary, or older gcc versions will produce code that segfaults
++    for (struct binaryPackageTaskData *task = tasks; task != NULL; task = task->next) {
++        if (task != tasks)
++        #pragma omp task
++        {
++            task->result = packageBinary(spec, task->pkg, cookie, cheating, &(task->filename));
++            rpmlog(RPMLOG_NOTICE, _("Finished binary package job, result %d, filename %s\n"), task->result, task->filename);
++        }
++    }
++
++    return tasks;
++}
++
++static void freeBinaryPackageTasks(struct binaryPackageTaskData* tasks)
++{
++    while (tasks != NULL) {
++        struct binaryPackageTaskData* next = tasks->next;
++        rfree(tasks->filename);
++        rfree(tasks);
++        tasks = next;
++    }
++}
++
++rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
++{
+     char *pkglist = NULL;
+-    for (pkg = spec->packages; pkg != NULL; pkg = pkg->next) {
+-      char *fn = NULL;
+-      rc = packageBinary(spec, pkg, cookie, cheating, &fn);
+-      if (rc == RPMRC_OK) {
+-          rstrcat(&pkglist, fn);
+-          rstrcat(&pkglist, " ");
+-      }
+-      free(fn);
+-      if (rc != RPMRC_OK) {
+-          pkglist = _free(pkglist);
+-          return rc;
+-      }
++    struct binaryPackageTaskData *tasks = runBinaryPackageTasks(spec, cookie, cheating);
++
++    for (struct binaryPackageTaskData *task = tasks; task != NULL; task = task->next) {
++        if (task->result == RPMRC_OK) {
++            rstrcat(&pkglist, task->filename);
++            rstrcat(&pkglist, " ");
++        } else {
++            _free(pkglist);
++            freeBinaryPackageTasks(tasks);
++            return RPMRC_FAIL;
++        }
+     }
++    freeBinaryPackageTasks(tasks);
+     /* Now check the package set if enabled */
+     if (pkglist != NULL) {
+diff --git a/configure.ac b/configure.ac
+index a506ec819..59fa0acaf 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -17,6 +17,9 @@ AC_DISABLE_STATIC
+ PKG_PROG_PKG_CONFIG
++AC_OPENMP
++RPMCFLAGS="$OPENMP_CFLAGS $RPMCFLAGS"
++
+ dnl Checks for programs.
+ AC_PROG_CXX
+ AC_PROG_AWK
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0003-rpmstrpool.c-make-operations-over-string-pools-threa.patch b/meta-stx/recipes-devtools/rpm/files/0003-rpmstrpool.c-make-operations-over-string-pools-threa.patch
new file mode 100644 (file)
index 0000000..c348ae5
--- /dev/null
@@ -0,0 +1,207 @@
+From c80892f17e44331206c8318d53b63bb6a99554d0 Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Tue, 30 May 2017 13:58:30 +0300
+Subject: [PATCH 3/4] rpmstrpool.c: make operations over string pools
+ thread-safe
+
+Otherwise multithreaded rpm building explodes in various ways due
+to data races.
+
+Upstream-Status: Submitted [https://github.com/rpm-software-management/rpm/pull/226]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+---
+ rpmio/rpmstrpool.c | 56 +++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 47 insertions(+), 9 deletions(-)
+
+diff --git a/rpmio/rpmstrpool.c b/rpmio/rpmstrpool.c
+index 30a57eb10..58ba95a02 100644
+--- a/rpmio/rpmstrpool.c
++++ b/rpmio/rpmstrpool.c
+@@ -113,6 +113,8 @@ static poolHash poolHashCreate(int numBuckets)
+     return ht;
+ }
++static const char * rpmstrPoolStrNoLock(rpmstrPool pool, rpmsid sid);
++
+ static void poolHashResize(rpmstrPool pool, int numBuckets)
+ {
+     poolHash ht = pool->hash;
+@@ -120,7 +122,7 @@ static void poolHashResize(rpmstrPool pool, int numBuckets)
+     for (int i=0; i<ht->numBuckets; i++) {
+         if (!ht->buckets[i].keyid) continue;
+-        unsigned int keyHash = rstrhash(rpmstrPoolStr(pool, ht->buckets[i].keyid));
++        unsigned int keyHash = rstrhash(rpmstrPoolStrNoLock(pool, ht->buckets[i].keyid));
+         for (unsigned int j=0;;j++) {
+             unsigned int hash = hashbucket(keyHash, j) % numBuckets;
+             if (!buckets[hash].keyid) {
+@@ -149,7 +151,7 @@ static void poolHashAddHEntry(rpmstrPool pool, const char * key, unsigned int ke
+             ht->buckets[hash].keyid = keyid;
+             ht->keyCount++;
+             break;
+-        } else if (!strcmp(rpmstrPoolStr(pool, ht->buckets[hash].keyid), key)) {
++        } else if (!strcmp(rpmstrPoolStrNoLock(pool, ht->buckets[hash].keyid), key)) {
+             return;
+         }
+     }
+@@ -191,7 +193,7 @@ static void poolHashPrintStats(rpmstrPool pool)
+     int maxcollisions = 0;
+     for (i=0; i<ht->numBuckets; i++) {
+-        unsigned int keyHash = rstrhash(rpmstrPoolStr(pool, ht->buckets[i].keyid));
++        unsigned int keyHash = rstrhash(rpmstrPoolStrNoLock(pool, ht->buckets[i].keyid));
+         for (unsigned int j=0;;j++) {
+             unsigned int hash = hashbucket(keyHash, i) % ht->numBuckets;
+             if (hash==i) {
+@@ -221,7 +223,7 @@ static void rpmstrPoolRehash(rpmstrPool pool)
+     pool->hash = poolHashCreate(sizehint);
+     for (int i = 1; i <= pool->offs_size; i++)
+-      poolHashAddEntry(pool, rpmstrPoolStr(pool, i), i);
++      poolHashAddEntry(pool, rpmstrPoolStrNoLock(pool, i), i);
+ }
+ rpmstrPool rpmstrPoolCreate(void)
+@@ -245,6 +247,8 @@ rpmstrPool rpmstrPoolCreate(void)
+ rpmstrPool rpmstrPoolFree(rpmstrPool pool)
+ {
++    #pragma omp critical(rpmstrpool)
++    {
+     if (pool) {
+       if (pool->nrefs > 1) {
+           pool->nrefs--;
+@@ -260,18 +264,24 @@ rpmstrPool rpmstrPoolFree(rpmstrPool pool)
+           free(pool);
+       }
+     }
++    }
+     return NULL;
+ }
+ rpmstrPool rpmstrPoolLink(rpmstrPool pool)
+ {
++    #pragma omp critical(rpmstrpool)
++    {
+     if (pool)
+       pool->nrefs++;
++    }
+     return pool;
+ }
+ void rpmstrPoolFreeze(rpmstrPool pool, int keephash)
+ {
++    #pragma omp critical(rpmstrpool)
++    {
+     if (pool && !pool->frozen) {
+       if (!keephash) {
+           pool->hash = poolHashFree(pool->hash);
+@@ -281,16 +291,20 @@ void rpmstrPoolFreeze(rpmstrPool pool, int keephash)
+                             pool->offs_alloced * sizeof(*pool->offs));
+       pool->frozen = 1;
+     }
++    }
+ }
+ void rpmstrPoolUnfreeze(rpmstrPool pool)
+ {
++    #pragma omp critical(rpmstrpool)
++    {
+     if (pool) {
+       if (pool->hash == NULL) {
+           rpmstrPoolRehash(pool);
+       }
+       pool->frozen = 0;
+     }
++    }
+ }
+ static rpmsid rpmstrPoolPut(rpmstrPool pool, const char *s, size_t slen, unsigned int hash)
+@@ -350,7 +364,7 @@ static rpmsid rpmstrPoolGet(rpmstrPool pool, const char * key, size_t keylen,
+             return 0;
+         }
+-      s = rpmstrPoolStr(pool, ht->buckets[hash].keyid);
++      s = rpmstrPoolStrNoLock(pool, ht->buckets[hash].keyid);
+       /* pool string could be longer than keylen, require exact matche */
+       if (strncmp(s, key, keylen) == 0 && s[keylen] == '\0')
+           return ht->buckets[hash].keyid;
+@@ -373,27 +387,31 @@ static inline rpmsid strn2id(rpmstrPool pool, const char *s, size_t slen,
+ rpmsid rpmstrPoolIdn(rpmstrPool pool, const char *s, size_t slen, int create)
+ {
+     rpmsid sid = 0;
+-
++    #pragma omp critical(rpmstrpool)
++    {
+     if (s != NULL) {
+       unsigned int hash = rstrnhash(s, slen);
+       sid = strn2id(pool, s, slen, hash, create);
+     }
++    }
+     return sid;
+ }
+ rpmsid rpmstrPoolId(rpmstrPool pool, const char *s, int create)
+ {
+     rpmsid sid = 0;
+-
++    #pragma omp critical(rpmstrpool)
++    {
+     if (s != NULL) {
+       size_t slen;
+       unsigned int hash = rstrlenhash(s, &slen);
+       sid = strn2id(pool, s, slen, hash, create);
+     }
++    }
+     return sid;
+ }
+-const char * rpmstrPoolStr(rpmstrPool pool, rpmsid sid)
++static const char * rpmstrPoolStrNoLock(rpmstrPool pool, rpmsid sid)
+ {
+     const char *s = NULL;
+     if (pool && sid > 0 && sid <= pool->offs_size)
+@@ -401,12 +419,25 @@ const char * rpmstrPoolStr(rpmstrPool pool, rpmsid sid)
+     return s;
+ }
++const char * rpmstrPoolStr(rpmstrPool pool, rpmsid sid)
++{
++    const char *s = NULL;
++    #pragma omp critical(rpmstrpool)
++    {
++    s = rpmstrPoolStrNoLock(pool, sid);
++    }
++    return s;
++}
++
+ size_t rpmstrPoolStrlen(rpmstrPool pool, rpmsid sid)
+ {
+     size_t slen = 0;
++    #pragma omp critical(rpmstrpool)
++    {
+     if (pool && sid > 0 && sid <= pool->offs_size) {
+       slen = strlen(pool->offs[sid]);
+     }
++    }
+     return slen;
+ }
+@@ -421,5 +452,12 @@ int rpmstrPoolStreq(rpmstrPool poolA, rpmsid sidA,
+ rpmsid rpmstrPoolNumStr(rpmstrPool pool)
+ {
+-    return (pool != NULL) ? pool->offs_size : 0;
++    rpmsid id = 0;
++    #pragma omp critical(rpmstrpool)
++    {
++    if (pool) {
++      id = pool->offs_size;
++    }
++    }
++    return id;
+ }
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-devtools/rpm/files/0004-build-pack.c-remove-static-local-variables-from-buil.patch b/meta-stx/recipes-devtools/rpm/files/0004-build-pack.c-remove-static-local-variables-from-buil.patch
new file mode 100644 (file)
index 0000000..652e30b
--- /dev/null
@@ -0,0 +1,336 @@
+From 792693bb90768cfde4898e8dd31ee1b5de803d2f Mon Sep 17 00:00:00 2001
+From: Alexander Kanavin <alex.kanavin@gmail.com>
+Date: Thu, 8 Jun 2017 17:08:09 +0300
+Subject: [PATCH] build/pack.c: remove static local variables from buildHost()
+ and getBuildTime()
+
+Their use is causing difficult to diagnoze data races when building multiple
+packages in parallel, and is a bad idea in general, as it also makes it more
+difficult to reason about code.
+
+Upstream-Status: Submitted [https://github.com/rpm-software-management/rpm/pull/226]
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
+
+---
+ build/build.c             | 54 ++++++++++++++++++++++++++++--
+ build/pack.c              | 84 +++++++++--------------------------------------
+ build/rpmbuild_internal.h |  8 +++--
+ 3 files changed, 74 insertions(+), 72 deletions(-)
+
+diff --git a/build/build.c b/build/build.c
+index 13c3df2..b154f08 100644
+--- a/build/build.c
++++ b/build/build.c
+@@ -6,6 +6,8 @@
+ #include "system.h"
+ #include <errno.h>
++#include <netdb.h>
++#include <time.h>
+ #include <sys/wait.h>
+ #include <rpm/rpmlog.h>
+@@ -16,6 +18,50 @@
+ #include "debug.h"
++static rpm_time_t getBuildTime(void)
++{
++    rpm_time_t buildTime = 0;
++    char *srcdate;
++    time_t epoch;
++    char *endptr;
++
++    srcdate = getenv("SOURCE_DATE_EPOCH");
++    if (srcdate) {
++        errno = 0;
++        epoch = strtol(srcdate, &endptr, 10);
++        if (srcdate == endptr || *endptr || errno != 0)
++            rpmlog(RPMLOG_ERR, _("unable to parse SOURCE_DATE_EPOCH\n"));
++        else
++            buildTime = (int32_t) epoch;
++    } else
++        buildTime = (int32_t) time(NULL);
++
++    return buildTime;
++}
++
++static char * buildHost(void)
++{
++    char* hostname;
++    struct hostent *hbn;
++    char *bhMacro;
++
++    bhMacro = rpmExpand("%{?_buildhost}", NULL);
++    if (strcmp(bhMacro, "") != 0) {
++        rasprintf(&hostname, "%s", bhMacro);
++    } else {
++        hostname = rcalloc(1024, sizeof(*hostname));
++        (void) gethostname(hostname, 1024);
++        hbn = gethostbyname(hostname);
++        if (hbn)
++            strcpy(hostname, hbn->h_name);
++        else
++            rpmlog(RPMLOG_WARNING,
++                    _("Could not canonicalize hostname: %s\n"), hostname);
++    }
++    free(bhMacro);
++    return(hostname);
++}
++
+ /**
+  */
+ static rpmRC doRmSource(rpmSpec spec)
+@@ -201,6 +247,9 @@ static rpmRC buildSpec(BTA_t buildArgs, rpmSpec spec, int what)
+     rpmRC rc = RPMRC_OK;
+     int test = (what & RPMBUILD_NOBUILD);
+     char *cookie = buildArgs->cookie ? xstrdup(buildArgs->cookie) : NULL;
++    const char* host = buildHost();
++    rpm_time_t buildTime = getBuildTime();
++
+     if (rpmExpandNumeric("%{?source_date_epoch_from_changelog}") &&
+       getenv("SOURCE_DATE_EPOCH") == NULL) {
+@@ -269,11 +318,11 @@ static rpmRC buildSpec(BTA_t buildArgs, rpmSpec spec, int what)
+               goto exit;
+       if (((what & RPMBUILD_PACKAGESOURCE) && !test) &&
+-          (rc = packageSources(spec, &cookie)))
++          (rc = packageSources(spec, &cookie, buildTime, host)))
+               goto exit;
+       if (((what & RPMBUILD_PACKAGEBINARY) && !test) &&
+-          (rc = packageBinaries(spec, cookie, (didBuild == 0))))
++          (rc = packageBinaries(spec, cookie, (didBuild == 0), buildTime, host)))
+               goto exit;
+       
+       if ((what & RPMBUILD_CLEAN) &&
+@@ -293,6 +342,7 @@ static rpmRC buildSpec(BTA_t buildArgs, rpmSpec spec, int what)
+       (void) unlink(spec->specFile);
+ exit:
++    free(host);
+     free(cookie);
+     spec->rootDir = NULL;
+     if (rc != RPMRC_OK && rpmlogGetNrecs() > 0) {
+diff --git a/build/pack.c b/build/pack.c
+index df15876..17a4b09 100644
+--- a/build/pack.c
++++ b/build/pack.c
+@@ -6,8 +6,6 @@
+ #include "system.h"
+ #include <errno.h>
+-#include <netdb.h>
+-#include <time.h>
+ #include <sys/wait.h>
+ #include <rpm/rpmlib.h>                       /* RPMSIGTAG*, rpmReadPackageFile */
+@@ -152,57 +150,6 @@ exit:
+     return rc;
+ }
+-static rpm_time_t * getBuildTime(void)
+-{
+-    static rpm_time_t buildTime[1];
+-    char *srcdate;
+-    time_t epoch;
+-    char *endptr;
+-
+-    if (buildTime[0] == 0) {
+-        srcdate = getenv("SOURCE_DATE_EPOCH");
+-        if (srcdate) {
+-            errno = 0;
+-            epoch = strtol(srcdate, &endptr, 10);
+-            if (srcdate == endptr || *endptr || errno != 0)
+-                rpmlog(RPMLOG_ERR, _("unable to parse SOURCE_DATE_EPOCH\n"));
+-            else
+-                buildTime[0] = (int32_t) epoch;
+-        } else
+-            buildTime[0] = (int32_t) time(NULL);
+-    }
+-
+-    return buildTime;
+-}
+-
+-static const char * buildHost(void)
+-{
+-    static char hostname[1024];
+-    static int oneshot = 0;
+-    struct hostent *hbn;
+-    char *bhMacro;
+-
+-    if (! oneshot) {
+-        bhMacro = rpmExpand("%{?_buildhost}", NULL);
+-        if (strcmp(bhMacro, "") != 0 && strlen(bhMacro) < 1024) {
+-            strcpy(hostname, bhMacro);
+-        } else {
+-            if (strcmp(bhMacro, "") != 0)
+-                rpmlog(RPMLOG_WARNING, _("The _buildhost macro is too long\n"));
+-            (void) gethostname(hostname, sizeof(hostname));
+-            hbn = gethostbyname(hostname);
+-            if (hbn)
+-                strcpy(hostname, hbn->h_name);
+-            else
+-                rpmlog(RPMLOG_WARNING,
+-                        _("Could not canonicalize hostname: %s\n"), hostname);
+-        }
+-        free(bhMacro);
+-        oneshot = 1;
+-    }
+-    return(hostname);
+-}
+-
+ static rpmRC processScriptFiles(rpmSpec spec, Package pkg)
+ {
+     struct TriggerFileEntry *p;
+@@ -476,7 +423,8 @@ exit:
+  * order to how the RPM format is laid on disk.
+  */
+ static rpmRC writeRPM(Package pkg, unsigned char ** pkgidp,
+-                    const char *fileName, char **cookie)
++                    const char *fileName, char **cookie,
++                    rpm_time_t buildTime, const char* buildHost)
+ {
+     FD_t fd = NULL;
+     char * rpmio_flags = NULL;
+@@ -500,7 +448,7 @@ static rpmRC writeRPM(Package pkg, unsigned char ** pkgidp,
+     /* Create and add the cookie */
+     if (cookie) {
+-      rasprintf(cookie, "%s %d", buildHost(), (int) (*getBuildTime()));
++      rasprintf(cookie, "%s %d", buildHost, buildTime);
+       headerPutString(pkg->header, RPMTAG_COOKIE, *cookie);
+     }
+@@ -641,7 +589,7 @@ static rpmRC checkPackages(char *pkgcheck)
+     return RPMRC_OK;
+ }
+-static rpmRC packageBinary(rpmSpec spec, Package pkg, const char *cookie, int cheating, char** filename)
++static rpmRC packageBinary(rpmSpec spec, Package pkg, const char *cookie, int cheating, char** filename, rpm_time_t buildTime, const char* buildHost)
+ {
+       const char *errorString;
+       rpmRC rc = RPMRC_OK;
+@@ -660,8 +608,8 @@ static rpmRC packageBinary(rpmSpec spec, Package pkg, const char *cookie, int ch
+       headerCopyTags(spec->packages->header, pkg->header, copyTags);
+       
+       headerPutString(pkg->header, RPMTAG_RPMVERSION, VERSION);
+-      headerPutString(pkg->header, RPMTAG_BUILDHOST, buildHost());
+-      headerPutUint32(pkg->header, RPMTAG_BUILDTIME, getBuildTime(), 1);
++      headerPutString(pkg->header, RPMTAG_BUILDHOST, buildHost);
++      headerPutUint32(pkg->header, RPMTAG_BUILDTIME, &buildTime, 1);
+       if (spec->sourcePkgId != NULL) {
+           headerPutBin(pkg->header, RPMTAG_SOURCEPKGID, spec->sourcePkgId,16);
+@@ -699,7 +647,7 @@ static rpmRC packageBinary(rpmSpec spec, Package pkg, const char *cookie, int ch
+           free(binRpm);
+       }
+-      rc = writeRPM(pkg, NULL, *filename, NULL);
++      rc = writeRPM(pkg, NULL, *filename, NULL, buildTime, buildHost);
+       if (rc == RPMRC_OK) {
+           /* Do check each written package if enabled */
+           char *pkgcheck = rpmExpand("%{?_build_pkgcheck} ", *filename, NULL);
+@@ -719,7 +667,7 @@ struct binaryPackageTaskData
+     struct binaryPackageTaskData *next;
+ };
+-static struct binaryPackageTaskData* runBinaryPackageTasks(rpmSpec spec, const char *cookie, int cheating)
++static struct binaryPackageTaskData* runBinaryPackageTasks(rpmSpec spec, const char *cookie, int cheating, rpm_time_t buildTime, char* buildHost)
+ {
+     struct binaryPackageTaskData *tasks = NULL;
+     struct binaryPackageTaskData *task = NULL;
+@@ -731,7 +679,7 @@ static struct binaryPackageTaskData* runBinaryPackageTasks(rpmSpec spec, const c
+         if (pkg == spec->packages) {
+             // the first package needs to be processed ahead of others, as they copy
+             // changelog data from it, and so otherwise data races would happen
+-            task->result = packageBinary(spec, pkg, cookie, cheating, &(task->filename));
++            task->result = packageBinary(spec, pkg, cookie, cheating, &(task->filename), buildTime, buildHost);
+             rpmlog(RPMLOG_NOTICE, _("Finished binary package job, result %d, filename %s\n"), task->result, task->filename);
+             tasks = task;
+         }
+@@ -748,7 +696,7 @@ static struct binaryPackageTaskData* runBinaryPackageTasks(rpmSpec spec, const c
+         if (task != tasks)
+         #pragma omp task
+         {
+-            task->result = packageBinary(spec, task->pkg, cookie, cheating, &(task->filename));
++            task->result = packageBinary(spec, task->pkg, cookie, cheating, &(task->filename), buildTime, buildHost);
+             rpmlog(RPMLOG_NOTICE, _("Finished binary package job, result %d, filename %s\n"), task->result, task->filename);
+         }
+     }
+@@ -766,11 +714,11 @@ static void freeBinaryPackageTasks(struct binaryPackageTaskData* tasks)
+     }
+ }
+-rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
++rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating, rpm_time_t buildTime, char* buildHost)
+ {
+     char *pkglist = NULL;
+-    struct binaryPackageTaskData *tasks = runBinaryPackageTasks(spec, cookie, cheating);
++    struct binaryPackageTaskData *tasks = runBinaryPackageTasks(spec, cookie, cheating, buildTime, buildHost);
+     for (struct binaryPackageTaskData *task = tasks; task != NULL; task = task->next) {
+         if (task->result == RPMRC_OK) {
+@@ -797,7 +745,7 @@ rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating)
+     return RPMRC_OK;
+ }
+-rpmRC packageSources(rpmSpec spec, char **cookie)
++rpmRC packageSources(rpmSpec spec, char **cookie, rpm_time_t buildTime, char* buildHost)
+ {
+     Package sourcePkg = spec->sourcePackage;
+     rpmRC rc;
+@@ -805,8 +753,8 @@ rpmRC packageSources(rpmSpec spec, char **cookie)
+     /* Add some cruft */
+     headerPutString(sourcePkg->header, RPMTAG_RPMVERSION, VERSION);
+-    headerPutString(sourcePkg->header, RPMTAG_BUILDHOST, buildHost());
+-    headerPutUint32(sourcePkg->header, RPMTAG_BUILDTIME, getBuildTime(), 1);
++    headerPutString(sourcePkg->header, RPMTAG_BUILDHOST, buildHost);
++    headerPutUint32(sourcePkg->header, RPMTAG_BUILDTIME, &buildTime, 1);
+     headerPutUint32(sourcePkg->header, RPMTAG_SOURCEPACKAGE, &one, 1);
+     /* XXX this should be %_srpmdir */
+@@ -814,7 +762,7 @@ rpmRC packageSources(rpmSpec spec, char **cookie)
+       char *pkgcheck = rpmExpand("%{?_build_pkgcheck_srpm} ", fn, NULL);
+       spec->sourcePkgId = NULL;
+-      rc = writeRPM(sourcePkg, &spec->sourcePkgId, fn, cookie);
++      rc = writeRPM(sourcePkg, &spec->sourcePkgId, fn, cookie, buildTime, buildHost);
+       /* Do check SRPM package if enabled */
+       if (rc == RPMRC_OK && pkgcheck[0] != ' ') {
+diff --git a/build/rpmbuild_internal.h b/build/rpmbuild_internal.h
+index 439b7d3..07e8338 100644
+--- a/build/rpmbuild_internal.h
++++ b/build/rpmbuild_internal.h
+@@ -427,19 +427,23 @@ rpmRC processSourceFiles(rpmSpec spec, rpmBuildPkgFlags pkgFlags);
+  * @param spec                spec file control structure
+  * @param cookie      build identifier "cookie" or NULL
+  * @param cheating    was build shortcircuited?
++ * @param buildTime   the build timestamp that goes into packages
++ * @param buildHost   the hostname where the build is happening
+  * @return            RPMRC_OK on success
+  */
+ RPM_GNUC_INTERNAL
+-rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating);
++rpmRC packageBinaries(rpmSpec spec, const char *cookie, int cheating, rpm_time_t buildTime, char* buildHost);
+ /** \ingroup rpmbuild
+  * Generate source package.
+  * @param spec                spec file control structure
+  * @retval cookie     build identifier "cookie" or NULL
++ * @param buildTime   the build timestamp that goes into packages
++ * @param buildHost   the hostname where the build is happening
+  * @return            RPMRC_OK on success
+  */
+ RPM_GNUC_INTERNAL
+-rpmRC packageSources(rpmSpec spec, char **cookie);
++rpmRC packageSources(rpmSpec spec, char **cookie, rpm_time_t buildTime, char* buildHost);
+ RPM_GNUC_INTERNAL
+ int addLangTag(rpmSpec spec, Header h, rpmTagVal tag,
diff --git a/meta-stx/recipes-devtools/rpm/files/0011-Do-not-require-that-ELF-binaries-are-executable-to-b.patch b/meta-stx/recipes-devtools/rpm/files/0011-Do-not-require-that-ELF-binaries-are-executable-to-b.patch
new file mode 100644 (file)
index 0000000..4ac5c38
--- /dev/null
@@ -0,0 +1,33 @@
+From 5141d50d7b3d3c209a22c53deedb4ceef014401d Mon Sep 17 00:00:00 2001
+From: Peter Kjellerstedt <pkj@axis.com>
+Date: Mon, 15 May 2017 10:21:08 +0200
+Subject: [PATCH 09/15] Do not require that ELF binaries are executable to be
+ identifiable
+
+There is nothing that requires, e.g., a DSO to be executable, but it
+is still an ELF binary and should be identified as such.
+
+Upstream probably expects all ELF binaries to be marked as executable,
+but rather than imposing such a limitation for OE, allow any file to
+be identified as an ELF binary regardless of whether it is executable
+or not.
+
+Upstream-Status: Inappropriate
+Signed-off-by: Peter Kjellerstedt <peter.kjellerstedt@axis.com>
+
+---
+ fileattrs/elf.attr | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/fileattrs/elf.attr b/fileattrs/elf.attr
+index 5805dd0ee..3516f309d 100644
+--- a/fileattrs/elf.attr
++++ b/fileattrs/elf.attr
+@@ -1,4 +1,3 @@
+ %__elf_provides               %{_rpmconfigdir}/elfdeps --provides %{?__filter_GLIBC_PRIVATE:--filter-private}
+ %__elf_requires               %{_rpmconfigdir}/elfdeps --requires %{?__filter_GLIBC_PRIVATE:--filter-private}
+ %__elf_magic          ^(setuid,? )?(setgid,? )?(sticky )?ELF (32|64)-bit.*$
+-%__elf_flags          exeonly
+-- 
+2.14.2
+
diff --git a/meta-stx/recipes-devtools/rpm/rpm2_4.14.2.bb b/meta-stx/recipes-devtools/rpm/rpm2_4.14.2.bb
new file mode 100644 (file)
index 0000000..233fa14
--- /dev/null
@@ -0,0 +1,195 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+#SUMMARY = "The RPM package management system"
+#DESCRIPTION = "The RPM Package Manager (RPM) is a powerful command line driven \
+#package management system capable of installing, uninstalling, \
+#verifying, querying, and updating software packages. Each software \
+#package consists of an archive of files along with information about \
+#the package like its version, a description, etc."
+
+#SUMMARY_${PN}-dev = "Development files for manipulating RPM packages"
+#DESCRIPTION_${PN}-dev = "This package contains the RPM C library and header files. These \
+#development files will simplify the process of writing programs that \
+#manipulate RPM packages and databases. These files are intended to \
+#simplify the process of creating graphical package managers or any \
+#other tools that need an intimate knowledge of RPM packages in order \
+#to function."
+
+SUMMARY_python2-rpm = "Python bindings for applications that manipulate RPM packages"
+DESCRIPTION_python2-rpm = "The python2-rpm package contains a module that permits applications \
+written in the Python programming language to use the interface \
+supplied by the RPM Package Manager libraries."
+
+HOMEPAGE = "http://www.rpm.org"
+
+# libraries are also LGPL - how to express this?
+LICENSE = "GPL-2.0"
+LIC_FILES_CHKSUM = "file://COPYING;md5=c0bf017c0fd1920e6158a333acabfd4a"
+
+SRC_URI = "git://github.com/rpm-software-management/rpm;branch=rpm-4.14.x \
+           file://0001-Do-not-add-an-unsatisfiable-dependency-when-building.patch \
+           file://0001-Do-not-read-config-files-from-HOME.patch \
+           file://0001-When-cross-installing-execute-package-scriptlets-wit.patch \
+           file://0001-Do-not-reset-the-PATH-environment-variable-before-ru.patch \
+           file://0002-Add-support-for-prefixing-etc-from-RPM_ETCCONFIGDIR-.patch \
+           file://0001-Do-not-hardcode-lib-rpm-as-the-installation-path-for.patch \
+           file://0001-Fix-build-with-musl-C-library.patch \
+           file://0001-Add-a-color-setting-for-mips64_n32-binaries.patch \
+           file://0011-Do-not-require-that-ELF-binaries-are-executable-to-b.patch \
+           file://0001-Split-binary-package-building-into-a-separate-functi.patch \
+           file://0002-Run-binary-package-creation-via-thread-pools.patch \
+           file://0003-rpmstrpool.c-make-operations-over-string-pools-threa.patch \
+           file://0004-build-pack.c-remove-static-local-variables-from-buil.patch \
+           file://0001-perl-disable-auto-reqs.patch \
+           file://0001-rpm-rpmio.c-restrict-virtual-memory-usage-if-limit-s.patch \
+           "
+
+PE = "1"
+SRCREV = "753f6941dc32e94047b7cfe713ddd604a810b4db"
+
+S = "${WORKDIR}/git"
+
+DEPENDS = "nss libarchive db file popt xz bzip2 dbus elfutils python"
+DEPENDS_append_class-native = " file-replacement-native bzip2-replacement-native"
+
+inherit autotools gettext pkgconfig pythonnative
+export PYTHON_ABI
+
+# OE-core patches autoreconf to additionally run gnu-configize, which fails with this recipe
+EXTRA_AUTORECONF_append = " --exclude=gnu-configize"
+
+EXTRA_OECONF_append = " --without-lua --enable-python"
+EXTRA_OECONF_append_libc-musl = " --disable-nls"
+
+# --sysconfdir prevents rpm from attempting to access machine-specific configuration in sysroot/etc; we need to have it in rootfs
+#
+# --localstatedir prevents rpm from writing its database to native sysroot when building images
+#
+# Disable dbus for native, so that rpm doesn't attempt to inhibit shutdown via session dbus even when plugins support is enabled.
+# Also disable plugins by default for native.
+EXTRA_OECONF_append_class-native = " --sysconfdir=/etc --localstatedir=/var --disable-plugins"
+EXTRA_OECONF_append_class-nativesdk = " --sysconfdir=/etc --localstatedir=/var --disable-plugins"
+
+BBCLASSEXTEND = "native nativesdk"
+
+PACKAGECONFIG ??= ""
+PACKAGECONFIG[imaevm] = "--with-imaevm,,ima-evm-utils"
+
+ASNEEDED = ""
+
+# Direct rpm-native to read configuration from our sysroot, not the one it was compiled in
+# libmagic also has sysroot path contamination, so override it
+
+#WRAPPER_TOOLS = " \
+#   ${bindir}/rpm \
+#   ${bindir}/rpm2archive \
+#   ${bindir}/rpm2cpio \
+#   ${bindir}/rpmbuild \
+#   ${bindir}/rpmdb \
+#   ${bindir}/rpmgraph \
+#   ${bindir}/rpmkeys \
+#   ${bindir}/rpmsign \
+#   ${bindir}/rpmspec \
+#   ${libdir}/rpm/rpmdeps \
+#"
+
+#do_install_append_class-native() {
+#        for tool in ${WRAPPER_TOOLS}; do
+#                create_wrapper ${D}$tool \
+#                        RPM_CONFIGDIR=${STAGING_LIBDIR_NATIVE}/rpm \
+#                        RPM_ETCCONFIGDIR=${STAGING_DIR_NATIVE} \
+#                        MAGIC=${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc \
+#                        RPM_NO_CHROOT_FOR_SCRIPTS=1
+#        done
+#}
+
+#do_install_append_class-nativesdk() {
+#        for tool in ${WRAPPER_TOOLS}; do
+#                create_wrapper ${D}$tool \
+#                        RPM_CONFIGDIR='`dirname $''realpath`'/${@os.path.relpath(d.getVar('libdir'), d.getVar('bindir'))}/rpm \
+#                        RPM_ETCCONFIGDIR='$'{RPM_ETCCONFIGDIR-'`dirname $''realpath`'/${@os.path.relpath(d.getVar('sysconfdir'), d.getVar('bindir'))}/..} \
+#                        MAGIC='`dirname $''realpath`'/${@os.path.relpath(d.getVar('datadir'), d.getVar('bindir'))}/misc/magic.mgc \
+#                        RPM_NO_CHROOT_FOR_SCRIPTS=1
+#        done
+#
+#        rm -rf ${D}/var
+#}
+
+# Rpm's make install creates var/tmp which clashes with base-files packaging
+#do_install_append_class-target() {
+#    rm -rf ${D}/var
+#}
+
+do_compile_append () {
+       cd python
+       cp -r ../../git/python/* ./
+       python setup.py build
+}
+
+do_install_append () {
+       sed -i -e 's:${HOSTTOOLS_DIR}/::g' \
+           ${D}/${libdir}/rpm/macros
+
+       sed -i -e 's|/usr/bin/python|${USRBINPATH}/env ${PYTHON_PN}|' \
+           ${D}${libdir}/rpm/pythondistdeps.py \
+           ${D}${libdir}/rpm/python-macro-helper
+
+       # remove all contents except python2-rpm
+       rm -r ${D}/var
+       rm -r ${D}/usr/share
+       rm -r ${D}/usr/include
+       rm -r ${D}/usr/lib/librpm*
+       rm -r ${D}/usr/lib/pkgconfig
+       # rm -r ${D}/usr/src
+       rm -r ${D}/usr/lib/rpm
+       rm -r ${D}/usr/lib/rpm-plugins
+       # rm -r ${D}/usr/lib/.debug
+       rm -r ${D}/usr/bin
+
+       cd python
+       python setup.py install \
+               --root=${D} --prefix=/usr \
+               --install-lib=${PYTHON_SITEPACKAGES_DIR}/ --install-data=${datadir}
+}
+
+#FILES_${PN} += "${libdir}/rpm-plugins/*.so \
+#                ${libdir}/rpm \
+#               "
+
+#FILES_${PN}-dev += "${libdir}/rpm-plugins/*.la \
+#                    "
+
+PACKAGES = "python2-rpm rpm2-dbg"
+PROVIDES = "python2-rpm rpm2-dbg"
+FILES_python2-rpm = " \
+       ${PYTHON_SITEPACKAGES_DIR}/rpm/ \
+       ${PYTHON_SITEPACKAGES_DIR}/rpm-${PV}-py${PYTHON_BASEVERSION}.egg-info \
+       "
+
+# rpm 5.x was packaging the rpm build tools separately
+#RPROVIDES_${PN} += "rpm-build"
+
+RDEPENDS_${PN} = "bash perl python-core"
+RDEPENDS_python2-rpm = "rpm"
+DEPENDS_python2-rpm = "rpm"
+# PACKAGE_PREPROCESS_FUNCS += "rpm_package_preprocess"
+
+# Do not specify a sysroot when compiling on a target.
+#rpm_package_preprocess () {
+#      sed -i -e 's:--sysroot[^ ]*::g' \
+#          ${PKGD}/${libdir}/rpm/macros
+#}
+
diff --git a/meta-stx/recipes-devtools/rsync/rsync_%.bbappend b/meta-stx/recipes-devtools/rsync/rsync_%.bbappend
new file mode 100644 (file)
index 0000000..b2e07ad
--- /dev/null
@@ -0,0 +1,27 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit systemd
+SYSTEMD_PACKAGES = "${PN}"
+SYSTEMD_SERVICE_${PN} = "rsync.service"
+SYSTEMD_AUTO_ENABLE_${PN} = "enable"
+
+do_install_append_class-target() {
+        install -p -D -m 644 ${S}/packaging/systemd/rsync.service ${D}/${systemd_system_unitdir}/rsync.service
+}
+
+FILES_${PN}_append = " ${systemd_system_unitdir}"
+
+       
diff --git a/meta-stx/recipes-devtools/ruby-shadow/ruby-shadow_%.bbappend b/meta-stx/recipes-devtools/ruby-shadow/ruby-shadow_%.bbappend
new file mode 100644 (file)
index 0000000..4b883fd
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit openssl10
diff --git a/meta-stx/recipes-devtools/ruby/ruby.inc b/meta-stx/recipes-devtools/ruby/ruby.inc
new file mode 100644 (file)
index 0000000..fa5692e
--- /dev/null
@@ -0,0 +1,57 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "An interpreter of object-oriented scripting language"
+DESCRIPTION = "Ruby is an interpreted scripting language for quick \
+and easy object-oriented programming. It has many features to process \
+text files and to do system management tasks (as in Perl). \
+It is simple, straight-forward, and extensible. \
+"
+HOMEPAGE = "http://www.ruby-lang.org/"
+SECTION = "devel/ruby"
+LICENSE = "Ruby | BSD | GPLv2"
+LIC_FILES_CHKSUM = "\
+    file://COPYING;md5=837b32593517ae48b9c3b5c87a5d288c \
+    file://BSDL;md5=19aaf65c88a40b508d17ae4be539c4b5 \
+    file://GPL;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
+    file://LEGAL;md5=3ce1fae39fe573b818c0af162bce6579 \
+"
+
+DEPENDS = "ruby-native zlib openssl tcl libyaml gdbm readline libffi libnsl2"
+DEPENDS_class-native = "openssl-native libyaml-native readline-native libnsl2"
+
+SHRT_VER = "${@oe.utils.trim_version("${PV}", 2)}"
+SRC_URI = " \
+       http://cache.ruby-lang.org/pub/ruby/2.0/ruby-2.0.0-p648.tar.gz \
+       file://0002-Obey-LDFLAGS-for-the-link-of-libruby.patch \
+           "
+# file://0002-Obey-LDFLAGS-for-the-link-of-libruby.patch 
+# file://extmk.patch 
+UPSTREAM_CHECK_URI = "https://www.ruby-lang.org/en/downloads/"
+
+inherit autotools ptest
+
+
+# This snippet lets compiled extensions which rely on external libraries,
+# such as zlib, compile properly.  If we don't do this, then when extmk.rb
+# runs, it uses the native libraries instead of the target libraries, and so
+# none of the linking operations succeed -- which makes extconf.rb think
+# that the libraries aren't available and hence that the extension can't be
+# built.
+
+do_configure_prepend() {
+    sed -i "s#%%TARGET_CFLAGS%%#$TARGET_CFLAGS#; s#%%TARGET_LDFLAGS%%#$TARGET_LDFLAGS#" ${S}/common.mk
+    rm -rf ${S}/ruby/
+}
diff --git a/meta-stx/recipes-devtools/ruby/ruby/0001-openembedded-socket-extconf-hardcode-wide-getaddr-in.patch b/meta-stx/recipes-devtools/ruby/ruby/0001-openembedded-socket-extconf-hardcode-wide-getaddr-in.patch
new file mode 100644 (file)
index 0000000..08ab662
--- /dev/null
@@ -0,0 +1,31 @@
+From e6a66a83233eead74daab6bfe0390c70989ea110 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Tue, 24 Mar 2020 14:47:02 -0700
+Subject: [PATCH] openembedded socket extconf: hardcode wide getaddr info
+
+From 9341293e71c03fe606edc9157bf1e13e3dd5b507
+Without this the socket extension doesn't build correctly
+---
+ ext/socket/extconf.rb | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/ext/socket/extconf.rb b/ext/socket/extconf.rb
+index 13937ac773..775d63335d 100644
+--- a/ext/socket/extconf.rb
++++ b/ext/socket/extconf.rb
+@@ -362,6 +362,12 @@ main(void)
+   return EXIT_FAILURE;
+ }
+ EOF
++
++
++# Ignore the actual result of the above test and assume that
++# everything is OK.
++getaddr_info_ok = true
++
+ if ipv6 and not getaddr_info_ok
+   abort <<EOS
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-devtools/ruby/ruby/0002-Obey-LDFLAGS-for-the-link-of-libruby.patch b/meta-stx/recipes-devtools/ruby/ruby/0002-Obey-LDFLAGS-for-the-link-of-libruby.patch
new file mode 100644 (file)
index 0000000..d6a8780
--- /dev/null
@@ -0,0 +1,12 @@
+diff -Nurpd a/Makefile.in b/Makefile.in
+--- a/Makefile.in      2020-04-01 14:03:13.899047800 -0700
++++ b/Makefile.in      2020-04-01 14:03:57.186887765 -0700
+@@ -70,7 +70,7 @@ EXTLIBS =
+ LIBS = @LIBS@ $(EXTLIBS)
+ MISSING = @LIBOBJS@ @ALLOCA@
+ LDSHARED = @LIBRUBY_LDSHARED@
+-DLDFLAGS = @LIBRUBY_DLDFLAGS@ $(XLDFLAGS) $(ARCH_FLAG)
++DLDFLAGS = @LIBRUBY_DLDFLAGS@ $(LDFLAGS) $(ARCH_FLAG)
+ SOLIBS = @SOLIBS@
+ MAINLIBS = @MAINLIBS@
+ ARCHMINIOBJS = @MINIOBJS@
diff --git a/meta-stx/recipes-devtools/ruby/ruby/ext.socket.extmk.patch b/meta-stx/recipes-devtools/ruby/ruby/ext.socket.extmk.patch
new file mode 100644 (file)
index 0000000..8884716
--- /dev/null
@@ -0,0 +1,13 @@
+diff -Nurpd a/ext/socket/extconf.rb b/ext/socket/extconf.rb
+--- a/ext/socket/extconf.rb    2020-04-03 10:58:47.646427762 -0700
++++ b/ext/socket/extconf.rb    2020-04-03 11:00:06.998097135 -0700
+@@ -362,6 +362,9 @@ main(void)
+   return EXIT_FAILURE;
+ }
+ EOF
++# Ignore the actual result of the above test and assume that
++# everything is OK.
++getaddr_info_ok = true
+ if ipv6 and not getaddr_info_ok
+   abort <<EOS
diff --git a/meta-stx/recipes-devtools/ruby/ruby/extmk.patch b/meta-stx/recipes-devtools/ruby/ruby/extmk.patch
new file mode 100644 (file)
index 0000000..611ea99
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/ext/extmk.rb b/ext/extmk.rb
+index 597fc78..8fad2cc 100755
+--- a/ext/extmk.rb
++++ b/ext/extmk.rb
+@@ -409,7 +409,6 @@ else
+ end
+ $ruby << " -I'$(topdir)'"
+ unless CROSS_COMPILING
+-  $ruby << " -I'$(top_srcdir)/lib'"
+   $ruby << " -I'$(extout)/$(arch)' -I'$(extout)/common'" if $extout
+   ENV["RUBYLIB"] = "-"
+ end
diff --git a/meta-stx/recipes-devtools/ruby/ruby/ruby-CVE-2017-9226.patch b/meta-stx/recipes-devtools/ruby/ruby/ruby-CVE-2017-9226.patch
new file mode 100644 (file)
index 0000000..3ea2450
--- /dev/null
@@ -0,0 +1,17 @@
+diff -Nurpd a/regparse.c b/regparse.c
+--- a/regparse.c       2020-04-03 10:48:05.349134180 -0700
++++ b/regparse.c       2020-04-03 10:55:16.647309795 -0700
+@@ -4412,9 +4412,11 @@ next_state_val(CClassNode* cc, OnigCodeP
+   switch (*state) {
+   case CCS_VALUE:
+-    if (*type == CCV_SB)
++    if (*type == CCV_SB) {
++      if (*from > 0xff)
++              return ONIGERR_INVALID_CODE_POINT_VALUE;
+       BITSET_SET_BIT_CHKDUP(cc->bs, (int )(*vs));
+-    else if (*type == CCV_CODE_POINT) {
++    } else if (*type == CCV_CODE_POINT) {
+       r = add_code_range(&(cc->mbuf), env, *vs, *vs);
+       if (r < 0) return r;
+     }
diff --git a/meta-stx/recipes-devtools/ruby/ruby/ruby-CVE-2017-9228.patch b/meta-stx/recipes-devtools/ruby/ruby/ruby-CVE-2017-9228.patch
new file mode 100644 (file)
index 0000000..d8bfba4
--- /dev/null
@@ -0,0 +1,34 @@
+From 3b63d12038c8d8fc278e81c942fa9bec7c704c8b Mon Sep 17 00:00:00 2001
+From: "K.Kosako" <kosako@sofnec.co.jp>
+Date: Wed, 24 May 2017 13:43:25 +0900
+Subject: [PATCH] fix #60 : invalid state(CCS_VALUE) in parse_char_class()
+
+---
+ regparse.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- end of original header
+
+CVE: CVE-2017-9228
+
+Upstream-Status: Inappropriate [not author]
+Signed-off-by: Joe Slater <joe.slater@windriver.com>
+
+diff --git a/regparse.c b/regparse.c
+index 69875fa..1988747 100644
+--- a/regparse.c
++++ b/regparse.c
+@@ -4081,7 +4081,9 @@ next_state_class(CClassNode* cc, OnigCodePoint* vs, enum CCVALTYPE* type,
+     }
+   }
+-  *state = CCS_VALUE;
++  if (*state != CCS_START)
++    *state = CCS_VALUE;
++
+   *type  = CCV_CLASS;
+   return 0;
+ }
+-- 
+1.7.9.5
+
diff --git a/meta-stx/recipes-devtools/ruby/ruby_2.0.0-p648.bb b/meta-stx/recipes-devtools/ruby/ruby_2.0.0-p648.bb
new file mode 100644 (file)
index 0000000..540b906
--- /dev/null
@@ -0,0 +1,95 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+require ruby.inc
+inherit openssl10
+
+DEPENDS += " libnsl2"
+
+# file://ruby-CVE-2017-9226.patch # Not applied: fix does not apply to this onig version
+SRC_URI += " \
+           file://ruby-CVE-2017-9228.patch \
+          file://ext.socket.extmk.patch \
+           "
+#  file://run-ptest 
+
+SRC_URI[md5sum] = "05db49992d01731fee023cad09bb4e52"
+SRC_URI[sha256sum] = "8690bd6b4949c333b3919755c4e48885dbfed6fd055fe9ef89930bde0d2376f8"
+
+# it's unknown to configure script, but then passed to extconf.rb
+# maybe it's not really needed as we're hardcoding the result with
+# 0001-openembedded-socket-extconf-hardcode-wide-getaddr-in.patch
+UNKNOWN_CONFIGURE_WHITELIST += "--enable-wide-getaddrinfo"
+
+PACKAGECONFIG ??= ""
+
+PACKAGECONFIG[valgrind] = "--with-valgrind=yes, --with-valgrind=no, valgrind"
+#PACKAGECONFIG[gmp] = "--with-gmp=yes, --with-gmp=no, gmp"
+
+EXTRA_AUTORECONF += "--exclude=aclocal"
+
+#    --disable-versioned-paths 
+EXTRA_OECONF ?= " "
+EXTRA_OECONF = "\
+    --disable-install-doc \
+    --disable-rpath \
+    --disable-dtrace \
+    --enable-shared \
+    --enable-load-relative \
+"
+
+EXTRA_OEMAKE = " \
+    LIBRUBYARG='-lruby-static' \
+"
+
+do_configure_prepend() {
+    cd ${S}
+    rm -rf spec/rubyspec
+    git clone git://github.com/ruby/rubyspec.git spec/rubyspec
+    cd ${B}
+}
+
+do_install() {
+    oe_runmake 'DESTDIR=${D}' install
+}
+
+do_install_append_class-target () {
+    # Find out rbconfig.rb from .installed.list
+    rbconfig_rb=`grep rbconfig.rb ${B}/.installed.list`
+    # Remove build host directories
+    sed -i -e 's:--sysroot=${STAGING_DIR_TARGET}::g' \
+           -e s:'--with-libtool-sysroot=${STAGING_DIR_TARGET}'::g \
+           -e 's|${DEBUG_PREFIX_MAP}||g' \
+           -e 's:${HOSTTOOLS_DIR}/::g' \
+           -e 's:${RECIPE_SYSROOT_NATIVE}::g' \
+           -e 's:${RECIPE_SYSROOT}::g' \
+           -e 's:${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}::g' \
+        ${D}$rbconfig_rb
+
+}
+
+do_install_ptest () {
+    cp -rf ${S}/test ${D}${PTEST_PATH}/
+    cp -r ${S}/include ${D}/${libdir}/ruby/
+    test_case_rb=`grep rubygems/test_case.rb ${B}/.installed.list`
+    sed -i -e 's:../../../test/:../../../ptest/test/:g' ${D}/$test_case_rb
+}
+
+FILES_${PN} += "${datadir}/rubygems"
+
+
+BBCLASSEXTEND = "native nativesdk"
+
+# INSANE_SKIP_${PN} += "ldflags"
diff --git a/meta-stx/recipes-extended/ceph/ceph-13.2.2/0001-Correct-the-path-to-find-version.h-in-rocksdb.patch b/meta-stx/recipes-extended/ceph/ceph-13.2.2/0001-Correct-the-path-to-find-version.h-in-rocksdb.patch
new file mode 100644 (file)
index 0000000..788505b
--- /dev/null
@@ -0,0 +1,40 @@
+From a53605694d5301b7bb543464b17f74bbbd35d372 Mon Sep 17 00:00:00 2001
+From: Dengke Du <dengke.du@windriver.com>
+Date: Tue, 28 Aug 2018 10:04:40 +0800
+Subject: [PATCH] Correct the path to find version.h in rocksdb
+
+Signed-off-by: Dengke Du <dengke.du@windriver.com>
+---
+ cmake/modules/Findrocksdb.cmake | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/cmake/modules/Findrocksdb.cmake b/cmake/modules/Findrocksdb.cmake
+index f8369f7..36b67ea 100644
+--- a/cmake/modules/Findrocksdb.cmake
++++ b/cmake/modules/Findrocksdb.cmake
+@@ -9,17 +9,17 @@
+ #  ROCKSDB_VERSION_MINOR
+ #  ROCKSDB_VERSION_PATCH
+-find_path(ROCKSDB_INCLUDE_DIR rocksdb/db.h)
++find_path(ROCKSDB_INCLUDE_DIR rocksdb/db.h ${CMAKE_SYSROOT})
+-find_library(ROCKSDB_LIBRARIES rocksdb)
++find_library(ROCKSDB_LIBRARIES rocksdb ${CMAKE_SYSROOT})
+ if(ROCKSDB_INCLUDE_DIR AND EXISTS "${ROCKSDB_INCLUDE_DIR}/rocksdb/version.h")
+   foreach(ver "MAJOR" "MINOR" "PATCH")
+-    file(STRINGS "${ROCKSDB_INCLUDE_DIR}/version.h" ROCKSDB_VER_${ver}_LINE
++    file(STRINGS "${ROCKSDB_INCLUDE_DIR}/rocksdb/version.h" ROCKSDB_VER_${ver}_LINE
+       REGEX "^#define[ \t]+ROCKSDB_${ver}[ \t]+[0-9]+$")
+     string(REGEX REPLACE "^#define[ \t]+ROCKSDB_${ver}[ \t]+([0-9]+)$"
+-      "\\1" ROCKSDB_VERSION_${ver} "${ROCKDB_VER_${ver}_LINE}")
+-    unset(${ROCKDB_VER_${ver}_LINE})
++      "\\1" ROCKSDB_VERSION_${ver} "${ROCKSDB_VER_${ver}_LINE}")
++    unset(ROCKSDB_VER_${ver}_LINE)
+   endforeach()
+   set(ROCKSDB_VERSION_STRING
+     "${ROCKSDB_VERSION_MAJOR}.${ROCKSDB_VERSION_MINOR}.${ROCKSDB_VERSION_PATCH}")
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/ceph/ceph-13.2.2/0002-zstd-fix-error-for-cross-compile.patch b/meta-stx/recipes-extended/ceph/ceph-13.2.2/0002-zstd-fix-error-for-cross-compile.patch
new file mode 100644 (file)
index 0000000..66b5f0a
--- /dev/null
@@ -0,0 +1,26 @@
+From 3e86b6d9db2682b123839e38e9bf45060e2bb2ab Mon Sep 17 00:00:00 2001
+From: Dengke Du <dengke.du@windriver.com>
+Date: Wed, 29 Aug 2018 16:57:52 +0800
+Subject: [PATCH] zstd: fix error for cross compile
+
+Signed-off-by: Dengke Du <dengke.du@windriver.com>
+---
+ src/compressor/zstd/CMakeLists.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/compressor/zstd/CMakeLists.txt b/src/compressor/zstd/CMakeLists.txt
+index e30cb89..b298a3d 100644
+--- a/src/compressor/zstd/CMakeLists.txt
++++ b/src/compressor/zstd/CMakeLists.txt
+@@ -9,7 +9,7 @@ ExternalProject_Add(zstd_ext
+   CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+              -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+              -DCMAKE_C_FLAGS=${ZSTD_C_FLAGS}
+-             -DCMAKE_AR=${CMAKE_AR}
++             -DCMAKE_SYSROOT=${CMAKE_SYSROOT}
+   BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/libzstd
+   BUILD_COMMAND $(MAKE) libzstd_static
+   INSTALL_COMMAND "true")
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/ceph/ceph-13.2.2/0003-ceph-add-pybind-support-in-OE.patch b/meta-stx/recipes-extended/ceph/ceph-13.2.2/0003-ceph-add-pybind-support-in-OE.patch
new file mode 100644 (file)
index 0000000..f9c5340
--- /dev/null
@@ -0,0 +1,129 @@
+From 00d44940c2e83bf73101a05d2aa8f88c2e2fca58 Mon Sep 17 00:00:00 2001
+From: Dengke Du <dengke.du@windriver.com>
+Date: Tue, 23 Oct 2018 15:34:53 +0800
+Subject: [PATCH] ceph: add pybind support in OE
+
+1. add sysroot to CFLAGS when cross compiling pybind
+2. change the pybind's INSTALL path to OE's INSTALL path
+3. delete the check for header files, because the check method using
+   host compiler.
+
+Signed-off-by: Dengke Du <dengke.du@windriver.com>
+Upstream-Status: Inappropriate [oe specific]
+---
+ cmake/modules/Distutils.cmake | 12 +++---------
+ src/pybind/cephfs/setup.py    |  8 --------
+ src/pybind/rados/setup.py     |  8 --------
+ src/pybind/rbd/setup.py       |  8 --------
+ src/pybind/rgw/setup.py       |  8 --------
+ 5 files changed, 3 insertions(+), 41 deletions(-)
+
+diff --git a/cmake/modules/Distutils.cmake b/cmake/modules/Distutils.cmake
+index d6e9f38..3091d97 100644
+--- a/cmake/modules/Distutils.cmake
++++ b/cmake/modules/Distutils.cmake
+@@ -47,7 +47,7 @@ function(distutils_add_cython_module name src)
+     LDFLAGS=-L${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
+     CYTHON_BUILD_DIR=${CMAKE_CURRENT_BINARY_DIR}
+     CEPH_LIBDIR=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
+-    CFLAGS=\"-iquote${CMAKE_SOURCE_DIR}/src/include -w\"
++    CFLAGS=\"-iquote${CMAKE_SOURCE_DIR}/src/include -w --sysroot=${CMAKE_SYSROOT}\"
+     ${PYTHON${PYTHON_VERSION}_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/setup.py
+     build --verbose --build-base ${CYTHON_MODULE_DIR}
+     --build-platlib ${CYTHON_MODULE_DIR}/lib.${PYTHON${PYTHON_VERSION}_VERSION_MAJOR}
+@@ -69,14 +69,8 @@ function(distutils_install_cython_module name)
+     set(ENV{CEPH_LIBDIR} \"${CMAKE_LIBRARY_OUTPUT_DIRECTORY}\")
+     set(options --prefix=${CMAKE_INSTALL_PREFIX})
+-    if(DEFINED ENV{DESTDIR})
+-      if(EXISTS /etc/debian_version)
+-        list(APPEND options --install-layout=deb)
+-      endif()
+-      list(APPEND options --root=\$ENV{DESTDIR})
+-    else()
+-      list(APPEND options --root=/)
+-    endif()
++    list(APPEND options --root=${CMAKE_DESTDIR})
++    list(APPEND options --install-lib=${PYTHON_SITEPACKAGES_DIR})
+     execute_process(
+        COMMAND
+            ${PYTHON${PYTHON_VERSION}_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/setup.py
+diff --git a/src/pybind/cephfs/setup.py b/src/pybind/cephfs/setup.py
+index 6533f41..1ee4a59 100755
+--- a/src/pybind/cephfs/setup.py
++++ b/src/pybind/cephfs/setup.py
+@@ -121,14 +121,6 @@ def check_sanity():
+     finally:
+         shutil.rmtree(tmp_dir)
+-
+-if 'BUILD_DOC' in os.environ.keys():
+-    pass
+-elif check_sanity():
+-    pass
+-else:
+-    sys.exit(1)
+-
+ cmdclass = {}
+ try:
+     from Cython.Build import cythonize
+diff --git a/src/pybind/rados/setup.py b/src/pybind/rados/setup.py
+index ef7c307..5204017 100755
+--- a/src/pybind/rados/setup.py
++++ b/src/pybind/rados/setup.py
+@@ -117,14 +117,6 @@ def check_sanity():
+     finally:
+         shutil.rmtree(tmp_dir)
+-
+-if 'BUILD_DOC' in os.environ.keys():
+-    pass
+-elif check_sanity():
+-    pass
+-else:
+-    sys.exit(1)
+-
+ cmdclass = {}
+ try:
+     from Cython.Build import cythonize
+diff --git a/src/pybind/rbd/setup.py b/src/pybind/rbd/setup.py
+index bcf96f2..d4cbbeb 100755
+--- a/src/pybind/rbd/setup.py
++++ b/src/pybind/rbd/setup.py
+@@ -120,14 +120,6 @@ def check_sanity():
+     finally:
+         shutil.rmtree(tmp_dir)
+-
+-if 'BUILD_DOC' in os.environ.keys():
+-    pass
+-elif check_sanity():
+-    pass
+-else:
+-    sys.exit(1)
+-
+ cmdclass = {}
+ try:
+     from Cython.Build import cythonize
+diff --git a/src/pybind/rgw/setup.py b/src/pybind/rgw/setup.py
+index f14f30c..ee7570b 100755
+--- a/src/pybind/rgw/setup.py
++++ b/src/pybind/rgw/setup.py
+@@ -120,14 +120,6 @@ def check_sanity():
+     finally:
+         shutil.rmtree(tmp_dir)
+-
+-if 'BUILD_DOC' in os.environ.keys():
+-    pass
+-elif check_sanity():
+-    pass
+-else:
+-    sys.exit(1)
+-
+ cmdclass = {}
+ try:
+     from Cython.Build import cythonize
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/ceph/ceph-13.2.2/0004-ceph-detect-init-correct-the-installation-for-OE.patch b/meta-stx/recipes-extended/ceph/ceph-13.2.2/0004-ceph-detect-init-correct-the-installation-for-OE.patch
new file mode 100644 (file)
index 0000000..875501b
--- /dev/null
@@ -0,0 +1,36 @@
+From 6aaf04036f0affbeddda123bff111990c4d5fd72 Mon Sep 17 00:00:00 2001
+From: Dengke Du <dengke.du@windriver.com>
+Date: Fri, 26 Oct 2018 14:31:10 +0800
+Subject: [PATCH] ceph-detect-init: correct the installation for OE
+
+Signed-off-by: Dengke Du <dengke.du@windriver.com>
+Upstream-Status: Inappropriate [oe specific]
+---
+ cmake/modules/Distutils.cmake | 11 ++---------
+ 1 file changed, 2 insertions(+), 9 deletions(-)
+
+diff --git a/cmake/modules/Distutils.cmake b/cmake/modules/Distutils.cmake
+index 3091d97..c50fe77 100644
+--- a/cmake/modules/Distutils.cmake
++++ b/cmake/modules/Distutils.cmake
+@@ -16,15 +16,8 @@ function(distutils_install_module name)
+   cmake_parse_arguments(DU "" INSTALL_SCRIPT "" ${ARGN})
+   install(CODE "
+     set(options --prefix=${CMAKE_INSTALL_PREFIX})
+-    if(DEFINED ENV{DESTDIR})
+-      if(EXISTS /etc/debian_version)
+-        list(APPEND options --install-layout=deb)
+-      endif()
+-      list(APPEND options --root=\$ENV{DESTDIR})
+-      if(NOT \"${DU_INSTALL_SCRIPT}\" STREQUAL \"\")
+-        list(APPEND options --install-script=${DU_INSTALL_SCRIPT})
+-      endif()
+-    endif()
++    list(APPEND options --root=${CMAKE_DESTDIR})
++    list(APPEND options --install-lib=${PYTHON_SITEPACKAGES_DIR})
+     execute_process(
+     COMMAND ${PYTHON${PYTHON_VERSION}_EXECUTABLE}
+         setup.py install \${options}
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/ceph/ceph-13.2.2/0005-Add-hooks-for-orderly-shutdown-on-controller.patch b/meta-stx/recipes-extended/ceph/ceph-13.2.2/0005-Add-hooks-for-orderly-shutdown-on-controller.patch
new file mode 100644 (file)
index 0000000..fd53546
--- /dev/null
@@ -0,0 +1,59 @@
+From 1f882d7ee20d70548d380ce27eedab3ae30180d4 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Fri, 17 Apr 2020 00:55:52 +0800
+Subject: [PATCH] Add hooks for orderly shutdown on controller
+
+Hook the ceph init script to add systemd overrides to define
+an orderly shutdown for StarlingX controllers.
+
+Signed-off-by: Don Penney <don.penney@windriver.com>
+---
+ src/init-ceph.in | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+diff --git a/src/init-ceph.in b/src/init-ceph.in
+index d13032a..a6113fa 100755
+--- a/src/init-ceph.in
++++ b/src/init-ceph.in
+@@ -434,6 +434,38 @@ for name in $what; do
+               continue
+           fi
++            . /etc/platform/platform.conf
++            if [ "${nodetype}" = "controller" ]; then
++                # StarlingX: Hook the transient services launched by systemd-run
++                # to allow for proper cleanup and orderly shutdown
++
++                # Set nullglob so wildcards will return empty string if no match
++                shopt -s nullglob
++
++                OSD_SERVICES=$(for svc in /run/systemd/system/ceph-osd*.service; do basename $svc; done | xargs echo)
++                for d in /run/systemd/system/ceph-osd*.d; do
++                    cat <<EOF > $d/starlingx-overrides.conf
++[Unit]
++Before=docker.service
++After=sm-shutdown.service
++
++EOF
++                done
++
++                for d in /run/systemd/system/ceph-mon*.d; do
++                    cat <<EOF > $d/starlingx-overrides.conf
++[Unit]
++Before=docker.service
++After=sm-shutdown.service ${OSD_SERVICES}
++
++EOF
++                done
++
++                shopt -u nullglob
++
++                systemctl daemon-reload
++            fi
++
+           [ -n "$post_start" ] && do_cmd "$post_start"
+           [ -n "$lockfile" ] && [ "$?" -eq 0 ] && touch $lockfile
+           ;;
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/ceph/ceph-14.1.0/0001-ceph-rebase-on-stx.3.0-and-warrior.patch b/meta-stx/recipes-extended/ceph/ceph-14.1.0/0001-ceph-rebase-on-stx.3.0-and-warrior.patch
new file mode 100644 (file)
index 0000000..d27ed87
--- /dev/null
@@ -0,0 +1,62 @@
+From 3763a20314ec2b80ec9d8525a1d3867b3c731266 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Sat, 22 Feb 2020 04:48:04 -0800
+Subject: [PATCH] ceph rebase on stx.3.0 and warrior
+
+From 03340eaf0004e3cc8e3f8991ea96a46757d92830 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Sat, 26 Jan 2019 13:34:55 -0500
+Subject: [PATCH] Add hooks for orderly shutdown on controller
+
+Hook the ceph init script to add systemd overrides to define
+an orderly shutdown for StarlingX controllers.
+---
+ src/init-ceph.in | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+diff --git a/src/init-ceph.in b/src/init-ceph.in
+index 1843710..a31b900 100755
+--- a/src/init-ceph.in
++++ b/src/init-ceph.in
+@@ -434,6 +434,38 @@ for name in $what; do
+               continue
+           fi
++            . /etc/platform/platform.conf
++            if [ "${nodetype}" = "controller" ]; then
++                # StarlingX: Hook the transient services launched by systemd-run
++                # to allow for proper cleanup and orderly shutdown
++
++                # Set nullglob so wildcards will return empty string if no match
++                shopt -s nullglob
++
++                OSD_SERVICES=$(for svc in /run/systemd/system/ceph-osd*.service; do basename $svc; done | xargs echo)
++                for d in /run/systemd/system/ceph-osd*.d; do
++                    cat <<EOF > $d/starlingx-overrides.conf
++[Unit]
++Before=docker.service
++After=sm-shutdown.service
++
++EOF
++                done
++
++                for d in /run/systemd/system/ceph-mon*.d; do
++                    cat <<EOF > $d/starlingx-overrides.conf
++[Unit]
++Before=docker.service
++After=sm-shutdown.service ${OSD_SERVICES}
++
++EOF
++                done
++
++                shopt -u nullglob
++
++                systemctl daemon-reload
++            fi
++
+           [ -n "$post_start" ] && do_cmd "$post_start"
+           [ -n "$lockfile" ] && [ "$?" -eq 0 ] && touch $lockfile
+           ;;
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-extended/ceph/ceph-14.1.0/rados.runtime.decode.error.patch b/meta-stx/recipes-extended/ceph/ceph-14.1.0/rados.runtime.decode.error.patch
new file mode 100644 (file)
index 0000000..7e75c5d
--- /dev/null
@@ -0,0 +1,27 @@
+diff -Nurpd a/src/pybind/rados/rados.pyx b/src/pybind/rados/rados.pyx
+--- a/src/pybind/rados/rados.pyx       2020-04-11 17:15:00.793220981 -0700
++++ b/src/pybind/rados/rados.pyx       2020-04-11 17:18:32.536405851 -0700
+@@ -326,7 +326,10 @@ LIBRADOS_OPERATION_IGNORE_CACHE = _LIBRA
+ LIBRADOS_OPERATION_SKIPRWLOCKS = _LIBRADOS_OPERATION_SKIPRWLOCKS
+ LIBRADOS_OPERATION_IGNORE_OVERLAY = _LIBRADOS_OPERATION_IGNORE_OVERLAY
+-LIBRADOS_ALL_NSPACES = _LIBRADOS_ALL_NSPACES.decode('utf-8')
++if isinstance(_LIBRADOS_ALL_NSPACES, str):
++    LIBRADOS_ALL_NSPACES = _LIBRADOS_ALL_NSPACES
++else:
++    LIBRADOS_ALL_NSPACES = _LIBRADOS_ALL_NSPACES.decode('utf-8')
+ LIBRADOS_CREATE_EXCLUSIVE = _LIBRADOS_CREATE_EXCLUSIVE
+ LIBRADOS_CREATE_IDEMPOTENT = _LIBRADOS_CREATE_IDEMPOTENT
+diff --git a/src/pybind/rados/rados.pyx b/src/pybind/rados/rados.pyx
+index fe17620..beca5ae 100644
+--- a/src/pybind/rados/rados.pyx
++++ b/src/pybind/rados/rados.pyx
+@@ -555,6 +555,8 @@ def decode_cstr(val, encoding="utf-8"):
+     """
+     if val is None:
+         return None
++    if isinstance(val, str):
++        return val
+
+     return val.decode(encoding)
diff --git a/meta-stx/recipes-extended/ceph/ceph_13.2.2.bb b/meta-stx/recipes-extended/ceph/ceph_13.2.2.bb
new file mode 100644 (file)
index 0000000..96b7076
--- /dev/null
@@ -0,0 +1,225 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "User space components of the Ceph file system"
+DESCRIPTION = "\
+Ceph is a massively scalable, open-source, distributed storage system that runs \
+on commodity hardware and delivers object, block and file system storage. \
+"
+HOMEPAGE = "https://ceph.io"
+
+LICENSE = "LGPLv2.1 & GPLv2 & Apache-2.0 & MIT"
+LIC_FILES_CHKSUM = "\
+    file://COPYING-LGPL2.1;md5=fbc093901857fcd118f065f900982c24 \
+    file://COPYING-GPL2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
+    file://COPYING;md5=92d301c8fccd296f2221a68a8dd53828 \
+"
+
+DEPENDS = "\
+    boost rdma-core bzip2 curl expat \
+    gperf-native keyutils libaio lz4 \
+    nspr nss oath openldap openssl \
+    python python-cython-native rocksdb \
+    snappy udev valgrind xfsprogs zlib \
+"
+
+SRC_URI = "\
+    http://download.ceph.com/tarballs/ceph-${PV}.tar.gz \
+    file://0001-Correct-the-path-to-find-version.h-in-rocksdb.patch \
+    file://0002-zstd-fix-error-for-cross-compile.patch \
+    file://0003-ceph-add-pybind-support-in-OE.patch \
+    file://0004-ceph-detect-init-correct-the-installation-for-OE.patch \
+    file://0005-Add-hooks-for-orderly-shutdown-on-controller.patch \
+    \
+    file://ceph-init-wrapper.sh \
+    file://ceph-manage-journal.py \
+    file://ceph-preshutdown.sh \
+    file://ceph-radosgw.service \
+    file://ceph.conf \
+    file://ceph.conf.pmon \
+    file://ceph.service \
+    file://ceph.sh \
+    file://mgr-restful-plugin.py \
+    file://mgr-restful-plugin.service \
+    file://starlingx-docker-override.conf \
+"
+SRC_URI[md5sum] = "ce118be451dcb6b89e9e0a45057827dd"
+SRC_URI[sha256sum] = "f3a61db4c90e00c38a2dac7239b956ec367ef56f601e07335ed3011f931d8840"
+
+inherit cmake pythonnative python-dir systemd
+
+DISTRO_FEATURES_BACKFILL_CONSIDERED_remove = "sysvinit"
+
+SYSTEMD_SERVICE_${PN} = " \
+    ceph-radosgw@.service \
+    ceph-radosgw.target \
+    ceph-mon@.service \
+    ceph-mon.target \
+    ceph-mds@.service \
+    ceph-mds.target \
+    ceph-disk@.service \
+    ceph-osd@.service \
+    ceph-osd.target \
+    ceph.target \
+    ceph-fuse@.service \
+    ceph-fuse.target \
+    ceph-rbd-mirror@.service \
+    ceph-rbd-mirror.target \
+    ceph-volume@.service \
+    ceph-mgr@.service \
+    ceph-mgr.target \
+    rbdmap.service \
+"
+
+OECMAKE_GENERATOR = "Unix Makefiles"
+
+EXTRA_OECMAKE = "\
+    -DWITH_MANPAGE=OFF \
+    -DWITH_FUSE=OFF \
+    -DWITH_SPDK=OFF \
+    -DWITH_LEVELDB=OFF \
+    -DWITH_LTTNG=OFF \
+    -DWITH_BABELTRACE=OFF \
+    -DWITH_TESTS=OFF \
+    -DDEBUG_GATHER=OFF \
+    -DWITH_PYTHON2=ON \
+    -DWITH_MGR=ON \
+    -DMGR_PYTHON_VERSION=2.7 \
+    -DWITH_MGR_DASHBOARD_FRONTEND=OFF \
+    -DWITH_SYSTEM_BOOST=ON \
+    -DWITH_SYSTEM_ROCKSDB=ON \
+"
+
+do_configure_prepend () {
+    echo "set( CMAKE_SYSROOT \"${RECIPE_SYSROOT}\" )" >> ${WORKDIR}/toolchain.cmake
+    echo "set( CMAKE_DESTDIR \"${D}\" )" >> ${WORKDIR}/toolchain.cmake
+    echo "set( PYTHON_SITEPACKAGES_DIR \"${PYTHON_SITEPACKAGES_DIR}\" )" >> ${WORKDIR}/toolchain.cmake
+    ln -sf ${STAGING_LIBDIR}/libboost_python27.so ${STAGING_LIBDIR}/libboost_python.so
+}
+
+do_install_append () {
+    mv ${D}${bindir}/ceph-disk ${D}${sbindir}/ceph-disk
+    sed -i -e 's:${WORKDIR}.*python2.7:${bindir}/python:' ${D}${sbindir}/ceph-disk
+    sed -i -e 's:${WORKDIR}.*python2.7:${bindir}/python:' ${D}${bindir}/ceph
+    sed -i -e 's:${WORKDIR}.*python2.7:${bindir}/python:' ${D}${bindir}/ceph-detect-init
+    find ${D} -name SOURCES.txt | xargs sed -i -e 's:${WORKDIR}::'
+
+    install -d ${D}${systemd_unitdir}
+    mv ${D}${libexecdir}/systemd/system ${D}${systemd_unitdir}
+    mv ${D}${libexecdir}/ceph/ceph-osd-prestart.sh ${D}${libdir}/ceph
+    install -m 0755 ${D}${libexecdir}/ceph/ceph_common.sh ${D}${libdir}/ceph
+
+    install -d ${D}${sysconfdir}/ceph
+    install -m 0644 ${WORKDIR}/ceph.conf ${D}${sysconfdir}/ceph/
+    install -m 0644 ${WORKDIR}/ceph-radosgw.service ${D}${systemd_system_unitdir}/ceph-radosgw@.service
+    install -m 0644 ${WORKDIR}/ceph.service ${D}${systemd_system_unitdir}
+    install -m 0644 ${WORKDIR}/mgr-restful-plugin.service ${D}${systemd_system_unitdir}
+
+    install -m 0700 ${WORKDIR}/ceph-manage-journal.py ${D}${sbindir}/ceph-manage-journal
+    install -Dm 0750 ${WORKDIR}/mgr-restful-plugin.py  ${D}${sysconfdir}/rc.d/init.d/mgr-restful-plugin
+    install -Dm 0750 ${WORKDIR}/mgr-restful-plugin.py  ${D}${sysconfdir}/init.d/mgr-restful-plugin
+    install -m 0750 ${WORKDIR}/ceph.conf.pmon ${D}${sysconfdir}/ceph/
+
+    install -d -m 0750 ${D}${sysconfdir}/services.d/controller
+    install -d -m 0750 ${D}${sysconfdir}/services.d/storage
+    install -d -m 0750 ${D}${sysconfdir}/services.d/worker
+
+    install -m 0750 ${WORKDIR}/ceph.sh ${D}${sysconfdir}/services.d/controller
+    install -m 0750 ${WORKDIR}/ceph.sh ${D}${sysconfdir}/services.d/storage
+    install -m 0750 ${WORKDIR}/ceph.sh ${D}${sysconfdir}/services.d/worker
+
+    install -Dm 0750 ${WORKDIR}/ceph-init-wrapper.sh ${D}${sysconfdir}/rc.d/init.d/ceph-init-wrapper
+    install -Dm 0750 ${WORKDIR}/ceph-init-wrapper.sh ${D}${sysconfdir}/init.d/ceph-init-wrapper
+    sed -i -e 's|/usr/lib64|${libdir}|' ${D}${sysconfdir}/rc.d/init.d/ceph-init-wrapper ${D}${sysconfdir}/init.d/ceph-init-wrapper
+
+    install -m 0700 ${WORKDIR}/ceph-preshutdown.sh ${D}${sbindir}/ceph-preshutdown.sh
+    
+    install -Dm 0644 ${WORKDIR}/starlingx-docker-override.conf ${D}${systemd_system_unitdir}/docker.service.d/starlingx-docker-override.conf
+
+    install -m 0644 -D ${S}/src/etc-rbdmap ${D}${sysconfdir}/ceph/rbdmap 
+    install -m 0644 -D ${S}/etc/sysconfig/ceph ${D}${sysconfdir}/sysconfig/ceph
+    install -m 0644 -D ${S}/src/logrotate.conf ${D}${sysconfdir}/logrotate.d/ceph
+
+    install -m 0644 -D ${S}/COPYING ${D}${docdir}/ceph/COPYING    
+    install -m 0644 -D ${S}/etc/sysctl/90-ceph-osd.conf ${D}${libdir}/sysctl.d/90-ceph-osd.conf
+    install -m 0644 -D ${S}/udev/50-rbd.rules ${D}${libdir}/udev/rules.d/50-rbd.rules
+    install -m 0644 -D ${S}/udev/60-ceph-by-parttypeuuid.rules ${D}${libdir}/udev/rules.d/60-ceph-by-parttypeuuid.rules
+
+    mkdir -p ${D}${localstatedir}/ceph
+    mkdir -p ${D}${localstatedir}/log/ceph
+    mkdir -p ${D}${localstatedir}/lib/ceph/tmp
+    mkdir -p ${D}${localstatedir}/lib/ceph/mon
+    mkdir -p ${D}${localstatedir}/lib/ceph/osd
+    mkdir -p ${D}${localstatedir}/lib/ceph/mds
+    mkdir -p ${D}${localstatedir}/lib/ceph/mgr
+    mkdir -p ${D}${localstatedir}/lib/ceph/radosgw
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-osd
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-mds
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-rgw
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-mgr
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-rbd
+
+    install -m 0755 -d ${D}/${sysconfdir}/tmpfiles.d
+    echo "d ${localstatedir}/run/ceph 0755 ceph ceph -" >> ${D}/${sysconfdir}/tmpfiles.d/ceph.conf
+
+    install -m 0750 -D ${S}/src/init-radosgw ${D}${sysconfdir}/rc.d/init.d/ceph-radosgw
+    install -m 0750 -D ${S}/src/init-radosgw ${D}${sysconfdir}/init.d/ceph-radosgw
+    sed -i '/### END INIT INFO/a SYSTEMCTL_SKIP_REDIRECT=1' ${D}${sysconfdir}/rc.d/init.d/ceph-radosgw
+    sed -i '/### END INIT INFO/a SYSTEMCTL_SKIP_REDIRECT=1' ${D}${sysconfdir}/init.d/ceph-radosgw
+    install -m 0750 -D ${S}/src/init-rbdmap ${D}${sysconfdir}/rc.d/init.d/rbdmap
+    install -m 0750 -D ${S}/src/init-rbdmap ${D}${sysconfdir}/init.d/rbdmap
+    install -m 0750 -D ${B}/bin/init-ceph ${D}${sysconfdir}/rc.d/init.d/ceph
+    install -m 0750 -D ${B}/bin/init-ceph ${D}${sysconfdir}/init.d/ceph
+    install -d -m 0750 ${D}${localstatedir}/log/radosgw 
+}
+
+PACKAGES += " \
+    ${PN}-python \
+"
+
+FILES_${PN} += "\
+    ${libdir}/rados-classes/*.so.* \
+    ${libdir}/ceph/compressor/*.so \
+    ${libdir}/rados-classes/*.so \
+    ${libdir}/ceph/*.so \
+    ${localstatedir} \
+    ${docdir}/ceph/COPYING \
+    ${libdir}/sysctl.d/90-ceph-osd.conf \
+    ${libdir}/udev/rules.d/50-rbd.rules \
+    ${libdir}/udev/rules.d/60-ceph-by-parttypeuuid.rules \
+    ${systemd_system_unitdir}/mgr-restful-plugin.service \
+    ${systemd_system_unitdir}/ceph-radosgw@.service \
+    ${systemd_system_unitdir}/ceph.service \
+    ${systemd_system_unitdir}/docker.service.d/starlingx-docker-override.conf \
+"
+FILES_${PN}-python = "\
+    ${PYTHON_SITEPACKAGES_DIR}/* \
+"
+
+RDEPENDS_${PN} += "\
+    bash \
+    python \
+    python-misc \
+    python-modules \
+    python-prettytable \
+    rdma-core \
+    xfsprogs-mkfs \
+    ${PN}-python \
+"
+
+COMPATIBLE_HOST = "(x86_64).*"
+
+INSANE_SKIP_${PN}-python += "ldflags"
+INSANE_SKIP_${PN} += "dev-so"
diff --git a/meta-stx/recipes-extended/ceph/ceph_14.1.0.bbappend b/meta-stx/recipes-extended/ceph/ceph_14.1.0.bbappend
new file mode 100644 (file)
index 0000000..0340565
--- /dev/null
@@ -0,0 +1,169 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/${BP}:${THISDIR}/files:"
+
+inherit python3native python3-dir
+
+DISTRO_FEATURES_BACKFILL_CONSIDERED_remove = "sysvinit"
+
+SRC_URI += "\
+       file://0001-ceph-rebase-on-stx.3.0-and-warrior.patch \
+       file://ceph.conf \
+       file://ceph-init-wrapper.sh \
+        file://ceph-preshutdown.sh \
+        file://ceph.service \
+        file://mgr-restful-plugin.py \
+        file://starlingx-docker-override.conf \
+        file://ceph.conf.pmon \
+        file://ceph-manage-journal.py \
+        file://ceph-radosgw.service \
+        file://ceph.sh \
+        file://mgr-restful-plugin.service \
+       file://rados.runtime.decode.error.patch \
+       "
+DEPENDS = "boost rdma-core bzip2 curl expat gperf-native \
+               keyutils libaio lz4 \
+               nspr nss oath openldap openssl \
+               python3 python3-cython-native rocksdb snappy udev \
+               python-cython-native valgrind xfsprogs zlib \
+               rabbitmq-c \
+               "
+RDEPENDS_${PN} += " rdma-core python3-core python3 xfsprogs-mkfs python3-prettytable"
+
+
+EXTRA_OECMAKE = "-DWITH_MANPAGE=OFF \
+                 -DWITH_FUSE=OFF \
+                -DWITH_SPDK=OFF \
+                -DWITH_LEVELDB=OFF \
+                -DWITH_LTTNG=OFF \
+                -DWITH_BABELTRACE=OFF \
+                -DWITH_TESTS=OFF \
+                -DWITH_MGR=ON \
+                -DWITH_PYTHON2=OFF \
+                -DWITH_PYTHON3=ON \
+                -DMGR_PYTHON_VERSION=3 \
+                -DWITH_MGR_DASHBOARD_FRONTEND=OFF \
+                -DWITH_SYSTEM_BOOST=ON \
+                -DWITH_SYSTEM_ROCKSDB=ON \
+                -DWITH_RDMA=OFF \
+                -DWITH_RADOSGW_AMQP_ENDPOINT=OFF \
+                "
+
+# TODO: Should be fixed in either boost package or CMake files. 
+#do_configure_prepend() {
+#      ln -f -s ${WORKDIR}/recipe-sysroot/usr/lib/libboost_python35.so \
+#              ${WORKDIR}/recipe-sysroot/usr/lib/libboost_python.so
+#}
+
+do_install_append () {
+    install -d ${D}${sysconfdir}/ceph
+    install -m 0644 ${WORKDIR}/ceph.conf ${D}${sysconfdir}/ceph/
+    install -m 0644 ${WORKDIR}/ceph-radosgw.service ${D}${systemd_system_unitdir}/ceph-radosgw@.service
+    install -m 0644 ${WORKDIR}/ceph.service ${D}${systemd_system_unitdir}
+    install -m 0644 ${WORKDIR}/mgr-restful-plugin.service ${D}${systemd_system_unitdir}
+
+    install -m 0700 ${WORKDIR}/ceph-manage-journal.py ${D}${sbindir}/ceph-manage-journal
+    install -Dm 0750 ${WORKDIR}/mgr-restful-plugin.py  ${D}${sysconfdir}/rc.d/init.d/mgr-restful-plugin
+    install -Dm 0750 ${WORKDIR}/mgr-restful-plugin.py  ${D}${sysconfdir}/init.d/mgr-restful-plugin
+    install -m 0750 ${WORKDIR}/ceph.conf.pmon ${D}${sysconfdir}/ceph/
+
+    install -d -m 0750 ${D}${sysconfdir}/services.d/controller
+    install -d -m 0750 ${D}${sysconfdir}/services.d/storage
+    install -d -m 0750 ${D}${sysconfdir}/services.d/worker
+
+    install -m 0750 ${WORKDIR}/ceph.sh ${D}${sysconfdir}/services.d/controller
+    install -m 0750 ${WORKDIR}/ceph.sh ${D}${sysconfdir}/services.d/storage
+    install -m 0750 ${WORKDIR}/ceph.sh ${D}${sysconfdir}/services.d/worker
+
+    install -Dm 0750 ${WORKDIR}/ceph-init-wrapper.sh ${D}${sysconfdir}/rc.d/init.d/ceph-init-wrapper
+    install -Dm 0750 ${WORKDIR}/ceph-init-wrapper.sh ${D}${sysconfdir}/init.d/ceph-init-wrapper
+    sed -i -e 's|/usr/lib64|${libdir}|' ${D}${sysconfdir}/rc.d/init.d/ceph-init-wrapper ${D}${sysconfdir}/init.d/ceph-init-wrapper
+
+    install -m 0700 ${WORKDIR}/ceph-preshutdown.sh ${D}${sbindir}/ceph-preshutdown.sh
+    
+    install -Dm 0644 ${WORKDIR}/starlingx-docker-override.conf ${D}${systemd_system_unitdir}/docker.service.d/starlingx-docker-override.conf
+
+    install -m 0644 -D ${S}/src/etc-rbdmap ${D}${sysconfdir}/ceph/rbdmap 
+    install -m 0644 -D ${S}/etc/sysconfig/ceph ${D}${sysconfdir}/sysconfig/ceph
+    install -m 0644 -D ${S}/src/logrotate.conf ${D}${sysconfdir}/logrotate.d/ceph
+
+    install -m 0644 -D ${S}/COPYING ${D}${docdir}/ceph/COPYING    
+    install -m 0644 -D ${S}/etc/sysctl/90-ceph-osd.conf ${D}${libdir}/sysctl.d/90-ceph-osd.conf
+    install -m 0644 -D ${S}/udev/50-rbd.rules ${D}${libdir}/udev/rules.d/50-rbd.rules
+    # install -m 0644 -D ${S}/udev/60-ceph-by-parttypeuuid.rules ${D}${libdir}/udev/rules.d/60-ceph-by-parttypeuuid.rules
+
+    mkdir -p ${D}${localstatedir}/ceph
+    mkdir -p ${D}${localstatedir}/log/ceph
+    mkdir -p ${D}${localstatedir}/lib/ceph/tmp
+    mkdir -p ${D}${localstatedir}/lib/ceph/mon
+    mkdir -p ${D}${localstatedir}/lib/ceph/osd
+    mkdir -p ${D}${localstatedir}/lib/ceph/mds
+    mkdir -p ${D}${localstatedir}/lib/ceph/mgr
+    mkdir -p ${D}${localstatedir}/lib/ceph/radosgw
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-osd
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-mds
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-rgw
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-mgr
+    mkdir -p ${D}${localstatedir}/lib/ceph/bootstrap-rbd
+    mkdir -p ${D}${localstatedir}/lib/ceph/crash/posted
+
+    install -m 0755 -d ${D}/${sysconfdir}/tmpfiles.d
+    echo "d ${localstatedir}/run/ceph 0755 ceph ceph -" >> ${D}/${sysconfdir}/tmpfiles.d/ceph.conf
+
+    install -m 0755 ${D}${libdir}/ceph/ceph_common.sh ${D}${libexecdir}/ceph
+
+    install -m 0750 -D ${S}/src/init-radosgw ${D}${sysconfdir}/rc.d/init.d/ceph-radosgw
+    install -m 0750 -D ${S}/src/init-radosgw ${D}${sysconfdir}/init.d/ceph-radosgw
+    sed -i '/### END INIT INFO/a SYSTEMCTL_SKIP_REDIRECT=1' ${D}${sysconfdir}/rc.d/init.d/ceph-radosgw
+    sed -i '/### END INIT INFO/a SYSTEMCTL_SKIP_REDIRECT=1' ${D}${sysconfdir}/init.d/ceph-radosgw
+    install -m 0750 -D ${S}/src/init-rbdmap ${D}${sysconfdir}/rc.d/init.d/rbdmap
+    install -m 0750 -D ${S}/src/init-rbdmap ${D}${sysconfdir}/init.d/rbdmap
+    install -m 0750 -D ${B}/bin/init-ceph ${D}${sysconfdir}/rc.d/init.d/ceph
+    install -m 0750 -D ${B}/bin/init-ceph ${D}${sysconfdir}/init.d/ceph
+    install -d -m 0750 ${D}${localstatedir}/log/radosgw 
+
+    sed -i -e 's:${WORKDIR}.*python3:${bindir}/python3:' ${D}${bindir}/ceph
+    # sed -i -e 's:${WORKDIR}.*python3:${bindir}/python3:' ${D}${bindir}/ceph-disk
+    # sed -i -e 's:${WORKDIR}.*python3:${bindir}/python3:' ${D}${bindir}/ceph-detect-init
+
+    sed -i -e 's:${WORKDIR}.*python3:${bindir}/python3:' ${D}${bindir}/ceph-crash
+    sed -i -e 's:${WORKDIR}.*python3:${bindir}/python3:' ${D}${bindir}/ceph-volume
+    sed -i -e 's:${WORKDIR}.*python3:${bindir}/python3:' ${D}${bindir}/ceph-volume-systemd
+    #sed -i -e '1s:python$:python3:' ${D}${bindir}/ceph-volume
+    #sed -i -e '1s:python$:python3:' ${D}${bindir}/ceph-volume-systemd
+    sed -i -e 's:/sbin/:/bin/:' ${D}${systemd_system_unitdir}/ceph-volume@.service
+}
+
+TARGET_CC_ARCH += "${LDFLAGS}"
+RDEPENDS_${PN} += "\
+        bash \
+"
+
+FILES_${PN} += "\
+        ${localstatedir} \
+       ${docdir}/ceph/COPYING \
+       ${libdir}/sysctl.d/90-ceph-osd.conf \
+       ${libdir}/udev/rules.d/50-rbd.rules \
+       ${libdir}/udev/rules.d/60-ceph-by-parttypeuuid.rules \
+        ${systemd_system_unitdir}/mgr-restful-plugin.service \
+        ${systemd_system_unitdir}/ceph-radosgw@.service \
+        ${systemd_system_unitdir}/ceph.service \
+        ${systemd_system_unitdir}/docker.service.d/starlingx-docker-override.conf \
+       home/root/cluster/ceph-mon_config.sh \
+       home/root/cluster/ceph-mgr_manual.sh \
+       home/root/cluster/ceph-volume_manual.sh \
+"
+# /run/ceph
diff --git a/meta-stx/recipes-extended/ceph/files/ceph-init-wrapper.sh b/meta-stx/recipes-extended/ceph/files/ceph-init-wrapper.sh
new file mode 100755 (executable)
index 0000000..ddbbc84
--- /dev/null
@@ -0,0 +1,331 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# This script is a helper wrapper for pmon monitoring of ceph
+# processes. The "/etc/init.d/ceph" script does not know if ceph is
+# running on the node. For example when the node is locked, ceph
+# processes are not running. In that case we do not want pmond to
+# monitor these processes.
+#
+# The script "/etc/services.d/<node>/ceph.sh" will create the file
+# "/var/run/.ceph_started" when ceph is running and remove it when
+# is not.
+#
+# The script also extracts one or more ceph process names that are
+# reported as 'not running' or 'dead' or 'failed' by '/etc/init.d/ceph status'
+# and writes the names to a text file: /tmp/ceph_status_failure.txt for
+# pmond to access. The pmond adds the text to logs and alarms. Example of text
+# samples written to file by this script are:
+#   'osd.1'
+#   'osd.1, osd.2'
+#   'mon.storage-0'
+#   'mon.storage-0, osd.2'
+#
+# Moreover, for processes that are reported as 'hung' by '/etc/init.d/ceph status'
+# the script will try to increase their logging to 'debug' for a configurable interval.
+# With logging increased it will output a few stack traces; then, at the end of this
+# interval, it dumps the process core and kills it.
+#
+# Return values;
+# zero -   /etc/init.d/ceph returned success or ceph is not running on the node
+# non-zero /etc/init.d/ceph returned a failure or invalid syntax
+#
+
+source /usr/bin/tsconfig
+source /etc/platform/platform.conf
+
+CEPH_SCRIPT="/etc/init.d/ceph"
+CEPH_FILE="$VOLATILE_PATH/.ceph_started"
+CEPH_GET_MON_STATUS_FILE="$VOLATILE_PATH/.ceph_getting_mon_status"
+CEPH_GET_OSD_STATUS_FILE="$VOLATILE_PATH/.ceph_getting_osd_status"
+CEPH_STATUS_FAILURE_TEXT_FILE="/tmp/ceph_status_failure.txt"
+
+BINDIR=/usr/bin
+SBINDIR=/usr/sbin
+LIBDIR=/usr/lib64/ceph
+ETCDIR=/etc/ceph
+source $LIBDIR/ceph_common.sh
+
+LOG_PATH=/var/log/ceph
+LOG_FILE=$LOG_PATH/ceph-process-states.log
+LOG_LEVEL=NORMAL  # DEBUG
+verbose=0
+
+DATA_PATH=$VOLATILE_PATH/ceph_hang    # folder where we keep state information
+mkdir -p $DATA_PATH                   # make sure folder exists
+
+MONITORING_INTERVAL=15
+TRACE_LOOP_INTERVAL=5
+CEPH_STATUS_TIMEOUT=20
+
+LOCK_CEPH_MON_SERVICE_FILE="$VOLATILE_PATH/.ceph_mon_service"
+LOCK_CEPH_OSD_SERVICE_FILE="$VOLATILE_PATH/.ceph_osd_service"
+LOCK_CEPH_MON_STATUS_FILE="$VOLATILE_PATH/.ceph_mon_status"
+LOCK_CEPH_OSD_STATUS_FILE="$VOLATILE_PATH/.ceph_osd_status"
+
+# Seconds to wait for ceph status to finish before
+# continuing to execute a service action
+MONITOR_STATUS_TIMEOUT=30
+MAX_STATUS_TIMEOUT=120
+
+RC=0
+
+# SM can only pass arguments through environment variable
+# when ARGS is not empty use it to extend command line arguments
+args=("$@")
+if [ -n "$ARGS" ]; then
+    IFS=";" read -r -a new_args <<< "$ARGS"
+    args+=("${new_args[@]}")
+fi
+
+with_service_lock ()
+{
+    local target="$1"; shift
+    [ -z "${target}" ] && target="mon osd"
+
+    # Run in sub-shell so we don't leak file descriptors
+    # used for locking service actions
+    (
+        # Grab service locks
+        wlog "-" INFO "Grab service locks"
+        [[ "${target}" == *"mon"* ]] && flock ${LOCK_CEPH_MON_SERVICE_FD}
+        [[ "${target}" == *"osd"* ]] && flock ${LOCK_CEPH_OSD_SERVICE_FD}
+
+        # Try to lock status with a timeout in case status is stuck
+        wlog "-" INFO "Lock service status"
+        deadline=$((SECONDS + MAX_STATUS_TIMEOUT + 1))
+        if [[ "${target}" == *"mon"* ]]; then
+            flock --exclusive --timeout ${MONITOR_STATUS_TIMEOUT} ${LOCK_CEPH_MON_STATUS_FD}
+        fi
+        if [[ "${target}" == *"osd"* ]]; then
+            timeout=$((deadline - SECONDS))
+            if [[ $timeout -gt 0 ]]; then
+                flock --exclusive --timeout ${timeout} ${LOCK_CEPH_OSD_STATUS_FD}
+            fi
+        fi
+
+        # Close lock file descriptors so they are
+        # not inherited by the spawned process then
+        # run service action
+        wlog "-" INFO "Run service action: $@"
+        "$@" {LOCK_CEPH_MON_SERVICE_FD}>&- \
+             {LOCK_CEPH_MON_STATUS_FD}>&- \
+             {LOCK_CEPH_OSD_SERVICE_FD}>&- \
+             {LOCK_CEPH_OSD_STATUS_FD}>&-
+
+    ) {LOCK_CEPH_MON_SERVICE_FD}>${LOCK_CEPH_MON_SERVICE_FILE} \
+      {LOCK_CEPH_MON_STATUS_FD}>${LOCK_CEPH_MON_STATUS_FILE} \
+      {LOCK_CEPH_OSD_SERVICE_FD}>${LOCK_CEPH_OSD_SERVICE_FILE} \
+      {LOCK_CEPH_OSD_STATUS_FD}>${LOCK_CEPH_OSD_STATUS_FILE}
+    RC=$?
+}
+
+start ()
+{
+    if [ ! -f ${CEPH_FILE} ]; then
+        # Ceph is not running on this node, return success
+        exit 0
+    fi
+    wlog "-" INFO "Ceph START $1 command received"
+    with_service_lock "$1" ${CEPH_SCRIPT} start $1
+    wlog "-" INFO "Ceph START $1 command finished."
+}
+
+stop ()
+{
+    wlog "-" INFO "Ceph STOP $1 command received."
+    with_service_lock "$1" ${CEPH_SCRIPT} stop $1
+    wlog "-" INFO "Ceph STOP $1 command finished."
+}
+
+restart ()
+{
+    if [ ! -f ${CEPH_FILE} ]; then
+        # Ceph is not running on this node, return success
+        exit 0
+    fi
+    wlog "-" INFO "Ceph RESTART $1 command received."
+    with_service_lock "$1" ${CEPH_SCRIPT} restart $1
+    wlog "-" INFO "Ceph RESTART $1 command finished."
+}
+
+log_and_restart_blocked_osds ()
+{
+    # Log info about the blocked osd daemons and then restart it
+    local names=$1
+    local message=$2
+    for name in $names; do
+        wlog $name "INFO" "$message"
+        ${CEPH_SCRIPT} restart $name
+    done
+}
+
+log_and_kill_hung_procs ()
+{
+    # Log info about the hung processes and then kill them; later on pmon will restart them
+    local names=$1
+    for name in $names; do
+        type=`echo $name | cut -c 1-3`   # e.g. 'mon', if $name is 'mon.storage-0'
+        id=`echo $name | cut -c 4- | sed 's/^\\.//'`
+        get_conf run_dir "/var/run/ceph" "run dir"
+        get_conf pid_file "$run_dir/$type.$id.pid" "pid file"
+        pid=$(cat $pid_file)
+        wlog $name "INFO" "Dealing with hung process (pid:$pid)"
+
+        # monitoring interval
+        wlog $name "INFO" "Increasing log level"
+        execute_ceph_cmd ret $name "ceph daemon $name config set debug_$type 20/20"
+        monitoring=$MONITORING_INTERVAL
+        while [ $monitoring -gt 0 ]; do
+            if [ $(($monitoring % $TRACE_LOOP_INTERVAL)) -eq 0 ]; then
+                date=$(date "+%Y-%m-%d_%H-%M-%S")
+                log_file="$LOG_PATH/hang_trace_${name}_${pid}_${date}.log"
+                wlog $name "INFO" "Dumping stack trace to: $log_file"
+                $(pstack $pid >$log_file) &
+            fi
+            let monitoring-=1
+            sleep 1
+        done
+        wlog $name "INFO" "Trigger core dump"
+        kill -ABRT $pid &>/dev/null
+        rm -f $pid_file # process is dead, core dump is archiving, preparing for restart
+        # Wait for pending systemd core dumps
+        sleep 2 # hope systemd_coredump has started meanwhile
+        deadline=$(( $(date '+%s') + 300 ))
+        while [[ $(date '+%s') -lt "${deadline}" ]]; do
+            systemd_coredump_pid=$(pgrep -f "systemd-coredump.*${pid}.*ceph-${type}")
+            [[ -z "${systemd_coredump_pid}" ]] && break
+            wlog $name "INFO" "systemd-coredump ceph-${type} in progress: pid ${systemd_coredump_pid}"
+            sleep 2
+        done
+        kill -KILL $pid &>/dev/null
+    done
+}
+
+status ()
+{
+    local target="$1"  # no shift here
+    [ -z "${target}" ] && target="mon osd"
+
+    if [ ! -f ${CEPH_FILE} ]; then
+        # Ceph is not running on this node, return success
+        exit 0
+    fi
+
+    if [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" != "simplex" ]] && [[ "$1" == "osd" ]]; then
+        timeout $CEPH_STATUS_TIMEOUT ceph -s
+        if [ "$?" -ne 0 ]; then
+            # Ceph cluster is not accessible. Don't panic, controller swact
+            # may be in progress.
+            wlog "-" INFO "Ceph is down, ignoring OSD status."
+            exit 0
+        fi
+    fi
+
+    # Report success while ceph mon is running a service action
+    # otherwise mark ceph mon status is in progress
+    exec {LOCK_CEPH_MON_STATUS_FD}>${LOCK_CEPH_MON_STATUS_FILE}
+    if [[ "${target}" == *"mon"* ]]; then
+        flock --shared --nonblock ${LOCK_CEPH_MON_SERVICE_FILE} true
+        if [[ $? -ne 0 ]]; then
+            exit 0
+        fi
+        # Lock will be released when script exits
+        flock --shared ${LOCK_CEPH_MON_STATUS_FD}
+    fi
+    # Report success while ceph mon is running a service action
+    # otherwise mark ceph osd status is in progress
+    exec {LOCK_CEPH_OSD_STATUS_FD}>${LOCK_CEPH_OSD_STATUS_FILE}
+    if [[ "${target}" == *"osd"* ]]; then
+        flock --shared --nonblock ${LOCK_CEPH_OSD_SERVICE_FILE} true
+        if [[ $? -ne 0 ]]; then
+            exit 0
+        fi
+        # Lock will be released when script exits
+        flock --shared ${LOCK_CEPH_OSD_STATUS_FD}
+    fi
+
+    result=`${CEPH_SCRIPT} status $1 {LOCK_CEPH_MON_STATUS_FD}>&- {LOCK_CEPH_OSD_STATUS_FD}>&-`
+    RC=$?
+    if [ "$RC" -ne 0 ]; then
+        erred_procs=`echo "$result" | sort | uniq | awk ' /not running|dead|failed/ {printf "%s ", $1}' | sed 's/://g' | sed 's/, $//g'`
+        hung_procs=`echo "$result" | sort | uniq | awk ' /hung/ {printf "%s ", $1}' | sed 's/://g' | sed 's/, $//g'`
+        blocked_ops_procs=`echo "$result" | sort | uniq | awk ' /blocked ops/ {printf "%s ", $1}' | sed 's/://g' | sed 's/, $//g'`
+        stuck_peering_procs=`echo "$result" | sort | uniq | awk ' /stuck peering/ {printf "%s ", $1}' | sed 's/://g' | sed 's/, $//g'`
+        invalid=0
+        host=`hostname`
+        if [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" != "simplex" ]]; then
+            # On 2 node configuration we have a floating monitor
+            host="controller"
+        fi
+        for i in $(echo $erred_procs $hung_procs); do
+            if [[ "$i" =~ osd.?[0-9]?[0-9]|mon.$host ]]; then
+                continue
+            else
+                invalid=1
+            fi
+        done
+
+        log_and_restart_blocked_osds "$blocked_ops_procs"\
+            "Restarting OSD with blocked operations"
+        log_and_restart_blocked_osds "$stuck_peering_procs"\
+            "Restarting OSD stuck peering"
+        log_and_kill_hung_procs $hung_procs
+
+        rm -f $CEPH_STATUS_FAILURE_TEXT_FILE
+        if [ $invalid -eq 0 ]; then
+            text=""
+            for i in $erred_procs; do
+                text+="$i, "
+            done
+            for i in $hung_procs; do
+                text+="$i (process hang), "
+            done
+            echo "$text" | tr -d '\n' > $CEPH_STATUS_FAILURE_TEXT_FILE
+        else
+            echo "$host: '${CEPH_SCRIPT} status $1' result contains invalid process names: $erred_procs"
+            echo "Undetermined osd or monitor id" > $CEPH_STATUS_FAILURE_TEXT_FILE
+        fi
+    fi
+
+    if [[ $RC == 0 ]] && [[ "$1" == "mon" ]] && [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" != "simplex" ]]; then
+        # SM needs exit code != 0 from 'status mon' argument of the init script on
+        # standby controller otherwise it thinks that the monitor is running and
+        # tries to stop it.
+        # '/etc/init.d/ceph status mon' checks the status of monitors configured in
+        # /etc/ceph/ceph.conf and if it should be running on current host.
+        # If it should not be running it just exits with code 0. This is what
+        # happens on the standby controller.
+        # When floating monitor is running on active controller /var/lib/ceph/mon of
+        # standby is not mounted (Ceph monitor partition is DRBD synced).
+        test -e "/var/lib/ceph/mon/ceph-controller"
+        if [ "$?" -ne 0 ]; then
+            exit 3
+        fi
+    fi
+}
+
+
+case "${args[0]}" in
+    start)
+        start ${args[1]}
+        ;;
+    stop)
+        stop ${args[1]}
+        ;;
+    restart)
+        restart ${args[1]}
+        ;;
+    status)
+        status ${args[1]}
+        ;;
+    *)
+        echo "Usage: $0 {start|stop|restart|status} [{mon|osd|osd.<number>|mon.<hostname>}]"
+        exit 1
+        ;;
+esac
+
+exit $RC
diff --git a/meta-stx/recipes-extended/ceph/files/ceph-manage-journal.py b/meta-stx/recipes-extended/ceph/files/ceph-manage-journal.py
new file mode 100644 (file)
index 0000000..f91cbc1
--- /dev/null
@@ -0,0 +1,334 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import ast
+import os
+import os.path
+import re
+import subprocess
+import sys
+
+DEVICE_NAME_NVME = "nvme"
+
+#########
+# Utils #
+#########
+
+
+def command(arguments, **kwargs):
+    """Execute a command and capture stdout, stderr & return code"""
+    process = subprocess.Popen(
+        arguments,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        **kwargs)
+    out, err = process.communicate()
+    return out, err, process.returncode
+
+
+def get_input(arg, valid_keys):
+    """Convert the input to a dict and perform basic validation"""
+    json_string = arg.replace("\\n", "\n")
+    try:
+        input_dict = ast.literal_eval(json_string)
+        if not all(k in input_dict for k in valid_keys):
+            return None
+    except Exception:
+        return None
+
+    return input_dict
+
+
+def get_partition_uuid(dev):
+    output, _, _ = command(['blkid', dev])
+    try:
+        return re.search('PARTUUID=\"(.+?)\"', output).group(1)
+    except AttributeError:
+        return None
+
+
+def device_path_to_device_node(device_path):
+    try:
+        output, _, _ = command(["udevadm", "settle", "-E", device_path])
+        out, err, retcode = command(["readlink", "-f", device_path])
+        out = out.rstrip()
+    except Exception as e:
+        return None
+
+    return out
+
+
+###########################################
+# Manage Journal Disk Partitioning Scheme #
+###########################################
+
+DISK_BY_PARTUUID = "/dev/disk/by-partuuid/"
+JOURNAL_UUID = '45b0969e-9b03-4f30-b4c6-b4b80ceff106'  # Type of a journal partition
+
+
+def is_partitioning_correct(disk_path, partition_sizes):
+    """Validate the existence and size of journal partitions"""
+
+    # Obtain the device node from the device path.
+    disk_node = device_path_to_device_node(disk_path)
+
+    # Check that partition table format is GPT
+    output, _, _ = command(["udevadm", "settle", "-E", disk_node])
+    output, _, _ = command(["parted", "-s", disk_node, "print"])
+    if not re.search('Partition Table: gpt', output):
+        print("Format of disk node %s is not GPT, zapping disk" % disk_node)
+        return False
+
+    # Check each partition size
+    partition_index = 1
+    for size in partition_sizes:
+        # Check that each partition size matches the one in input
+        if DEVICE_NAME_NVME in disk_node:
+            partition_node = '{}p{}'.format(disk_node, str(partition_index))
+        else:
+            partition_node = '{}{}'.format(disk_node, str(partition_index))
+
+        output, _, _ = command(["udevadm", "settle", "-E", partition_node])
+        cmd = ["parted", "-s", partition_node, "unit", "MiB", "print"]
+        output, _, _ = command(cmd)
+
+        regex = ("^Disk " + str(partition_node) + ":\\s*" +
+                 str(size) + "[\\.0]*MiB")
+        if not re.search(regex, output, re.MULTILINE):
+            print("Journal partition %(node)s size is not %(size)s, "
+                  "zapping disk" % {"node": partition_node, "size": size})
+            return False
+
+        partition_index += 1
+
+    output, _, _ = command(["udevadm", "settle", "-t", "10"])
+    return True
+
+
+def create_partitions(disk_path, partition_sizes):
+    """Recreate partitions"""
+
+    # Obtain the device node from the device path.
+    disk_node = device_path_to_device_node(disk_path)
+
+    # Issue: After creating a new partition table on a device, Udev does not
+    # always remove old symlinks (i.e. to previous partitions on that device).
+    # Also, even if links are erased before zapping the disk, some of them will
+    # be recreated even though there is no partition to back them!
+    # Therefore, we have to remove the links AFTER we erase the partition table
+    # Issue: DISK_BY_PARTUUID directory is not present at all if there are no
+    # GPT partitions on the storage node so nothing to remove in this case
+    links = []
+    if os.path.isdir(DISK_BY_PARTUUID):
+        links = [os.path.join(DISK_BY_PARTUUID, l) for l in os.listdir(DISK_BY_PARTUUID)
+                 if os.path.islink(os.path.join(DISK_BY_PARTUUID, l))]
+
+    # Erase all partitions on current node by creating a new GPT table
+    _, err, ret = command(["parted", "-s", disk_node, "mktable", "gpt"])
+    if ret:
+        print("Error erasing partition table of %(node)s\n"
+              "Return code: %(ret)s reason: %(reason)s" %
+              {"node": disk_node, "ret": ret, "reason": err})
+        exit(1)
+
+    # Erase old symlinks
+    for l in links:
+        if disk_node in os.path.realpath(l):
+            os.remove(l)
+
+    # Create partitions in order
+    used_space_mib = 1  # leave 1 MB at the beginning of the disk
+    num = 1
+    for size in partition_sizes:
+        cmd = ['parted', '-s', disk_node, 'unit', 'mib',
+               'mkpart', 'primary',
+               str(used_space_mib), str(used_space_mib + size)]
+        _, err, ret = command(cmd)
+        parms = {"disk_node": disk_node,
+                 "start": used_space_mib,
+                 "end": used_space_mib + size,
+                 "reason": err}
+        print("Created partition from start=%(start)s MiB to end=%(end)s MiB"
+              " on %(disk_node)s" % parms)
+        if ret:
+            print("Failed to create partition with "
+                  "start=%(start)s, end=%(end)s "
+                  "on %(disk_node)s reason: %(reason)s" % parms)
+            exit(1)
+        # Set partition type to ceph journal
+        # noncritical operation, it makes 'ceph-disk list' output correct info
+        cmd = ['sgdisk',
+               '--change-name={num}:ceph journal'.format(num=num),
+               '--typecode={num}:{uuid}'.format(
+                   num=num,
+                   uuid=JOURNAL_UUID,
+               ),
+               disk_node]
+        _, err, ret = command(cmd)
+        if ret:
+            print("WARNING: Failed to set partition name and typecode")
+        used_space_mib += size
+        num += 1
+
+
+###########################
+# Manage Journal Location #
+###########################
+
+OSD_PATH = "/var/lib/ceph/osd/"
+
+
+def mount_data_partition(data_path, osdid):
+    """Mount an OSD data partition and return the mounted path"""
+
+    # Obtain the device node from the device path.
+    data_node = device_path_to_device_node(data_path)
+
+    mount_path = OSD_PATH + "ceph-" + str(osdid)
+    output, _, _ = command(['mount'])
+    regex = "^" + data_node + ".*" + mount_path
+    if not re.search(regex, output, re.MULTILINE):
+        cmd = ['mount', '-t', 'xfs', data_node, mount_path]
+        _, _, ret = command(cmd)
+        params = {"node": data_node, "path": mount_path}
+        if ret:
+            print("Failed to mount %(node)s to %(path)s, aborting" % params)
+            exit(1)
+        else:
+            print("Mounted %(node)s to %(path)s" % params)
+    return mount_path
+
+
+def is_location_correct(path, journal_path, osdid):
+    """Check if location points to the correct device"""
+
+    # Obtain the device node from the device path.
+    journal_node = device_path_to_device_node(journal_path)
+
+    cur_node = os.path.realpath(path + "/journal")
+    if cur_node == journal_node:
+        return True
+    else:
+        return False
+
+
+def fix_location(mount_point, journal_path, osdid):
+    """Move the journal to the new partition"""
+
+    # Obtain the device node from the device path.
+    journal_node = device_path_to_device_node(journal_path)
+
+    # Fix symlink
+    path = mount_point + "/journal"  # 'journal' symlink path used by ceph-osd
+    journal_uuid = get_partition_uuid(journal_node)
+    new_target = DISK_BY_PARTUUID + journal_uuid
+    params = {"path": path, "target": new_target}
+    try:
+        if os.path.lexists(path):
+            os.unlink(path)  # delete the old symlink
+        os.symlink(new_target, path)
+        print("Symlink created: %(path)s -> %(target)s" % params)
+    except:
+        print("Failed to create symlink: %(path)s -> %(target)s" % params)
+        exit(1)
+    # Fix journal_uuid
+    path = mount_point + "/journal_uuid"
+    try:
+        with open(path, 'w') as f:
+            f.write(journal_uuid)
+    except Exception as ex:
+        # The operation is noncritical, it only makes 'ceph-disk list'
+        # display complete output. We log and continue.
+        params = {"path": path, "uuid": journal_uuid}
+        print("WARNING: Failed to set uuid of %(path)s to %(uuid)s" % params)
+
+    # Clean the journal partition
+    # even if erasing the partition table, if another journal was present here
+    # it's going to be reused. Journals are always bigger than 100MB.
+    command(['dd', 'if=/dev/zero', 'of=%s' % journal_node,
+             'bs=1M', 'count=100'])
+
+    # Format the journal
+    cmd = ['/usr/bin/ceph-osd', '-i', str(osdid),
+           '--pid-file', '/var/run/ceph/osd.%s.pid' % osdid,
+           '-c', '/etc/ceph/ceph.conf',
+           '--cluster', 'ceph',
+           '--mkjournal']
+    out, err, ret = command(cmd)
+    params = {"journal_node": journal_node,
+              "osdid": osdid,
+              "ret": ret,
+              "reason": err}
+    if not ret:
+        print("Prepared new journal partition: %(journal_node)s "
+              "for osd id: %(osdid)s" % params)
+    else:
+        print("Error initializing journal node: "
+              "%(journal_node)s for osd id: %(osdid)s "
+              "ceph-osd return code: %(ret)s reason: %(reason)s" % params)
+
+
+########
+# Main #
+########
+
+def main(argv):
+    # parse and validate arguments
+    err = False
+    partitions = None
+    location = None
+    if len(argv) != 2:
+        err = True
+    elif argv[0] == "partitions":
+        valid_keys = ['disk_path', 'journals']
+        partitions = get_input(argv[1], valid_keys)
+        if not partitions:
+            err = True
+        elif not isinstance(partitions['journals'], list):
+            err = True
+    elif argv[0] == "location":
+        valid_keys = ['data_path', 'journal_path', 'osdid']
+        location = get_input(argv[1], valid_keys)
+        if not location:
+            err = True
+        elif not isinstance(location['osdid'], int):
+            err = True
+    else:
+        err = True
+    if err:
+        print("Command intended for internal use only")
+        exit(-1)
+
+    if partitions:
+        # Recreate partitions only if the existing ones don't match input
+        if not is_partitioning_correct(partitions['disk_path'],
+                                       partitions['journals']):
+            create_partitions(partitions['disk_path'], partitions['journals'])
+        else:
+            print("Partition table for %s is correct, "
+                  "no need to repartition" %
+                  device_path_to_device_node(partitions['disk_path']))
+    elif location:
+        # we need to have the data partition mounted & we can let it mounted
+        mount_point = mount_data_partition(location['data_path'],
+                                           location['osdid'])
+        # Update journal location only if link point to another partition
+        if not is_location_correct(mount_point,
+                                   location['journal_path'],
+                                   location['osdid']):
+            print("Fixing journal location for "
+                  "OSD id: %(id)s" % {"node": location['data_path'],
+                                      "id": location['osdid']})
+            fix_location(mount_point,
+                         location['journal_path'],
+                         location['osdid'])
+        else:
+            print("Journal location for %s is correct, "
+                  "no need to change it" % location['data_path'])
+
+
+main(sys.argv[1:])
diff --git a/meta-stx/recipes-extended/ceph/files/ceph-preshutdown.sh b/meta-stx/recipes-extended/ceph/files/ceph-preshutdown.sh
new file mode 100644 (file)
index 0000000..5f59bd1
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+script=$(basename $0)
+
+# Set nullglob so wildcards will return empty string if no match
+shopt -s nullglob
+
+for dev in /dev/rbd[0-9]*; do
+    for mnt in $(mount | awk -v dev=$dev '($1 == dev) {print $3}'); do
+        logger -t ${script} "Unmounting $mnt"
+        /usr/bin/umount $mnt
+    done
+    logger -t ${script} "Unmounted $dev"
+done
+
+for dev in /dev/rbd[0-9]*; do
+    /usr/bin/rbd unmap -o force $dev
+    logger -t ${script} "Unmapped $dev"
+done
+
+lsmod | grep -q '^rbd\>' && /usr/sbin/modprobe -r rbd
+lsmod | grep -q '^libceph\>' && /usr/sbin/modprobe -r libceph
+
+exit 0
+
diff --git a/meta-stx/recipes-extended/ceph/files/ceph-radosgw.service b/meta-stx/recipes-extended/ceph/files/ceph-radosgw.service
new file mode 100644 (file)
index 0000000..391ecf6
--- /dev/null
@@ -0,0 +1,18 @@
+[Unit]
+Description=radosgw RESTful rados gateway
+After=network.target
+#After=remote-fs.target nss-lookup.target network-online.target time-sync.target
+#Wants=network-online.target
+
+[Service]
+Type=forking
+Restart=no
+KillMode=process
+RemainAfterExit=yes
+ExecStart=/etc/rc.d/init.d/ceph-radosgw start
+ExecStop=/etc/rc.d/init.d/ceph-radosgw stop
+ExecReload=/etc/rc.d/init.d/ceph-radosgw reload
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/meta-stx/recipes-extended/ceph/files/ceph.conf b/meta-stx/recipes-extended/ceph/files/ceph.conf
new file mode 100644 (file)
index 0000000..4057e53
--- /dev/null
@@ -0,0 +1,73 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+[global]
+       # Unique ID for the cluster.
+       fsid = %CLUSTER_UUID%
+       # Public network where the monitor is connected to, i.e, 128.224.0.0/16
+       #public network = 127.0.0.1/24
+       # For version 0.55 and beyond, you must explicitly enable
+       # or disable authentication with "auth" entries in [global].
+       auth_cluster_required = cephx
+       auth_service_required = cephx
+       auth_client_required = cephx
+       osd_journal_size = 1024
+
+       # Uncomment the following line if you are mounting with ext4
+       # filestore xattr use omap = true
+
+       # Number of replicas of objects. Write an object 2 times.
+       # Cluster cannot reach an active + clean state until there's enough OSDs
+       # to handle the number of copies of an object. In this case, it requires
+       # at least 2 OSDs
+       osd_pool_default_size = 2
+
+       # Allow writing one copy in a degraded state.
+       osd_pool_default_min_size = 1
+
+       # Ensure you have a realistic number of placement groups. We recommend
+       # approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
+       # divided by the number of replicas (i.e., osd pool default size). So for
+       # 2 OSDs and osd pool default size = 2, we'd recommend approximately
+       # (100 * 2) / 2 = 100.
+       osd_pool_default_pg_num = 64
+       osd_pool_default_pgp_num = 64
+       osd_crush_chooseleaf_type = 1
+       setuser match path = /var/lib/ceph/$type/$cluster-$id
+
+       # Override Jewel default of 2 reporters. StarlingX has replication factor 2
+       mon_osd_min_down_reporters = 1
+
+       # Use Hammer's report interval default value
+       osd_mon_report_interval_max = 120
+
+    # Configure max PGs per OSD to cover worst-case scenario of all possible
+    # StarlingX deployments i.e. AIO-SX with one OSD. Otherwise using
+    # the default value provided by Ceph Mimic leads to "too many PGs per OSD"
+    # health warning as the pools needed by stx-openstack are being created.
+    mon_max_pg_per_osd = 2048
+    osd_max_pg_per_osd_hard_ratio = 1.2
+
+[osd]
+       osd_mkfs_type = xfs
+       osd_mkfs_options_xfs = "-f"
+       osd_mount_options_xfs = "rw,noatime,inode64,logbufs=8,logbsize=256k"
+
+[mon]
+    mon warn on legacy crush tunables = false
+    # Quiet new warnings on move to Hammer
+    mon pg warn max per osd = 2048
+    mon pg warn max object skew = 0
+    mgr initial modules = restful
diff --git a/meta-stx/recipes-extended/ceph/files/ceph.conf.pmon b/meta-stx/recipes-extended/ceph/files/ceph.conf.pmon
new file mode 100644 (file)
index 0000000..00418b2
--- /dev/null
@@ -0,0 +1,26 @@
+[process]
+process  = ceph
+script   = /etc/init.d/ceph-init-wrapper
+
+style    = lsb
+severity = major          ; minor, major, critical
+restarts = 3              ; restart retries before error assertion
+interval = 30             ; number of seconds to wait between restarts
+
+mode = status             ; Monitoring mode: passive (default) or active
+                          ; passive: process death monitoring (default: always)
+                          ; active : heartbeat monitoring, i.e. request / response messaging
+                          ; status : determine process health with executing "status" command
+                          ;          "start" is used to start the process(es) again
+                          ; ignore : do not monitor or stop monitoring
+
+; Status and Active Monitoring Options
+
+period     = 30           ; monitor period in seconds
+timeout    = 120          ; for active mode, messaging timeout period in seconds, must be shorter than period
+                          ; for status mode, max amount of time for a command to execute
+
+; Status Monitoring Options
+start_arg      = start        ; start argument for the script
+status_arg     = status       ; status argument for the script
+status_failure_text = /tmp/ceph_status_failure.txt   ; text to be added to alarms or logs, this is optional
diff --git a/meta-stx/recipes-extended/ceph/files/ceph.service b/meta-stx/recipes-extended/ceph/files/ceph.service
new file mode 100644 (file)
index 0000000..d3c2acc
--- /dev/null
@@ -0,0 +1,16 @@
+[Unit]
+Description=StarlingX Ceph Startup
+After=network.target
+
+[Service]
+Type=forking
+Restart=no
+KillMode=process
+RemainAfterExit=yes
+ExecStart=/etc/rc.d/init.d/ceph start
+ExecStop=/etc/rc.d/init.d/ceph stop
+PIDFile=/var/run/ceph/ceph.pid
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/meta-stx/recipes-extended/ceph/files/ceph.sh b/meta-stx/recipes-extended/ceph/files/ceph.sh
new file mode 100644 (file)
index 0000000..e7e6ecd
--- /dev/null
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+INITDIR=/etc/init.d
+LOGFILE=/var/log/ceph/ceph-init.log
+CEPH_FILE=/var/run/.ceph_started
+
+# Get our nodetype
+. /etc/platform/platform.conf
+
+# Exit immediately if ceph not configured (i.e. no mon in the config file)
+if ! grep -q "mon\." /etc/ceph/ceph.conf
+then
+    exit 0
+fi
+
+logecho ()
+{
+    echo $1
+    date >> ${LOGFILE}
+    echo $1 >> ${LOGFILE}
+}
+
+start ()
+{
+    logecho "Starting ceph services..."
+    ${INITDIR}/ceph start >> ${LOGFILE} 2>&1
+    RC=$?
+
+    if [ ! -f ${CEPH_FILE} ]; then
+        touch ${CEPH_FILE}
+    fi
+}
+
+stop ()
+{
+    if [[ "$system_type" == "All-in-one" ]] && [[ "$system_mode" == "simplex" ]]; then
+        logecho "Ceph services will continue to run on node"
+        exit 0
+    fi
+
+    logecho "Stopping ceph services..."
+
+    if [ -f ${CEPH_FILE} ]; then
+        rm -f ${CEPH_FILE}
+    fi
+
+    ${INITDIR}/ceph stop >> ${LOGFILE} 2>&1
+    RC=$?
+}
+
+RC=0
+
+case "$1" in
+    start)
+        start
+        ;;
+    stop)
+        stop
+        ;;
+    *)
+        echo "Usage: $0 {start|stop}"
+        exit 1
+        ;;
+esac
+
+logecho "RC was: $RC"
+exit $RC
diff --git a/meta-stx/recipes-extended/ceph/files/mgr-restful-plugin.py b/meta-stx/recipes-extended/ceph/files/mgr-restful-plugin.py
new file mode 100644 (file)
index 0000000..d1f14b8
--- /dev/null
@@ -0,0 +1,1121 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+
+### BEGIN INIT INFO
+# Provides:          ceph/mgr RESTful API plugin
+# Required-Start:    $ceph
+# Required-Stop:     $ceph
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Ceph MGR RESTful API plugin
+# Description:       Ceph MGR RESTful API plugin
+### END INIT INFO
+
+import argparse
+import contextlib
+import errno
+import fcntl
+import inspect
+import json
+import logging
+import multiprocessing
+import os
+import shutil
+import signal
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+
+import daemon
+import psutil
+import requests
+
+# 'timeout' command returns exit status 124
+# if command times out (see man page)
+GNU_TIMEOUT_EXPIRED_RETCODE = 124
+
+
+def psutil_terminate_kill(target, timeout):
+
+    """Extend psutil functionality to stop a process.
+
+       SIGINT is sent to each target then after a grace period SIGKILL
+       is sent to the ones that are still running.
+    """
+
+    if not isinstance(target, list):
+        target = [target]
+    _, target = psutil.wait_procs(target, timeout=0)
+    for action in [lambda p: p.terminate(), lambda p: p.kill()]:
+        for proc in target:
+            action(proc)
+        _, target = psutil.wait_procs(
+            target, timeout=timeout)
+
+
+class Config(object):
+
+    """ceph-mgr service wrapper configuration options.
+
+        In the future we may want to load them from a configuration file
+        (for example /etc/ceph/mgr-restful-plugin.conf )
+    """
+
+    def __init__(self):
+        self.log_level = logging.INFO
+        self.log_dir = '/var/log'
+
+        self.ceph_mgr_service = '/usr/bin/ceph-mgr'
+        self.ceph_mgr_config = '/etc/ceph/ceph.conf'
+        self.ceph_mgr_cluster = 'ceph'
+        self.ceph_mgr_rundir = '/var/run/ceph/mgr'
+        self.ceph_mgr_confdir = '/var/lib/ceph/mgr'
+        self.ceph_mgr_identity = socket.gethostname()
+
+        self.service_name = 'mgr-restful-plugin'
+        self.service_socket = os.path.join(
+            self.ceph_mgr_rundir, '{}.socket'.format(self.service_name))
+        self.service_lock = os.path.join(
+            self.ceph_mgr_rundir, '{}.lock'.format(self.service_name))
+        self.service_pid_file = os.path.join(
+            '/var/run/ceph', '{}.pid'.format(self.service_name))
+
+        self.restful_plugin_port = 5001
+
+        # maximum size of a message received/sent via
+        # service monitor control socket
+        self.service_socket_bufsize = 1024
+
+        # maximum time to wait for ceph cli to exit
+        self.ceph_cli_timeout_sec = 30
+
+        # how much time to wait after ceph cli commands fail with timeout
+        # before running any other commands
+        self.cluster_grace_period_sec = 30
+
+        # after ceph-mgr is started it goes through an internal initialization
+        # phase before; how much time to wait before querying ceph-mgr
+        self.ceph_mgr_grace_period_sec = 15
+
+        # after sending SIGTERM to ceph-mgr how much time to wait before
+        # sending SIGKILL (maximum time allowed for ceph-mgr cleanup)
+        self.ceph_mgr_kill_delay_sec = 5
+
+        # if service monitor is running a recovery procedure it reports
+        # status OK even if ceph-mgr is currently down. This sets the
+        # maximum number of consecutive ceph-mgr failures before reporting
+        # status error
+        self.ceph_mgr_fail_count_report_error = 3
+
+        # maximum number of consecutive ceph-mgr failures before
+        # stopping mgr-restful-plugin service
+        self.ceph_mgr_fail_count_exit = 5
+
+        # maximum time allowed for ceph-mgr to respond to a REST API request
+        self.rest_api_timeout_sec = 15
+
+        # interval between consecutive REST API requests (ping's). A smaller
+        # value here triggers more requests to ceph-mgr restful plugin. A
+        # higher value makes recovery slower when services become unavailable
+        self.restful_plugin_ping_delay_sec = 3
+
+        # where to save the self-signed certificate generated by ceph-mgr
+        self.restful_plugin_cert_path = os.path.join(
+            self.ceph_mgr_rundir, 'restful.crt')
+
+        # time to wait after enabling restful plugin
+        self.restful_plugin_grace_period_sec = 3
+
+        # after how many REST API ping failures to restart ceph-mgr
+        self.ping_fail_count_restart_mgr = 3
+
+        # after how many REST API ping failures to report status error.
+        # Until then service monitor reports status OK just in case
+        # restful plugin recovers
+        self.ping_fail_count_report_error = 5
+
+    @staticmethod
+    def load():
+        return Config()
+
+
+def setup_logging(name=None, cleanup_handlers=False):
+    if not name:
+        name = CONFIG.service_name
+    log = logging.getLogger(name)
+    log.setLevel(CONFIG.log_level)
+    if cleanup_handlers:
+        try:
+            for handler in log.handlers:
+                if isinstance(handler, logging.StreamHandler):
+                    handler.flush()
+                if isinstance(handler, logging.FileHandler):
+                    handler.close()
+            log.handlers = []
+        except Exception:
+            pass
+    elif log.handlers:
+        return log
+    handler = logging.FileHandler(
+        os.path.join(CONFIG.log_dir,
+                     '{}.log'.format(CONFIG.service_name)))
+    handler.setFormatter(
+        logging.Formatter('%(asctime)s %(process)s %(levelname)s %(name)s %(message)s'))
+    log.addHandler(handler)
+    return log
+
+
+CONFIG = Config.load()
+LOG = setup_logging(name='init-wrapper')
+
+
+class ServiceException(Exception):
+
+    """Generic mgr-restful-plugin service exception.
+
+       Build exception string based on static (per exception class)
+       string plus args, keyword args passed to exception constructor.
+    """
+
+    message = ""
+
+    def __init__(self, *args, **kwargs):
+        if "message" not in kwargs:
+            try:
+                message = self.message.format(*args, **kwargs)
+            except Exception:   # noqa
+                message = '{}, args:{}, kwargs: {}'.format(
+                    self.message, args, kwargs)
+        else:
+            message = kwargs["message"]
+        super(ServiceException, self).__init__(message)
+
+
+class ServiceAlreadyStarted(ServiceException):
+    message = ('Service monitor already started')
+
+
+class ServiceLockFailed(ServiceException):
+    message = ('Unable to lock service monitor: '
+               'reason={reason}')
+
+
+class ServiceNoSocket(ServiceException):
+    message = ('Unable to create service monitor socket: '
+               'reason={reason}')
+
+
+class ServiceSocketBindFailed(ServiceException):
+    message = ('Failed to bind service monitor socket: '
+               'path={path}, reason={reason}')
+
+
+class ServiceNoPidFile(ServiceException):
+    message = ('Failed to update pid file: '
+               'path={path}, reason={reason}')
+
+
+class CommandFailed(ServiceException):
+    message = ('Command failed: command={command}, '
+               'reason={reason}, out={out}')
+
+
+class CommandTimeout(ServiceException):
+    message = ('Command timeout: command={command}, '
+               'timeout={timeout}')
+
+
+class CephMgrStartFailed(ServiceException):
+    message = ('Failed to start ceph_mgr: '
+               'reason={reason}')
+
+
+class CephRestfulPluginFailed(ServiceException):
+    message = ('Failed to start restful plugin: '
+               'reason={reason}')
+
+
+class RestApiPingFailed(ServiceException):
+    message = ('REST API ping failed: '
+               'reason={reason}')
+
+
+class ServiceMonitor(object):
+
+    """Configure and monitor ceph-mgr and restful plugin (Ceph REST API)
+
+       1. process init script service requests: status, stop. Requests are
+          received via a control socket. Stop has priority over whatever
+          the monitor is doing currently. Any ceph command that may be running
+          is terminated/killed. Note that while ceph-mgr and restful plugin
+          configuration is in progress ServiceMonitor reports status OK to
+          avoid being restarted by SM.
+
+       2. configure ceph-mgr and mgr restful plugin: authentication, REST API
+          service port, self signed certificate. This runs as a separate
+          process so it can be stopped when init script requests it.
+
+       3. periodically check (ping) REST API responds to HTTPS requests.
+          Recovery actions are taken if REST API fails to respond: restart
+          ceph-mgr, wait for cluster to become available again.
+    """
+
+    def __init__(self):
+        # process running configuration & REST API ping loop
+        self.monitor = None
+
+        # command socket used by init script
+        self.command = None
+
+        # ceph-mgr process
+        self.ceph_mgr = None
+
+        # consecutive ceph-mgr/restful-plugin start failures. Service monitor
+        # reports failure after CONFIG.ceph_mgr_max_failure_count
+        self.ceph_mgr_failure_count = 0
+
+        # consecutive REST API ping failures. ceph-mgr service is restarted
+        # after CONFIG.ping_fail_count_restart_mgr threshold is exceeded
+        self.ping_failure_count = 0
+
+        # REST API url reported by ceph-mgr after enabling restful plugin
+        self.restful_plugin_url = ''
+
+        # REST API self signed certificate generated by restful plugin
+        self.certificate = ''
+
+    def run(self):
+        self.disable_certificate_check()
+        with self.service_lock(), self.service_socket(), \
+                self.service_pid_file():
+            self.start_monitor()
+            self.server_loop()
+
+    def disable_certificate_check(self):
+        # ceph-mgr restful plugin is configured with a self-signed
+        # certificate. Certificate host is hard-coded to "ceph-restful"
+        # which causes HTTPS requests to fail because they don't
+        # match current host name ("controller-..."). Disable HTTPS
+        # certificates check in urllib3
+        LOG.warning('Disable urllib3 certificates check')
+        requests.packages.urllib3.disable_warnings()
+
+    def server_loop(self):
+        self.command.listen(2)
+        while True:
+            try:
+                client, _ = self.command.accept()
+                request = client.recv(CONFIG.service_socket_bufsize)
+                LOG.debug('Monitor command socket: request=%s', str(request))
+                cmd = request.split(' ')
+                cmd, args = cmd[0], cmd[1:]
+                if cmd == 'status':
+                    self.send_response(client, request, self.status())
+                elif cmd == 'stop':
+                    self.stop()
+                    self.send_response(client, request, 'OK')
+                    break
+                elif cmd == 'restful-url':
+                    try:
+                        self.restful_plugin_url = args[0]
+                        self.send_response(client, request, 'OK')
+                    except IndexError:
+                        LOG.warning('Failed to update restful plugin url: '
+                                    'args=%s', str(args))
+                        self.send_response(client, request, 'ERR')
+                elif cmd == 'certificate':
+                    try:
+                        self.certificate = args[0] if args else ''
+                        self.send_response(client, request, 'OK')
+                    except IndexError:
+                        LOG.warning('Failed to update certificate path: '
+                                    'args=%s', str(args))
+                        self.send_response(client, request, 'ERR')
+                elif cmd == 'ceph-mgr-failures':
+                    try:
+                        self.ceph_mgr_failure_count = int(args[0])
+                        self.send_response(client, request, 'OK')
+                        if self.ceph_mgr_failure_count >= CONFIG.ceph_mgr_fail_count_exit:
+                            self.stop()
+                            break
+                    except (IndexError, ValueError):
+                        LOG.warning('Failed to update ceph-mgr failures: '
+                                    'args=%s', str(args))
+                        self.send_response(client, request, 'ERR')
+                elif cmd == 'ping-failures':
+                    try:
+                        self.ping_failure_count = int(args[0])
+                        self.send_response(client, request, 'OK')
+                    except (IndexError, ValueError):
+                        LOG.warning('Failed to update ping failures: '
+                                    'args=%s', str(args))
+                        self.send_response(client, request, 'ERR')
+            except Exception as err:
+                LOG.exception(err)
+
+    @staticmethod
+    def send_response(client, request, response):
+        try:
+            client.send(response)
+        except socket.error as err:
+            LOG.warning('Failed to send response back. '
+                        'request=%s, response=%s, reason=%s',
+                        request, response, err)
+
+    def status(self):
+        if not self.restful_plugin_url:
+            if self.ceph_mgr_failure_count < CONFIG.ceph_mgr_fail_count_report_error \
+               and self.ping_failure_count < CONFIG.ping_fail_count_report_error:
+                LOG.debug('Monitor is starting services. Report status OK')
+                return 'OK'
+            LOG.debug('Too many failures: '
+                      'ceph_mgr=%d < %d, ping=%d < %d. '
+                      'Report status ERR',
+                      self.ceph_mgr_failure_count,
+                      CONFIG.ceph_mgr_fail_count_report_error,
+                      self.ping_failure_count,
+                      CONFIG.ping_fail_count_report_error)
+            return 'ERR.down'
+        try:
+            self.restful_plugin_ping()
+            LOG.debug('Restful plugin ping successful. Report status OK')
+            return 'OK'
+        except (CommandFailed, RestApiPingFailed):
+            if self.ceph_mgr_failure_count < CONFIG.ceph_mgr_fail_count_report_error \
+               and self.ping_failure_count < CONFIG.ping_fail_count_report_error:
+                LOG.info('Restful plugin does not respond but failure '
+                         'count is within acceptable limits: '
+                         ' ceph_mgr=%d < %d, ping=%d < %d. '
+                         'Report status OK',
+                         self.ceph_mgr_failure_count,
+                         CONFIG.ceph_mgr_fail_count_report_error,
+                         self.ping_failure_count,
+                         CONFIG.ping_fail_count_report_error)
+                return 'OK'
+            LOG.debug('Restful does not respond (ping failure count %d). '
+                      'Report status ERR', self.ping_failure_count)
+            return 'ERR.ping_failed'
+
+    def stop(self):
+        if not self.monitor:
+            return
+        LOG.info('Stop monitor with SIGTERM to process group %d',
+                 self.monitor.pid)
+        try:
+            os.killpg(self.monitor.pid, signal.SIGTERM)
+        except OSError as err:
+            LOG.info('Stop monitor failed: reason=%s', str(err))
+            return
+        time.sleep(CONFIG.ceph_mgr_kill_delay_sec)
+        LOG.info('Stop monitor with SIGKILL to process group %d',
+                 self.monitor.pid)
+        try:
+            os.killpg(self.monitor.pid, signal.SIGKILL)
+            os.waitpid(self.monitor.pid, 0)
+        except OSError as err:
+            LOG.info('Stop monitor failed: reason=%s', str(err))
+            return
+        LOG.info('Monitor stopped: pid=%d', self.monitor.pid)
+
+    @contextlib.contextmanager
+    def service_lock(self):
+        LOG.info('Take service lock: path=%s', CONFIG.service_lock)
+        try:
+            os.makedirs(os.path.dirname(CONFIG.service_lock))
+        except OSError:
+            pass
+        lock_file = open(CONFIG.service_lock, 'w')
+        try:
+            fcntl.flock(lock_file.fileno(),
+                        fcntl.LOCK_EX | fcntl.LOCK_NB)
+        except (IOError, OSError) as err:
+            if err.errno == errno.EAGAIN:
+                raise ServiceAlreadyStarted()
+            else:
+                raise ServiceLockFailed(reason=str(err))
+        # even if we have the lock here there might be another service manager
+        # running whose CONFIG.ceph_mgr_rundir was removed before starting
+        # this instance. Make sure there is only one service manager running
+        self.stop_other_service_managers()
+        try:
+            yield
+        finally:
+            os.unlink(CONFIG.service_lock)
+            lock_file.close()
+            LOG.info('Release service lock: path=%s', CONFIG.service_lock)
+
+    def stop_other_service_managers(self):
+        service = os.path.join('/etc/init.d', CONFIG.service_name)
+        for p in psutil.process_iter():
+            if p.cmdline()[:2] not in [[service], ['/usr/bin/python', service]]:
+                continue
+            if p.pid == os.getpid():
+                continue
+            p.kill()
+
+    @contextlib.contextmanager
+    def service_socket(self):
+        LOG.info('Create service socket')
+        try:
+            self.command = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
+        except socket.error as err:
+            raise ServiceNoSocket(reason=str(err))
+        LOG.info('Remove existing socket files')
+        try:
+            os.unlink(CONFIG.service_socket)
+        except OSError:
+            pass
+        LOG.info('Bind service socket: path=%s', CONFIG.service_socket)
+        try:
+            self.command.bind(CONFIG.service_socket)
+        except socket.error as err:
+            raise ServiceSocketBindFailed(
+                path=CONFIG.service_socket, reason=str(err))
+        try:
+            yield
+        finally:
+            LOG.info('Close service socket and remove file: path=%s',
+                     CONFIG.service_socket)
+            self.command.close()
+            os.unlink(CONFIG.service_socket)
+
+    @contextlib.contextmanager
+    def service_pid_file(self):
+        LOG.info('Update service pid file: path=%s', CONFIG.service_pid_file)
+        try:
+            pid_file = open(CONFIG.service_pid_file, 'w')
+            pid_file.write(str(os.getpid()))
+            pid_file.flush()
+        except OSError as err:
+            raise ServiceNoPidFile(
+                path=CONFIG.service_pid_file, reason=str(err))
+        try:
+            yield
+        finally:
+            LOG.info('Remove service pid file: path=%s',
+                     CONFIG.service_pid_file)
+            try:
+                os.unlink(CONFIG.service_pid_file)
+            except OSError:
+                pass
+
+    def start_monitor(self):
+        LOG.info('Start monitor loop')
+        self.monitor = multiprocessing.Process(target=self.monitor_loop)
+        self.monitor.start()
+
+    def stop_unmanaged_ceph_mgr(self):
+        LOG.info('Stop unmanaged running ceph-mgr processes')
+        service_name = os.path.basename(CONFIG.ceph_mgr_service)
+        if self.ceph_mgr:
+            psutil_terminate_kill(
+                [proc for proc in psutil.process_iter()
+                 if (proc.name() == service_name
+                     and proc.pid != self.ceph_mgr.pid)],
+                CONFIG.ceph_mgr_kill_delay_sec)
+        else:
+            psutil_terminate_kill(
+                [proc for proc in psutil.process_iter()
+                 if proc.name() == service_name],
+                CONFIG.ceph_mgr_kill_delay_sec)
+
+    def monitor_loop(self):
+
+        """Bring up and monitor ceph-mgr restful plugin.
+
+           Steps:
+           - wait for Ceph cluster to become available
+           - configure and start ceph-mgr
+           - configure and enable restful plugin
+           - send periodic requests to REST API
+           - recover from failures
+
+           Note: because this runs as a separate process it
+               must send status updates to service monitor
+               via control socket for: ping_failure_count,
+               restful_plugin_url and certificate.
+        """
+
+        # Promote to process group leader so parent (service monitor)
+        # can kill the monitor plus processes spawned by it. Otherwise
+        # children of monitor_loop() will keep running in background and
+        # will be reaped by init when they finish but by then they might
+        # interfere with any new service instance.
+        os.setpgrp()
+
+        # Ignoring SIGTERM here ensures process group is not reused by
+        # the time parent (service monitor) issues the final SIGKILL.
+        signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+        while True:
+            try:
+                # steps to configure/start ceph-mgr and restful plugin
+                self.ceph_fsid_get()
+                self.ceph_mgr_auth_create()
+                self.restful_plugin_set_server_port()
+                self.restful_plugin_create_certificate()
+                self.ceph_mgr_start()
+                self.restful_plugin_enable()
+                self.restful_plugin_create_admin_key()
+                self.restful_plugin_get_url()
+                self.restful_plugin_get_certificate()
+
+                # REST API should be available now
+                # start making periodic requests (ping)
+                while True:
+                    try:
+                        self.restful_plugin_ping()
+                        self.ping_failure_count = 0
+                        self.request_update_ping_failures(
+                            self.ping_failure_count)
+                        self.ceph_mgr_failure_count = 0
+                        self.request_update_ceph_mgr_failures(
+                            self.ceph_mgr_failure_count)
+                        time.sleep(CONFIG.restful_plugin_ping_delay_sec)
+                        continue
+                    except RestApiPingFailed as err:
+                        LOG.warning(str(err))
+
+                    LOG.info('REST API ping failure count=%d',
+                             self.ping_failure_count)
+                    self.ping_failure_count += 1
+                    self.request_update_ping_failures(
+                        self.ping_failure_count)
+
+                    # maybe request failed because ceph-mgr is not running
+                    if not self.ceph_mgr_is_running():
+                        self.ceph_mgr_failure_count += 1
+                        self.request_update_ceph_mgr_failures(
+                            self.ceph_mgr_failure_count)
+                        self.ceph_mgr_start()
+                        time.sleep(CONFIG.ceph_mgr_grace_period_sec)
+                        continue
+
+                    # maybe request failed because cluster health is not ok
+                    if not self.ceph_fsid_get():
+                        LOG.info('Unable to get cluster fsid. '
+                                 'Sleep for a while')
+                        time.sleep(CONFIG.cluster_grace_period_sec)
+                        break
+
+                    # too many failures? Restart ceph-mgr and go again
+                    # through configuration steps
+                    if (self.ping_failure_count
+                            % CONFIG.ping_fail_count_restart_mgr == 0):
+                        LOG.info('Too many consecutive REST API failures. '
+                                 'Restart ceph-mgr. Update service '
+                                 'url and certificate')
+                        self.ceph_mgr_stop()
+                        self.restful_plugin_url = ''
+                        self.request_update_plugin_url(self.restful_plugin_url)
+                        self.certificate = ''
+                        self.request_update_certificate(self.certificate)
+                        break
+
+                    time.sleep(CONFIG.restful_plugin_ping_delay_sec)
+
+            except CommandFailed as err:
+                LOG.warning(str(err))
+                time.sleep(CONFIG.cluster_grace_period_sec)
+            except CommandTimeout as err:
+                LOG.warning(str(err))
+            except (CephMgrStartFailed, CephRestfulPluginFailed) as err:
+                LOG.warning(str(err))
+                self.ceph_mgr_failure_count += 1
+                self.request_update_ceph_mgr_failures(
+                    self.ceph_mgr_failure_count)
+                time.sleep(CONFIG.ceph_mgr_grace_period_sec)
+            except Exception as err:
+                LOG.exception(err)
+                time.sleep(CONFIG.cluster_grace_period_sec)
+
+    @staticmethod
+    def run_with_timeout(command, timeout, stderr=subprocess.STDOUT):
+        try:
+            LOG.info('Run command: %s', ' '.join(command))
+            return subprocess.check_output(
+                ['/usr/bin/timeout', str(timeout)] + command,
+                stderr=stderr, shell=False).strip()
+        except subprocess.CalledProcessError as err:
+            if err.returncode == GNU_TIMEOUT_EXPIRED_RETCODE:
+                raise CommandTimeout(command=err.cmd, timeout=timeout)
+            raise CommandFailed(command=err.cmd, reason=str(err),
+                                out=err.output)
+
+    def ceph_fsid_get(self):
+        return self.run_with_timeout(['/usr/bin/ceph', 'fsid'],
+                                     CONFIG.ceph_cli_timeout_sec)
+
+    def ceph_mgr_has_auth(self):
+        path = '{}/ceph-{}'.format(
+            CONFIG.ceph_mgr_confdir, CONFIG.ceph_mgr_identity)
+        try:
+            os.makedirs(path)
+        except OSError as err:
+            pass
+        try:
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'auth', 'get',
+                 'mgr.{}'.format(CONFIG.ceph_mgr_identity),
+                 '-o', '{}/keyring'.format(path)],
+                CONFIG.ceph_cli_timeout_sec)
+            return True
+        except CommandFailed as err:
+            if 'ENOENT' in str(err):
+                return False
+            raise
+
+    def ceph_mgr_auth_create(self):
+        if self.ceph_mgr_has_auth():
+            return
+        LOG.info('Create ceph-mgr authentication')
+        self.run_with_timeout(
+            ['/usr/bin/ceph', 'auth', 'get-or-create',
+             'mgr.{}'.format(CONFIG.ceph_mgr_identity),
+             'mon', 'allow *', 'osd', 'allow *'],
+            CONFIG.ceph_cli_timeout_sec)
+
+    def ceph_mgr_is_running(self):
+        if not self.ceph_mgr:
+            return None
+        try:
+            self.ceph_mgr.wait(timeout=0)
+        except psutil.TimeoutExpired:
+            return True
+        return False
+
+    def ceph_mgr_start(self):
+        if self.ceph_mgr_is_running():
+            return
+        self.stop_unmanaged_ceph_mgr()
+        LOG.info('Start ceph-mgr daemon')
+        try:
+            with open(os.devnull, 'wb') as null:
+                self.ceph_mgr = psutil.Popen(
+                    [CONFIG.ceph_mgr_service,
+                     '--cluster', CONFIG.ceph_mgr_cluster,
+                     '--conf', CONFIG.ceph_mgr_config,
+                     '--id', CONFIG.ceph_mgr_identity,
+                     '-f'],
+                    close_fds=True,
+                    stdout=null,
+                    stderr=null,
+                    shell=False)
+        except (OSError, ValueError) as err:
+            raise CephMgrStartFailed(reason=str(err))
+        time.sleep(CONFIG.ceph_mgr_grace_period_sec)
+
+    def ceph_mgr_stop(self):
+        if not self.ceph_mgr:
+            return
+        LOG.info('Stop ceph-mgr')
+        psutil_terminate_kill(self.ceph_mgr, CONFIG.ceph_mgr_kill_delay_sec)
+
+    def restful_plugin_has_server_port(self):
+        try:
+            with open(os.devnull, 'wb') as null:
+                out = self.run_with_timeout(
+                    ['/usr/bin/ceph', 'config-key', 'get',
+                     'mgr/restful/server_port'],
+                    CONFIG.ceph_cli_timeout_sec, stderr=null)
+            if out == str(CONFIG.restful_plugin_port):
+                return True
+            LOG.warning('Restful plugin port mismatch: '
+                        'current=%s, expected=%d', out,
+                        CONFIG.restful_plugin_port)
+        except CommandFailed as err:
+            LOG.warning('Failed to get restful plugin port: '
+                        'reason=%s', str(err))
+        return False
+
+    def restful_plugin_set_server_port(self):
+        if self.restful_plugin_has_server_port():
+            return
+        LOG.info('Set restful plugin port=%d', CONFIG.restful_plugin_port)
+        self.run_with_timeout(
+            ['/usr/bin/ceph', 'config-key', 'set',
+             'mgr/restful/server_port', str(CONFIG.restful_plugin_port)],
+            CONFIG.ceph_cli_timeout_sec)
+
+    def restful_plugin_has_admin_key(self):
+        try:
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'get',
+                 'mgr/restful/keys/admin'],
+                CONFIG.ceph_cli_timeout_sec)
+            return True
+        except CommandFailed:
+            pass
+        return False
+
+    def restful_plugin_create_admin_key(self):
+        if self.restful_plugin_has_admin_key():
+            return
+        LOG.info('Create restful plugin admin key')
+        self.run_with_timeout(
+            ['/usr/bin/ceph', 'restful',
+             'create-key', 'admin'],
+            CONFIG.ceph_cli_timeout_sec)
+
+    def restful_plugin_has_certificate(self):
+        try:
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'get',
+                 'config/mgr/restful/{}/crt'.format(CONFIG.ceph_mgr_identity)],
+                CONFIG.ceph_cli_timeout_sec)
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'get',
+                 'mgr/restful/{}/crt'.format(CONFIG.ceph_mgr_identity)],
+                CONFIG.ceph_cli_timeout_sec)
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'get',
+                 'config/mgr/restful/{}/key'.format(CONFIG.ceph_mgr_identity)],
+                CONFIG.ceph_cli_timeout_sec)
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'get',
+                 'mgr/restful/{}/key'.format(CONFIG.ceph_mgr_identity)],
+                CONFIG.ceph_cli_timeout_sec)
+            return True
+        except CommandFailed:
+            pass
+        return False
+
+    def restful_plugin_create_certificate(self):
+        if self.restful_plugin_has_certificate():
+            return
+        LOG.info('Create restful plugin self signed certificate')
+        path = tempfile.mkdtemp()
+        try:
+            try:
+                with tempfile.NamedTemporaryFile() as restful_cnf:
+                    restful_cnf.write((
+                        '[req]\n'
+                        'req_extensions = v3_ca\n'
+                        'distinguished_name = req_distinguished_name\n'
+                        '[v3_ca]\n'
+                        'subjectAltName=DNS:{}\n'
+                        'basicConstraints = CA:true\n'
+                        '[ req_distinguished_name ]\n'
+                        '0.organizationName = IT\n'
+                        'commonName = ceph-restful\n').format(
+                            CONFIG.ceph_mgr_identity))
+                    restful_cnf.flush()
+                    subprocess.check_call([
+                        '/usr/bin/openssl', 'req', '-new', '-nodes', '-x509',
+                        '-subj', '/O=IT/CN=' + CONFIG.ceph_mgr_identity,
+                        '-days', '3650',
+                        '-config', restful_cnf.name,
+                        '-out', os.path.join(path, 'crt'),
+                        '-keyout', os.path.join(path, 'key'),
+                        '-extensions', 'v3_ca'])
+            except subprocess.CalledProcessError as err:
+                raise CommandFailed(
+                    command=' '.join(err.cmd),
+                    reason='failed to generate self-signed certificate: {}'.format(str(err)),
+                    out=err.output)
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'set',
+                 'config/mgr/restful/{}/crt'.format(CONFIG.ceph_mgr_identity),
+                 '-i', os.path.join(path, 'crt')],
+                CONFIG.ceph_cli_timeout_sec)
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'set',
+                 'mgr/restful/{}/crt'.format(CONFIG.ceph_mgr_identity),
+                 '-i', os.path.join(path, 'crt')],
+                CONFIG.ceph_cli_timeout_sec)
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'set',
+                 'config/mgr/restful/{}/key'.format(CONFIG.ceph_mgr_identity),
+                 '-i', os.path.join(path, 'key')],
+                CONFIG.ceph_cli_timeout_sec)
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'config-key', 'set',
+                 'mgr/restful/{}/key'.format(CONFIG.ceph_mgr_identity),
+                 '-i', os.path.join(path, 'key')],
+                CONFIG.ceph_cli_timeout_sec)
+        finally:
+            shutil.rmtree(path)
+
+    def restful_plugin_is_enabled(self):
+        command = ['/usr/bin/ceph', 'mgr', 'module', 'ls',
+                   '--format', 'json']
+        with open(os.devnull, 'wb') as null:
+            out = self.run_with_timeout(
+                command, CONFIG.ceph_cli_timeout_sec, stderr=null)
+        try:
+            if 'restful' in json.loads(out)['enabled_modules']:
+                return True
+        except ValueError as err:
+            raise CommandFailed(
+                command=' '.join(command),
+                reason='unable to decode json: {}'.format(err), out=out)
+        except KeyError as err:
+            raise CommandFailed(
+                command=' '.join(command),
+                reason='missing expected key: {}'.format(err), out=out)
+        return False
+
+    def restful_plugin_enable(self):
+        if not self.restful_plugin_is_enabled():
+            LOG.info('Enable restful plugin')
+            self.run_with_timeout(
+                ['/usr/bin/ceph', 'mgr',
+                 'module', 'enable', 'restful'],
+                CONFIG.ceph_cli_timeout_sec)
+        time.sleep(CONFIG.restful_plugin_grace_period_sec)
+
+    def restful_plugin_get_url(self):
+        command = ['/usr/bin/ceph', 'mgr', 'services',
+                   '--format', 'json']
+        with open(os.devnull, 'wb') as null:
+            out = self.run_with_timeout(
+                command, CONFIG.ceph_cli_timeout_sec, stderr=null)
+        try:
+            self.restful_plugin_url = json.loads(out)['restful']
+        except ValueError as err:
+            raise CephRestfulPluginFailed(
+                reason='unable to decode json: {} output={}'.format(err, out))
+        except KeyError as err:
+            raise CephRestfulPluginFailed(
+                reason='missing expected key: {} in output={}'.format(err, out))
+        self.request_update_plugin_url(self.restful_plugin_url)
+
+    def restful_plugin_get_certificate(self):
+        command = ['/usr/bin/ceph', 'config-key', 'get',
+                   'config/mgr/restful/{}/crt'.format(CONFIG.ceph_mgr_identity)]
+        with open(os.devnull, 'wb') as null:
+            certificate = self.run_with_timeout(
+                command, CONFIG.ceph_cli_timeout_sec, stderr=null)
+            with open(CONFIG.restful_plugin_cert_path, 'wb') as cert_file:
+                cert_file.write(certificate)
+            self.certificate = CONFIG.restful_plugin_cert_path
+            self.request_update_certificate(
+                self.certificate)
+
+    def restful_plugin_ping(self):
+        if not self.restful_plugin_url:
+            raise RestApiPingFailed(reason='missing service url')
+        if not self.certificate:
+            raise RestApiPingFailed(reason='missing certificate')
+        LOG.debug('Ping restful plugin: url=%s', self.restful_plugin_url)
+        try:
+            response = requests.request(
+                'GET', self.restful_plugin_url, verify=False,
+                timeout=CONFIG.rest_api_timeout_sec)
+            if not response.ok:
+                raise RestApiPingFailed(
+                    reason='response not ok ({})'.format(response))
+            LOG.debug('Ping restful plugin OK')
+        except (requests.ConnectionError,
+                requests.Timeout,
+                requests.HTTPError) as err:
+            raise RestApiPingFailed(reason=str(err))
+
+    @staticmethod
+    def _make_client_socket():
+        sock = socket.socket(
+            socket.AF_UNIX, socket.SOCK_SEQPACKET)
+        sock.settimeout(2 * CONFIG.rest_api_timeout_sec)
+        sock.connect(CONFIG.service_socket)
+        return sock
+
+    @staticmethod
+    def request_status():
+        try:
+            with contextlib.closing(
+                    ServiceMonitor._make_client_socket()) as sock:
+                sock.send('status')
+                status = sock.recv(CONFIG.service_socket_bufsize)
+                LOG.debug('Status %s', status)
+                return status.startswith('OK')
+        except socket.error as err:
+            LOG.error('Status error: reason=%s', err)
+            return False
+
+    @staticmethod
+    def request_stop():
+        try:
+            with contextlib.closing(
+                    ServiceMonitor._make_client_socket()) as sock:
+                sock.send('stop')
+                response = sock.recv(CONFIG.service_socket_bufsize)
+                LOG.debug('Stop response: %s', response)
+                return True
+        except socket.error as err:
+            LOG.error('Stop error: reason=%s', err)
+            return False
+
+    @staticmethod
+    def request_update_ceph_mgr_failures(count):
+        try:
+            with contextlib.closing(
+                    ServiceMonitor._make_client_socket()) as sock:
+                sock.send('ceph-mgr-failures {}'.format(count))
+                sock.recv(CONFIG.service_socket_bufsize)
+                return True
+        except socket.error as err:
+            LOG.error('Update ceph-mgr failures error: reason=%s', err)
+            return False
+
+    @staticmethod
+    def request_update_ping_failures(count):
+        try:
+            with contextlib.closing(
+                    ServiceMonitor._make_client_socket()) as sock:
+                sock.send('ping-failures {}'.format(count))
+                sock.recv(CONFIG.service_socket_bufsize)
+                return True
+        except socket.error as err:
+            LOG.error('Update ping failures error: reason=%s', err)
+            return False
+
+    @staticmethod
+    def request_update_plugin_url(url):
+        try:
+            with contextlib.closing(
+                    ServiceMonitor._make_client_socket()) as sock:
+                sock.send('restful-url {}'.format(url))
+                sock.recv(CONFIG.service_socket_bufsize)
+                return True
+        except socket.error as err:
+            LOG.error('Update restful url error: reason=%s', err)
+            return False
+
+    @staticmethod
+    def request_update_certificate(path):
+        try:
+            with contextlib.closing(
+                    ServiceMonitor._make_client_socket()) as sock:
+                sock.send('certificate {}'.format(path))
+                sock.recv(CONFIG.service_socket_bufsize)
+                return True
+        except socket.error as err:
+            LOG.error('Update certificate error: reason=%s', err)
+            return False
+
+
+class InitWrapper(object):
+
+    """Handle System V init script actions: start, stop, restart, etc. """
+
+    def __init__(self):
+
+        """Dispatch command line action to the corresponding function.
+
+           Candidate action functions are all class methods except ones
+           that start with an underscore.
+        """
+
+        parser = argparse.ArgumentParser()
+        actions = [m[0]
+                   for m in inspect.getmembers(self)
+                   if (inspect.ismethod(m[1])
+                       and not m[0].startswith('_'))]
+        parser.add_argument(
+            'action',
+            choices=actions)
+        self.args = parser.parse_args()
+        getattr(self, self.args.action)()
+
+    def start(self):
+
+        """Start ServiceMonitor as a daemon unless one is already running.
+
+           Use a pipe to report monitor status back to this process.
+        """
+
+        pipe = os.pipe()
+        child = os.fork()
+        if child == 0:
+            os.close(pipe[0])
+            with daemon.DaemonContext(files_preserve=[pipe[1]]):
+                # prevent duplication of messages in log
+                global LOG
+                LOG = setup_logging(cleanup_handlers=True)
+                try:
+                    monitor = ServiceMonitor()
+                    status = 'OK'
+                except ServiceAlreadyStarted:
+                    os.write(pipe[1], 'OK')
+                    os.close(pipe[1])
+                    return
+                except Exception as err:
+                    status = str(err)
+                os.write(pipe[1], status)
+                os.close(pipe[1])
+                if status == 'OK':
+                    try:
+                        monitor.run()
+                    except ServiceException as err:
+                        LOG.warning(str(err))
+                    except Exception as err:
+                        LOG.exception('Service monitor error: reason=%s', err)
+        else:
+            os.close(pipe[1])
+            try:
+                status = os.read(pipe[0], CONFIG.service_socket_bufsize)
+                if status == 'OK':
+                    sys.exit(0)
+                else:
+                    LOG.warning('Service monitor failed to start: '
+                                'status=%s', status)
+            except IOError as err:
+                LOG.warning('Failed to read monitor status: reason=%s', err)
+            os.close(pipe[0])
+            os.waitpid(child, 0)
+            sys.exit(1)
+
+    def stop(self):
+
+        """Tell ServiceMonitor daemon to stop running.
+
+           In case request fails stop ServiceMonitor and ceph_mgr processes
+           using SIGTERM followed by SIGKILL.
+        """
+
+        result = ServiceMonitor.request_stop()
+        if not result:
+            ceph_mgr = os.path.basename(CONFIG.ceph_mgr_service)
+            procs = []
+            for proc in psutil.process_iter():
+                name = proc.name()
+                if name == CONFIG.service_name:
+                    procs.append(proc)
+                if name == ceph_mgr:
+                    procs.append(proc)
+            psutil_terminate_kill(procs, CONFIG.ceph_mgr_kill_delay_sec)
+
+    def restart(self):
+        self.stop()
+        self.start()
+
+    def force_reload(self):
+        self.stop()
+        self.start()
+
+    def reload(self):
+        self.stop()
+        self.start()
+
+    def status(self):
+
+        """Report status from ServiceMonitor.
+
+           We don't just try to access REST API here because ServiceMonitor may
+           be in the process of starting/configuring ceph-mgr and restful
+           plugin in which case we report OK to avoid being restarted by SM.
+        """
+
+        status = ServiceMonitor.request_status()
+        sys.exit(0 if status is True else 1)
+
+
+if __name__ == '__main__':
+    InitWrapper()
diff --git a/meta-stx/recipes-extended/ceph/files/mgr-restful-plugin.service b/meta-stx/recipes-extended/ceph/files/mgr-restful-plugin.service
new file mode 100644 (file)
index 0000000..b3e61f0
--- /dev/null
@@ -0,0 +1,15 @@
+[Unit]
+Description=Ceph MGR RESTful API Plugin
+After=network-online.target sw-patch.service
+
+[Service]
+Type=forking
+Restart=no
+KillMode=process
+RemainAfterExit=yes
+ExecStart=/etc/rc.d/init.d/mgr-restful-plugin start
+ExecStop=/etc/rc.d/init.d/mgr-restful-plugin stop
+ExecReload=/etc/rc.d/init.d/mgr-restful-plugin reload
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-extended/ceph/files/starlingx-docker-override.conf b/meta-stx/recipes-extended/ceph/files/starlingx-docker-override.conf
new file mode 100644 (file)
index 0000000..3845a85
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+[Service]
+ExecStopPost=/usr/sbin/ceph-preshutdown.sh
+
diff --git a/meta-stx/recipes-extended/cloud-init/cloud-init_0.7.6.bbappend b/meta-stx/recipes-extended/cloud-init/cloud-init_0.7.6.bbappend
new file mode 100644 (file)
index 0000000..62618fe
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+       file://cloud-init-interactive-parted.patch \
+       "
diff --git a/meta-stx/recipes-extended/cloud-init/files/cloud-init-interactive-parted.patch b/meta-stx/recipes-extended/cloud-init/files/cloud-init-interactive-parted.patch
new file mode 100644 (file)
index 0000000..facd624
--- /dev/null
@@ -0,0 +1,25 @@
+From 70b90db2364256fe8ba7e368cbd96cd53b246cb3 Mon Sep 17 00:00:00 2001
+From: rpm-build <rpm-build>
+Date: Wed, 8 Nov 2017 11:02:42 -0500
+Subject: [PATCH] cloud-init-interactive-parted
+
+---
+ cloudinit/config/cc_growpart.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
+index 832bb3f..71c4f7f 100644
+--- a/cloudinit/config/cc_growpart.py
++++ b/cloudinit/config/cc_growpart.py
+@@ -182,7 +182,7 @@ class ResizeGpart(object):
+         before = get_size(partdev)
+         try:
+-            util.subp(["gpart", "resize", "-i", partnum, diskdev])
++            util.subp(["resizepart.sh", diskdev, partnum])
+         except util.ProcessExecutionError as e:
+             util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
+             raise ResizeFailedException(e)
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-extended/cloud-init/files/first_boot.patch b/meta-stx/recipes-extended/cloud-init/files/first_boot.patch
new file mode 100644 (file)
index 0000000..bbcfe83
--- /dev/null
@@ -0,0 +1,35 @@
+---
+ cloudinit/sources/DataSourceConfigDrive.py |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/cloudinit/sources/DataSourceConfigDrive.py
++++ b/cloudinit/sources/DataSourceConfigDrive.py
+@@ -52,12 +52,13 @@ class ConfigDriveHelper(object):
+     def __init__(self, distro):
+         self.distro = distro
+-    def on_first_boot(self, data):
++    def on_first_boot(self, data, dsmode="local"):
+         if not data:
+             data = {}
+-        if 'network_config' in data:
+-            LOG.debug("Updating network interfaces from config drive")
+-            self.distro.apply_network(data['network_config'])
++        if dsmode == "local":
++            if 'network_config' in data:
++                LOG.debug("Updating network interfaces from config drive")
++                self.distro.apply_network(data['network_config'])
+         files = data.get('files')
+         if files:
+             LOG.debug("Writing %s injected files", len(files))
+@@ -214,8 +215,8 @@ class DataSourceConfigDrive(sources.Data
+         # instance-id
+         prev_iid = get_previous_iid(self.paths)
+         cur_iid = md['instance-id']
+-        if prev_iid != cur_iid and self.dsmode == "local":
+-            self.helper.on_first_boot(results)
++        if prev_iid != cur_iid:
++            self.helper.on_first_boot(results, dsmode=self.dsmode)
+         # dsmode != self.dsmode here if:
+         #  * dsmode = "pass",  pass means it should only copy files and then
diff --git a/meta-stx/recipes-extended/collectd/collectd_%.bbappend b/meta-stx/recipes-extended/collectd/collectd_%.bbappend
new file mode 100644 (file)
index 0000000..7a517b2
--- /dev/null
@@ -0,0 +1,22 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += "file://collectd-fix-for-LIBPYTHON_LDFLAGS.patch"
+
+PACKAGECONFIG += "python"
+
+PACKAGECONFIG[python] = "--enable-python --with-libpython,--disable-python --with-libpython=no,python"
diff --git a/meta-stx/recipes-extended/collectd/files/collectd-fix-for-LIBPYTHON_LDFLAGS.patch b/meta-stx/recipes-extended/collectd/files/collectd-fix-for-LIBPYTHON_LDFLAGS.patch
new file mode 100644 (file)
index 0000000..0bee88f
--- /dev/null
@@ -0,0 +1,32 @@
+From b619d111a63d83b4d4bfa3f2c6c28cbd94ba874b Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Sat, 11 Apr 2020 21:55:08 +0800
+Subject: [PATCH] fix for LIBPYTHON_LDFLAGS
+
+There is a bug in oe-core's python-native: the sysconfig module cannot
+get the correct Py_ENABLE_SHARED, which causes "python-config --ldflags"
+to add the prefix/lib/pythonX.Y/config dir, which is the static lib dir.
+Here is a workaround to remove that dir; we may need to fix the issue
+in oe-core later.
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ configure.ac | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index b5f8e87..8a2d04f 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -4641,7 +4641,7 @@ if test "$PYTHON_CONFIG" != ""; then
+   if test $? -ne 0; then
+     with_libpython="no"
+   fi
+-  LIBPYTHON_LDFLAGS="`${PYTHON_CONFIG} --ldflags`"
++  LIBPYTHON_LDFLAGS="`${PYTHON_CONFIG} --ldflags|sed 's/-L.*config //'`"
+   if test $? -ne 0; then
+     with_libpython="no"
+   fi
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/ibsh/ibsh_0.3e.bb b/meta-stx/recipes-extended/ibsh/ibsh_0.3e.bb
new file mode 100644 (file)
index 0000000..b6965d6
--- /dev/null
@@ -0,0 +1,38 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "ibsh Iron Bar Shell"
+DESCRIPTION = "CGCS add default users types"
+SECTION = "base/shell"
+HOMEPAGE = "https://sourceforge.net/projects/ibsh/"
+
+LICENSE = "GPLv2+"
+LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \
+                    file://COPYRIGHT;md5=e5ef2fe07fcba90ea59f9f1a61f7526b \
+"
+
+SRC_URI[md5sum] = "41ea08a03c6bd805dff1a5ff25ea1d5b"
+SRC_URI[sha256sum] = "4c40d0841527c76fc75ccc27d32b575543d02d661973fc3561004efb6033206d"
+SRC_URI = "https://downloads.sourceforge.net/project/ibsh/ibsh/ibsh-0.3e/ibsh-0.3e.tar.gz"
+
+do_install() {
+       cd ${S}
+       install -d -m0755 ${D}/${base_bindir}
+       install -d -m0755 ${D}/${sysconfdir}/ibsh/cmds/
+       install -d -m0755 ${D}/${sysconfdir}/ibsh/xtns/
+       install -m0755 ibsh ${D}/${base_bindir}
+       install -m0644 globals.cmds ${D}/${sysconfdir}/ibsh
+       install -m0644 globals.xtns ${D}/${sysconfdir}/ibsh
+}
diff --git a/meta-stx/recipes-extended/libpwquality/libpwquality_1.4.0.bbappend b/meta-stx/recipes-extended/libpwquality/libpwquality_1.4.0.bbappend
new file mode 100644 (file)
index 0000000..205640c
--- /dev/null
@@ -0,0 +1,22 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append() {
+       install -d ${D}/${baselib}/security/
+       ln -fs ${libdir}/security/pam_pwquality.so ${D}/${baselib}/security/
+}
+
+FILES_${PN} += "/${baselib}/security/"
+INSANE_SKIP_${PN} += "dev-so"
diff --git a/meta-stx/recipes-extended/lsb/files/service-redirect-to-restart-for-reload.patch b/meta-stx/recipes-extended/lsb/files/service-redirect-to-restart-for-reload.patch
new file mode 100644 (file)
index 0000000..16ad52e
--- /dev/null
@@ -0,0 +1,28 @@
+From cf72e404927c807e4352d867f73f2a2c409fc524 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Mon, 13 Apr 2020 23:38:43 +0800
+Subject: [PATCH] service: redirect to restart for reload
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ service | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/service b/service
+index 357c2a6..1f3976c 100755
+--- a/service
++++ b/service
+@@ -70,6 +70,10 @@ while [ $# -gt 0 ]; do
+    esac
+ done
++if [ "${ACTION}" = "reload" ]; then
++   ACTION="restart"
++fi
++
+ if [ -f "${SERVICEDIR}/${SERVICE}" ]; then
+    # LSB daemons that dies abnormally in systemd looks alive in systemd's eyes due to RemainAfterExit=yes
+    # lets reap them before next start
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/lsb/lsbinitscripts_9.79.bbappend b/meta-stx/recipes-extended/lsb/lsbinitscripts_9.79.bbappend
new file mode 100644 (file)
index 0000000..50da7ee
--- /dev/null
@@ -0,0 +1,40 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += "file://service-redirect-to-restart-for-reload.patch"
+
+DEPENDS += "\
+    gettext-native \
+    glib-2.0 \
+    popt \
+"
+
+unset do_configure[noexec]
+unset do_compile[noexec]
+
+do_install_append() {
+       install -m 0755 -d ${D}/${sysconfdir}/profile.d/
+       install -m 0644 ${S}/lang.sh  ${D}${sysconfdir}/profile.d/lang.sh
+       install -m 0755 -d ${D}/${base_sbindir}
+       install -m 0755 ${S}/src/consoletype ${D}/${base_sbindir}
+
+       install -m 0755 -d ${D}/${bindir}
+       install -m 0755 ${S}/service ${D}/${bindir}
+       sed -i -e 's|${bindir}|${base_bindir}|' ${D}/${bindir}/service
+}
+
+FILES_${PN}_append = " ${sysconfdir}/profile.d/lang.sh"
diff --git a/meta-stx/recipes-extended/postgresql/postgresql_%.bbappend b/meta-stx/recipes-extended/postgresql/postgresql_%.bbappend
new file mode 100644 (file)
index 0000000..41d1c03
--- /dev/null
@@ -0,0 +1,17 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+COMPUTE_IP="127.0.0.1"
+CONTROLLER_IP="127.0.0.1"
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-common-0001-Avoid-RPC-roundtrips-while-listing-items.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-common-0001-Avoid-RPC-roundtrips-while-listing-items.patch
new file mode 100644 (file)
index 0000000..e6c6fe9
--- /dev/null
@@ -0,0 +1,388 @@
+From: Alexey Lebedeff <alebedev@mirantis.com>
+Date: Wed, 9 Mar 2016 14:55:02 +0300
+Subject: [PATCH] Avoid RPC roundtrips while listing items
+
+- Emit info about particular items in parallel on every node, with
+  results delivered directly to a `rabbitmqctl` instance.
+- `rabbit_control_misc:wait_for_info_messages/5` can wait for results of
+  more than one emitting map.
+- Stop passing arround InfoItemKeys in
+  `rabbit_control_misc:wait_for_info_messages/5`, the same information
+  could be directly encoded in DisplayFun closure.
+- Add `emit` to function names, to avoid confusion with regular ones
+  which return result directly.
+
+Part of https://github.com/rabbitmq/rabbitmq-server/pull/683
+
+diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl
+index 27b352a..e09e02c 100644
+--- a/src/rabbit_amqqueue.erl
++++ b/src/rabbit_amqqueue.erl
+@@ -25,10 +25,10 @@
+          check_exclusive_access/2, with_exclusive_access_or_die/3,
+          stat/1, deliver/2, requeue/3, ack/3, reject/4]).
+ -export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2,
+-         info_all/6, info_local/1]).
++         emit_info_all/5, list_local/1, info_local/1]).
+ -export([list_down/1]).
+ -export([force_event_refresh/1, notify_policy_changed/1]).
+--export([consumers/1, consumers_all/1,  consumers_all/3, consumer_info_keys/0]).
++-export([consumers/1, consumers_all/1,  emit_consumers_all/4, consumer_info_keys/0]).
+ -export([basic_get/4, basic_consume/10, basic_cancel/4, notify_decorators/1]).
+ -export([notify_sent/2, notify_sent_queue_down/1, resume/2]).
+ -export([notify_down_all/2, notify_down_all/3, activate_limit_all/2, credit/5]).
+@@ -41,7 +41,8 @@
+ %% internal
+ -export([internal_declare/2, internal_delete/1, run_backing_queue/3,
+-         set_ram_duration_target/2, set_maximum_since_use/2]).
++         set_ram_duration_target/2, set_maximum_since_use/2,
++         emit_info_local/4, emit_info_down/4, emit_consumers_local/3]).
+ -include("rabbit.hrl").
+ -include_lib("stdlib/include/qlc.hrl").
+@@ -117,10 +118,6 @@
+ -spec info_all(rabbit_types:vhost()) -> [rabbit_types:infos()].
+ -spec info_all(rabbit_types:vhost(), rabbit_types:info_keys()) ->
+           [rabbit_types:infos()].
+--spec info_all
+-        (rabbit_types:vhost(), rabbit_types:info_keys(), boolean(), boolean(),
+-         reference(), pid()) ->
+-            'ok'.
+ -spec force_event_refresh(reference()) -> 'ok'.
+ -spec notify_policy_changed(rabbit_types:amqqueue()) -> 'ok'.
+ -spec consumers(rabbit_types:amqqueue()) ->
+@@ -130,7 +127,6 @@
+ -spec consumers_all(rabbit_types:vhost()) ->
+           [{name(), pid(), rabbit_types:ctag(), boolean(),
+             non_neg_integer(), rabbit_framing:amqp_table()}].
+--spec consumers_all(rabbit_types:vhost(), reference(), pid()) -> 'ok'.
+ -spec stat(rabbit_types:amqqueue()) ->
+           {'ok', non_neg_integer(), non_neg_integer()}.
+ -spec delete_immediately(qpids()) -> 'ok'.
+@@ -627,16 +623,18 @@ info_all(VHostPath, Items) ->
+     map(list(VHostPath), fun (Q) -> info(Q, Items) end) ++
+         map(list_down(VHostPath), fun (Q) -> info_down(Q, Items, down) end).
+-info_all(VHostPath, Items, NeedOnline, NeedOffline, Ref, AggregatorPid) ->
+-    NeedOnline andalso rabbit_control_misc:emitting_map_with_exit_handler(
+-                         AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list(VHostPath),
+-                         continue),
+-    NeedOffline andalso rabbit_control_misc:emitting_map_with_exit_handler(
+-                          AggregatorPid, Ref, fun(Q) -> info_down(Q, Items, down) end,
+-                          list_down(VHostPath),
+-                          continue),
+-    %% Previous maps are incomplete, finalize emission
+-    rabbit_control_misc:emitting_map(AggregatorPid, Ref, fun(_) -> no_op end, []).
++emit_info_local(VHostPath, Items, Ref, AggregatorPid) ->
++    rabbit_control_misc:emitting_map_with_exit_handler(
++      AggregatorPid, Ref, fun(Q) -> info(Q, Items) end, list_local(VHostPath)).
++
++emit_info_all(Nodes, VHostPath, Items, Ref, AggregatorPid) ->
++    Pids = [ spawn_link(Node, rabbit_amqqueue, emit_info_local, [VHostPath, Items, Ref, AggregatorPid]) || Node <- Nodes ],
++    rabbit_control_misc:await_emitters_termination(Pids).
++
++emit_info_down(VHostPath, Items, Ref, AggregatorPid) ->
++    rabbit_control_misc:emitting_map_with_exit_handler(
++      AggregatorPid, Ref, fun(Q) -> info_down(Q, Items, down) end,
++      list_down(VHostPath)).
+ info_local(VHostPath) ->
+     map(list_local(VHostPath), fun (Q) -> info(Q, [name]) end).
+@@ -664,12 +662,17 @@ consumers_all(VHostPath) ->
+       map(list(VHostPath),
+           fun(Q) -> get_queue_consumer_info(Q, ConsumerInfoKeys) end)).
+-consumers_all(VHostPath, Ref, AggregatorPid) ->
++emit_consumers_all(Nodes, VHostPath, Ref, AggregatorPid) ->
++    Pids = [ spawn_link(Node, rabbit_amqqueue, emit_consumers_local, [VHostPath, Ref, AggregatorPid]) || Node <- Nodes ],
++    rabbit_control_misc:await_emitters_termination(Pids),
++    ok.
++
++emit_consumers_local(VHostPath, Ref, AggregatorPid) ->
+     ConsumerInfoKeys = consumer_info_keys(),
+     rabbit_control_misc:emitting_map(
+       AggregatorPid, Ref,
+       fun(Q) -> get_queue_consumer_info(Q, ConsumerInfoKeys) end,
+-      list(VHostPath)).
++      list_local(VHostPath)).
+ get_queue_consumer_info(Q, ConsumerInfoKeys) ->
+     [lists:zip(ConsumerInfoKeys,
+diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl
+index ab7d38d..837a892 100644
+--- a/src/rabbit_channel.erl
++++ b/src/rabbit_channel.erl
+@@ -56,7 +56,7 @@
+ -export([send_command/2, deliver/4, deliver_reply/2,
+          send_credit_reply/2, send_drained/2]).
+ -export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1,
+-         info_all/3, info_local/1]).
++         emit_info_all/4, info_local/1]).
+ -export([refresh_config_local/0, ready_for_close/1]).
+ -export([force_event_refresh/1]).
+@@ -64,7 +64,7 @@
+          handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
+          prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
+ %% Internal
+--export([list_local/0, deliver_reply_local/3]).
++-export([list_local/0, emit_info_local/3, deliver_reply_local/3]).
+ -export([get_vhost/1, get_user/1]).
+ -record(ch, {
+@@ -220,7 +220,6 @@
+ -spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos().
+ -spec info_all() -> [rabbit_types:infos()].
+ -spec info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()].
+--spec info_all(rabbit_types:info_keys(), reference(), pid()) -> 'ok'.
+ -spec refresh_config_local() -> 'ok'.
+ -spec ready_for_close(pid()) -> 'ok'.
+ -spec force_event_refresh(reference()) -> 'ok'.
+@@ -329,9 +328,16 @@ info_all(Items) ->
+ info_local(Items) ->
+     rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list_local()).
+-info_all(Items, Ref, AggregatorPid) ->
++emit_info_all(Nodes, Items, Ref, AggregatorPid) ->
++    Pids = [ spawn_link(Node, rabbit_channel, emit_info_local, [Items, Ref, AggregatorPid]) || Node <- Nodes ],
++    rabbit_control_misc:await_emitters_termination(Pids).
++
++emit_info_local(Items, Ref, AggregatorPid) ->
++    emit_info(list_local(), Items, Ref, AggregatorPid).
++
++emit_info(PidList, InfoItems, Ref, AggregatorPid) ->
+     rabbit_control_misc:emitting_map_with_exit_handler(
+-      AggregatorPid, Ref, fun(C) -> info(C, Items) end, list()).
++      AggregatorPid, Ref, fun(C) -> info(C, InfoItems) end, PidList).
+ refresh_config_local() ->
+     rabbit_misc:upmap(
+diff --git a/src/rabbit_control_misc.erl b/src/rabbit_control_misc.erl
+index 2e1f6cc..3b0c60b 100644
+--- a/src/rabbit_control_misc.erl
++++ b/src/rabbit_control_misc.erl
+@@ -17,7 +17,8 @@
+ -module(rabbit_control_misc).
+ -export([emitting_map/4, emitting_map/5, emitting_map_with_exit_handler/4,
+-         emitting_map_with_exit_handler/5, wait_for_info_messages/5,
++         emitting_map_with_exit_handler/5, wait_for_info_messages/6,
++         spawn_emitter_caller/7, await_emitters_termination/1,
+          print_cmd_result/2]).
+ -spec emitting_map(pid(), reference(), fun(), list()) -> 'ok'.
+@@ -25,7 +26,14 @@
+ -spec emitting_map_with_exit_handler
+         (pid(), reference(), fun(), list()) -> 'ok'.
+ -spec emitting_map_with_exit_handler
+-        (pid(), reference(), fun(), list(), atom()) -> 'ok'.
++        (pid(), reference(), fun(), list(), 'continue') -> 'ok'.
++
++-type fold_fun() :: fun ((term(), term()) -> term()).
++
++-spec wait_for_info_messages (pid(), reference(), fold_fun(), term(), timeout(), non_neg_integer()) -> {'ok', term()} | {'error', term()}.
++-spec spawn_emitter_caller (node(), module(), atom(), [term()], reference(), pid(), timeout()) -> 'ok'.
++-spec await_emitters_termination ([pid()]) -> 'ok'.
++
+ -spec print_cmd_result(atom(), term()) -> 'ok'.
+ emitting_map(AggregatorPid, Ref, Fun, List) ->
+@@ -65,27 +73,108 @@ step_with_exit_handler(AggregatorPid, Ref, Fun, Item) ->
+             ok
+     end.
+-wait_for_info_messages(Pid, Ref, ArgAtoms, DisplayFun, Timeout) ->
+-    _ = notify_if_timeout(Pid, Ref, Timeout),
+-    wait_for_info_messages(Ref, ArgAtoms, DisplayFun).
++%% Invokes RPC for async info collection in separate (but linked to
++%% the caller) process. Separate process waits for RPC to finish and
++%% in case of errors sends them in wait_for_info_messages/5-compatible
++%% form to aggregator process. Calling process is then expected to
++%% do blocking call of wait_for_info_messages/5.
++%%
++%% Remote function MUST use calls to emitting_map/4 (and other
++%% emitting_map's) to properly deliver requested information to an
++%% aggregator process.
++%%
++%% If for performance reasons several parallel emitting_map's need to
++%% be run, remote function MUST NOT return until all this
++%% emitting_map's are done. And during all this time remote RPC
++%% process MUST be linked to emitting
++%% processes. await_emitters_termination/1 helper can be used as a
++%% last statement of remote function to ensure this behaviour.
++spawn_emitter_caller(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
++    spawn_monitor(
++      fun () ->
++              case rpc_call_emitter(Node, Mod, Fun, Args, Ref, Pid, Timeout) of
++                  {error, _} = Error        ->
++                      Pid ! {Ref, error, Error};
++                  {bad_argument, _} = Error ->
++                      Pid ! {Ref, error, Error};
++                  {badrpc, _} = Error       ->
++                      Pid ! {Ref, error, Error};
++                  _                         ->
++                      ok
++              end
++      end),
++    ok.
++
++rpc_call_emitter(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
++    rabbit_misc:rpc_call(Node, Mod, Fun, Args++[Ref, Pid], Timeout).
++
++%% Agregator process expects correct numbers of explicits ACKs about
++%% finished emission process. While everything is linked, we still
++%% need somehow to wait for termination of all emitters before
++%% returning from RPC call - otherwise links will be just broken with
++%% reason 'normal' and we can miss some errors, and subsequentially
++%% hang.
++await_emitters_termination(Pids) ->
++    Monitors = [erlang:monitor(process, Pid) || Pid <- Pids],
++    collect_monitors(Monitors).
+-wait_for_info_messages(Ref, InfoItemKeys, DisplayFun) when is_reference(Ref) ->
++collect_monitors([]) ->
++    ok;
++collect_monitors([Monitor|Rest]) ->
+     receive
+-        {Ref,  finished}         ->
+-            ok;
+-        {Ref,  {timeout, T}}     ->
++        {'DOWN', Monitor, _Pid, normal} ->
++            collect_monitors(Rest);
++        {'DOWN', Monitor, _Pid, noproc} ->
++            %% There is a link and a monitor to a process. Matching
++            %% this clause means that process has gracefully
++            %% terminated even before we've started monitoring.
++            collect_monitors(Rest);
++        {'DOWN', _, Pid, Reason} ->
++            exit({emitter_exit, Pid, Reason})
++    end.
++
++%% Wait for result of one or more calls to emitting_map-family
++%% functions.
++%%
++%% Number of expected acknowledgments is specified by ChunkCount
++%% argument. Most common usage will be with ChunkCount equals to
++%% number of live nodes, but it's not mandatory - thus more generic
++%% name of 'ChunkCount' was chosen.
++wait_for_info_messages(Pid, Ref, Fun, Acc0, Timeout, ChunkCount) ->
++    notify_if_timeout(Pid, Ref, Timeout),
++    wait_for_info_messages(Ref, Fun, Acc0, ChunkCount).
++
++wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft) ->
++    receive
++        {Ref, finished} when ChunksLeft =:= 1 ->
++            {ok, Acc0};
++        {Ref, finished} ->
++            wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft - 1);
++        {Ref, {timeout, T}} ->
+             exit({error, {timeout, (T / 1000)}});
+-        {Ref,  []}               ->
+-            wait_for_info_messages(Ref, InfoItemKeys, DisplayFun);
+-        {Ref,  Result, continue} ->
+-            DisplayFun(Result, InfoItemKeys),
+-            wait_for_info_messages(Ref, InfoItemKeys, DisplayFun);
+-        {error, Error}           ->
+-            Error;
+-        _                        ->
+-            wait_for_info_messages(Ref, InfoItemKeys, DisplayFun)
++        {Ref, []} ->
++            wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft);
++        {Ref, Result, continue} ->
++            wait_for_info_messages(Ref, Fun, Fun(Result, Acc0), ChunksLeft);
++        {Ref, error, Error} ->
++            {error, simplify_emission_error(Error)};
++        {'DOWN', _MRef, process, _Pid, normal} ->
++            wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft);
++        {'DOWN', _MRef, process, _Pid, Reason} ->
++            {error, simplify_emission_error(Reason)};
++        _Msg ->
++            wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft)
+     end.
++simplify_emission_error({badrpc, {'EXIT', {{nocatch, EmissionError}, _Stacktrace}}}) ->
++    EmissionError;
++simplify_emission_error({{nocatch, EmissionError}, _Stacktrace}) ->
++    EmissionError;
++simplify_emission_error(Anything) ->
++    {error, Anything}.
++
++notify_if_timeout(_, _, infinity) ->
++    ok;
+ notify_if_timeout(Pid, Ref, Timeout) ->
+     timer:send_after(Timeout, Pid, {Ref, {timeout, Timeout}}).
+diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl
+index 8965c59..9341ea9 100644
+--- a/src/rabbit_misc.erl
++++ b/src/rabbit_misc.erl
+@@ -75,7 +75,7 @@
+ -export([get_env/3]).
+ -export([get_channel_operation_timeout/0]).
+ -export([random/1]).
+--export([rpc_call/4, rpc_call/5, rpc_call/7]).
++-export([rpc_call/4, rpc_call/5]).
+ -export([report_default_thread_pool_size/0]).
+ -export([get_gc_info/1]).
+@@ -264,8 +264,6 @@
+ -spec random(non_neg_integer()) -> non_neg_integer().
+ -spec rpc_call(node(), atom(), atom(), [any()]) -> any().
+ -spec rpc_call(node(), atom(), atom(), [any()], number()) -> any().
+--spec rpc_call
+-        (node(), atom(), atom(), [any()], reference(), pid(), number()) -> any().
+ -spec report_default_thread_pool_size() -> 'ok'.
+ -spec get_gc_info(pid()) -> integer().
+@@ -1184,9 +1182,6 @@ rpc_call(Node, Mod, Fun, Args, Timeout) ->
+                            rpc:call(Node, Mod, Fun, Args, Timeout)
+     end.
+-rpc_call(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
+-    rpc_call(Node, Mod, Fun, Args++[Ref, Pid], Timeout).
+-
+ guess_number_of_cpu_cores() ->
+     case erlang:system_info(logical_processors_available) of
+         unknown -> % Happens on Mac OS X.
+diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl
+index 5bf30ff..63e3ed0 100644
+--- a/src/rabbit_networking.erl
++++ b/src/rabbit_networking.erl
+@@ -33,7 +33,8 @@
+          node_listeners/1, register_connection/1, unregister_connection/1,
+          connections/0, connection_info_keys/0,
+          connection_info/1, connection_info/2,
+-         connection_info_all/0, connection_info_all/1, connection_info_all/3,
++         connection_info_all/0, connection_info_all/1,
++         emit_connection_info_all/4, emit_connection_info_local/3,
+          close_connection/2, force_connection_event_refresh/1, tcp_host/1]).
+ %% Used by TCP-based transports, e.g. STOMP adapter
+@@ -89,8 +90,6 @@
+ -spec connection_info_all() -> [rabbit_types:infos()].
+ -spec connection_info_all(rabbit_types:info_keys()) ->
+           [rabbit_types:infos()].
+--spec connection_info_all(rabbit_types:info_keys(), reference(), pid()) ->
+-          'ok'.
+ -spec close_connection(pid(), string()) -> 'ok'.
+ -spec force_connection_event_refresh(reference()) -> 'ok'.
+@@ -365,10 +364,15 @@ connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items).
+ connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end).
+ connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end).
+-connection_info_all(Items, Ref, AggregatorPid) ->
++emit_connection_info_all(Nodes, Items, Ref, AggregatorPid) ->
++    Pids = [ spawn_link(Node, rabbit_networking, emit_connection_info_local, [Items, Ref, AggregatorPid]) || Node <- Nodes ],
++    rabbit_control_misc:await_emitters_termination(Pids),
++    ok.
++
++emit_connection_info_local(Items, Ref, AggregatorPid) ->
+     rabbit_control_misc:emitting_map_with_exit_handler(
+       AggregatorPid, Ref, fun(Q) -> connection_info(Q, Items) end,
+-      connections()).
++      connections_local()).
+ close_connection(Pid, Explanation) ->
+     rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-common-0002-Use-proto_dist-from-command-line.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-common-0002-Use-proto_dist-from-command-line.patch
new file mode 100644 (file)
index 0000000..a923a28
--- /dev/null
@@ -0,0 +1,31 @@
+From: Peter Lemenkov <lemenkov@redhat.com>
+Date: Fri, 15 Jul 2016 16:01:08 +0200
+Subject: [PATCH] Use proto_dist from command line
+
+Use protocol distribution value from command line when provided instead
+of always using default value (inet_tcp) when trying to check epmd.
+
+If provided more than one protocol distribution types, then use the
+first one.
+
+Signed-off-by: Peter Lemenkov <lemenkov@redhat.com>
+
+diff --git a/src/rabbit_nodes.erl b/src/rabbit_nodes.erl
+index 70a5355..18f7714 100644
+--- a/src/rabbit_nodes.erl
++++ b/src/rabbit_nodes.erl
+@@ -221,9 +221,14 @@ set_cluster_name(Name) ->
+ ensure_epmd() ->
+     {ok, Prog} = init:get_argument(progname),
+     ID = rabbit_misc:random(1000000000),
++    ProtoDist = case init:get_argument(proto_dist) of
++            {ok, [Proto | _Protos]} -> Proto;
++            error -> "inet_tcp"
++    end,
+     Port = open_port(
+              {spawn_executable, os:find_executable(Prog)},
+              [{args, ["-sname", rabbit_misc:format("epmd-starter-~b", [ID]),
++                      "-proto_dist", rabbit_misc:format("~p", [ProtoDist]),
+                       "-noshell", "-eval", "halt()."]},
+               exit_status, stderr_to_stdout, use_stdio]),
+     port_shutdown_loop(Port).
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-script-wrapper b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-script-wrapper
new file mode 100644 (file)
index 0000000..b2a4520
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/sh
+##  The contents of this file are subject to the Mozilla Public License
+##  Version 1.1 (the "License"); you may not use this file except in
+##  compliance with the License. You may obtain a copy of the License
+##  at http://www.mozilla.org/MPL/
+##
+##  Software distributed under the License is distributed on an "AS IS"
+##  basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+##  the License for the specific language governing rights and
+##  limitations under the License.
+##
+##  The Original Code is RabbitMQ.
+##
+##  The Initial Developer of the Original Code is GoPivotal, Inc.
+##  Copyright (c) 2007-2015 Pivotal Software, Inc.  All rights reserved.
+##
+
+SED_OPT="-E"
+if [ $(uname -s) = "Linux" ]; then
+    SED_OPT="-r"
+fi
+
+for arg in "$@" ; do
+    # Wrap each arg in single quotes and wrap single quotes in double quotes, so that they're passed through cleanly.
+    arg=`printf %s "$arg" | sed $SED_OPT -e "s/'/'\"'\"'/g"`
+    CMDLINE="${CMDLINE} '${arg}'"
+done
+
+cd /var/lib/rabbitmq
+
+SCRIPT=`basename $0`
+
+if [ `id -u` = `id -u rabbitmq` -a "$SCRIPT" = "rabbitmq-server" ] ; then
+    RABBITMQ_ENV=/usr/lib/rabbitmq/bin/rabbitmq-env
+    RABBITMQ_SCRIPTS_DIR=$(dirname "$RABBITMQ_ENV")
+    . "$RABBITMQ_ENV"
+
+    exec /usr/lib/rabbitmq/bin/rabbitmq-server "$@"
+elif [ `id -u` = `id -u rabbitmq` -o "$SCRIPT" = "rabbitmq-plugins" ] ; then
+    if [ -f $PWD/.erlang.cookie ] ; then
+        export HOME=.
+    fi
+    exec /usr/lib/rabbitmq/bin/${SCRIPT} "$@"
+elif [ `id -u` = 0 ] ; then
+    # WRS. Allow to run as root
+    export HOME=${HOME:-/root}
+    /bin/sh -c "/usr/lib/rabbitmq/bin/${SCRIPT} ${CMDLINE}"
+else
+    /usr/lib/rabbitmq/bin/${SCRIPT}
+    echo
+    echo "Only root or rabbitmq should run ${SCRIPT}"
+    echo
+    exit 1
+fi
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0001-Remove-excessive-sd_notify-code.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0001-Remove-excessive-sd_notify-code.patch
new file mode 100644 (file)
index 0000000..61a7f40
--- /dev/null
@@ -0,0 +1,131 @@
+From: Peter Lemenkov <lemenkov@gmail.com>
+Date: Thu, 19 May 2016 16:04:56 +0300
+Subject: [PATCH] Remove excessive sd_notify code
+
+Signed-off-by: Peter Lemenkov <lemenkov@gmail.com>
+
+diff --git a/src/rabbit.erl b/src/rabbit.erl
+index a86fd97..32ff240 100644
+--- a/src/rabbit.erl
++++ b/src/rabbit.erl
+@@ -280,120 +280,8 @@ broker_start() ->
+     Plugins = rabbit_plugins:setup(),
+     ToBeLoaded = Plugins ++ ?APPS,
+     start_apps(ToBeLoaded),
+-    maybe_sd_notify(),
+     ok = log_broker_started(rabbit_plugins:active()).
+-%% Try to send systemd ready notification if it makes sense in the
+-%% current environment. standard_error is used intentionally in all
+-%% logging statements, so all this messages will end in systemd
+-%% journal.
+-maybe_sd_notify() ->
+-    case sd_notify_ready() of
+-        false ->
+-            io:format(standard_error, "systemd READY notification failed, beware of timeouts~n", []);
+-        _ ->
+-            ok
+-    end.
+-
+-sd_notify_ready() ->
+-    case {os:type(), os:getenv("NOTIFY_SOCKET")} of
+-        {{win32, _}, _} ->
+-            true;
+-        {_, [_|_]} -> %% Non-empty NOTIFY_SOCKET, give it a try
+-            sd_notify_legacy() orelse sd_notify_socat();
+-        _ ->
+-            true
+-    end.
+-
+-sd_notify_data() ->
+-    "READY=1\nSTATUS=Initialized\nMAINPID=" ++ os:getpid() ++ "\n".
+-
+-sd_notify_legacy() ->
+-    case code:load_file(sd_notify) of
+-        {module, sd_notify} ->
+-            SDNotify = sd_notify,
+-            SDNotify:sd_notify(0, sd_notify_data()),
+-            true;
+-        {error, _} ->
+-            false
+-    end.
+-
+-%% socat(1) is the most portable way the sd_notify could be
+-%% implemented in erlang, without introducing some NIF. Currently the
+-%% following issues prevent us from implementing it in a more
+-%% reasonable way:
+-%% - systemd-notify(1) is unstable for non-root users
+-%% - erlang doesn't support unix domain sockets.
+-%%
+-%% Some details on how we ended with such a solution:
+-%%   https://github.com/rabbitmq/rabbitmq-server/issues/664
+-sd_notify_socat() ->
+-    case sd_current_unit() of
+-        {ok, Unit} ->
+-            io:format(standard_error, "systemd unit for activation check: \"~s\"~n", [Unit]),
+-            sd_notify_socat(Unit);
+-        _ ->
+-            false
+-    end.
+-
+-socat_socket_arg("@" ++ AbstractUnixSocket) ->
+-    "abstract-sendto:" ++ AbstractUnixSocket;
+-socat_socket_arg(UnixSocket) ->
+-    "unix-sendto:" ++ UnixSocket.
+-
+-sd_open_port() ->
+-    open_port(
+-      {spawn_executable, os:find_executable("socat")},
+-      [{args, [socat_socket_arg(os:getenv("NOTIFY_SOCKET")), "STDIO"]},
+-       use_stdio, out]).
+-
+-sd_notify_socat(Unit) ->
+-    case sd_open_port() of
+-        {'EXIT', Exit} ->
+-            io:format(standard_error, "Failed to start socat ~p~n", [Exit]),
+-            false;
+-        Port ->
+-            Port ! {self(), {command, sd_notify_data()}},
+-            Result = sd_wait_activation(Port, Unit),
+-            port_close(Port),
+-            Result
+-    end.
+-
+-sd_current_unit() ->
+-    case catch re:run(os:cmd("systemctl status " ++ os:getpid()), "([-.@0-9a-zA-Z]+)", [unicode, {capture, all_but_first, list}]) of
+-        {'EXIT', _} ->
+-            error;
+-        {match, [Unit]} ->
+-            {ok, Unit};
+-        _ ->
+-            error
+-    end.
+-
+-sd_wait_activation(Port, Unit) ->
+-    case os:find_executable("systemctl") of
+-        false ->
+-            io:format(standard_error, "'systemctl' unavailable, falling back to sleep~n", []),
+-            timer:sleep(5000),
+-            true;
+-        _ ->
+-            sd_wait_activation(Port, Unit, 10)
+-    end.
+-
+-sd_wait_activation(_, _, 0) ->
+-    io:format(standard_error, "Service still in 'activating' state, bailing out~n", []),
+-    false;
+-sd_wait_activation(Port, Unit, AttemptsLeft) ->
+-    case os:cmd("systemctl show --property=ActiveState " ++ Unit) of
+-        "ActiveState=activating\n" ->
+-            timer:sleep(1000),
+-            sd_wait_activation(Port, Unit, AttemptsLeft - 1);
+-        "ActiveState=" ++ _ ->
+-            true;
+-        _ = Err->
+-            io:format(standard_error, "Unexpected status from systemd ~p~n", [Err]),
+-            false
+-    end.
+-
+ start_it(StartFun) ->
+     Marker = spawn_link(fun() -> receive stop -> ok end end),
+     case catch register(rabbit_boot, Marker) of
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0002-Add-systemd-notification-support.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0002-Add-systemd-notification-support.patch
new file mode 100644 (file)
index 0000000..7e8b0a4
--- /dev/null
@@ -0,0 +1,21 @@
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Wed, 18 Feb 2015 16:11:12 -0500
+Subject: [PATCH] Add systemd notification support
+
+
+diff --git a/src/rabbit.erl b/src/rabbit.erl
+index 32ff240..f9e8231 100644
+--- a/src/rabbit.erl
++++ b/src/rabbit.erl
+@@ -280,6 +280,11 @@ broker_start() ->
+     Plugins = rabbit_plugins:setup(),
+     ToBeLoaded = Plugins ++ ?APPS,
+     start_apps(ToBeLoaded),
++    case code:load_file(sd_notify) of
++        {module, sd_notify} -> SDNotify = sd_notify,
++                             SDNotify:sd_notify(0, "READY=1\nSTATUS=Initialized\nMAINPID=" ++ os:getpid() ++ "\n");
++        {error, _} -> ok
++    end,
+     ok = log_broker_started(rabbit_plugins:active()).
+ start_it(StartFun) ->
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0003-Revert-Distinct-exit-codes-for-CLI-utilities.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0003-Revert-Distinct-exit-codes-for-CLI-utilities.patch
new file mode 100644 (file)
index 0000000..e98c427
--- /dev/null
@@ -0,0 +1,124 @@
+From: Peter Lemenkov <lemenkov@gmail.com>
+Date: Wed, 25 May 2016 22:24:44 +0300
+Subject: [PATCH] Revert "Distinct exit-codes for CLI utilities"
+
+This reverts commit 7984540175d0b8852025165b6b6a0ac05d692c98.
+
+diff --git a/include/rabbit_cli.hrl b/include/rabbit_cli.hrl
+index a0d1ecf..efd328a 100644
+--- a/include/rabbit_cli.hrl
++++ b/include/rabbit_cli.hrl
+@@ -46,14 +46,3 @@
+ -define(RAM_DEF, {?RAM_OPT, flag}).
+ -define(OFFLINE_DEF, {?OFFLINE_OPT, flag}).
+ -define(ONLINE_DEF, {?ONLINE_OPT, flag}).
+-
+-%% Subset of standartized exit codes from sysexits.h, see
+-%% https://github.com/rabbitmq/rabbitmq-server/issues/396 for discussion.
+--define(EX_OK         ,  0).
+--define(EX_USAGE      , 64).  % Bad command-line arguments.
+--define(EX_DATAERR    , 65).  % Wrong data in command-line arguments.
+--define(EX_NOUSER     , 67).  % The user specified does not exist.
+--define(EX_UNAVAILABLE, 69).  % Could not connect to the target node.
+--define(EX_SOFTWARE   , 70).  % Failed to execute command.
+--define(EX_TEMPFAIL   , 75).  % Temporary error (e.g. something has timed out).
+--define(EX_CONFIG     , 78).  % Misconfiguration detected
+diff --git a/src/rabbit_cli.erl b/src/rabbit_cli.erl
+index 6b35482..dc490ad 100644
+--- a/src/rabbit_cli.erl
++++ b/src/rabbit_cli.erl
+@@ -58,7 +58,7 @@ ensure_cli_distribution() ->
+         {error, Error} ->
+             print_error("Failed to initialize erlang distribution: ~p.",
+                         [Error]),
+-            rabbit_misc:quit(?EX_TEMPFAIL)
++            rabbit_misc:quit(2)
+     end.
+ %%----------------------------------------------------------------------------
+@@ -84,10 +84,10 @@ main(ParseFun, DoFun, UsageMod) ->
+     %% thrown errors into normal return values
+     case catch DoFun(Command, Node, Args, Opts) of
+         ok ->
+-            rabbit_misc:quit(?EX_OK);
++            rabbit_misc:quit(0);
+         {ok, Result} ->
+             rabbit_control_misc:print_cmd_result(Command, Result),
+-            rabbit_misc:quit(?EX_OK);
++            rabbit_misc:quit(0);
+         {'EXIT', {function_clause, [{?MODULE, action, _}    | _]}} -> %% < R15
+             PrintInvalidCommandError(),
+             usage(UsageMod);
+@@ -97,51 +97,51 @@ main(ParseFun, DoFun, UsageMod) ->
+         {error, {missing_dependencies, Missing, Blame}} ->
+             print_error("dependent plugins ~p not found; used by ~p.",
+                         [Missing, Blame]),
+-            rabbit_misc:quit(?EX_CONFIG);
++            rabbit_misc:quit(2);
+         {'EXIT', {badarg, _}} ->
+             print_error("invalid parameter: ~p", [Args]),
+-            usage(UsageMod, ?EX_DATAERR);
++            usage(UsageMod, 2);
+         {error, {Problem, Reason}} when is_atom(Problem), is_binary(Reason) ->
+             %% We handle this common case specially to avoid ~p since
+             %% that has i18n issues
+             print_error("~s: ~s", [Problem, Reason]),
+-            rabbit_misc:quit(?EX_SOFTWARE);
++            rabbit_misc:quit(2);
+         {error, Reason} ->
+             print_error("~p", [Reason]),
+-            rabbit_misc:quit(?EX_SOFTWARE);
++            rabbit_misc:quit(2);
+         {error_string, Reason} ->
+             print_error("~s", [Reason]),
+-            rabbit_misc:quit(?EX_SOFTWARE);
++            rabbit_misc:quit(2);
+         {badrpc, {'EXIT', Reason}} ->
+             print_error("~p", [Reason]),
+-            rabbit_misc:quit(?EX_SOFTWARE);
++            rabbit_misc:quit(2);
+         {badrpc, Reason} ->
+             case Reason of
+                 timeout ->
+                     print_error("operation ~w on node ~w timed out", [Command, Node]),
+-                    rabbit_misc:quit(?EX_TEMPFAIL);
++                    rabbit_misc:quit(2);
+                 _ ->
+                     print_error("unable to connect to node ~w: ~w", [Node, Reason]),
+                     print_badrpc_diagnostics([Node]),
+                     case Command of
+-                        stop -> rabbit_misc:quit(?EX_OK);
+-                        _    -> rabbit_misc:quit(?EX_UNAVAILABLE)
++                        stop -> rabbit_misc:quit(0);
++                        _    -> rabbit_misc:quit(2)
+                     end
+             end;
+         {badrpc_multi, Reason, Nodes} ->
+             print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]),
+             print_badrpc_diagnostics(Nodes),
+-            rabbit_misc:quit(?EX_UNAVAILABLE);
++            rabbit_misc:quit(2);
+         function_clause ->
+             print_error("operation ~w used with invalid parameter: ~p",
+                         [Command, Args]),
+             usage(UsageMod);
+         {refused, Username, _, _} ->
+             print_error("failed to authenticate user \"~s\"", [Username]),
+-            rabbit_misc:quit(?EX_NOUSER);
++            rabbit_misc:quit(2);
+         Other ->
+             print_error("~p", [Other]),
+-            rabbit_misc:quit(?EX_SOFTWARE)
++            rabbit_misc:quit(2)
+     end.
+ start_distribution_anon(0, LastError) ->
+@@ -172,7 +172,7 @@ name_type() ->
+     end.
+ usage(Mod) ->
+-    usage(Mod, ?EX_USAGE).
++    usage(Mod, 1).
+ usage(Mod, ExitCode) ->
+     io:format("~s", [Mod:usage()]),
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0004-Allow-guest-login-from-non-loopback-connections.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0004-Allow-guest-login-from-non-loopback-connections.patch
new file mode 100644 (file)
index 0000000..8bf55b4
--- /dev/null
@@ -0,0 +1,18 @@
+From: John Eckersberg <jeckersb@redhat.com>
+Date: Thu, 30 Oct 2014 14:01:08 -0400
+Subject: [PATCH] Allow guest login from non-loopback connections
+
+
+diff --git a/src/rabbit.app.src b/src/rabbit.app.src
+index 572c1f6..4676e03 100644
+--- a/src/rabbit.app.src
++++ b/src/rabbit.app.src
+@@ -39,7 +39,7 @@
+          {default_user_tags, [administrator]},
+          {default_vhost, <<"/">>},
+          {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+-         {loopback_users, [<<"guest">>]},
++         {loopback_users, []},
+          {password_hashing_module, rabbit_password_hashing_sha256},
+          {cluster_nodes, {[], disc}},
+          {server_properties, []},
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0005-Avoid-RPC-roundtrips-in-list-commands.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0005-Avoid-RPC-roundtrips-in-list-commands.patch
new file mode 100644 (file)
index 0000000..238d3d5
--- /dev/null
@@ -0,0 +1,280 @@
+From: Alexey Lebedeff <alebedev@mirantis.com>
+Date: Wed, 9 Mar 2016 18:09:04 +0300
+Subject: [PATCH] Avoid RPC roundtrips in list commands
+
+Current implementation of various `list_XXX` commands require cross-node
+roundtrip for every processed item - because `rabbitmqctl` target node
+is responsible for gathering global list of all items of
+interest (channels etc.) and then processing them one by one.
+
+For example, listing 10000 channels evenly distributed across 3 nodes
+where network has 1ms delay takes more than 10 seconds on my
+machine. And with the proposed change listing will take almost the same
+time as it'll take to gather this info locally. E.g. in the case above
+listing now takes 0.7 second on the same machine with same 1ms delay.
+
+It works by invoking emitting_map on every node, where it should send
+info about only local items to aggregator, in an async fashion - as no
+reply from aggregator is needed.
+
+diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl
+index ea9d6a2..e6b168a 100644
+--- a/src/rabbit_control_main.erl
++++ b/src/rabbit_control_main.erl
+@@ -23,7 +23,7 @@
+          sync_queue/1, cancel_sync_queue/1, become/1,
+          purge_queue/1]).
+--import(rabbit_misc, [rpc_call/4, rpc_call/5, rpc_call/7]).
++-import(rabbit_misc, [rpc_call/4, rpc_call/5]).
+ -define(EXTERNAL_CHECK_INTERVAL, 1000).
+@@ -595,56 +595,74 @@ action(purge_queue, Node, [Q], Opts, Inform, Timeout) ->
+ action(list_users, Node, [], _Opts, Inform, Timeout) ->
+     Inform("Listing users", []),
+-    call(Node, {rabbit_auth_backend_internal, list_users, []},
+-         rabbit_auth_backend_internal:user_info_keys(), true, Timeout);
++    call_emitter(Node, {rabbit_auth_backend_internal, list_users, []},
++                 rabbit_auth_backend_internal:user_info_keys(),
++                 [{timeout, Timeout}, to_bin_utf8]);
+ action(list_permissions, Node, [], Opts, Inform, Timeout) ->
+     VHost = proplists:get_value(?VHOST_OPT, Opts),
+     Inform("Listing permissions in vhost \"~s\"", [VHost]),
+-    call(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]},
+-         rabbit_auth_backend_internal:vhost_perms_info_keys(), true, Timeout,
+-         true);
++    call_emitter(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]},
++                 rabbit_auth_backend_internal:vhost_perms_info_keys(),
++                 [{timeout, Timeout}, to_bin_utf8, is_escaped]);
+ action(list_parameters, Node, [], Opts, Inform, Timeout) ->
+     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+     Inform("Listing runtime parameters", []),
+-    call(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]},
+-         rabbit_runtime_parameters:info_keys(), Timeout);
++    call_emitter(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]},
++                 rabbit_runtime_parameters:info_keys(),
++                 [{timeout, Timeout}]);
+ action(list_policies, Node, [], Opts, Inform, Timeout) ->
+     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+     Inform("Listing policies", []),
+-    call(Node, {rabbit_policy, list_formatted, [VHostArg]},
+-         rabbit_policy:info_keys(), Timeout);
++    call_emitter(Node, {rabbit_policy, list_formatted, [VHostArg]},
++                 rabbit_policy:info_keys(),
++                 [{timeout, Timeout}]);
+ action(list_vhosts, Node, Args, _Opts, Inform, Timeout) ->
+     Inform("Listing vhosts", []),
+     ArgAtoms = default_if_empty(Args, [name]),
+-    call(Node, {rabbit_vhost, info_all, []}, ArgAtoms, true, Timeout);
++    call_emitter(Node, {rabbit_vhost, info_all, []}, ArgAtoms,
++                 [{timeout, Timeout}, to_bin_utf8]);
+ action(list_user_permissions, _Node, _Args = [], _Opts, _Inform, _Timeout) ->
+     {error_string,
+      "list_user_permissions expects a username argument, but none provided."};
+ action(list_user_permissions, Node, Args = [_Username], _Opts, Inform, Timeout) ->
+     Inform("Listing permissions for user ~p", Args),
+-    call(Node, {rabbit_auth_backend_internal, list_user_permissions, Args},
+-         rabbit_auth_backend_internal:user_perms_info_keys(), true, Timeout,
+-         true);
++    call_emitter(Node, {rabbit_auth_backend_internal, list_user_permissions, Args},
++                 rabbit_auth_backend_internal:user_perms_info_keys(),
++                 [{timeout, Timeout}, to_bin_utf8, is_escaped]);
+ action(list_queues, Node, Args, Opts, Inform, Timeout) ->
+-    [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]),
+     Inform("Listing queues", []),
++    %% User options
++    [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]),
+     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+     ArgAtoms = default_if_empty(Args, [name, messages]),
+-    call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms, Online, Offline]},
+-         ArgAtoms, Timeout);
++
++    %% Data for emission
++    Nodes = nodes_in_cluster(Node, Timeout),
++    OnlineChunks = if Online -> length(Nodes); true -> 0 end,
++    OfflineChunks = if Offline -> 1; true -> 0 end,
++    ChunksOpt = {chunks, OnlineChunks + OfflineChunks},
++    TimeoutOpt = {timeout, Timeout},
++    EmissionRef = make_ref(),
++    EmissionRefOpt = {ref, EmissionRef},
++
++    _ = Online andalso start_emission(Node, {rabbit_amqqueue, emit_info_all, [Nodes, VHostArg, ArgAtoms]},
++                                      [TimeoutOpt, EmissionRefOpt]),
++    _ = Offline andalso start_emission(Node, {rabbit_amqqueue, emit_info_down, [VHostArg, ArgAtoms]},
++                                       [TimeoutOpt, EmissionRefOpt]),
++    display_emission_result(EmissionRef, ArgAtoms, [ChunksOpt, TimeoutOpt]);
+ action(list_exchanges, Node, Args, Opts, Inform, Timeout) ->
+     Inform("Listing exchanges", []),
+     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+     ArgAtoms = default_if_empty(Args, [name, type]),
+-    call(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]},
+-         ArgAtoms, Timeout);
++    call_emitter(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]},
++                 ArgAtoms, [{timeout, Timeout}]);
+ action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
+     Inform("Listing bindings", []),
+@@ -652,27 +670,31 @@ action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
+     ArgAtoms = default_if_empty(Args, [source_name, source_kind,
+                                        destination_name, destination_kind,
+                                        routing_key, arguments]),
+-    call(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]},
+-         ArgAtoms, Timeout);
++    call_emitter(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]},
++                 ArgAtoms, [{timeout, Timeout}]);
+ action(list_connections, Node, Args, _Opts, Inform, Timeout) ->
+     Inform("Listing connections", []),
+     ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
+-    call(Node, {rabbit_networking, connection_info_all, [ArgAtoms]},
+-         ArgAtoms, Timeout);
++    Nodes = nodes_in_cluster(Node, Timeout),
++    call_emitter(Node, {rabbit_networking, emit_connection_info_all, [Nodes, ArgAtoms]},
++                 ArgAtoms, [{timeout, Timeout}, {chunks, length(Nodes)}]);
+ action(list_channels, Node, Args, _Opts, Inform, Timeout) ->
+     Inform("Listing channels", []),
+     ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
+                                        messages_unacknowledged]),
+-    call(Node, {rabbit_channel, info_all, [ArgAtoms]},
+-         ArgAtoms, Timeout);
++    Nodes = nodes_in_cluster(Node, Timeout),
++    call_emitter(Node, {rabbit_channel, emit_info_all, [Nodes, ArgAtoms]}, ArgAtoms,
++                 [{timeout, Timeout}, {chunks, length(Nodes)}]);
+ action(list_consumers, Node, _Args, Opts, Inform, Timeout) ->
+     Inform("Listing consumers", []),
+     VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
+-    call(Node, {rabbit_amqqueue, consumers_all, [VHostArg]},
+-         rabbit_amqqueue:consumer_info_keys(), Timeout);
++    Nodes = nodes_in_cluster(Node, Timeout),
++    call_emitter(Node, {rabbit_amqqueue, emit_consumers_all, [Nodes, VHostArg]},
++                 rabbit_amqqueue:consumer_info_keys(),
++                 [{timeout, Timeout}, {chunks, length(Nodes)}]);
+ action(node_health_check, Node, _Args, _Opts, Inform, Timeout) ->
+     Inform("Checking health of node ~p", [Node]),
+@@ -788,17 +810,18 @@ display_info_message_row(IsEscaped, Result, InfoItemKeys) ->
+                        {X, Value} -> Value
+                    end, IsEscaped) || X <- InfoItemKeys]).
+-display_info_message(IsEscaped) ->
++display_info_message(IsEscaped, InfoItemKeys) ->
+     fun ([], _) ->
+             ok;
+-        ([FirstResult|_] = List, InfoItemKeys) when is_list(FirstResult) ->
++        ([FirstResult|_] = List, _) when is_list(FirstResult) ->
+             lists:foreach(fun(Result) ->
+                                   display_info_message_row(IsEscaped, Result, InfoItemKeys)
+                           end,
+                           List),
+             ok;
+-        (Result, InfoItemKeys) ->
+-            display_info_message_row(IsEscaped, Result, InfoItemKeys)
++        (Result, _) ->
++            display_info_message_row(IsEscaped, Result, InfoItemKeys),
++            ok
+     end.
+ display_info_list(Results, InfoItemKeys) when is_list(Results) ->
+@@ -855,7 +878,10 @@ display_call_result(Node, MFA) ->
+     end.
+ unsafe_rpc(Node, Mod, Fun, Args) ->
+-    case rpc_call(Node, Mod, Fun, Args) of
++    unsafe_rpc(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
++
++unsafe_rpc(Node, Mod, Fun, Args, Timeout) ->
++    case rpc_call(Node, Mod, Fun, Args, Timeout) of
+         {badrpc, _} = Res -> throw(Res);
+         Normal            -> Normal
+     end.
+@@ -874,33 +900,42 @@ ensure_app_running(Node) ->
+ call(Node, {Mod, Fun, Args}) ->
+     rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args)).
+-call(Node, {Mod, Fun, Args}, InfoKeys, Timeout) ->
+-    call(Node, {Mod, Fun, Args}, InfoKeys, false, Timeout, false).
++call_emitter(Node, {Mod, Fun, Args}, InfoKeys, Opts) ->
++    Ref = start_emission(Node, {Mod, Fun, Args}, Opts),
++    display_emission_result(Ref, InfoKeys, Opts).
++
++start_emission(Node, {Mod, Fun, Args}, Opts) ->
++    ToBinUtf8 = proplists:get_value(to_bin_utf8, Opts, false),
++    Timeout = proplists:get_value(timeout, Opts, infinity),
++    Ref = proplists:get_value(ref, Opts, make_ref()),
++    rabbit_control_misc:spawn_emitter_caller(
++      Node, Mod, Fun, prepare_call_args(Args, ToBinUtf8),
++      Ref, self(), Timeout),
++    Ref.
++
++display_emission_result(Ref, InfoKeys, Opts) ->
++    IsEscaped = proplists:get_value(is_escaped, Opts, false),
++    Chunks = proplists:get_value(chunks, Opts, 1),
++    Timeout = proplists:get_value(timeout, Opts, infinity),
++    EmissionStatus = rabbit_control_misc:wait_for_info_messages(
++                       self(), Ref, display_info_message(IsEscaped, InfoKeys), ok, Timeout, Chunks),
++    emission_to_action_result(EmissionStatus).
++
++%% Convert rabbit_control_misc:wait_for_info_messages/6 return value
++%% into form expected by rabbit_cli:main/3.
++emission_to_action_result({ok, ok}) ->
++    ok;
++emission_to_action_result({error, Error}) ->
++    Error.
+-call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout) ->
+-    call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout, false).
++prepare_call_args(Args, ToBinUtf8) ->
++    case ToBinUtf8 of
++        true  -> valid_utf8_args(Args);
++        false -> Args
++    end.
+-call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout, IsEscaped) ->
+-    Args0 = case ToBinUtf8 of
+-                true  -> lists:map(fun list_to_binary_utf8/1, Args);
+-                false -> Args
+-            end,
+-    Ref = make_ref(),
+-    Pid = self(),
+-    spawn_link(
+-      fun () ->
+-              case rabbit_cli:rpc_call(Node, Mod, Fun, Args0,
+-                                       Ref, Pid, Timeout) of
+-                  {error, _} = Error        ->
+-                      Pid ! {error, Error};
+-                  {bad_argument, _} = Error ->
+-                      Pid ! {error, Error};
+-                  _                         ->
+-                      ok
+-              end
+-      end),
+-    rabbit_control_misc:wait_for_info_messages(
+-      Pid, Ref, InfoKeys, display_info_message(IsEscaped), Timeout).
++valid_utf8_args(Args) ->
++    lists:map(fun list_to_binary_utf8/1, Args).
+ list_to_binary_utf8(L) ->
+     B = list_to_binary(L),
+@@ -950,7 +985,10 @@ split_list([_])        -> exit(even_list_needed);
+ split_list([A, B | T]) -> [{A, B} | split_list(T)].
+ nodes_in_cluster(Node) ->
+-    unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running]).
++    unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], ?RPC_TIMEOUT).
++
++nodes_in_cluster(Node, Timeout) ->
++    unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], Timeout).
+ alarms_by_node(Name) ->
+     case rpc_call(Name, rabbit, status, []) of
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0006-rabbit_prelaunch-must-use-RABBITMQ_SERVER_ERL_ARGS.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-0006-rabbit_prelaunch-must-use-RABBITMQ_SERVER_ERL_ARGS.patch
new file mode 100644 (file)
index 0000000..efbff82
--- /dev/null
@@ -0,0 +1,18 @@
+From: Peter Lemenkov <lemenkov@gmail.com>
+Date: Sun, 17 Jul 2016 18:42:06 +0300
+Subject: [PATCH] rabbit_prelaunch must use RABBITMQ_SERVER_ERL_ARGS
+
+Signed-off-by: Peter Lemenkov <lemenkov@gmail.com>
+
+diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server
+index 7433731..25fff3a 100755
+--- a/scripts/rabbitmq-server
++++ b/scripts/rabbitmq-server
+@@ -71,6 +71,7 @@ RABBITMQ_DIST_PORT=$RABBITMQ_DIST_PORT \
+     -boot "${CLEAN_BOOT_FILE}" \
+     -noinput \
+     -hidden \
++    ${RABBITMQ_SERVER_ERL_ARGS} \
+     -s rabbit_prelaunch \
+     ${RABBITMQ_NAME_TYPE} rabbitmqprelaunch$$ \
+     -extra "${RABBITMQ_NODENAME}"
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-fails-with-home-not-set.patch b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server-fails-with-home-not-set.patch
new file mode 100644 (file)
index 0000000..26c1c12
--- /dev/null
@@ -0,0 +1,26 @@
+From 8141695bdab69d5abd4ae0611c35436840da3c07 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Sat, 21 Mar 2020 23:39:01 +0800
+Subject: [PATCH] rabbitmqctl: set HOME variable for stx bootstrap
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ scripts/rabbitmqctl | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/scripts/rabbitmqctl b/scripts/rabbitmqctl
+index 2336c3d..430f4a3 100755
+--- a/scripts/rabbitmqctl
++++ b/scripts/rabbitmqctl
+@@ -24,6 +24,8 @@ if [ -z "$ERL_CRASH_DUMP_SECONDS" ]; then
+     export ERL_CRASH_DUMP_SECONDS=0
+ fi
++export HOME
++
+ # We specify Mnesia dir and sasl error logger since some actions
+ # (e.g. forget_cluster_node --offline) require us to impersonate the
+ # real node.
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.logrotate b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.logrotate
new file mode 100644 (file)
index 0000000..832cdcf
--- /dev/null
@@ -0,0 +1,12 @@
+/var/log/rabbitmq/*.log {
+        weekly
+        missingok
+        rotate 20
+        compress
+        delaycompress
+        notifempty
+        sharedscripts
+        postrotate
+            /usr/sbin/rabbitmqctl -q rotate_logs
+        endscript
+}
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.service b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.service
new file mode 100644 (file)
index 0000000..ab0cc98
--- /dev/null
@@ -0,0 +1,16 @@
+[Unit]
+Description=RabbitMQ Messaging broker/server service
+After=network.target
+
+[Service]
+Type=simple
+User=root
+Environment=HOME=/home/root
+WorkingDirectory=/var/lib/rabbitmq
+LogsDirectory=rabbitmq
+ExecStartPre=/usr/bin/rabbitmq-server-setup
+ExecStart=/usr/bin/rabbitmq-server
+ExecStop=/usr/bin/rabbitmqctl stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.tmpfiles b/meta-stx/recipes-extended/rabbitmq/files/rabbitmq-server.tmpfiles
new file mode 100644 (file)
index 0000000..c268182
--- /dev/null
@@ -0,0 +1 @@
+D /var/run/rabbitmq 0755 rabbitmq rabbitmq -
diff --git a/meta-stx/recipes-extended/rabbitmq/rabbitmq-server_3.6.5.bb b/meta-stx/recipes-extended/rabbitmq/rabbitmq-server_3.6.5.bb
new file mode 100644 (file)
index 0000000..e9315f6
--- /dev/null
@@ -0,0 +1,131 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "High-performance AMQP-compliant message broker written in Erlang."
+HOMEPAGE = "http://www.rabbitmq.com/"
+SECTION = "network"
+
+LICENSE = "MPL-1.1"
+LIC_FILES_CHKSUM = "file://LICENSE-MPL-RabbitMQ;md5=ce8293a7cc187353c90cb24a8ecee4ce"
+
+SRC_URI = " \
+    http://www.rabbitmq.com/releases/rabbitmq-server/v${PV}/${PN}-${PV}.tar.xz \
+    file://rabbitmq-server-0001-Remove-excessive-sd_notify-code.patch \
+    file://rabbitmq-server-0002-Add-systemd-notification-support.patch \
+    file://rabbitmq-server-0003-Revert-Distinct-exit-codes-for-CLI-utilities.patch \
+    file://rabbitmq-server-0004-Allow-guest-login-from-non-loopback-connections.patch \
+    file://rabbitmq-server-0005-Avoid-RPC-roundtrips-in-list-commands.patch \
+    file://rabbitmq-server-0006-rabbit_prelaunch-must-use-RABBITMQ_SERVER_ERL_ARGS.patch \
+    file://rabbitmq-common-0001-Avoid-RPC-roundtrips-while-listing-items.patch;patchdir=deps/rabbit_common \
+    file://rabbitmq-common-0002-Use-proto_dist-from-command-line.patch;patchdir=deps/rabbit_common \
+    file://rabbitmq-server-fails-with-home-not-set.patch \
+    file://rabbitmq-script-wrapper \
+    file://rabbitmq-server.logrotate \
+    file://rabbitmq-server.tmpfiles \
+"
+
+SRC_URI[md5sum] = "e9f96b5763a89a246f53250e46c2796b"
+SRC_URI[sha256sum] = "9550433ca8aaf5130bf5235bb978c44d3c4694cbd09d97114b3859f4895788ec"
+
+DEPENDS = " \
+    coreutils-native\
+    erlang-ssl \
+    erlang-ssl-dev \
+    erlang-native \
+    libxslt \
+    libxslt-native \
+    python-simplejson \
+    rsync-native \
+    unzip-native \
+    xmlto-native \
+    zip-native \
+"
+
+# ../../../../../recipe-sysroot/usr/lib/erlang/lib/ssl-5.3.3/src/
+do_compile() {
+    export SOCKJS_ERLC_OPTS="-Dpre17_type_specs"
+    rm -rf deps/rabbit_common/include/ssl
+    mkdir ${S}/deps/rabbit_common/include/ssl
+    cp -r ${RECIPE_SYSROOT}/${libdir}/erlang/lib/ssl-5.3.3/src ${S}/deps/rabbit_common/include/ssl
+    oe_runmake
+}
+
+do_install() {
+    RABBIT_LIB_DIR=${libdir}/rabbitmq
+
+    oe_runmake install \
+               DESTDIR=${D} \
+               PREFIX=${prefix} \
+               RMQ_ROOTDIR=${RABBIT_LIB_DIR}
+
+    oe_runmake install-man \
+               DESTDIR=${D} \
+               PREFIX=${prefix} \
+               RMQ_ROOTDIR=${RABBIT_LIB_DIR}
+
+    mkdir -p ${D}${localstatedir}/lib/rabbitmq/mnesia
+    mkdir -p ${D}${localstatedir}/log/rabbitmq
+
+    # Copy all necessary lib files etc.
+    install -p -D -m 0644 ${S}/docs/rabbitmq-server.service.example ${D}${systemd_system_unitdir}/rabbitmq-server.service
+    install -p -D -m 0755 ${WORKDIR}/rabbitmq-script-wrapper ${D}${sbindir}/rabbitmqctl
+    install -p -D -m 0755 ${WORKDIR}/rabbitmq-script-wrapper ${D}${sbindir}/rabbitmq-server
+    install -p -D -m 0755 ${WORKDIR}/rabbitmq-script-wrapper ${D}${sbindir}/rabbitmq-plugins
+
+    # Make necessary symlinks
+    mkdir -p ${D}${RABBIT_LIB_DIR}/bin
+    for app in rabbitmq-defaults rabbitmq-env rabbitmq-plugins rabbitmq-server rabbitmqctl; do
+        ln -s ${RABBIT_LIB_DIR}/lib/rabbitmq_server-${PV}/sbin/${app} ${D}${RABBIT_LIB_DIR}/bin/${app}
+    done
+
+    install -p -D -m 0755 ${S}/scripts/rabbitmq-server.ocf ${D}${exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server
+    install -p -D -m 0755 ${S}/scripts/rabbitmq-server-ha.ocf ${D}${exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha
+
+    install -p -D -m 0644 ${WORKDIR}/rabbitmq-server.logrotate ${D}${sysconfdir}/logrotate.d/rabbitmq-server
+
+    install -p -D -m 0644 ${S}/docs/rabbitmq.config.example ${D}${sysconfdir}/rabbitmq/rabbitmq.config
+
+    rm -rf ${D}${RABBIT_LIB_DIR}/lib/rabbitmq_server-${PV}/LICENSE ${D}${RABBIT_LIB_DIR}/lib/rabbitmq_server-${PV}/LICENSE-* ${D}${RABBIT_LIB_DIR}/lib/rabbitmq_server-${PV}/INSTALL
+
+    install -p -D -m 0644 ${WORKDIR}/rabbitmq-server.tmpfiles ${D}${prefix}/lib/tmpfiles.d/${BPN}.conf
+}
+
+inherit useradd systemd openssl10
+
+USERADD_PACKAGES = "${PN}"
+GROUPADD_PARAM_${PN} = "--system rabbitmq"
+USERADD_PARAM_${PN}  = " \
+    --system --create-home \
+    --home ${localstatedir}/lib/rabbitmq \
+    -g rabbitmq rabbitmq \
+"
+
+SYSTEMD_SERVICE_${PN} = "rabbitmq-server.service"
+SYSTEMD_AUTO_ENABLE_${PN} = "disable"
+
+FILES_${PN} += " \
+    ${libdir}/rabbitmq/* \
+    ${exec_prefix}/lib/* \
+    ${localstatedir} \
+"
+
+FILES_${PN}-doc += "LICENSE* INSTALL"
+
+RDEPENDS_${PN} = " \
+    erlang \
+    erlang-modules \
+"
+
+INSANE_SKIP_${PN} = "unsafe-references-in-scripts"
diff --git a/meta-stx/recipes-extended/registry-token-server/files/registry-token-server-1.0.0.tar.gz b/meta-stx/recipes-extended/registry-token-server/files/registry-token-server-1.0.0.tar.gz
new file mode 100644 (file)
index 0000000..ba20c20
Binary files /dev/null and b/meta-stx/recipes-extended/registry-token-server/files/registry-token-server-1.0.0.tar.gz differ
diff --git a/meta-stx/recipes-extended/registry-token-server/files/registry-token-server.service b/meta-stx/recipes-extended/registry-token-server/files/registry-token-server.service
new file mode 100644 (file)
index 0000000..477e85d
--- /dev/null
@@ -0,0 +1,19 @@
+[Unit]
+Description=v2 Registry token server for Docker
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/docker-distribution/registry/token_server.conf
+ExecStart=/usr/bin/registry-token-server -addr=${REGISTRY_TOKEN_SERVER_ADDR} \
+    -issuer=${REGISTRY_TOKEN_SERVER_ISSUER} \
+    -endpoint=${REGISTRY_TOKEN_SERVER_KS_ENDPOINT} \
+    -tlscert=${REGISTRY_TOKEN_SERVER_TLSCERT} \
+    -tlskey=${REGISTRY_TOKEN_SERVER_TLSKEY} \
+    -realm=${REGISTRY_TOKEN_SERVER_REALM} \
+    -key=${REGISTRY_TOKEN_SERVER_KEY}
+Restart=on-failure
+ExecStartPost=/bin/bash -c 'echo $MAINPID > /var/run/registry-token-server.pid'
+ExecStopPost=/bin/rm -f /var/run/registry-token-server.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-extended/registry-token-server/files/token-server-certificate.pem b/meta-stx/recipes-extended/registry-token-server/files/token-server-certificate.pem
new file mode 100644 (file)
index 0000000..c40df59
--- /dev/null
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDADCCAegCCQCSevkS4h7LQjANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJY
+WDEVMBMGA1UEBwwMRGVmYXVsdCBDaXR5MRwwGgYDVQQKDBNEZWZhdWx0IENvbXBh
+bnkgTHRkMB4XDTE4MDkyMTE0MTYwOFoXDTE5MDkyMTE0MTYwOFowQjELMAkGA1UE
+BhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UECgwTRGVmYXVsdCBD
+b21wYW55IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKtCbNJ/
+aPEMkZFEtMKRomOh9NgeOv0jYFY5i23fXghtTgdXu9//H3Huz5/KDJ+XEUp2DZgK
+YQ2UHVR+cqj2sFjCllfAVrzmv9FFR0CQpQxqKcxChefVwsMh6XsqF+GzbqzFOx67
+bT39Xb5+spAmDHctFl3nrmyA1wM6e+OXcktC0chILeN+UEyq5Xeng6/BpVnI2UaY
+J1OpfuUrffddy5t0oeuKGZ/xG2g9sL6GMGBeVslOmLg4CBOwq3knUGoOTFYSjHVx
+rU/p4YgUotIUvb4GBsXqbiI7M2NakItTR6mxfcYiKkxfjadQlptFyGucI84mMYx8
+vO3o6TFLfcTYqZ8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAHXZR0U0pyMkYIeO5
+Y/n0H9Onj/PtCJHBbYzMHZGMPlX2IbW+JAeE/0XNIYGHtAtFwlb825Tkg2p7wpa8
+8HmOBqkTyn2ywDdmPqdfjCiMu/Ge6tkLjqkmYWv2l/d4+qEMR4dUh9g8SrrtUdZg
+DP7H22B+0knQ7s04JuiJ27hqi4nPOzdwdJNpz5Przgce8vN1ihk8194pR/uoNrjP
+td3Po+DwmxFKigoKPQCHgQuD63mAFor4vVnht+IkNbB3/lQyXP6Qv7DnWVW9WDBL
+nKxgXhRwyy5mYebYmwA//JX41O/Kdp1Q6oWgv4zSLd8M9FIMtESG8k4gSl0XfUBa
+Y24p0Q==
+-----END CERTIFICATE-----
diff --git a/meta-stx/recipes-extended/registry-token-server/files/token-server-private-key.pem b/meta-stx/recipes-extended/registry-token-server/files/token-server-private-key.pem
new file mode 100644 (file)
index 0000000..4332eb3
--- /dev/null
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAq0Js0n9o8QyRkUS0wpGiY6H02B46/SNgVjmLbd9eCG1OB1e7
+3/8fce7Pn8oMn5cRSnYNmAphDZQdVH5yqPawWMKWV8BWvOa/0UVHQJClDGopzEKF
+59XCwyHpeyoX4bNurMU7HrttPf1dvn6ykCYMdy0WXeeubIDXAzp745dyS0LRyEgt
+435QTKrld6eDr8GlWcjZRpgnU6l+5St9913Lm3Sh64oZn/EbaD2wvoYwYF5WyU6Y
+uDgIE7CreSdQag5MVhKMdXGtT+nhiBSi0hS9vgYGxepuIjszY1qQi1NHqbF9xiIq
+TF+Np1CWm0XIa5wjziYxjHy87ejpMUt9xNipnwIDAQABAoIBAFHCIV+QkdHZ9TiL
+u1vT2NmFvPTb4b9tfxVK3YRziVmujPy2Zqu2CRYEMzyOYd5iaU/J8g1ujwzDdAkd
+YLHHK0MEim+UFBSUeGh4kV6CbzjxCclIzNJz20n6y5MP8ly+o4x5kBLI2YsphPJn
+W+mzMGpIrQ/hhgSosX0KE5EAgQDqOfJSlhZvSgSO5UF9nXvEn7Y9Zc8GK0XQdcwB
+Pr8iFhuhEJmmb4LrCm+3Me/fhLxFjUAOAcLSkFnqfxo2vAuRqk99OOLxFEfPYZB8
+kLkKlQ+PwhkG3pjPg6w/rOmBHqW/ZEpd87972JWeHscXYpb/cLLVmcJbZI/claos
+YOHS7CECgYEA4XKo7GzuqSkLskfaZM2pyNhHbxphqyNfk8GmW5NJnKavpmY8YiXh
+7hNXXf4HCkcHvHMn4JUCHgHVavDNhNnrHNrQAzO3KwuUrrFiBP+yP1tRyQ4BP395
+KIBSUyeEOo9vM7d3yerI8WHboio5gaoqEfeNS1dakZ6ZiOpoP94CIxECgYEAwnfW
+Drdcqkpj794gYDlXH4D279f7+qmq11eI4C0zkZzTFkExl8BGfqpy49kruaTm0e4t
+L1B23TYfKC0ei4BQskyNCHUnl/eic/JHe9gJRd6BAZi2REfV0LI4ytYGgniCu50H
+EJVvTVMXS/+wWcjZr037oV6/WiB9Wzr7Z1oFoa8CgYBlmqdG5lEpK7Z5wqhKheXe
+/pozGFCsMGUC0mOHIfoq/3RqKelM0oXgJhdZ5QKHPzvdUojGTmGF5I2qhJwbI5sy
+her5hnUmkTGRCaCDYDmVFDLnycgGNg0Ek9CGaWjOe5ZCWI1EEuw83T1++Eiyh14u
+esLTEatftXq8megh4IxWAQKBgQCTNfox27ZnJrcuXn0tulpse8jy2RJjt0qfhyET
+asRN52SXxTRQhvoWattcBgsmlmEw69cCqSvB23WMiVNFERaFUpO0olMdpBUzJmXc
+pzal0IDh/4OCfsqqGDALxCbbX3S/p2gwsp617z+EhYMvBG9dWHAywTGjfVLH3Ady
+PmBi+wKBgQCWJS/PmTpyO8LU4MYZk91mJmjHAsPlgi/9n8yEqdmins+X698IsoCr
+s2FN8rol8+UP8c3m9o4kp62ouoby2QzAZw0y3UGWcxOb3ZpoozatKodsoETSLLoL
+T//wVn2Z2MsS9tLOBLZzsZiYlHyYxTUm7UTOdxdjbSLWVdLbCpKEhg==
+-----END RSA PRIVATE KEY-----
diff --git a/meta-stx/recipes-extended/registry-token-server/files/token_server.conf b/meta-stx/recipes-extended/registry-token-server/files/token_server.conf
new file mode 100644 (file)
index 0000000..10ef684
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+# This is a puppet managed config file
diff --git a/meta-stx/recipes-extended/registry-token-server/registry-token-server_git.bb b/meta-stx/recipes-extended/registry-token-server/registry-token-server_git.bb
new file mode 100644 (file)
index 0000000..5258735
--- /dev/null
@@ -0,0 +1,88 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Docker registry v2 token server providing Keystone-backed authentication"
+HOMEPAGE = "https://opendev.org/starlingx"
+SECTION = "network"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://src/registry-token-server/registry-token-server/src/LICENSE;md5=d2794c0df5b907fdace235a619d80314"
+
+PROTOCOL = "https"
+BRANCH = "r/stx.3.0"
+SRCNAME = "containers"
+SRCREV = "1a4b803e946b488c1f3adb25ab0614d1b0c3c9b8"
+PV = "1.0.0+git${SRCPV}"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+GO_IMPORT = "registry-token-server"
+
+RDEPENDS_${PN}-dev_append = " \
+       systemd \
+       "
+
+DEPENDS += "\
+        go-logrus \
+        docker-distribution \
+        go-libtrust \
+        go-patricia \
+        go-mux \
+        go-context \
+        go-phercloud \
+        "
+
+RDEPENDS_${PN} = " \
+       docker-distribution \
+       docker-ce \
+       "
+
+inherit go goarch ${@bb.utils.contains('VIRTUAL-RUNTIME_init_manager','systemd','systemd','', d)}
+
+do_compile() {
+        mkdir -p _build/src
+        ln -sfn ${S}/src/registry-token-server/registry-token-server/src/ ./_build/src/registry-token-server
+  
+        # Pass the needed cflags/ldflags so that cgo
+        # can find the needed headers files and libraries
+        export GOARCH=${TARGET_GOARCH}
+        export CGO_ENABLED="1"
+        export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+        export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+        export GOPATH="${WORKDIR}/build/_build:${STAGING_DIR_TARGET}/${prefix}/local/go"
+        cd _build/src/${GO_IMPORT}
+        #oe_runmake registry-token-server
+        export GOROOT=${STAGING_DIR_TARGET}/${prefix}/local/go
+        go build -o ${WORKDIR}/build/bin/registry-token-server registry-token-server
+}
+
+SYSTEMD_PACKAGES = "${PN}"
+SYSTEMD_SERVICE_${PN} = "registry-token-server.service"
+SYSTEMD_AUTO_ENABLE = "disable"
+
+do_install() {
+       SRCPATH="${S}/src/registry-token-server/registry-token-server/centos/files"
+        install -d ${D}/${bindir}
+        install -m 0755 bin/registry-token-server ${D}/${bindir}
+        install -d -m 0755 ${D}/${sysconfdir}/registry-token-server/registry
+        install -m 0644 ${SRCPATH}/token_server.conf ${D}/${sysconfdir}/registry-token-server/registry
+
+        if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+            install -d ${D}${systemd_unitdir}/system
+            install -m 0644 ${SRCPATH}/registry-token-server.service ${D}${systemd_unitdir}/system/
+        fi
+}
+
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.6.7p5-strip.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.6.7p5-strip.patch
new file mode 100644 (file)
index 0000000..879a4c1
--- /dev/null
@@ -0,0 +1,25 @@
+From e8e74bddb6fb4030b574a76e43e7d0618c0432c9 Mon Sep 17 00:00:00 2001
+From: Tomas Sykora <tosykora@redhat.com>
+Date: Fri, 19 Aug 2016 13:49:25 +0200
+Subject: [PATCH] We do not strip
+
+rebased from:
+Patch1: sudo-1.6.7p5-strip.patch
+
+---
+ install-sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/install-sh b/install-sh
+index 6944fba..49d383a 100755
+--- a/install-sh
++++ b/install-sh
+@@ -147,7 +147,7 @@ while ${MORETODO} ; do
+       fi
+       ;;
+     X-s)
+-      STRIPIT=true
++      #STRIPIT=true
+       ;;
+     X--)
+       shift
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.7.2p1-envdebug.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.7.2p1-envdebug.patch
new file mode 100644 (file)
index 0000000..626abec
--- /dev/null
@@ -0,0 +1,25 @@
+From 33cc84bc035773105a62b5b0a07e78d55cb6bf6e Mon Sep 17 00:00:00 2001
+From: Tomas Sykora <tosykora@redhat.com>
+Date: Fri, 19 Aug 2016 14:07:35 +0200
+Subject: [PATCH] Added "Enviroment debugging" message
+
+rebased from:
+Patch2: sudo-1.7.2p1-envdebug.patch
+
+---
+ configure.ac | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index 962a032..ade78f6 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1408,7 +1408,7 @@ AC_ARG_ENABLE(env_debug,
+ [AS_HELP_STRING([--enable-env-debug], [Whether to enable environment debugging.])],
+ [ case "$enableval" in
+     yes)      AC_MSG_RESULT(yes)
+-              AC_DEFINE(ENV_DEBUG)
++              AC_DEFINE(ENV_DEBUG, [], [Environment debugging.])
+               ;;
+     no)               AC_MSG_RESULT(no)
+               ;;
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-fix-double-quote-parsing-for-Defaults-values.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-fix-double-quote-parsing-for-Defaults-values.patch
new file mode 100644 (file)
index 0000000..268a0ab
--- /dev/null
@@ -0,0 +1,86 @@
+From 1a9754ec64f703542a5faf9ae9c5058b50047b26 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Wed, 11 Dec 2019 19:43:19 -0800
+ sudo-1.8.23-fix-double-quote-parsing-for-Defaults-values.patch
+
+---
+ plugins/sudoers/regress/sudoers/test2.json.ok | 6 +++---
+ plugins/sudoers/regress/sudoers/test2.toke.ok | 6 +++---
+ plugins/sudoers/toke.c                        | 2 +-
+ plugins/sudoers/toke.l                        | 2 +-
+ 4 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/plugins/sudoers/regress/sudoers/test2.json.ok b/plugins/sudoers/regress/sudoers/test2.json.ok
+index 8e6656e..769c3fd 100644
+--- a/plugins/sudoers/regress/sudoers/test2.json.ok
++++ b/plugins/sudoers/regress/sudoers/test2.json.ok
+@@ -34,7 +34,7 @@
+         },
+         {
+             "Binding": [
+-                { "username": "%them" }
++                { "usergroup": "them" }
+             ],
+             "Options": [
+                 { "set_home": true }
+@@ -42,7 +42,7 @@
+         },
+         {
+             "Binding": [
+-                { "username": "%: non UNIX 0 c" }
++                { "nonunixgroup": " non UNIX 0 c" }
+             ],
+             "Options": [
+                 { "set_home": true }
+@@ -50,7 +50,7 @@
+         },
+         {
+             "Binding": [
+-                { "username": "+net" }
++                { "netgroup": "net" }
+             ],
+             "Options": [
+                 { "set_home": true }
+diff --git a/plugins/sudoers/regress/sudoers/test2.toke.ok b/plugins/sudoers/regress/sudoers/test2.toke.ok
+index fcd7b73..63e1648 100644
+--- a/plugins/sudoers/regress/sudoers/test2.toke.ok
++++ b/plugins/sudoers/regress/sudoers/test2.toke.ok
+@@ -29,9 +29,9 @@ DEFAULTS_HOST BEGINSTR STRBODY ENDSTR WORD(4) DEFVAR
+ #
+ DEFAULTS_USER BEGINSTR STRBODY ENDSTR WORD(4) DEFVAR 
+ DEFAULTS_USER BEGINSTR STRBODY ENDSTR WORD(4) DEFVAR 
+-DEFAULTS_USER BEGINSTR STRBODY ENDSTR WORD(4) DEFVAR 
+-DEFAULTS_USER BEGINSTR STRBODY ENDSTR WORD(4) DEFVAR 
+-DEFAULTS_USER BEGINSTR STRBODY ENDSTR WORD(4) DEFVAR 
++DEFAULTS_USER BEGINSTR STRBODY ENDSTR USERGROUP DEFVAR 
++DEFAULTS_USER BEGINSTR STRBODY ENDSTR USERGROUP DEFVAR 
++DEFAULTS_USER BEGINSTR STRBODY ENDSTR NETGROUP DEFVAR 
+ #
+ DEFAULTS_RUNAS BEGINSTR STRBODY ENDSTR WORD(4) DEFVAR 
+diff --git a/plugins/sudoers/toke.c b/plugins/sudoers/toke.c
+index d0dd5e3..784218b 100644
+--- a/plugins/sudoers/toke.c
++++ b/plugins/sudoers/toke.c
+@@ -2512,7 +2512,7 @@ YY_RULE_SETUP
+                               LEXTRACE("ERROR "); /* empty string */
+                               LEXRETURN(ERROR);
+                           }
+-                          if (prev_state == INITIAL) {
++                          if (prev_state == INITIAL || prev_state == GOTDEFS) {
+                               switch (sudoerslval.string[0]) {
+                               case '%':
+                                   if (sudoerslval.string[1] == '\0' ||
+diff --git a/plugins/sudoers/toke.l b/plugins/sudoers/toke.l
+index d275a26..638d9ea 100644
+--- a/plugins/sudoers/toke.l
++++ b/plugins/sudoers/toke.l
+@@ -178,7 +178,7 @@ DEFVAR                     [a-z_]+
+                               LEXTRACE("ERROR "); /* empty string */
+                               LEXRETURN(ERROR);
+                           }
+-                          if (prev_state == INITIAL) {
++                          if (prev_state == INITIAL || prev_state == GOTDEFS) {
+                               switch (sudoerslval.string[0]) {
+                               case '%':
+                                   if (sudoerslval.string[1] == '\0' ||
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-ldapsearchuidfix.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-ldapsearchuidfix.patch
new file mode 100644 (file)
index 0000000..b5107aa
--- /dev/null
@@ -0,0 +1,36 @@
+From bff4cd71cc41bf3104b35da24e73742571845ebd Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Wed, 11 Dec 2019 19:43:19 -0800
+
+---
+ plugins/sudoers/ldap.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/plugins/sudoers/ldap.c b/plugins/sudoers/ldap.c
+index bc2baec..ad8a890 100644
+--- a/plugins/sudoers/ldap.c
++++ b/plugins/sudoers/ldap.c
+@@ -920,8 +920,8 @@ sudo_ldap_build_pass1(LDAP *ld, struct passwd *pw)
+     if (ldap_conf.search_filter)
+       sz += strlen(ldap_conf.search_filter);
+-    /* Then add (|(sudoUser=USERNAME)(sudoUser=ALL)) + NUL */
+-    sz += 29 + sudo_ldap_value_len(pw->pw_name);
++    /* Then add (|(sudoUser=USERNAME)(sudoUser=#uid)(sudoUser=ALL)) + NUL */
++    sz += 29 + (12 + MAX_UID_T_LEN) + sudo_ldap_value_len(pw->pw_name);
+     /* Add space for primary and supplementary groups and gids */
+     if ((grp = sudo_getgrgid(pw->pw_gid)) != NULL) {
+@@ -982,6 +982,12 @@ sudo_ldap_build_pass1(LDAP *ld, struct passwd *pw)
+     CHECK_LDAP_VCAT(buf, pw->pw_name, sz);
+     CHECK_STRLCAT(buf, ")", sz);
++    /* Append user uid */
++    (void) snprintf(gidbuf, sizeof(gidbuf), "%u", (unsigned int)pw->pw_uid);
++    (void) strlcat(buf, "(sudoUser=#", sz);
++    (void) strlcat(buf, gidbuf, sz);
++    (void) strlcat(buf, ")", sz);
++
+     /* Append primary group and gid */
+     if (grp != NULL) {
+       CHECK_STRLCAT(buf, "(sudoUser=%", sz);
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-legacy-group-processing.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-legacy-group-processing.patch
new file mode 100644 (file)
index 0000000..e24477b
--- /dev/null
@@ -0,0 +1,108 @@
+From ce16b664df514c4d8b0e6b8733ae1dce3561a2a4 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Wed, 11 Dec 2019 19:43:19 -0800
+
+---
+ plugins/sudoers/cvtsudoers.c | 9 +++++++++
+ plugins/sudoers/def_data.c   | 4 ++++
+ plugins/sudoers/def_data.h   | 2 ++
+ plugins/sudoers/def_data.in  | 3 +++
+ plugins/sudoers/defaults.c   | 3 +++
+ plugins/sudoers/sudoers.c    | 4 ++++
+ 6 files changed, 25 insertions(+)
+
+diff --git a/plugins/sudoers/cvtsudoers.c b/plugins/sudoers/cvtsudoers.c
+index 0221314..9d21d2f 100644
+--- a/plugins/sudoers/cvtsudoers.c
++++ b/plugins/sudoers/cvtsudoers.c
+@@ -346,6 +346,15 @@ main(int argc, char *argv[])
+       sudo_fatalx("error: unhandled input %d", input_format);
+     }
++    /*
++     * cvtsudoers group filtering doesn't work if def_match_group_by_gid
++     * is set to true by default (at compile-time). It cannot be set to false
++     * because cvtsudoers doesn't apply the parsed Defaults.
++     *
++     * Related: sudo-1.8.23-legacy-group-processing.patch
++     */
++    def_match_group_by_gid = def_legacy_group_processing = false;
++
+     /* Apply filters. */
+     filter_userspecs(&parsed_policy, conf);
+     filter_defaults(&parsed_policy, conf);
+diff --git a/plugins/sudoers/def_data.c b/plugins/sudoers/def_data.c
+index 07e3433..5fa45bb 100644
+--- a/plugins/sudoers/def_data.c
++++ b/plugins/sudoers/def_data.c
+@@ -494,6 +494,10 @@ struct sudo_defs_types sudo_defs_table[] = {
+       N_("Ignore case when matching group names"),
+       NULL,
+     }, {
++      "legacy_group_processing", T_FLAG,
++      N_("Don't pre-resolve all group names"),
++      NULL,
++    }, {
+       NULL, 0, NULL
+     }
+ };
+diff --git a/plugins/sudoers/def_data.h b/plugins/sudoers/def_data.h
+index 65f10c3..940fa8f 100644
+--- a/plugins/sudoers/def_data.h
++++ b/plugins/sudoers/def_data.h
+@@ -226,6 +226,8 @@
+ #define def_case_insensitive_user (sudo_defs_table[I_CASE_INSENSITIVE_USER].sd_un.flag)
+ #define I_CASE_INSENSITIVE_GROUP 113
+ #define def_case_insensitive_group (sudo_defs_table[I_CASE_INSENSITIVE_GROUP].sd_un.flag)
++#define I_LEGACY_GROUP_PROCESSING 114
++#define def_legacy_group_processing (sudo_defs_table[I_LEGACY_GROUP_PROCESSING].sd_un.flag)
+ enum def_tuple {
+       never,
+diff --git a/plugins/sudoers/def_data.in b/plugins/sudoers/def_data.in
+index 99d4360..571bc96 100644
+--- a/plugins/sudoers/def_data.in
++++ b/plugins/sudoers/def_data.in
+@@ -357,3 +357,6 @@ case_insensitive_user
+ case_insensitive_group
+       T_FLAG
+       "Ignore case when matching group names"
++legacy_group_processing
++      T_FLAG
++      "Don't pre-resolve all group names"
+diff --git a/plugins/sudoers/defaults.c b/plugins/sudoers/defaults.c
+index 4c8c262..970755e 100644
+--- a/plugins/sudoers/defaults.c
++++ b/plugins/sudoers/defaults.c
+@@ -91,6 +91,7 @@ static struct early_default early_defaults[] = {
+     { I_FQDN },
+ #endif
+     { I_MATCH_GROUP_BY_GID },
++    { I_LEGACY_GROUP_PROCESSING },
+     { I_GROUP_PLUGIN },
+     { I_RUNAS_DEFAULT },
+     { I_SUDOERS_LOCALE },
+@@ -492,6 +493,8 @@ init_defaults(void)
+     }
+     /* First initialize the flags. */
++    def_legacy_group_processing = true;
++    def_match_group_by_gid = true;
+ #ifdef LONG_OTP_PROMPT
+     def_long_otp_prompt = true;
+ #endif
+diff --git a/plugins/sudoers/sudoers.c b/plugins/sudoers/sudoers.c
+index 1267949..d8f4dd0 100644
+--- a/plugins/sudoers/sudoers.c
++++ b/plugins/sudoers/sudoers.c
+@@ -217,6 +217,10 @@ sudoers_policy_init(void *info, char * const envp[])
+     if (set_loginclass(runas_pw ? runas_pw : sudo_user.pw))
+       ret = true;
++    if (!def_match_group_by_gid || !def_legacy_group_processing) {
++        def_match_group_by_gid = false;
++      def_legacy_group_processing = false;
++    }
+ cleanup:
+     if (!restore_perms())
+       ret = -1;
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-nowaitopt.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-nowaitopt.patch
new file mode 100644 (file)
index 0000000..a7a18a1
--- /dev/null
@@ -0,0 +1,75 @@
+From acbbefdbcf2951a2ce31fe4fc789cf8397a406cc Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Wed, 11 Dec 2019 19:43:19 -0800
+
+---
+ plugins/sudoers/def_data.c  |  4 ++++
+ plugins/sudoers/def_data.h  |  2 ++
+ plugins/sudoers/def_data.in |  3 +++
+ plugins/sudoers/sudoers.c   | 14 ++++++++++++++
+ 4 files changed, 23 insertions(+)
+
+diff --git a/plugins/sudoers/def_data.c b/plugins/sudoers/def_data.c
+index 5fa45bb..9d7a842 100644
+--- a/plugins/sudoers/def_data.c
++++ b/plugins/sudoers/def_data.c
+@@ -498,6 +498,10 @@ struct sudo_defs_types sudo_defs_table[] = {
+       N_("Don't pre-resolve all group names"),
+       NULL,
+     }, {
++      "cmnd_no_wait", T_FLAG,
++      N_("Don't fork and wait for the command to finish, just exec it"),
++      NULL,
++    }, {
+       NULL, 0, NULL
+     }
+ };
+diff --git a/plugins/sudoers/def_data.h b/plugins/sudoers/def_data.h
+index 940fa8f..68ceed0 100644
+--- a/plugins/sudoers/def_data.h
++++ b/plugins/sudoers/def_data.h
+@@ -228,6 +228,8 @@
+ #define def_case_insensitive_group (sudo_defs_table[I_CASE_INSENSITIVE_GROUP].sd_un.flag)
+ #define I_LEGACY_GROUP_PROCESSING 114
+ #define def_legacy_group_processing (sudo_defs_table[I_LEGACY_GROUP_PROCESSING].sd_un.flag)
++#define I_CMND_NO_WAIT          115
++#define def_cmnd_no_wait        (sudo_defs_table[I_CMND_NO_WAIT].sd_un.flag)
+ enum def_tuple {
+       never,
+diff --git a/plugins/sudoers/def_data.in b/plugins/sudoers/def_data.in
+index 571bc96..4250917 100644
+--- a/plugins/sudoers/def_data.in
++++ b/plugins/sudoers/def_data.in
+@@ -360,3 +360,6 @@ case_insensitive_group
+ legacy_group_processing
+       T_FLAG
+       "Don't pre-resolve all group names"
++cmnd_no_wait
++      T_FLAG
++      "Don't fork and wait for the command to finish, just exec it"
+diff --git a/plugins/sudoers/sudoers.c b/plugins/sudoers/sudoers.c
+index d8f4dd0..00669b4 100644
+--- a/plugins/sudoers/sudoers.c
++++ b/plugins/sudoers/sudoers.c
+@@ -221,6 +221,20 @@ sudoers_policy_init(void *info, char * const envp[])
+         def_match_group_by_gid = false;
+       def_legacy_group_processing = false;
+     }
++
++    /*
++     * Emulate cmnd_no_wait option by disabling PAM session, PTY allocation
++     * and I/O logging. This will cause sudo to execute the given command
++     * directly instead of forking a separate process for it.
++     */
++    if (def_cmnd_no_wait) {
++        def_pam_setcred = false;
++        def_pam_session = false;
++        def_use_pty = false;
++        def_log_input = false;
++        def_log_output = false;
++    }
++
+ cleanup:
+     if (!restore_perms())
+       ret = -1;
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-sudoldapconfman.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.8.23-sudoldapconfman.patch
new file mode 100644 (file)
index 0000000..e24a295
--- /dev/null
@@ -0,0 +1,41 @@
+From fcd6c299111dd5dee6e387047c8f60dfef24e32a Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Wed, 11 Dec 2019 19:43:19 -0800
+
+---
+ doc/Makefile.in | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/doc/Makefile.in b/doc/Makefile.in
+index e8d2605..b2e16f4 100644
+--- a/doc/Makefile.in
++++ b/doc/Makefile.in
+@@ -349,10 +349,16 @@ install-doc: install-dirs
+           rm -f $(DESTDIR)$(mandirsu)/sudoedit.$(mansectsu)$(MANCOMPRESSEXT); \
+           echo ln -s sudo.$(mansectsu)$(MANCOMPRESSEXT) $(DESTDIR)$(mandirsu)/sudoedit.$(mansectsu)$(MANCOMPRESSEXT); \
+           ln -s sudo.$(mansectsu)$(MANCOMPRESSEXT) $(DESTDIR)$(mandirsu)/sudoedit.$(mansectsu)$(MANCOMPRESSEXT); \
++          rm -f $(DESTDIR)$(mandirform)/sudo-ldap.conf.$(mansectform)$(MANCOMPRESSEXT); \
++           echo ln -s sudoers.ldap.$(mansectform)$(MANCOMPRESSEXT) $(DESTDIR)$(mandirform)/sudo-ldap.conf.$(mansectform)$(MANCOMPRESSEXT); \
++           ln -s sudoers.ldap.$(mansectform)$(MANCOMPRESSEXT) $(DESTDIR)$(mandirform)/sudo-ldap.conf.$(mansectform)$(MANCOMPRESSEXT); \
+       else \
+           rm -f $(DESTDIR)$(mandirsu)/sudoedit.$(mansectsu); \
+           echo ln -s sudo.$(mansectsu) $(DESTDIR)$(mandirsu)/sudoedit.$(mansectsu); \
+           ln -s sudo.$(mansectsu) $(DESTDIR)$(mandirsu)/sudoedit.$(mansectsu); \
++          rm -f $(DESTDIR)$(mandirform)/sudo-ldap.conf.$(mansectform); \
++           echo ln -s sudoers.ldap.$(mansectform) $(DESTDIR)$(mandirform)/sudo-ldap.conf.$(mansectform); \
++           ln -s sudoers.ldap.$(mansectform) $(DESTDIR)$(mandirform)/sudo-ldap.conf.$(mansectform); \
+       fi
+ install-plugin:
+@@ -367,8 +373,9 @@ uninstall:
+               $(DESTDIR)$(mandirsu)/visudo.$(mansectsu) \
+               $(DESTDIR)$(mandirform)/sudo.conf.$(mansectform) \
+               $(DESTDIR)$(mandirform)/sudoers.$(mansectform) \
+-              $(DESTDIR)$(mandirform)/sudoers_timestamp.$(mansectform)
+-              $(DESTDIR)$(mandirform)/sudoers.ldap.$(mansectform)
++              $(DESTDIR)$(mandirform)/sudoers_timestamp.$(mansectform) \
++              $(DESTDIR)$(mandirform)/sudoers.ldap.$(mansectform) \
++              $(DESTDIR)$(mandirform)/sudo-ldap.conf.$(mansectform)
+ splint:
diff --git a/meta-stx/recipes-extended/sudo/files/sudo-1.8.6p7-logsudouser.patch b/meta-stx/recipes-extended/sudo/files/sudo-1.8.6p7-logsudouser.patch
new file mode 100644 (file)
index 0000000..cea999e
--- /dev/null
@@ -0,0 +1,88 @@
+From 7ee2d1e7fd55da7074a39b41fe342e261dd1f191 Mon Sep 17 00:00:00 2001
+From: Tomas Sykora <tosykora@redhat.com>
+Date: Wed, 17 Aug 2016 10:12:11 +0200
+Subject: [PATCH] Sudo logs username root instead of realuser
+
+RHEL7 sudo logs username root instead of realuser in /var/log/secure
+
+Rebased from:
+Patch50: sudo-1.8.6p7-logsudouser.patch
+
+Resolves:
+rhbz#1312486
+
+---
+ plugins/sudoers/logging.c | 14 +++++++-------
+ plugins/sudoers/sudoers.h |  1 +
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/plugins/sudoers/logging.c b/plugins/sudoers/logging.c
+index 9562609..775fd0c 100644
+--- a/plugins/sudoers/logging.c
++++ b/plugins/sudoers/logging.c
+@@ -116,7 +116,7 @@ do_syslog(int pri, char *msg)
+      * Log the full line, breaking into multiple syslog(3) calls if necessary
+      */
+     fmt = _("%8s : %s");
+-    maxlen = def_syslog_maxlen - (strlen(fmt) - 5 + strlen(user_name));
++    maxlen = def_syslog_maxlen - (strlen(fmt) - 5 + strlen(sudo_user_name));
+     for (p = msg; *p != '\0'; ) {
+       len = strlen(p);
+       if (len > maxlen) {
+@@ -132,7 +132,7 @@ do_syslog(int pri, char *msg)
+           save = *tmp;
+           *tmp = '\0';
+-          mysyslog(pri, fmt, user_name, p);
++          mysyslog(pri, fmt, sudo_user_name, p);
+           *tmp = save;                        /* restore saved character */
+@@ -140,11 +140,11 @@ do_syslog(int pri, char *msg)
+           for (p = tmp; *p == ' '; p++)
+               continue;
+       } else {
+-          mysyslog(pri, fmt, user_name, p);
++          mysyslog(pri, fmt, sudo_user_name, p);
+           p += len;
+       }
+       fmt = _("%8s : (command continued) %s");
+-      maxlen = def_syslog_maxlen - (strlen(fmt) - 5 + strlen(user_name));
++      maxlen = def_syslog_maxlen - (strlen(fmt) - 5 + strlen(sudo_user_name));
+     }
+     sudoers_setlocale(oldlocale, NULL);
+@@ -191,10 +191,10 @@ do_logfile(const char *msg)
+       timestr = "invalid date";
+     if (def_log_host) {
+       len = asprintf(&full_line, "%s : %s : HOST=%s : %s",
+-          timestr, user_name, user_srunhost, msg);
++          timestr, sudo_user_name, user_srunhost, msg);
+     } else {
+       len = asprintf(&full_line, "%s : %s : %s",
+-          timestr, user_name, msg);
++          timestr, sudo_user_name, msg);
+     }
+     if (len == -1) {
+       sudo_warnx(U_("%s: %s"), __func__, U_("unable to allocate memory"));
+@@ -844,7 +844,7 @@ send_mail(const char *fmt, ...)
+     if ((timestr = get_timestr(time(NULL), def_log_year)) == NULL)
+       timestr = "invalid date";
+-    (void) fprintf(mail, "\n\n%s : %s : %s : ", user_host, timestr, user_name);
++    (void) fprintf(mail, "\n\n%s : %s : %s : ", user_host, timestr, sudo_user_name);
+     va_start(ap, fmt);
+     (void) vfprintf(mail, fmt, ap);
+     va_end(ap);
+diff --git a/plugins/sudoers/sudoers.h b/plugins/sudoers/sudoers.h
+index 28dbbb3..99e137b 100644
+--- a/plugins/sudoers/sudoers.h
++++ b/plugins/sudoers/sudoers.h
+@@ -188,6 +188,7 @@ struct sudo_user {
+ /*
+  * Shortcuts for sudo_user contents.
+  */
++#define sudo_user_name                (sudo_user.pw->pw_name)
+ #define user_name             (sudo_user.name)
+ #define user_uid              (sudo_user.uid)
+ #define user_gid              (sudo_user.gid)
diff --git a/meta-stx/recipes-extended/sudo/sudo_%.bbappend b/meta-stx/recipes-extended/sudo/sudo_%.bbappend
new file mode 100644 (file)
index 0000000..627a3bd
--- /dev/null
@@ -0,0 +1,54 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+DEPENDS += " \
+       openldap \
+       libgcrypt \
+       "
+
+SRC_URI += " \
+       file://sudo-1.6.7p5-strip.patch \
+       file://sudo-1.7.2p1-envdebug.patch \
+       file://sudo-1.8.23-sudoldapconfman.patch \
+       file://sudo-1.8.23-legacy-group-processing.patch \
+       file://sudo-1.8.23-ldapsearchuidfix.patch \
+       file://sudo-1.8.6p7-logsudouser.patch \
+       file://sudo-1.8.23-nowaitopt.patch \
+       file://sudo-1.8.23-fix-double-quote-parsing-for-Defaults-values.patch \
+       "
+
+EXTRA_OECONF += " \
+       --with-pam-login \
+       --with-editor=${base_bindir}/vi \
+       --with-env-editor \
+       --with-ignore-dot \
+       --with-tty-tickets \
+       --with-ldap \
+       --with-ldap-conf-file="${sysconfdir}/sudo-ldap.conf" \
+       --with-passprompt="[sudo] password for %Zp: " \
+       --with-sssd \
+       "
+
+do_install_append () {
+       install -m755 -d ${D}/${sysconfdir}/openldap/schema
+       install -m644 ${S}/doc/schema.OpenLDAP  ${D}/${sysconfdir}/openldap/schema/sudo.schema
+}
+
+# This means sudo package only owns files
+# to avoid install conflict with openldap on
+# /etc/openldap. Sure there is a better way.
+DIRFILES = "1"
diff --git a/meta-stx/recipes-extended/uswgi/files/0001-pragma-ignore-cast-type-errors-with-gcc-8.3.patch b/meta-stx/recipes-extended/uswgi/files/0001-pragma-ignore-cast-type-errors-with-gcc-8.3.patch
new file mode 100644 (file)
index 0000000..a0e4965
--- /dev/null
@@ -0,0 +1,58 @@
+From 377b11c0255d717912f585e7c16c6c053e6cc913 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Thu, 23 Jan 2020 07:02:11 +0000
+Subject: [PATCH] pragma ignore cast type errors with gcc 8.3
+
+---
+ core/emperor.c                  | 2 ++
+ plugins/python/uwsgi_pymodule.c | 4 +++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/core/emperor.c b/core/emperor.c
+index 97596556..d64110cb 100644
+--- a/core/emperor.c
++++ b/core/emperor.c
+@@ -988,7 +988,9 @@ int uwsgi_emperor_vassal_start(struct uwsgi_instance *n_ui) {
+ #if defined(__linux__) && !defined(OBSOLETE_LINUX_KERNEL) && !defined(__ia64__)
+       if (uwsgi.emperor_clone) {
+               char stack[PTHREAD_STACK_MIN];
++#pragma GCC diagnostic ignored "-Wcast-function-type"
+               pid = clone((int (*)(void *)) uwsgi_emperor_spawn_vassal, stack + PTHREAD_STACK_MIN, SIGCHLD | uwsgi.emperor_clone, (void *) n_ui);
++#pragma GCC diagnostic error "-Wcast-function-type"
+       }
+       else {
+ #endif
+diff --git a/plugins/python/uwsgi_pymodule.c b/plugins/python/uwsgi_pymodule.c
+index d3075897..ac3ccb89 100644
+--- a/plugins/python/uwsgi_pymodule.c
++++ b/plugins/python/uwsgi_pymodule.c
+@@ -1,4 +1,5 @@
+ #include "uwsgi_python.h"
++#pragma GCC diagnostic ignored "-Wcast-function-type"
+ extern struct uwsgi_server uwsgi;
+ extern struct uwsgi_python up;
+@@ -2553,6 +2554,7 @@ PyObject *py_uwsgi_parse_file(PyObject * self, PyObject * args) {
+ }
+ static PyMethodDef uwsgi_spooler_methods[] = {
++
+ #ifdef PYTHREE
+       {"send_to_spooler", (PyCFunction) py_uwsgi_send_spool, METH_VARARGS | METH_KEYWORDS, ""},
+       {"spool", (PyCFunction) py_uwsgi_send_spool, METH_VARARGS | METH_KEYWORDS, ""},
+@@ -2569,7 +2571,6 @@ static PyMethodDef uwsgi_spooler_methods[] = {
+       {NULL, NULL},
+ };
+-
+ PyObject *py_uwsgi_suspend(PyObject * self, PyObject * args) {
+       struct wsgi_request *wsgi_req = py_current_wsgi_req();
+@@ -3918,3 +3919,4 @@ void init_uwsgi_module_snmp(PyObject * current_uwsgi_module) {
+         uwsgi_log( "SNMP python functions initialized.\n");
+ }
++#pragma GCC diagnostic error "-Wcast-function-type"
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-extended/uswgi/uwsgi_git.bbappend b/meta-stx/recipes-extended/uswgi/uwsgi_git.bbappend
new file mode 100644 (file)
index 0000000..aeaf623
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += "file://0001-pragma-ignore-cast-type-errors-with-gcc-8.3.patch"
diff --git a/meta-stx/recipes-graphics/mesa/mesa_%.bbappend b/meta-stx/recipes-graphics/mesa/mesa_%.bbappend
new file mode 100644 (file)
index 0000000..eb77f09
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit selinux
+DEPENDS += " libselinux"
+RDEPENDS_${PN}_append = " libselinux"
diff --git a/meta-stx/recipes-httpd/apache2/apache2_%.bbappend b/meta-stx/recipes-httpd/apache2/apache2_%.bbappend
new file mode 100644 (file)
index 0000000..2799dfc
--- /dev/null
@@ -0,0 +1,28 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+APACHE_PORT_NUM ?= "81"
+
+do_install_append () {
+	sed -i -e 's/^Listen 80/Listen ${APACHE_PORT_NUM}/' ${D}/${sysconfdir}/${BPN}/httpd.conf
+}
+
+inherit useradd
+
+USERADD_PACKAGES = "${PN}"
+
+USERADD_PARAM_${PN} = "-c 'Apache' -u 48 -g apache -s /sbin/nologin -r -d /usr/share/httpd apache"
+GROUPADD_PARAM_${PN} = "-g 48 -r apache"
diff --git a/meta-stx/recipes-httpd/lighttpd/lighttpd_%.bbappend b/meta-stx/recipes-httpd/lighttpd/lighttpd_%.bbappend
new file mode 100644 (file)
index 0000000..38c1558
--- /dev/null
@@ -0,0 +1,25 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append () {
+       # remove the symlinks
+       rm ${D}/www/logs
+       rm ${D}/www/var
+
+       # use tmpfile to create dirs
+       install -d ${D}${sysconfdir}/tmpfiles.d/
+       echo "d /www/var 0755 www root -" > ${D}${sysconfdir}/tmpfiles.d/${BPN}.conf
+       echo "d /www/var/log 0755 www root -" >> ${D}${sysconfdir}/tmpfiles.d/${BPN}.conf
+}
diff --git a/meta-stx/recipes-kernel/linux/linux-yocto-rt_%.bbappend b/meta-stx/recipes-kernel/linux/linux-yocto-rt_%.bbappend
new file mode 100644 (file)
index 0000000..af9927d
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+require linux-yocto-stx.inc
diff --git a/meta-stx/recipes-kernel/linux/linux-yocto-stx.inc b/meta-stx/recipes-kernel/linux/linux-yocto-stx.inc
new file mode 100644 (file)
index 0000000..4cdb355
--- /dev/null
@@ -0,0 +1,25 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_append := ":${THISDIR}/linux:"
+
+SRC_URI += "file://stx-kconfig.cfg"
+
+KERNEL_EXTRA_FEATURES_append = " \
+    features/xfs/xfs.scc \
+    features/iommu/iommu.scc \
+    features/vfio/vfio.scc \
+    cfg/debug/sched/debug-sched.scc \
+    "
diff --git a/meta-stx/recipes-kernel/linux/linux-yocto_%.bbappend b/meta-stx/recipes-kernel/linux/linux-yocto_%.bbappend
new file mode 100644 (file)
index 0000000..af9927d
--- /dev/null
@@ -0,0 +1,16 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+require linux-yocto-stx.inc
diff --git a/meta-stx/recipes-kernel/linux/linux/stx-kconfig.cfg b/meta-stx/recipes-kernel/linux/linux/stx-kconfig.cfg
new file mode 100644 (file)
index 0000000..15674a1
--- /dev/null
@@ -0,0 +1,851 @@
+CONFIG_NF_TABLES=m
+CONFIG_NFT_COMPAT=m
+CONFIG_BLK_DEV_DRBD=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_DCA=y
+CONFIG_IXGBEVF=m
+CONFIG_FPGA=y
+
+# Support for NUMA
+CONFIG_NUMA=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
+
+# Support for ipset
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPMARK=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
+CONFIG_IP_SET_HASH_MAC=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_NETFILTER_XT_SET=m
+
+# Support for iscsi
+CONFIG_BE2ISCSI=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_ISCSI_IBFT=m
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TCP=y
+CONFIG_QED_ISCSI=y
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_QLA_ISCSI=m
+
+# Support for vlan
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_MACVLAN=m
+CONFIG_NET_ACT_VLAN=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+
+# Support for nfsd
+CONFIG_NFSD=m
+CONFIG_NFSD_PNFS=y
+CONFIG_NFSD_SCSILAYOUT=y
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+
+# generated by diffconfig:
+# ./diffconfig -m config-5.0.19-yocto-standard config-3.10.0-957.21.3.el7.2.tis.x86_64
+CONFIG_8139TOO_8129=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_BGRT=y
+CONFIG_ACPI_CUSTOM_METHOD=m
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_EC_DEBUGFS=m
+CONFIG_ACPI_HED=y
+CONFIG_ACPI_NFIT=m
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_SBS=m
+CONFIG_AMD8111_ETH=m
+CONFIG_AMD_IOMMU=y
+CONFIG_AMD_MEM_ENCRYPT=y
+CONFIG_AMD_PHY=m
+CONFIG_AMD_XGBE=m
+CONFIG_AQTION=m
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_ATA_GENERIC=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_ATM=m
+CONFIG_AUXDISPLAY=y
+CONFIG_BE2NET=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BNXT_DCB=y
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_BROADCOM_PHY=m
+CONFIG_CAN=m
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_LIB=m
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CIFS_ACL=y
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CLEANCACHE=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_CLS_U32_PERF=y
+CONFIG_CMA=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CRC_T10DIF=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_CRC32_PCLMUL=m
+CONFIG_CRYPTO_CRCT10DIF=m
+CONFIG_CRYPTO_DEV_CCP=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SHA1_SSSE3=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_CRYPTO_SHA512_SSSE3=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_DAX=y
+CONFIG_DEBUG_BOOT_PARAMS=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DELL_RBTN=m
+CONFIG_DELL_SMBIOS=m
+CONFIG_DELL_SMO8800=m
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DLM=m
+CONFIG_DMI_SYSFS=y
+CONFIG_DM_CACHE=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=m
+CONFIG_DNET=m
+CONFIG_DRBD_FAULT_INJECTION=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_DP_AUX_CHARDEV=y
+CONFIG_DRM_I915_GVT=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_EARLY_PRINTK_DBGP=y
+CONFIG_EARLY_PRINTK_EFI=y
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EFI_MIXED=y
+CONFIG_ENA_ETHERNET=m
+CONFIG_ENIC=m
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FIXED_PHY=y
+CONFIG_FRONTSWAP=y
+CONFIG_FSCACHE=m
+CONFIG_FS_DAX=y
+CONFIG_FUJITSU_ES=m
+CONFIG_FUSION=y
+CONFIG_FW_CFG_SYSFS=y
+CONFIG_GART_IOMMU=y
+CONFIG_GFS2_FS=m
+CONFIG_GPIO_AMDPT=m
+CONFIG_GPIO_ICH=m
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_HIBERNATION=y
+CONFIG_HID_ALPS=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+CONFIG_HID_SENSOR_PRESS=m
+CONFIG_HID_SENSOR_PROX=m
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_HP_ILO=m
+CONFIG_HP_WATCHDOG=m
+CONFIG_HWLAT_TRACER=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_I2C_HID=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_SCMI=m
+CONFIG_I2C_STUB=m
+CONFIG_I2C_TINY_USB=m
+CONFIG_I6300ESB_WDT=m
+CONFIG_IE6XX_WDT=m
+CONFIG_IFB=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_INPUT_GP2A=m
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_SPARSEKMAP=m
+CONFIG_INTEL_HID_EVENT=m
+CONFIG_INTEL_IOMMU_DEFAULT_ON=y
+CONFIG_INTEL_IPS=m
+CONFIG_INTEL_ISH_HID=y
+CONFIG_INTEL_PMC_CORE=m
+CONFIG_INTEL_TXT=y
+CONFIG_INTEL_VBTN=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IP_DCCP=m
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_VS=m
+CONFIG_IRQ_REMAP=y
+CONFIG_ITCO_VENDOR_SUPPORT=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KSM=y
+CONFIG_KVM_MMU_AUDIT=y
+CONFIG_L2TP=m
+CONFIG_LEDS_LT3593=m
+CONFIG_LIBNVDIMM=m
+CONFIG_LIQUIDIO=m
+CONFIG_LIQUIDIO_VF=m
+CONFIG_LPC_SCH=m
+CONFIG_LWTUNNEL=y
+CONFIG_MAC802154=m
+CONFIG_MACSEC=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MD_RAID456=m
+CONFIG_MEGARAID_SAS=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_MLXFW=m
+CONFIG_MLXSW_CORE=m
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODVERSIONS=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MTD=m
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETLABEL=y
+CONFIG_NETLINK_DIAG=m
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NET_ACT_CONNMARK=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_DEVLINK=m
+CONFIG_NET_DROP_MONITOR=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_IPVTI=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NFP=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_REJECT=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_DUP_IPV4=m
+CONFIG_NF_DUP_IPV6=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_NF_REJECT_IPV6=m
+CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NO_HZ_FULL=y
+CONFIG_NTB=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TARGET=m
+CONFIG_NVRAM=y
+CONFIG_PACKET_DIAG=m
+CONFIG_PATA_ACPI=m
+CONFIG_PCI_STUB=y
+CONFIG_PINCTRL_AMD=m
+CONFIG_PINCTRL_DENVERTON=m
+CONFIG_PINCTRL_GEMINILAKE=m
+CONFIG_PINCTRL_LEWISBURG=m
+CONFIG_PMBUS=m
+CONFIG_PM_TRACE_RTC=y
+CONFIG_POWER_RESET=y
+CONFIG_PPTP=m
+CONFIG_PRINTK_TIME=y
+CONFIG_PROFILING=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_PSAMPLE=m
+CONFIG_PSTORE=y
+CONFIG_QED=m
+CONFIG_QLA3XXX=m
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_RAID_ATTRS=y
+CONFIG_RAW_DRIVER=y
+CONFIG_RMI4_CORE=m
+CONFIG_RTC_DRV_BQ32K=m
+CONFIG_RTC_DRV_BQ4802=m
+CONFIG_RTC_DRV_DS1286=m
+CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_DS1374=m
+CONFIG_RTC_DRV_DS1511=m
+CONFIG_RTC_DRV_DS1553=m
+CONFIG_RTC_DRV_DS1672=m
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_DS2404=m
+CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_EM3027=m
+CONFIG_RTC_DRV_FM3130=m
+CONFIG_RTC_DRV_ISL12022=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_M48T35=m
+CONFIG_RTC_DRV_M48T59=m
+CONFIG_RTC_DRV_MAX6900=m
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_PCF8523=m
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_RP5C01=m
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_RV3029C2=m
+CONFIG_RTC_DRV_RX4581=m
+CONFIG_RTC_DRV_RX8025=m
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_STK17TA8=m
+CONFIG_RTC_DRV_V3020=m
+CONFIG_RTC_DRV_X1205=m
+CONFIG_SATA_AHCI_PLATFORM=m
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCSI_3W_9XXX=y
+CONFIG_SCSI_3W_SAS=y
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC94XX=y
+CONFIG_SCSI_ARCMSR=y
+CONFIG_SCSI_DEBUG=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DPT_I2O=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_GDTH=y
+CONFIG_SCSI_HPSA=y
+CONFIG_SCSI_HPTIOP=y
+CONFIG_SCSI_IPS=y
+CONFIG_SCSI_ISCI=m
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_SCSI_MPT3SAS=y
+CONFIG_SCSI_MVSAS=y
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_PMCRAID=y
+CONFIG_SCSI_SAS_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_SMARTPQI=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_STEX=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+CONFIG_SDIO_UART=m
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_PATH=y
+CONFIG_SECURITY_YAMA=y
+CONFIG_SENSORS_ACPI_POWER=m
+CONFIG_SENSORS_JC42=m
+CONFIG_SENSORS_SHT15=m
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIO_RAW=m
+CONFIG_SFC_FALCON=m
+CONFIG_SFI=y
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+CONFIG_SQUASHFS=y
+CONFIG_STACK_TRACER=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_TARGET_CORE=m
+CONFIG_TASK_XACCT=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THUNDERBOLT=y
+CONFIG_TLAN=m
+CONFIG_TYPEC_UCSI=y
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_UNIX_DIAG=m
+CONFIG_USBIP_CORE=m
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_UAS=m
+CONFIG_USERFAULTFD=y
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_NOIOMMU=y
+CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_VMD=y
+CONFIG_VMXNET3=m
+CONFIG_VSOCKETS=m
+CONFIG_VXLAN=m
+CONFIG_WATCHDOG_SYSFS=y
+CONFIG_WDAT_WDT=m
+CONFIG_X86_AMD_PLATFORM_DEVICE=y
+CONFIG_X86_INTEL_MPX=y
+CONFIG_X86_MCE_INJECT=m
+CONFIG_X86_PCC_CPUFREQ=m
+CONFIG_X86_PMEM_LEGACY=m
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+CONFIG_ZBUD=y
+CONFIG_ZPOOL=y
+CONFIG_ZSMALLOC=y
+CONFIG_ACPI_APEI_EINJ=m
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_WATCHDOG=y
+CONFIG_AMD_IOMMU_V2=m
+CONFIG_AMD_XGBE_HAVE_ECC=y
+CONFIG_ARCH_HIBERNATION_HEADER=y
+CONFIG_ARCH_USE_MEMREMAP_PROT=y
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_ASYNC_CORE=m
+CONFIG_ASYNC_MEMCPY=m
+CONFIG_ASYNC_PQ=m
+CONFIG_ASYNC_RAID6_RECOV=m
+CONFIG_ASYNC_XOR=m
+CONFIG_ATM_BR2684=m
+CONFIG_ATM_CLIP=m
+CONFIG_ATM_LANE=m
+CONFIG_BE2NET_HWMON=y
+CONFIG_BLK_DEV_PMEM=m
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_BTREE=y
+CONFIG_BTT=y
+CONFIG_CACHEFILES=m
+CONFIG_CAN_8DEV_USB=m
+CONFIG_CAN_BCM=m
+CONFIG_CAN_CALC_BITTIMING=y
+CONFIG_CAN_CC770=m
+CONFIG_CAN_CC770_PLATFORM=m
+CONFIG_CAN_C_CAN=m
+CONFIG_CAN_C_CAN_PCI=m
+CONFIG_CAN_C_CAN_PLATFORM=m
+CONFIG_CAN_DEV=m
+CONFIG_CAN_EMS_PCI=m
+CONFIG_CAN_EMS_USB=m
+CONFIG_CAN_ESD_USB2=m
+CONFIG_CAN_GW=m
+CONFIG_CAN_KVASER_PCI=m
+CONFIG_CAN_KVASER_USB=m
+CONFIG_CAN_LEDS=y
+CONFIG_CAN_PEAK_PCI=m
+CONFIG_CAN_PEAK_PCIEC=y
+CONFIG_CAN_PEAK_USB=m
+CONFIG_CAN_PLX_PCI=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_SJA1000=m
+CONFIG_CAN_SJA1000_PLATFORM=m
+CONFIG_CAN_SLCAN=m
+CONFIG_CAN_SOFTING=m
+CONFIG_CAN_VCAN=m
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
+CONFIG_CHASH=m
+CONFIG_CMA_ALIGNMENT=8
+CONFIG_CMA_AREAS=7
+CONFIG_CMA_SIZE_MBYTES=16
+CONFIG_CMA_SIZE_SEL_MBYTES=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
+CONFIG_CRYPTO_DEV_CCP_DD=m
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_USER_API=m
+CONFIG_CYCLADES=m
+CONFIG_DAX_DRIVER=y
+CONFIG_DEFAULT_CUBIC=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_DELL_LAPTOP=m
+CONFIG_DLM_DEBUG=y
+CONFIG_DMA_CMA=y
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DP83640_PHY=y
+CONFIG_DRM_AMD_DC=y
+CONFIG_DRM_AMD_DC_DCN1_0=y
+CONFIG_DRM_I915_GVT_KVMGT=m
+CONFIG_DRM_SCHED=m
+CONFIG_EARLY_PRINTK_USB=y
+CONFIG_EFI_RUNTIME_MAP=y
+CONFIG_EFI_VARS_PSTORE=y
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+CONFIG_FB_HYPERV=m
+CONFIG_FSCACHE_STATS=y
+CONFIG_FUSION_CTL=m
+CONFIG_FUSION_LOGGING=y
+CONFIG_FUSION_MAX_SGE=128
+CONFIG_FUSION_SAS=y
+CONFIG_FUSION_SPI=m
+CONFIG_GACT_PROB=y
+CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ=y
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_HAVE_RCU_TABLE_FREE=y
+CONFIG_HIBERNATE_CALLBACKS=y
+CONFIG_HID_HYPERV_MOUSE=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HMC_DRV=m
+CONFIG_HPWDT_NMI_DECODING=y
+CONFIG_HSA_AMD=m
+CONFIG_HWPOISON_INJECT=m
+CONFIG_HYPERV=m
+CONFIG_HYPERV_BALLOON=m
+CONFIG_HYPERV_KEYBOARD=m
+CONFIG_HYPERV_NET=m
+CONFIG_HYPERV_STORAGE=m
+CONFIG_HYPERV_TSCPAGE=y
+CONFIG_HYPERV_UTILS=m
+CONFIG_HYPERV_VSOCKETS=m
+CONFIG_IEEE802154_FAKELB=m
+CONFIG_INET_DCCP_DIAG=m
+CONFIG_IOMMU_HELPER=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_POWEROFF=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IP_DCCP_CCID3=y
+CONFIG_IP_DCCP_TFRC_LIB=y
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_SCTP=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SH_TAB_BITS=8
+CONFIG_IP_VS_TAB_BITS=12
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_WRR=m
+CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y
+CONFIG_KEXEC_CORE=y
+CONFIG_KEXEC_JUMP=y
+CONFIG_KEXEC_VERIFY_SIG=y
+CONFIG_KVM_GUEST=y
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_ETH=m
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_V3=y
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_LIVEPATCH=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_MAX_RAW_DEVS=8192
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_SWITCHIB=m
+CONFIG_MLXSW_SWITCHX2=m
+CONFIG_MMC_REALTEK_PCI=m
+CONFIG_MMC_REALTEK_USB=m
+CONFIG_MODULE_SIG_ALL=y
+CONFIG_MODULE_SIG_HASH="sha256"
+CONFIG_MODULE_SIG_SHA256=y
+CONFIG_MOUSE_PS2_VMMOUSE=y
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_BEB_LIMIT=20
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_ND_BLK=m
+CONFIG_ND_BTT=m
+CONFIG_ND_CLAIM=y
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_IPSET=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_SCH_ATM=m
+CONFIG_NFP_APP_FLOWER=y
+CONFIG_NFS_FSCACHE=y
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
+CONFIG_NFS_V4_2=y
+CONFIG_NFS_V4_SECURITY_LABEL=y
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV4=m
+CONFIG_NFT_REDIR_IPV6=m
+CONFIG_NFT_REJECT_INET=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_REJECT_IPV6=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_NF_NAT_MASQUERADE_IPV6=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NTB_AMD=m
+CONFIG_NTB_NETDEV=m
+CONFIG_NTB_PERF=m
+CONFIG_NTB_TRANSPORT=m
+CONFIG_NVME_FABRICS=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_FCLOOP=m
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_N_HDLC=m
+CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_ORE=m
+CONFIG_PADATA=y
+CONFIG_PARAVIRT=y
+CONFIG_PARAVIRT_CLOCK=y
+CONFIG_PARAVIRT_SPINLOCKS=y
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
+CONFIG_PARMAN=m
+CONFIG_PCH_DMA=m
+CONFIG_PCI_HYPERV=m
+CONFIG_PM_STD_PARTITION=""
+CONFIG_PM_TRACE=y
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_FLEXFILE_LAYOUT=m
+CONFIG_PPPOATM=m
+CONFIG_PPPOL2TP=m
+CONFIG_PRINT_QUOTA_WARNING=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PSTORE_RAM=m
+CONFIG_PTP_1588_CLOCK_KVM=y
+CONFIG_PTP_1588_CLOCK_PCH=y
+CONFIG_QEDE=m
+CONFIG_QEDF=m
+CONFIG_QEDI=m
+CONFIG_QED_FCOE=y
+CONFIG_QED_LL2=y
+CONFIG_QED_OOO=y
+CONFIG_QED_SRIOV=y
+CONFIG_QFMT_V2=y
+CONFIG_QUOTA_TREE=y
+CONFIG_RCU_FANOUT=64
+CONFIG_RCU_FANOUT_LEAF=16
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_REED_SOLOMON=m
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+CONFIG_RMI4_SMB=m
+CONFIG_ROCKER=m
+CONFIG_RTC_DRV_M41T80_WDT=y
+CONFIG_SCSI_BFA_FC=m
+CONFIG_SCSI_BNX2X_FCOE=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH_ALUA=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_LPFC=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+CONFIG_SCSI_MVSAS_TASKLET=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_OSD_DPRINT_SENSE=1
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_SCSI_QLA_FC=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SFC_FALCON_MTD=y
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SUNRPC_BACKCHANNEL=y
+CONFIG_SYNCLINK=m
+CONFIG_SYNCLINKMP=m
+CONFIG_SYNCLINK_GT=m
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TCM_FC=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_QLA2XXX=m
+CONFIG_TCM_USER2=m
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_THUNDERBOLT_NET=m
+CONFIG_UCSI_ACPI=y
+CONFIG_UEFI_CPER=y
+CONFIG_UIO_HV_GENERIC=m
+CONFIG_VFIO_MDEV_DEVICE=m
+CONFIG_VHOST_RING=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
+CONFIG_VIRT_CPU_ACCOUNTING=y
+CONFIG_VMWARE_BALLOON=m
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VSOCKMON=m
+CONFIG_X86_PMEM_LEGACY_DEVICE=y
+CONFIG_X86_X2APIC=y
+CONFIG_ZRAM=m
+CONFIG_ZSWAP=y
diff --git a/meta-stx/recipes-networking/ipset/ipset_6.38.bb b/meta-stx/recipes-networking/ipset/ipset_6.38.bb
new file mode 100644 (file)
index 0000000..180db2f
--- /dev/null
@@ -0,0 +1,70 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Manage Linux IP sets"
+DESCRIPTION = " \
+IP sets are a framework inside the Linux kernel since version 2.4.x, which can  \
+be administered by the ipset utility. Depending on the type, currently an IP \
+set may store IP addresses, (TCP/UDP) port numbers or IP addresses with MAC \
+addresses in a way, which ensures lightning speed when matching an entry \
+against a set. \
+\
+If you want to: \
+ - store multiple IP addresses or port numbers and match against the collection \
+   by iptables at one swoop; \
+ - dynamically update iptables rules against IP addresses or ports without \
+   performance penalty; \
+ - express complex IP address and ports based rulesets with one single iptables \
+   rule and benefit from the speed of IP sets \
+then ipset may be the proper tool for you. \
+"
+
+HOMEPAGE = "http://ipset.netfilter.org"
+
+LICENSE = "GPL-2.0"
+LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552"
+
+SECTION = "network"
+
+DEPENDS = "libtool libmnl"
+
+SRC_URI = "http://ftp.netfilter.org/pub/ipset/${BP}.tar.bz2"
+
+SRC_URI[md5sum] = "0e5d9c85f6b78e7dff0c996e2900574b"
+SRC_URI[sha256sum] = "ceef625ba31fe0aaa422926c7231a819de0b07644c02c17ebdd3022a29e3e244"
+
+inherit autotools pkgconfig module-base
+
+EXTRA_OECONF += "--with-kbuild=${KBUILD_OUTPUT} --with-ksource=${STAGING_KERNEL_DIR}"
+
+RDEPENDS_${PN} = "kernel-module-ip-set"
+RRECOMMENDS_${PN} = "\
+    kernel-module-ip-set-bitmap-ip \
+    kernel-module-ip-set-bitmap-ipmac \
+    kernel-module-ip-set-bitmap-port \
+    kernel-module-ip-set-hash-ip \
+    kernel-module-ip-set-hash-ipmac \
+    kernel-module-ip-set-hash-ipmark \
+    kernel-module-ip-set-hash-ipport \
+    kernel-module-ip-set-hash-ipportip \
+    kernel-module-ip-set-hash-ipportnet \
+    kernel-module-ip-set-hash-mac \
+    kernel-module-ip-set-hash-net \
+    kernel-module-ip-set-hash-netiface \
+    kernel-module-ip-set-hash-netnet \
+    kernel-module-ip-set-hash-netport \
+    kernel-module-ip-set-hash-netportnet \
+    kernel-module-ip-set-list-set \
+"
diff --git a/meta-stx/recipes-networking/openvswitch/openvswitch_%.bbappend b/meta-stx/recipes-networking/openvswitch/openvswitch_%.bbappend
new file mode 100644 (file)
index 0000000..39edd6f
--- /dev/null
@@ -0,0 +1,112 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+RDEPENDS_${PN} += " \
+               python-pyelftools \
+               firewalld \
+               logrotate \
+               hwdata \
+               "
+
+PACKAGECONFIG = "libcap-ng ssl dpdk"
+PACKAGECONFIG[ssl] = "--enable-ssl,--disable-ssl,openssl,"
+PACKAGECONFIG[dpdk] = "--with-dpdk=${STAGING_DIR_TARGET}${DPDK_INSTALL_DIR}/share/${TARGET_ARCH}-native-linuxapp-gcc,,dpdk,"
+
+
+SRC_URI += " \
+       "
+
+EXTRA_OECONF += " \
+               "
+do_configure_append () {
+       :
+}
+
+do_compile_append () {
+       :
+}
+
+do_install_append () {
+	cd ${S}
+	install -d -p -m0755 ${D}/${base_libdir}/udev/rules.d
+	install -d -m0755 ${D}/${systemd_system_unitdir}
+	install -p -m0644 rhel/usr_lib_udev_rules.d_91-vfio.rules ${D}/${base_libdir}/udev/rules.d/91-vfio.rules
+
+	install -p -m0644 \
+		rhel/usr_lib_systemd_system_ovs-delete-transient-ports.service \
+		${D}/${systemd_system_unitdir}/ovs-delete-transient-ports.service
+
+	install -p -m0644 \
+		rhel/usr_lib_systemd_system_ovn-controller.service \
+		${D}/${systemd_system_unitdir}/ovn-controller.service
+
+	install -p -m0644 \
+		rhel/usr_lib_systemd_system_ovn-controller-vtep.service \
+		${D}/${systemd_system_unitdir}/ovn-controller-vtep.service
+
+	install -p -m0644 \
+		rhel/usr_lib_systemd_system_ovn-northd.service \
+		${D}/${systemd_system_unitdir}/ovn-northd.service
+
+##############
+# TODO: Do we need to use sysv?
+#
+#	install -m 0755 rhel/etc_init.d_openvswitch \
+#        $RPM_BUILD_ROOT%{_datadir}/openvswitch/scripts/openvswitch.init
+#
+# TODO: Is this the best solution?
+#	install -d -m 0755 $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/network-scripts/
+#	install -p -m 0755 rhel/etc_sysconfig_network-scripts_ifdown-ovs \
+#		$RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/network-scripts/ifdown-ovs
+#	install -p -m 0755 rhel/etc_sysconfig_network-scripts_ifup-ovs \
+#		$RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/network-scripts/ifup-ovs
+# TODO: warrior builds openvswitch with python3.
+#	install -d -m 0755 $RPM_BUILD_ROOT%{python2_sitelib}
+#	cp -a $RPM_BUILD_ROOT/%{_datadir}/openvswitch/python/* \
+#		$RPM_BUILD_ROOT%{python2_sitelib}
+#
+# TODO: who needs this script?
+#	install -p -D -m 0755 \
+#		rhel/usr_share_openvswitch_scripts_ovs-systemd-reload \
+#		$RPM_BUILD_ROOT%{_datadir}/openvswitch/scripts/ovs-systemd-reload
+###########
+
+	install -d -p -m0755 ${D}/${sysconfdir}/logrotate.d
+	install -p -D -m 0644 rhel/etc_logrotate.d_openvswitch \
+		${D}/${sysconfdir}/logrotate.d/openvswitch
+
+
+	install -d -p -m 0755 ${D}/${sharedstatedir}/openvswitch
+	install -d -p  -m 0755 ${D}/${libdir}/firewalld/services/
+	install -p -m 0644 rhel/usr_lib_firewalld_services_ovn-central-firewall-service.xml \
+		${D}/${libdir}/firewalld/services/ovn-central-firewall-service.xml
+
+	install -d -p -m 0755 ${D}/${libdir}/ocf/resource.d/ovn
+	ln -s ${datadir}/openvswitch/scripts/ovndb-servers.ocf  ${D}/${libdir}/ocf/resource.d/ovn/ovndb-servers
+
+	if ${@bb.utils.contains('PACKAGECONFIG', 'dpdk', 'true', 'false', d)}; then
+		install -m 0755 ${STAGING_DATADIR}/dpdk/usertools/dpdk-pmdinfo.py ${D}${datadir}/openvswitch/scripts/dpdk-pmdinfo.py
+		install -m 0755 ${STAGING_DATADIR}/dpdk/usertools/dpdk-devbind.py ${D}${datadir}/openvswitch/scripts/dpdk-devbind.py
+	fi
+
+}
+
+
+FILES_${PN}_append = " \
+       ${libdir}/      \
+       ${base_libdir}/ \
+       "
diff --git a/meta-stx/recipes-networking/vlan/vlan_2.0.5.bb b/meta-stx/recipes-networking/vlan/vlan_2.0.5.bb
new file mode 100644 (file)
index 0000000..478c28d
--- /dev/null
@@ -0,0 +1,33 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "ifupdown integration for vlan configuration"
+HOMEPAGE = "https://salsa.debian.org/debian/vlan"
+SECTION = "misc"
+LICENSE = "GPLv2+"
+LIC_FILES_CHKSUM = "file://debian/copyright;md5=a99f1f04fc1d4f5d723231f8937bdc2b"
+
+SRC_URI = "https://deb.debian.org/debian/pool/main/v/vlan/vlan_2.0.5.tar.xz"
+
+SRC_URI[md5sum] = "e6994250748fa3ee6d99f3ac292b7eb9"
+SRC_URI[sha256sum] = "ccf261839b79247be8dae93074e1c5fcbce3807787a0ff7aed4e1f7a9095c465"
+
+do_install () {
+    install -d ${D}${base_sbindir}
+    install -m 0755 ${S}/vconfig ${D}${base_sbindir}/
+
+    install -d ${D}${sysconfdir}
+    cp -av --no-preserve=ownership ${S}/debian/network ${D}${sysconfdir}
+}
diff --git a/meta-stx/recipes-security/gssproxy/files/Add-Client-ID-to-debug-messages.patch b/meta-stx/recipes-security/gssproxy/files/Add-Client-ID-to-debug-messages.patch
new file mode 100644 (file)
index 0000000..a8394a5
--- /dev/null
@@ -0,0 +1,148 @@
+From 20ddb6f200f61332ff43aca7ad9421303d0a3138 Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Thu, 25 May 2017 15:22:37 -0400
+Subject: [PATCH] Add Client ID to debug messages
+
+This allows to sort out which debug message belongs to which client when
+multiple clients are preforming operations at the same time.
+
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+
+Resolves: #189
+Merges: #191
+(cherry picked from commit 2f158fe4d39c11589d214d3d602c6d10411052dc)
+---
+ proxy/src/gp_debug.c   | 28 +++++++++++++++++++++++++++-
+ proxy/src/gp_debug.h   |  1 +
+ proxy/src/gp_proxy.h   |  1 +
+ proxy/src/gp_socket.c  |  5 +++++
+ proxy/src/gp_workers.c |  6 ++++++
+ proxy/src/gssproxy.c   |  4 ++++
+ 6 files changed, 44 insertions(+), 1 deletion(-)
+
+diff --git a/proxy/src/gp_debug.c b/proxy/src/gp_debug.c
+index 3029574..4a141fc 100644
+--- a/proxy/src/gp_debug.c
++++ b/proxy/src/gp_debug.c
+@@ -64,6 +64,32 @@ const char *gp_debug_timestamp(void)
+     return buffer;
+ }
++/* thread local connection/client id */
++static __thread int cid;
++
++void gp_debug_set_conn_id(int id)
++{
++    cid = id;
++}
++
++static const char*gp_debug_conn_id(void)
++{
++    static __thread char buffer[18];
++    static __thread int last_cid = 0;
++
++    if (cid == 0) {
++        buffer[0] = '\0';
++        return buffer;
++    }
++
++    if (last_cid == cid) return buffer;
++
++    (void)snprintf(buffer, 17, "[CID %d]", cid);
++    buffer[17] = '\0';
++    last_cid = cid;
++    return buffer;
++}
++
+ void gp_debug_printf(const char *format, ...)
+ {
+     va_list varargs;
+@@ -76,7 +102,7 @@ void gp_debug_time_printf(const char *format, ...)
+ {
+     va_list varargs;
+-    fprintf(stderr, "%s", gp_debug_timestamp());
++    fprintf(stderr, "%s%s", gp_debug_conn_id(), gp_debug_timestamp());
+     va_start(varargs, format);
+     vfprintf(stderr, format, varargs);
+diff --git a/proxy/src/gp_debug.h b/proxy/src/gp_debug.h
+index d3420b0..1c2f8a3 100644
+--- a/proxy/src/gp_debug.h
++++ b/proxy/src/gp_debug.h
+@@ -14,6 +14,7 @@ int gp_debug_args(int level);
+ void gp_debug_toggle(int);
+ void gp_debug_printf(const char *format, ...);
+ void gp_debug_time_printf(const char *format, ...);
++void gp_debug_set_conn_id(int id);
+ #define GPDEBUG(...) do { \
+     if (gp_debug) { \
+diff --git a/proxy/src/gp_proxy.h b/proxy/src/gp_proxy.h
+index 971a7b6..55ab83c 100644
+--- a/proxy/src/gp_proxy.h
++++ b/proxy/src/gp_proxy.h
+@@ -113,6 +113,7 @@ void gp_socket_send_data(verto_ctx *vctx, struct gp_conn *conn,
+ struct gp_creds *gp_conn_get_creds(struct gp_conn *conn);
+ uid_t gp_conn_get_uid(struct gp_conn *conn);
+ const char *gp_conn_get_socket(struct gp_conn *conn);
++int gp_conn_get_cid(struct gp_conn *conn);
+ bool gp_selinux_ctx_equal(SELINUX_CTX ctx1, SELINUX_CTX ctx2);
+ bool gp_conn_check_selinux(struct gp_conn *conn, SELINUX_CTX ctx);
+diff --git a/proxy/src/gp_socket.c b/proxy/src/gp_socket.c
+index 29b6a44..5064e51 100644
+--- a/proxy/src/gp_socket.c
++++ b/proxy/src/gp_socket.c
+@@ -103,6 +103,11 @@ const char *gp_conn_get_socket(struct gp_conn *conn)
+     return conn->sock_ctx->socket;
+ }
++int gp_conn_get_cid(struct gp_conn *conn)
++{
++    return conn->us.sd;
++}
++
+ void gp_conn_free(struct gp_conn *conn)
+ {
+     if (!conn) return;
+diff --git a/proxy/src/gp_workers.c b/proxy/src/gp_workers.c
+index c089b54..d37e57c 100644
+--- a/proxy/src/gp_workers.c
++++ b/proxy/src/gp_workers.c
+@@ -357,6 +357,9 @@ static void *gp_worker_main(void *pvt)
+     while (!t->pool->shutdown) {
++        /* initialize debug client id to 0 until work is scheduled */
++        gp_debug_set_conn_id(0);
++
+         /* ======> COND_MUTEX */
+         pthread_mutex_lock(&t->cond_mutex);
+         while (t->query == NULL) {
+@@ -374,6 +377,9 @@ static void *gp_worker_main(void *pvt)
+         /* <====== COND_MUTEX */
+         pthread_mutex_unlock(&t->cond_mutex);
++        /* set client id before hndling requests */
++        gp_debug_set_conn_id(gp_conn_get_cid(q->conn));
++
+         /* handle the client request */
+         gp_handle_query(t->pool, q);
+diff --git a/proxy/src/gssproxy.c b/proxy/src/gssproxy.c
+index 5c5937d..94a6a61 100644
+--- a/proxy/src/gssproxy.c
++++ b/proxy/src/gssproxy.c
+@@ -159,6 +159,10 @@ int main(int argc, const char *argv[])
+     int wait_fd;
+     int ret = -1;
++    /* initialize debug client id to 0 in the main thread */
++    /* we do this early, before any code starts using debug statements */
++    gp_debug_set_conn_id(0);
++
+     struct poptOption long_options[] = {
+         POPT_AUTOHELP
+         {"daemon", 'D', POPT_ARG_NONE, &opt_daemon, 0, \
diff --git a/meta-stx/recipes-security/gssproxy/files/Allow-connection-to-self-when-impersonator-set.patch b/meta-stx/recipes-security/gssproxy/files/Allow-connection-to-self-when-impersonator-set.patch
new file mode 100644 (file)
index 0000000..995ed4a
--- /dev/null
@@ -0,0 +1,236 @@
+From 0e04be2c1398dac40c50910a59157eed0ad5a7e4 Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Tue, 14 Mar 2017 10:43:17 -0400
+Subject: [PATCH] Allow connection to self when impersonator set
+
+If the target of a context establishment is the impersonator itself,
+then allow it. This kind of context establishment is used by tools like
+mod_auth_gssapi to be able to inspect the ticket just obtained via
+impersonation and it is basically a noop as the acceptor and the
+impersonator are the same entitiy.
+
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #172
+(cherry picked from commit eada55e831d12b42d3be3a555ff4e133bed7f594)
+---
+ proxy/src/gp_creds.c                | 57 +++++++++++++++++++++++++----
+ proxy/src/gp_rpc_creds.h            |  3 +-
+ proxy/src/gp_rpc_init_sec_context.c |  2 +-
+ proxy/tests/t_impersonate.py        | 35 +++++++++++++-----
+ 4 files changed, 78 insertions(+), 19 deletions(-)
+
+diff --git a/proxy/src/gp_creds.c b/proxy/src/gp_creds.c
+index 95a1c48..7d89b06 100644
+--- a/proxy/src/gp_creds.c
++++ b/proxy/src/gp_creds.c
+@@ -883,7 +883,8 @@ static uint32_t get_impersonator_name(uint32_t *min, gss_cred_id_t cred,
+         }
+     } else if (ret_maj == GSS_S_UNAVAILABLE) {
+         /* Not supported by krb5 library yet, fallback to raw krb5 calls */
+-        /* TODO: Remove once we set a required dependency on MIT 1.15+ */
++        /* TODO: Remove once we set a minimum required dependency on a
++         * release that supports this call */
+         ret_maj = get_impersonator_fallback(&ret_min, cred, impersonator);
+         if (ret_maj == GSS_S_FAILURE) {
+             if (ret_min == KRB5_CC_NOTFOUND) {
+@@ -899,9 +900,47 @@ done:
+     return ret_maj;
+ }
++static uint32_t check_impersonator_name(uint32_t *min,
++                                        gss_name_t target_name,
++                                        const char *impersonator)
++{
++    gss_name_t canon_name = NULL;
++    gss_buffer_desc buf;
++    uint32_t ret_maj = 0;
++    uint32_t ret_min = 0;
++    uint32_t discard;
++    bool match;
++
++    ret_maj = gss_canonicalize_name(&discard, target_name, &gp_mech_krb5,
++                                    &canon_name);
++    if (ret_maj != GSS_S_COMPLETE) {
++        *min = ret_min;
++        return ret_maj;
++    }
++
++    ret_maj = gss_display_name(&discard, canon_name, &buf, NULL);
++    gss_release_name(&discard, &canon_name);
++    if (ret_maj != GSS_S_COMPLETE) {
++        *min = ret_min;
++        return ret_maj;
++    }
++
++    match = (strncmp(impersonator, buf.value, buf.length) == 0) &&
++            (strlen(impersonator) == buf.length);
++    gss_release_buffer(&discard, &buf);
++
++    *min = 0;
++    if (match) {
++        return GSS_S_COMPLETE;
++    } else {
++        return GSS_S_UNAUTHORIZED;
++    }
++}
++
+ uint32_t gp_cred_allowed(uint32_t *min,
+                          struct gp_call_ctx *gpcall,
+-                         gss_cred_id_t cred)
++                         gss_cred_id_t cred,
++                         gss_name_t target_name)
+ {
+     char *impersonator = NULL;
+     uint32_t ret_maj = 0;
+@@ -924,11 +963,11 @@ uint32_t gp_cred_allowed(uint32_t *min,
+     if (ret_maj) goto done;
+     /* if we find an impersonator entry we bail as that is not authorized,
+-     * if it were then gpcall->service->allow_const_deleg would have caused
+-     * the ealier check to return GSS_S_COMPLETE already */
++     * *unless* the target is the impersonator itself! If the operation
++     * were authorized then gpcall->service->allow_const_deleg would have
++     * caused the ealier check to return GSS_S_COMPLETE already */
+     if (impersonator != NULL) {
+-        ret_min = 0;
+-        ret_maj = GSS_S_UNAUTHORIZED;
++        ret_maj = check_impersonator_name(&ret_min, target_name, impersonator);
+     }
+ done:
+@@ -937,7 +976,11 @@ done:
+         GPDEBUGN(2, "Unauthorized impersonator credentials detected\n");
+         break;
+     case GSS_S_COMPLETE:
+-        GPDEBUGN(2, "No impersonator credentials detected\n");
++        if (impersonator) {
++            GPDEBUGN(2, "Credentials allowed for 'self'\n");
++        } else {
++            GPDEBUGN(2, "No impersonator credentials detected\n");
++        }
+         break;
+     default:
+         GPDEBUG("Failure while checking credentials\n");
+diff --git a/proxy/src/gp_rpc_creds.h b/proxy/src/gp_rpc_creds.h
+index 54fe482..c116e53 100644
+--- a/proxy/src/gp_rpc_creds.h
++++ b/proxy/src/gp_rpc_creds.h
+@@ -34,7 +34,8 @@ uint32_t gp_add_krb5_creds(uint32_t *min,
+ uint32_t gp_cred_allowed(uint32_t *min,
+                          struct gp_call_ctx *gpcall,
+-                         gss_cred_id_t cred);
++                         gss_cred_id_t cred,
++                         gss_name_t target_name);
+ void gp_filter_flags(struct gp_call_ctx *gpcall, uint32_t *flags);
+diff --git a/proxy/src/gp_rpc_init_sec_context.c b/proxy/src/gp_rpc_init_sec_context.c
+index 767a3ff..413e2ec 100644
+--- a/proxy/src/gp_rpc_init_sec_context.c
++++ b/proxy/src/gp_rpc_init_sec_context.c
+@@ -108,7 +108,7 @@ int gp_init_sec_context(struct gp_call_ctx *gpcall,
+         }
+     }
+-    ret_maj = gp_cred_allowed(&ret_min, gpcall, ich);
++    ret_maj = gp_cred_allowed(&ret_min, gpcall, ich, target_name);
+     if (ret_maj) {
+         goto done;
+     }
+diff --git a/proxy/tests/t_impersonate.py b/proxy/tests/t_impersonate.py
+index 3e25962..29f9a41 100755
+--- a/proxy/tests/t_impersonate.py
++++ b/proxy/tests/t_impersonate.py
+@@ -34,19 +34,20 @@ IMPERSONATE_CONF_TEMPLATE = '''
+ '''
+-def run_cmd(testdir, env, conf, name, socket, cmd, expected_failure):
++def run_cmd(testdir, env, conf, name, socket, cmd, keytab, expected_failure):
+     logfile = conf['logfile']
+     testenv = env.copy()
+     testenv.update({'KRB5CCNAME': os.path.join(testdir, 't' + conf['prefix'] +
+                                                '_impersonate.ccache'),
+-                    'KRB5_KTNAME': os.path.join(testdir, PROXY_KTNAME),
++                    'KRB5_KTNAME': os.path.join(testdir, keytab),
+                     'KRB5_TRACE': os.path.join(testdir, 't' + conf['prefix'] +
+                                                '_impersonate.trace'),
+                     'GSS_USE_PROXY': 'yes',
+                     'GSSPROXY_SOCKET': socket,
+                     'GSSPROXY_BEHAVIOR': 'REMOTE_FIRST'})
++    print("\nTesting: [%s]" % (name,), file=logfile)
+     print("[COMMAND]\n%s\n[ENVIRONMENT]\n%s\n" % (cmd, testenv), file=logfile)
+     logfile.flush()
+@@ -74,45 +75,59 @@ def run(testdir, env, conf):
+     rets = []
+     # Test all permitted
++    msg = "Impersonate"
+     socket = os.path.join(testdir, 'impersonate.socket')
+     cmd = ["./tests/t_impersonate", USR_NAME, HOST_GSS, PROXY_GSS,
+            path_prefix + 'impersonate.cache']
+-    r = run_cmd(testdir, env, conf, "Impersonate", socket, cmd, False)
++    r = run_cmd(testdir, env, conf, msg, socket, cmd, PROXY_KTNAME, False)
+     rets.append(r)
+-    #Test fail
++    #Test self fail
++    msg = "Impersonate fail self"
+     socket = os.path.join(testdir, 'impersonate-proxyonly.socket')
+     cmd = ["./tests/t_impersonate", USR_NAME, HOST_GSS, PROXY_GSS,
+            path_prefix + 'impersonate.cache']
+-    r = run_cmd(testdir, env, conf, "Impersonate fail self", socket, cmd, True)
++    r = run_cmd(testdir, env, conf, msg, socket, cmd, PROXY_KTNAME, True)
+     rets.append(r)
+-    #Test fail
++    #Test proxy fail
++    msg = "Impersonate fail proxy"
+     socket = os.path.join(testdir, 'impersonate-selfonly.socket')
+     cmd = ["./tests/t_impersonate", USR_NAME, HOST_GSS, PROXY_GSS,
+            path_prefix + 'impersonate.cache']
+-    r = run_cmd(testdir, env, conf, "Impersonate fail proxy", socket, cmd, True)
++    r = run_cmd(testdir, env, conf, msg, socket, cmd, PROXY_KTNAME, True)
+     rets.append(r)
+     #Test s4u2self half succeed
++    msg = "s4u2self delegation"
+     socket = os.path.join(testdir, 'impersonate-selfonly.socket')
+     cmd = ["./tests/t_impersonate", USR_NAME, HOST_GSS, PROXY_GSS,
+            path_prefix + 'impersonate.cache', 's4u2self']
+-    r = run_cmd(testdir, env, conf, "s4u2self delegation", socket, cmd, False)
++    r = run_cmd(testdir, env, conf, msg, socket, cmd, PROXY_KTNAME, False)
++    rets.append(r)
++
++    #Test proxy to self succeed
++    msg = "Impersonate to self"
++    socket = os.path.join(testdir, 'impersonate-selfonly.socket')
++    cmd = ["./tests/t_impersonate", USR_NAME, HOST_GSS, HOST_GSS,
++           path_prefix + 'impersonate.cache', 's4u2proxy']
++    r = run_cmd(testdir, env, conf, msg, socket, cmd, SVC_KTNAME, False)
+     rets.append(r)
+     #Test s4u2proxy half fail
++    msg = "s4u2proxy fail"
+     socket = os.path.join(testdir, 'impersonate-selfonly.socket')
+     cmd = ["./tests/t_impersonate", USR_NAME, HOST_GSS, PROXY_GSS,
+            path_prefix + 'impersonate.cache', 's4u2proxy']
+-    r = run_cmd(testdir, env, conf, "s4u2proxy fail", socket, cmd, True)
++    r = run_cmd(testdir, env, conf, msg, socket, cmd, PROXY_KTNAME, True)
+     rets.append(r)
+     #Test s4u2proxy half succeed
++    msg = "s4u2proxy"
+     socket = os.path.join(testdir, 'impersonate-proxyonly.socket')
+     cmd = ["./tests/t_impersonate", USR_NAME, HOST_GSS, PROXY_GSS,
+            path_prefix + 'impersonate.cache', 's4u2proxy']
+-    r = run_cmd(testdir, env, conf, "s4u2proxy", socket, cmd, False)
++    r = run_cmd(testdir, env, conf, msg, socket, cmd, PROXY_KTNAME, False)
+     rets.append(r)
+     # Reset back gssproxy conf
diff --git a/meta-stx/recipes-security/gssproxy/files/Always-choose-highest-requested-debug-level.patch b/meta-stx/recipes-security/gssproxy/files/Always-choose-highest-requested-debug-level.patch
new file mode 100644 (file)
index 0000000..0517c64
--- /dev/null
@@ -0,0 +1,107 @@
+From 08fab6898a9937fbc39de6222cd33167707cd763 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 11 Apr 2018 16:15:00 -0400
+Subject: [PATCH] Always choose highest requested debug level
+
+Allowing the CLI to lower the debug level specified in a config file
+is dubious, and previously broken since we don't distinguish "default
+value" from "explicitly requested value of 0" in popt.  This resulted
+in "Debug Enabled (level: 0)" even when the log level was not actually
+0, which is confusing for users.
+
+Remove the gp_debug_args() function since it is no longer used.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #229
+(cherry picked from commit 5a714768aec776dc875237dd729c85389932a688)
+---
+ proxy/src/gp_debug.c | 34 ++++++++--------------------------
+ proxy/src/gp_debug.h |  3 +--
+ proxy/src/gssproxy.c |  2 +-
+ 3 files changed, 10 insertions(+), 29 deletions(-)
+
+diff --git a/proxy/src/gp_debug.c b/proxy/src/gp_debug.c
+index 4a141fc..a0f51f0 100644
+--- a/proxy/src/gp_debug.c
++++ b/proxy/src/gp_debug.c
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2011 the GSS-PROXY contributors, see COPYING for license */
++/* Copyright (C) 2011,2018 the GSS-PROXY contributors, see COPYING for license */
+ #include "config.h"
+ #include <stdbool.h>
+@@ -7,35 +7,17 @@
+ #include "gp_log.h"
+ /* global debug switch */
+-int gp_debug;
+-
+-int gp_debug_args(int level) {
+-    static int args_level = 0;
+-
+-    if (level != 0) {
+-        args_level = level;
+-    }
+-    return args_level;
+-}
++int gp_debug = 0;
+ void gp_debug_toggle(int level)
+ {
+-    static bool krb5_trace_set = false;
++    if (level <= gp_debug)
++        return;
+-    /* Command line and environment options override config file */
+-    gp_debug = gp_debug_args(0);
+-    if (gp_debug == 0) {
+-        gp_debug = level;
+-    }
+-    if (level >= 3) {
+-        if (!getenv("KRB5_TRACE")) {
+-            setenv("KRB5_TRACE", "/dev/stderr", 1);
+-            krb5_trace_set = true;
+-        }
+-    } else if (krb5_trace_set) {
+-        unsetenv("KRB5_TRACE");
+-        krb5_trace_set = false;
+-    }
++    if (level >= 3 && !getenv("KRB5_TRACE"))
++        setenv("KRB5_TRACE", "/dev/stderr", 1);
++
++    gp_debug = level;
+     GPDEBUG("Debug Enabled (level: %d)\n", level);
+ }
+diff --git a/proxy/src/gp_debug.h b/proxy/src/gp_debug.h
+index 1c2f8a3..4932bfd 100644
+--- a/proxy/src/gp_debug.h
++++ b/proxy/src/gp_debug.h
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2011 the GSS-PROXY contributors, see COPYING for license */
++/* Copyright (C) 2011,2018 the GSS-PROXY contributors, see COPYING for license */
+ #ifndef _GP_DEBUG_H_
+ #define _GP_DEBUG_H_
+@@ -10,7 +10,6 @@
+ extern int gp_debug;
+-int gp_debug_args(int level);
+ void gp_debug_toggle(int);
+ void gp_debug_printf(const char *format, ...);
+ void gp_debug_time_printf(const char *format, ...);
+diff --git a/proxy/src/gssproxy.c b/proxy/src/gssproxy.c
+index 5fc4f8d..3b6a16e 100644
+--- a/proxy/src/gssproxy.c
++++ b/proxy/src/gssproxy.c
+@@ -209,7 +209,7 @@ int main(int argc, const char *argv[])
+     if (opt_debug || opt_debug_level > 0) {
+         if (opt_debug_level == 0) opt_debug_level = 1;
+-        gp_debug_args(opt_debug_level);
++        gp_debug_toggle(opt_debug_level);
+     }
+     if (opt_daemon && opt_interactive) {
diff --git a/meta-stx/recipes-security/gssproxy/files/Always-use-the-encype-we-selected.patch b/meta-stx/recipes-security/gssproxy/files/Always-use-the-encype-we-selected.patch
new file mode 100644 (file)
index 0000000..ccc1d69
--- /dev/null
@@ -0,0 +1,43 @@
+From d8166808a98fd1c3772de5d75e27656ed2ab124d Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Tue, 27 Feb 2018 11:59:25 -0500
+Subject: [PATCH] Always use the encype we selected
+
+The enctype is selected from the keytab or from the fallback code.
+Either way make sure to use the enctype stored in the key block.
+
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #226
+(cherry picked from commit d73c96d658059ce64ecd41ff2924071d86f2b54f)
+---
+ proxy/src/gp_export.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/proxy/src/gp_export.c b/proxy/src/gp_export.c
+index c9f5fd4..5e8e160 100644
+--- a/proxy/src/gp_export.c
++++ b/proxy/src/gp_export.c
+@@ -168,11 +168,10 @@ uint32_t gp_init_creds_handle(uint32_t *min, const char *svc_name,
+                                  GP_CREDS_HANDLE_KEY_ENCTYPE, 0,
+                                  &handle->key);
+         if (ret == 0) {
+-            ret = krb5_c_make_random_key(handle->context,
+-                                         GP_CREDS_HANDLE_KEY_ENCTYPE,
++            ret = krb5_c_make_random_key(handle->context, handle->key->enctype,
+                                          handle->key);
+             GPDEBUG("Service: %s, Enckey: [ephemeral], Enctype: %d\n",
+-                    svc_name, GP_CREDS_HANDLE_KEY_ENCTYPE);
++                    svc_name, handle->key->enctype);
+         }
+         if (ret) {
+             ret_min = ret;
+@@ -254,7 +253,7 @@ static int gp_decrypt_buffer(krb5_context context, krb5_keyblock *key,
+     memset(&enc_handle, '\0', sizeof(krb5_enc_data));
+-    enc_handle.enctype = GP_CREDS_HANDLE_KEY_ENCTYPE;
++    enc_handle.enctype = key->enctype;
+     enc_handle.ciphertext.data = in->octet_string_val;
+     enc_handle.ciphertext.length = in->octet_string_len;
diff --git a/meta-stx/recipes-security/gssproxy/files/Appease-gcc-7-s-fallthrough-detection.patch b/meta-stx/recipes-security/gssproxy/files/Appease-gcc-7-s-fallthrough-detection.patch
new file mode 100644 (file)
index 0000000..024c200
--- /dev/null
@@ -0,0 +1,27 @@
+From f62ece3a9655c6507f40e03a36e28ecbba16f744 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Fri, 19 May 2017 12:18:03 -0400
+Subject: [PATCH] Appease gcc-7's fallthrough detection
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #188
+(cherry picked from commit fdb56f8a0b6a5fd6a2072a525e228596c264883e)
+---
+ proxy/src/gp_export.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/proxy/src/gp_export.c b/proxy/src/gp_export.c
+index 3a927c9..f44da50 100644
+--- a/proxy/src/gp_export.c
++++ b/proxy/src/gp_export.c
+@@ -888,7 +888,8 @@ static uint32_t gp_export_creds_linux(uint32_t *min, gss_name_t name,
+         if (res != NULL) {
+             break;
+         }
+-        /* fall through as ret == NULL is equivalent to ENOENT */
++        /* ret == NULL is equivalent to ENOENT */
++        /* fall through */
+     case ENOENT:
+     case ESRCH:
+         free(pwbuf);
diff --git a/meta-stx/recipes-security/gssproxy/files/Change-impersonator-check-code.patch b/meta-stx/recipes-security/gssproxy/files/Change-impersonator-check-code.patch
new file mode 100644 (file)
index 0000000..27cf421
--- /dev/null
@@ -0,0 +1,216 @@
+From 37d1667ad0cc91f46a493281e62775cc8bbe3b5b Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Tue, 14 Mar 2017 10:20:08 -0400
+Subject: [PATCH] Change impersonator check code
+
+In MIT 1.15 we now have a native way to check for an impersonator,
+implement the use of that function but still keep the fallback for
+earlier krb5 versions that do not support this method for now.
+
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #172
+(cherry picked from commit 73b50c0b2799f0aed53337a6516b8e1a27279ebf)
+---
+ proxy/configure.ac   |   3 +
+ proxy/src/gp_creds.c | 147 ++++++++++++++++++++++++++++++++-----------
+ 2 files changed, 112 insertions(+), 38 deletions(-)
+
+diff --git a/proxy/configure.ac b/proxy/configure.ac
+index 63c0edf..c52dbb6 100644
+--- a/proxy/configure.ac
++++ b/proxy/configure.ac
+@@ -131,6 +131,9 @@ AC_CHECK_LIB(gssapi_krb5, gss_export_cred,,
+              [AC_MSG_ERROR([GSSAPI library does not support gss_export_cred])],
+              [$GSSAPI_LIBS])
++AC_CHECK_DECLS([GSS_KRB5_GET_CRED_IMPERSONATOR], [], [],
++               [[#include <gssapi/gssapi_krb5.h>]])
++
+ AC_SUBST([KRB5_CFLAGS])
+ AC_SUBST([KRB5_LIBS])
+ AC_SUBST([GSSAPI_CFLAGS])
+diff --git a/proxy/src/gp_creds.c b/proxy/src/gp_creds.c
+index 171a724..95a1c48 100644
+--- a/proxy/src/gp_creds.c
++++ b/proxy/src/gp_creds.c
+@@ -773,9 +773,9 @@ void gp_filter_flags(struct gp_call_ctx *gpcall, uint32_t *flags)
+     *flags &= ~gpcall->service->filter_flags;
+ }
+-uint32_t gp_cred_allowed(uint32_t *min,
+-                         struct gp_call_ctx *gpcall,
+-                         gss_cred_id_t cred)
++
++static uint32_t get_impersonator_fallback(uint32_t *min, gss_cred_id_t cred,
++                                          char **impersonator)
+ {
+     uint32_t ret_maj = 0;
+     uint32_t ret_min = 0;
+@@ -785,22 +785,6 @@ uint32_t gp_cred_allowed(uint32_t *min,
+     krb5_data config;
+     int err;
+-    if (cred == GSS_C_NO_CREDENTIAL) {
+-        return GSS_S_CRED_UNAVAIL;
+-    }
+-
+-    if (gpcall->service->trusted ||
+-        gpcall->service->impersonate ||
+-        gpcall->service->allow_const_deleg) {
+-
+-        GPDEBUGN(2, "Credentials allowed by configuration\n");
+-        *min = 0;
+-        return GSS_S_COMPLETE;
+-    }
+-
+-    /* FIXME: krb5 specific code, should get an oid registerd to query the
+-     * cred with gss_inquire_cred_by_oid() or similar instead */
+-
+     err = krb5_init_context(&context);
+     if (err) {
+         ret_min = err;
+@@ -835,21 +819,116 @@ uint32_t gp_cred_allowed(uint32_t *min,
+         goto done;
+     }
++    err = krb5_cc_get_config(context, ccache, NULL, "proxy_impersonator",
++                             &config);
++    if (err == 0) {
++        *impersonator = strndup(config.data, config.length);
++        if (!*impersonator) {
++            ret_min = ENOMEM;
++            ret_maj = GSS_S_FAILURE;
++        } else {
++            ret_min = 0;
++            ret_maj = GSS_S_COMPLETE;
++        }
++        krb5_free_data_contents(context, &config);
++    } else {
++        ret_min = err;
++        ret_maj = GSS_S_FAILURE;
++    }
++
++done:
++    if (context) {
++        if (ccache) {
++            krb5_cc_destroy(context, ccache);
++        }
++        krb5_free_context(context);
++    }
++    free(memcache);
++
++    *min = ret_min;
++    return ret_maj;
++}
++
++#if !HAVE_DECL_GSS_KRB5_GET_CRED_IMPERSONATOR
++gss_OID_desc impersonator_oid = {
++    11, discard_const("\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0e")
++};
++const gss_OID GSS_KRB5_GET_CRED_IMPERSONATOR = &impersonator_oid;
++#endif
++
++static uint32_t get_impersonator_name(uint32_t *min, gss_cred_id_t cred,
++                                      char **impersonator)
++{
++    gss_buffer_set_t bufset = GSS_C_NO_BUFFER_SET;
++    uint32_t ret_maj = 0;
++    uint32_t ret_min = 0;
++    uint32_t discard;
++
++    *impersonator = NULL;
++
++    ret_maj = gss_inquire_cred_by_oid(&ret_min, cred,
++                                      GSS_KRB5_GET_CRED_IMPERSONATOR,
++                                      &bufset);
++    if (ret_maj == GSS_S_COMPLETE) {
++        if (bufset->count == 0) {
++            ret_min = ENOENT;
++            ret_maj = GSS_S_COMPLETE;
++            goto done;
++        }
++        *impersonator = strndup(bufset->elements[0].value,
++                                bufset->elements[0].length);
++        if (!*impersonator) {
++            ret_min = ENOMEM;
++            ret_maj = GSS_S_FAILURE;
++        }
++    } else if (ret_maj == GSS_S_UNAVAILABLE) {
++        /* Not supported by krb5 library yet, fallback to raw krb5 calls */
++        /* TODO: Remove once we set a required dependency on MIT 1.15+ */
++        ret_maj = get_impersonator_fallback(&ret_min, cred, impersonator);
++        if (ret_maj == GSS_S_FAILURE) {
++            if (ret_min == KRB5_CC_NOTFOUND) {
++                ret_min = ENOENT;
++                ret_maj = GSS_S_COMPLETE;
++            }
++        }
++    }
++
++done:
++    (void)gss_release_buffer_set(&discard, &bufset);
++    *min = ret_min;
++    return ret_maj;
++}
++
++uint32_t gp_cred_allowed(uint32_t *min,
++                         struct gp_call_ctx *gpcall,
++                         gss_cred_id_t cred)
++{
++    char *impersonator = NULL;
++    uint32_t ret_maj = 0;
++    uint32_t ret_min = 0;
++
++    if (cred == GSS_C_NO_CREDENTIAL) {
++        return GSS_S_CRED_UNAVAIL;
++    }
++
++    if (gpcall->service->trusted ||
++        gpcall->service->impersonate ||
++        gpcall->service->allow_const_deleg) {
++
++        GPDEBUGN(2, "Credentials allowed by configuration\n");
++        *min = 0;
++        return GSS_S_COMPLETE;
++    }
++
++    ret_maj = get_impersonator_name(&ret_min, cred, &impersonator);
++    if (ret_maj) goto done;
++
+     /* if we find an impersonator entry we bail as that is not authorized,
+      * if it were then gpcall->service->allow_const_deleg would have caused
+      * the ealier check to return GSS_S_COMPLETE already */
+-    err = krb5_cc_get_config(context, ccache, NULL, "proxy_impersonator",
+-                             &config);
+-    if (!err) {
+-        krb5_free_data_contents(context, &config);
++    if (impersonator != NULL) {
+         ret_min = 0;
+         ret_maj = GSS_S_UNAUTHORIZED;
+-    } else if (err != KRB5_CC_NOTFOUND) {
+-        ret_min = err;
+-        ret_maj = GSS_S_FAILURE;
+-    } else {
+-        ret_min = 0;
+-        ret_maj = GSS_S_COMPLETE;
+     }
+ done:
+@@ -864,15 +943,7 @@ done:
+         GPDEBUG("Failure while checking credentials\n");
+         break;
+     }
+-    if (context) {
+-        /* NOTE: destroy only if we created a MEMORY ccache */
+-        if (ccache) {
+-            if (memcache) krb5_cc_destroy(context, ccache);
+-            else krb5_cc_close(context, ccache);
+-        }
+-        krb5_free_context(context);
+-    }
+-    free(memcache);
++    free(impersonator);
+     *min = ret_min;
+     return ret_maj;
+ }
diff --git a/meta-stx/recipes-security/gssproxy/files/Clarify-debug-and-debug_level-in-man-pages.patch b/meta-stx/recipes-security/gssproxy/files/Clarify-debug-and-debug_level-in-man-pages.patch
new file mode 100644 (file)
index 0000000..242ebe5
--- /dev/null
@@ -0,0 +1,74 @@
+From 384a1cff4855ad612ca71d3831a0c2c19a355c49 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 11 Apr 2018 16:01:21 -0400
+Subject: [PATCH] Clarify debug and debug_level in man pages
+
+In particular, add debug_level to gssproxy(5) since it was previously
+accepted but not documented.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #229
+(cherry picked from commit e0e96e46be03102903533a9816b4deefe1adfaf8)
+---
+ proxy/man/gssproxy.8.xml      | 24 +++++++++++++++++++++++-
+ proxy/man/gssproxy.conf.5.xml |  5 ++++-
+ 2 files changed, 27 insertions(+), 2 deletions(-)
+
+diff --git a/proxy/man/gssproxy.8.xml b/proxy/man/gssproxy.8.xml
+index 1df4b0d..21f7e6a 100644
+--- a/proxy/man/gssproxy.8.xml
++++ b/proxy/man/gssproxy.8.xml
+@@ -118,13 +118,35 @@
+                     </para>
+                 </listitem>
+             </varlistentry>
++
+             <varlistentry>
+                 <term>
+                     <option>-d</option>,<option>--debug</option>
+                 </term>
+                 <listitem>
+                     <para>
+-                        Turn on debugging.
++                        Turn on debugging.  This option is identical to
++                        --debug-level=1.
++                    </para>
++                </listitem>
++            </varlistentry>
++
++            <varlistentry>
++                <term>
++                    <option>--debug-level=</option>
++                </term>
++                <listitem>
++                    <para>
++                        Turn on debugging at the specified level.  0
++                        corresponds to no logging, while 1 turns on basic
++                        debug logging.  Level 2 increases verbosity, including
++                        more detailed credential verification.
++                    </para>
++                    <para>
++                        At level 3 and above, KRB5_TRACE output is logged.  If
++                        KRB5_TRACE was already set in the execution
++                        environment, trace output is sent to its value
++                        instead.
+                     </para>
+                 </listitem>
+             </varlistentry>
+diff --git a/proxy/man/gssproxy.conf.5.xml b/proxy/man/gssproxy.conf.5.xml
+index ad9d96f..abb6745 100644
+--- a/proxy/man/gssproxy.conf.5.xml
++++ b/proxy/man/gssproxy.conf.5.xml
+@@ -191,7 +191,10 @@
+                 <varlistentry>
+                     <term>debug (boolean)</term>
+                     <listitem>
+-                        <para>Enable debugging to syslog.</para>
++                        <para>
++                            Enable debugging to syslog.  Setting to true is
++                            identical to setting debug_level to 1.
++                        </para>
+                         <para>Default: debug = false</para>
+                     </listitem>
+                 </varlistentry>
diff --git a/meta-stx/recipes-security/gssproxy/files/Conditionally-reload-kernel-interface-on-SIGHUP.patch b/meta-stx/recipes-security/gssproxy/files/Conditionally-reload-kernel-interface-on-SIGHUP.patch
new file mode 100644 (file)
index 0000000..42286f6
--- /dev/null
@@ -0,0 +1,59 @@
+From 8a5ba5feb98ecc3c57ee865604799fc4e4a90c61 Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <ascheel@redhat.com>
+Date: Mon, 26 Jun 2017 17:04:16 -0400
+Subject: [PATCH] Conditionally reload kernel interface on SIGHUP
+
+Signed-off-by: Alexander Scheel <ascheel@redhat.com>
+[rharwood@redhat.com: Simplified logic, rewrote commit message]
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Resolves: #193
+Merges: #201
+(cherry picked from commit 938af94b8f33c227dcdd6a53ed42de418578d29d)
+---
+ proxy/src/gp_init.c  | 4 +++-
+ proxy/src/gssproxy.c | 3 +++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/proxy/src/gp_init.c b/proxy/src/gp_init.c
+index e69934d..6a2b047 100644
+--- a/proxy/src/gp_init.c
++++ b/proxy/src/gp_init.c
+@@ -145,6 +145,7 @@ void init_proc_nfsd(struct gp_config *cfg)
+     char buf[] = "1";
+     bool enabled = false;
+     int fd, ret;
++    static int poked = 0;
+     /* check first if any service enabled kernel support */
+     for (int i = 0; i < cfg->num_svcs; i++) {
+@@ -154,7 +155,7 @@ void init_proc_nfsd(struct gp_config *cfg)
+         }
+     }
+-    if (!enabled) {
++    if (!enabled || poked) {
+         return;
+     }
+@@ -173,6 +174,7 @@ void init_proc_nfsd(struct gp_config *cfg)
+                 LINUX_PROC_USE_GSS_PROXY_FILE, ret, gp_strerror(ret));
+     }
++    poked = 1;
+     close(fd);
+     if (ret != 0) {
+         goto fail;
+diff --git a/proxy/src/gssproxy.c b/proxy/src/gssproxy.c
+index 94a6a61..5fc4f8d 100644
+--- a/proxy/src/gssproxy.c
++++ b/proxy/src/gssproxy.c
+@@ -140,6 +140,9 @@ static void hup_handler(verto_ctx *vctx, verto_ev *ev UNUSED)
+         exit(ret);
+     }
++    /* conditionally reload kernel interface */
++    init_proc_nfsd(gpctx->config);
++
+     free_config(&old_config);
+     GPDEBUG("New config loaded successfully.\n");
diff --git a/meta-stx/recipes-security/gssproxy/files/Do-not-call-gpm_grab_sock-twice.patch b/meta-stx/recipes-security/gssproxy/files/Do-not-call-gpm_grab_sock-twice.patch
new file mode 100644 (file)
index 0000000..5a5852c
--- /dev/null
@@ -0,0 +1,59 @@
+From 32578afb817f20446d888326814b52a8f3d6c0fe Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Thu, 26 Oct 2017 16:59:18 -0400
+Subject: [PATCH] Do not call gpm_grab_sock() twice
+
+In the gpm_get_ctx() call, we unnecessarily call gpm_grab_sock() which
+would cause the lock to be held by one thread and never released.  We
+already call gpm_grab_sock() as the first thing after gpm_get_ctx() in
+gpm_make_call(), plus gpm_make_call() properly releases the socket
+once done.
+
+This corrects the deadlock fix in
+461a5fa9f91a2753ebeef6323a64239c35e2f250, which incorrectly released
+the lock we wanted to grab.  This caused the socket to not be locked
+to our thread.  Another thread could come along and change the global
+ctx while we were still using the socket from another thread, causing
+concurrency issues as only one request can be in flight on any given
+socket at the same time.
+
+In special cases where the "thread" uid/gid changes (like in
+rpc.gssd), we end up closing the socket while we are still waiting for
+an answer from the server, causing additional issues and confusion.
+
+[rharwood@redhat.com: squashed 2 commits; minor edits accordingly]
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #218
+(cherry picked from commit 8590c5dbc6fa07d0c366df23b982a4b6b9ffc259)
+---
+ proxy/src/client/gpm_common.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index 69f4741..2133618 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -152,7 +152,9 @@ static int gpm_grab_sock(struct gpm_ctx *gpmctx)
+         ret = gpm_open_socket(gpmctx);
+     }
+-    pthread_mutex_unlock(&gpmctx->lock);
++    if (ret) {
++        pthread_mutex_unlock(&gpmctx->lock);
++    }
+     return ret;
+ }
+@@ -304,11 +306,6 @@ static struct gpm_ctx *gpm_get_ctx(void)
+     pthread_once(&gpm_init_once_control, gpm_init_once);
+-    ret = gpm_grab_sock(&gpm_global_ctx);
+-    if (ret) {
+-        return NULL;
+-    }
+-
+     return &gpm_global_ctx;
+ }
diff --git a/meta-stx/recipes-security/gssproxy/files/Don-t-leak-mech_type-when-CONTINUE_NEEDED-from-init_.patch b/meta-stx/recipes-security/gssproxy/files/Don-t-leak-mech_type-when-CONTINUE_NEEDED-from-init_.patch
new file mode 100644 (file)
index 0000000..bcad8ca
--- /dev/null
@@ -0,0 +1,27 @@
+From dd1699810efe933858badce463bece239d19e886 Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <ascheel@redhat.com>
+Date: Tue, 8 Aug 2017 11:36:56 -0400
+Subject: [PATCH] Don't leak mech_type when CONTINUE_NEEDED from
+ init_sec_context
+
+Signed-off-by: Alexander Scheel <ascheel@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #207
+(cherry picked from commit ca26c0f58ac961a16b06c3fb93b985d574116b2c)
+---
+ proxy/src/gp_export.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/proxy/src/gp_export.c b/proxy/src/gp_export.c
+index 0c39045..c9f5fd4 100644
+--- a/proxy/src/gp_export.c
++++ b/proxy/src/gp_export.c
+@@ -687,6 +687,7 @@ export:
+     case EXP_CTX_PARTIAL:
+         /* this happens only when a init_sec_context call returns a partially
+          * initialized context so we return only what we have, not much */
++        xdr_free((xdrproc_t)xdr_gssx_OID, (char *)&out->mech);
+         ret = gp_conv_oid_to_gssx(mech, &out->mech);
+         if (ret) {
+             ret_maj = GSS_S_FAILURE;
diff --git a/meta-stx/recipes-security/gssproxy/files/Emit-debug-on-queue-errors.patch b/meta-stx/recipes-security/gssproxy/files/Emit-debug-on-queue-errors.patch
new file mode 100644 (file)
index 0000000..99e7690
--- /dev/null
@@ -0,0 +1,26 @@
+From f1f89ef85b65b95f568d46fea85c7e7834e3a101 Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Thu, 26 Oct 2017 11:47:54 -0400
+Subject: [PATCH] Emit debug on queue errors
+
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #218
+(cherry picked from commit af666affbd4735ba437e3d89d9e22984a556ed16)
+---
+ proxy/src/gp_workers.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/proxy/src/gp_workers.c b/proxy/src/gp_workers.c
+index 2a33c21..18f38f6 100644
+--- a/proxy/src/gp_workers.c
++++ b/proxy/src/gp_workers.c
+@@ -314,6 +314,8 @@ static void gp_handle_reply(verto_ctx *vctx, verto_ev *ev)
+         case GP_QUERY_IN:
+             /* ?! fallback and kill client conn */
+         case GP_QUERY_ERR:
++            GPDEBUGN(3, "[status] Handling query error, terminating CID %d.\n",
++                     gp_conn_get_cid(q->conn));
+             gp_conn_free(q->conn);
+             gp_query_free(q, true);
+             break;
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-error-checking-on-get_impersonator_fallback.patch b/meta-stx/recipes-security/gssproxy/files/Fix-error-checking-on-get_impersonator_fallback.patch
new file mode 100644 (file)
index 0000000..7e17ad5
--- /dev/null
@@ -0,0 +1,28 @@
+From f21071fb3662824698b61d384b1144657a508043 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 15 Mar 2017 14:57:57 -0400
+Subject: [PATCH] Fix error checking on get_impersonator_fallback()
+
+Separate commit to ease backporting.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #173
+(cherry picked from commit 25e31ebccde7f0d98480b6a99962fef61dd251b4)
+---
+ proxy/src/gp_creds.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/proxy/src/gp_creds.c b/proxy/src/gp_creds.c
+index e05ad01..fdc6bdf 100644
+--- a/proxy/src/gp_creds.c
++++ b/proxy/src/gp_creds.c
+@@ -885,7 +885,7 @@ static uint32_t get_impersonator_name(uint32_t *min, gss_cred_id_t cred,
+          * release that supports this call */
+         ret_maj = get_impersonator_fallback(&ret_min, cred, impersonator);
+         if (ret_maj == GSS_S_FAILURE) {
+-            if (ret_min == KRB5_CC_NOTFOUND) {
++            if (ret_min == (uint32_t)KRB5_CC_NOTFOUND) {
+                 ret_min = ENOENT;
+                 ret_maj = GSS_S_COMPLETE;
+             }
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-error-handling-in-gp_config_from_dir.patch b/meta-stx/recipes-security/gssproxy/files/Fix-error-handling-in-gp_config_from_dir.patch
new file mode 100644 (file)
index 0000000..a3c43ae
--- /dev/null
@@ -0,0 +1,50 @@
+From 8603c619ca9bc923534d83ee432ddd756f285d4c Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <ascheel@redhat.com>
+Date: Wed, 12 Jul 2017 09:26:52 -0400
+Subject: [PATCH] Fix error handling in gp_config_from_dir
+
+Signed-off-by: Alexander Scheel <ascheel@redhat.com>
+[rharwood@redhat.com: c99, refactor some existing code]
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #204
+(cherry picked from commit eb880e93ed4a48c67ac27b4d5194f0f7786da83e)
+---
+ proxy/src/gp_config.c | 24 ++++++++++++++----------
+ 1 file changed, 14 insertions(+), 10 deletions(-)
+
+diff --git a/proxy/src/gp_config.c b/proxy/src/gp_config.c
+index 8fd60a3..07f7c8d 100644
+--- a/proxy/src/gp_config.c
++++ b/proxy/src/gp_config.c
+@@ -798,17 +798,21 @@ static int gp_config_from_dir(const char *config_dir,
+                              &error_list,
+                              NULL);
+     if (ret) {
+-        if (error_list) {
+-            uint32_t i;
+-            uint32_t len = ref_array_getlen(error_list, &i);
+-            for (i = 0; i < len; i++) {
+-                GPDEBUG("Error when reading config directory: %s\n",
+-                        (const char *) ref_array_get(error_list, i, NULL));
+-            }
+-            ref_array_destroy(error_list);
+-        } else {
+-            GPDEBUG("Error when reading config directory number: %d\n", ret);
++        uint32_t len;
++
++        if (!error_list) {
++            GPAUDIT("Error when reading config directory number: %d\n", ret);
++            return ret;
+         }
++
++        len = ref_array_len(error_list);
++        for (uint32_t i = 0; i < len; i++) {
++            /* libini has an unfixable bug where error strings are (char **) */
++            GPAUDIT("Error when reading config directory: %s\n",
++                    *(char **)ref_array_get(error_list, i, NULL));
++        }
++
++        ref_array_destroy(error_list);
+         return ret;
+     }
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-error-handling-in-gpm_send_buffer-gpm_recv_buffe.patch b/meta-stx/recipes-security/gssproxy/files/Fix-error-handling-in-gpm_send_buffer-gpm_recv_buffe.patch
new file mode 100644 (file)
index 0000000..3dad153
--- /dev/null
@@ -0,0 +1,61 @@
+From 027596a3df85d11948f4fb3a28d27d6523645589 Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <alexander.m.scheel@gmail.com>
+Date: Thu, 14 Sep 2017 11:24:39 -0500
+Subject: [PATCH] Fix error handling in gpm_send_buffer/gpm_recv_buffer
+
+Signed-off-by: Alexander Scheel <alexander.m.scheel@gmail.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #213
+[rharwood@redhat.com: commit message formatting, copyright update]
+(cherry picked from commit f2530fc280dd84e6abc0f5475e261aa0d2ee2a21)
+---
+ proxy/src/client/gpm_common.c | 18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index b14e846..0d314fa 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2011 the GSS-PROXY contributors, see COPYING for license */
++/* Copyright (C) 2011,2017 the GSS-PROXY contributors, see COPYING for license */
+ #include "gssapi_gpm.h"
+ #include <sys/types.h>
+@@ -415,10 +415,7 @@ static int gpm_send_buffer(struct gpm_ctx *gpmctx,
+     ret = 0;
+ done:
+-    if (ret) {
+-        /* on errors we can only close the fd and return */
+-        gpm_close_socket(gpmctx);
+-    }
++    /* we only need to return as gpm_retry_socket closes the socket */
+     return ret;
+ }
+@@ -488,9 +485,10 @@ static int gpm_recv_buffer(struct gpm_ctx *gpmctx,
+ done:
+     if (ret) {
+-        /* on errors we can only close the fd and return */
+-        gpm_close_socket(gpmctx);
+-        gpm_epoll_close(gpmctx);
++        /* on errors, free the buffer to prevent calling
++         * xdr_destroy(&xdr_reply_ctx); */
++        free(*buffer);
++        *buffer = NULL;
+     }
+     return ret;
+ }
+@@ -560,10 +558,6 @@ static int gpm_send_recv_loop(struct gpm_ctx *gpmctx, char *send_buffer,
+             /* Close and reopen socket before trying again */
+             ret = gpm_retry_socket(gpmctx);
+-            /* Free buffer and set it to NULL to prevent free(xdr_reply_ctx) */
+-            free(*recv_buffer);
+-            *recv_buffer = NULL;
+-
+             if (ret != 0)
+                 return ret;
+             ret = ETIMEDOUT;
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-handling-of-non-EPOLLIN-EPOLLOUT-events.patch b/meta-stx/recipes-security/gssproxy/files/Fix-handling-of-non-EPOLLIN-EPOLLOUT-events.patch
new file mode 100644 (file)
index 0000000..6f1aea5
--- /dev/null
@@ -0,0 +1,79 @@
+From a2a5789d6410e12469ea0f81c9a31ce70bac9ede Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <alexander.m.scheel@gmail.com>
+Date: Thu, 14 Sep 2017 11:16:42 -0500
+Subject: [PATCH] Fix handling of non-EPOLLIN/EPOLLOUT events
+
+Signed-off-by: Alexander Scheel <alexander.m.scheel@gmail.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #213
+(cherry picked from commit b8f5b2f75612a11753cf742ee0477b98df8e6b02)
+---
+ proxy/src/client/gpm_common.c | 49 +++++++++++++++++++++++++----------
+ 1 file changed, 35 insertions(+), 14 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index 7d1158e..b14e846 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -283,26 +283,47 @@ static int gpm_epoll_wait(struct gpm_ctx *gpmctx, uint32_t event_flags) {
+         gpm_epoll_close(gpmctx);
+     } else if (epoll_ret == 1 && events[0].data.fd == gpmctx->timerfd) {
+         /* Got an event which is only our timer */
+-        ret = read(gpmctx->timerfd, &timer_read, sizeof(uint64_t));
+-        if (ret == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
+-            /* In the case when reading from the timer failed, don't hide the
+-             * timer error behind ETIMEDOUT such that it isn't retried */
+-            ret = errno;
++        if ((events[0].events & EPOLLIN) == 0) {
++            /* We got an event which was not EPOLLIN; assume this is an error,
++             * and exit with EBADF: epoll_wait said timerfd had an event,
++             * but that event is not an EPOLIN event. */
++            ret = EBADF;
+         } else {
+-            /* If ret == 0, then we definitely timed out. Else, if ret == -1
+-             * and errno == EAGAIN or errno == EWOULDBLOCK, we're in a weird
+-             * edge case where epoll thinks the timer can be read, but it
+-             * is blocking more; treat it like a TIMEOUT and retry, as
+-             * nothing around us would handle EAGAIN from timer and retry
+-             * it. */
+-            ret = ETIMEDOUT;
++            ret = read(gpmctx->timerfd, &timer_read, sizeof(uint64_t));
++            if (ret == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
++                /* In the case when reading from the timer failed, don't hide the
++                 * timer error behind ETIMEDOUT such that it isn't retried */
++                ret = errno;
++            } else {
++                /* If ret == 0, then we definitely timed out. Else, if ret == -1
++                 * and errno == EAGAIN or errno == EWOULDBLOCK, we're in a weird
++                 * edge case where epoll thinks the timer can be read, but it
++                 * is blocking more; treat it like a TIMEOUT and retry, as
++                 * nothing around us would handle EAGAIN from timer and retry
++                 * it. */
++                ret = ETIMEDOUT;
++            }
+         }
+         gpm_epoll_close(gpmctx);
+     } else {
+         /* If ret == 2, then we ignore the timerfd; that way if the next
+          * operation cannot be performed immediately, we timeout and retry.
+-         * If ret == 1 and data.fd == gpmctx->fd, return 0. */
+-        ret = 0;
++         * Always check the returned event of the socket fd. */
++        int fd_index = 0;
++        if (epoll_ret == 2 && events[fd_index].data.fd != gpmctx->fd) {
++            fd_index = 1;
++        }
++
++        if ((events[fd_index].events & event_flags) == 0) {
++            /* We cannot call EPOLLIN/EPOLLOUT at this time; assume that this
++             * is a fatal error; return with EBADFD to distinguish from
++             * EBADF in timer_fd case. */
++            ret = EBADFD;
++            gpm_epoll_close(gpmctx);
++        } else {
++            /* We definintely got a EPOLLIN/EPOLLOUT event; return success. */
++            ret = 0;
++        }
+     }
+     epoll_ret = epoll_ctl(gpmctx->epollfd, EPOLL_CTL_DEL, gpmctx->fd, NULL);
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-memory-leak.patch b/meta-stx/recipes-security/gssproxy/files/Fix-memory-leak.patch
new file mode 100644 (file)
index 0000000..540c1ef
--- /dev/null
@@ -0,0 +1,25 @@
+From abcd9ae04b1c3f9f0ebb72bd48737b08d5d7fe65 Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Thu, 25 May 2017 21:35:37 -0400
+Subject: [PATCH] Fix memory leak
+
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Related-to: #176
+(cherry picked from commit 69a73d85eb3e70fdc7501794d5fd11a73a1d20fa)
+---
+ proxy/src/gp_export.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/proxy/src/gp_export.c b/proxy/src/gp_export.c
+index f44da50..4e081df 100644
+--- a/proxy/src/gp_export.c
++++ b/proxy/src/gp_export.c
+@@ -381,6 +381,7 @@ uint32_t gp_export_gssx_cred(uint32_t *min, struct gp_call_ctx *gpcall,
+ done:
+     *min = ret_min;
++    gss_release_buffer(&ret_min, &token);
+     gss_release_name(&ret_min, &name);
+     gss_release_oid_set(&ret_min, &mechanisms);
+     return ret_maj;
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-mismatched-sign-comparisons.patch b/meta-stx/recipes-security/gssproxy/files/Fix-mismatched-sign-comparisons.patch
new file mode 100644 (file)
index 0000000..8d27612
--- /dev/null
@@ -0,0 +1,741 @@
+From a68b8b418bfc42c628fee605cc52dca92ab410c9 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 15 Mar 2017 14:52:08 -0400
+Subject: [PATCH] Fix mismatched sign comparisons
+
+We are c99, so also migrate to `for`-loop initializers where possible for
+clarity.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #173
+(cherry picked from commit 377e92c7ead312c530b233a1e023493ecde033d6)
+---
+ proxy/src/client/gpm_acquire_cred.c          | 11 ++-----
+ proxy/src/client/gpm_common.c                |  4 +--
+ proxy/src/client/gpm_import_and_canon_name.c |  7 ++---
+ proxy/src/client/gpm_indicate_mechs.c        | 33 +++++++++-----------
+ proxy/src/gp_common.h                        |  3 +-
+ proxy/src/gp_config.c                        |  9 ++----
+ proxy/src/gp_conv.c                          |  6 ++--
+ proxy/src/gp_creds.c                         |  3 +-
+ proxy/src/gp_export.c                        |  9 ++----
+ proxy/src/gp_rpc_acquire_cred.c              |  5 ++-
+ proxy/src/gp_rpc_debug.c                     | 26 +++++++--------
+ proxy/src/gp_rpc_indicate_mechs.c            | 15 +++------
+ proxy/src/gp_socket.c                        |  4 +--
+ proxy/src/gp_util.c                          |  4 +--
+ proxy/tests/t_utils.c                        |  4 +--
+ 15 files changed, 58 insertions(+), 85 deletions(-)
+
+diff --git a/proxy/src/client/gpm_acquire_cred.c b/proxy/src/client/gpm_acquire_cred.c
+index 632973d..8e30e1d 100644
+--- a/proxy/src/client/gpm_acquire_cred.c
++++ b/proxy/src/client/gpm_acquire_cred.c
+@@ -6,8 +6,6 @@ static int gpmint_cred_to_actual_mechs(gssx_cred *c, gss_OID_set *a)
+ {
+     gssx_cred_element *e;
+     gss_OID_set m = GSS_C_NO_OID_SET;
+-    int i;
+-
+     if (c->elements.elements_len) {
+@@ -22,7 +20,7 @@ static int gpmint_cred_to_actual_mechs(gssx_cred *c, gss_OID_set *a)
+             return ENOMEM;
+         }
+-        for (i = 0; i < c->elements.elements_len; i++) {
++        for (unsigned i = 0; i < c->elements.elements_len; i++) {
+             e = &c->elements.elements_val[i];
+             m->elements[i].elements = gp_memdup(e->mech.octet_string_val,
+@@ -280,7 +278,6 @@ OM_uint32 gpm_inquire_cred(OM_uint32 *minor_status,
+     uint32_t ret_maj = GSS_S_COMPLETE;
+     uint32_t life;
+     int cu;
+-    int i;
+     if (!cred) {
+         *minor_status = 0;
+@@ -308,8 +305,7 @@ OM_uint32 gpm_inquire_cred(OM_uint32 *minor_status,
+     life = GSS_C_INDEFINITE;
+     cu = -1;
+-    for (i = 0; i < cred->elements.elements_len; i++) {
+-
++    for (unsigned i = 0; i < cred->elements.elements_len; i++) {
+         e = &cred->elements.elements_val[i];
+         switch (e->cred_usage) {
+@@ -402,7 +398,7 @@ OM_uint32 gpm_inquire_cred_by_mech(OM_uint32 *minor_status,
+     gss_OID_desc tmp_oid;
+     uint32_t ret_min = 0;
+     uint32_t ret_maj = GSS_S_COMPLETE;
+-    int i;
++    unsigned i;
+     if (!cred) {
+         *minor_status = 0;
+@@ -414,7 +410,6 @@ OM_uint32 gpm_inquire_cred_by_mech(OM_uint32 *minor_status,
+     }
+     for (i = 0; i < cred->elements.elements_len; i++) {
+-
+         e = &cred->elements.elements_val[i];
+         gp_conv_gssx_to_oid(&e->mech, &tmp_oid);
+         if (!gss_oid_equal(&tmp_oid, mech_type)) {
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index 030765a..8c96986 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -166,7 +166,7 @@ static int gpm_send_buffer(struct gpm_ctx *gpmctx,
+                            char *buffer, uint32_t length)
+ {
+     uint32_t size;
+-    size_t wn;
++    ssize_t wn;
+     size_t pos;
+     bool retry;
+     int ret;
+@@ -232,7 +232,7 @@ static int gpm_recv_buffer(struct gpm_ctx *gpmctx,
+                            char *buffer, uint32_t *length)
+ {
+     uint32_t size;
+-    size_t rn;
++    ssize_t rn;
+     size_t pos;
+     int ret;
+diff --git a/proxy/src/client/gpm_import_and_canon_name.c b/proxy/src/client/gpm_import_and_canon_name.c
+index 83d0736..70149a3 100644
+--- a/proxy/src/client/gpm_import_and_canon_name.c
++++ b/proxy/src/client/gpm_import_and_canon_name.c
+@@ -275,7 +275,6 @@ OM_uint32 gpm_inquire_name(OM_uint32 *minor_status,
+ {
+     gss_buffer_set_t xattrs = GSS_C_NO_BUFFER_SET;
+     int ret;
+-    int i;
+     *minor_status = 0;
+@@ -306,13 +305,13 @@ OM_uint32 gpm_inquire_name(OM_uint32 *minor_status,
+             *minor_status = ENOMEM;
+             return GSS_S_FAILURE;
+         }
+-        for (i = 0; i < xattrs->count; i++) {
++        for (unsigned i = 0; i < xattrs->count; i++) {
+             ret = gp_copy_gssx_to_buffer(
+                         &name->name_attributes.name_attributes_val[i].attr,
+                         &xattrs->elements[i]);
+             if (ret) {
+-                for (--i; i >= 0; i--) {
+-                    free(xattrs->elements[i].value);
++                for (; i > 0; i--) {
++                    free(xattrs->elements[i-1].value);
+                 }
+                 free(xattrs->elements);
+                 free(xattrs);
+diff --git a/proxy/src/client/gpm_indicate_mechs.c b/proxy/src/client/gpm_indicate_mechs.c
+index d4df923..b019a96 100644
+--- a/proxy/src/client/gpm_indicate_mechs.c
++++ b/proxy/src/client/gpm_indicate_mechs.c
+@@ -51,7 +51,6 @@ static uint32_t gpm_copy_gss_OID_set(uint32_t *minor_status,
+     gss_OID_set n;
+     uint32_t ret_maj;
+     uint32_t ret_min;
+-    int i;
+     ret_maj = gss_create_empty_oid_set(&ret_min, &n);
+     if (ret_maj) {
+@@ -59,7 +58,7 @@ static uint32_t gpm_copy_gss_OID_set(uint32_t *minor_status,
+         return ret_maj;
+     }
+-    for (i = 0; i < oldset->count; i++) {
++    for (size_t i = 0; i < oldset->count; i++) {
+         ret_maj = gss_add_oid_set_member(&ret_min, &oldset->elements[i], &n);
+         if (ret_maj) {
+             *minor_status = ret_min;
+@@ -124,7 +123,6 @@ static void gpmint_indicate_mechs(void)
+     uint32_t ret_min;
+     uint32_t ret_maj = 0;
+     int ret = 0;
+-    int i;
+     memset(arg, 0, sizeof(gssx_arg_indicate_mechs));
+     memset(res, 0, sizeof(gssx_res_indicate_mechs));
+@@ -158,7 +156,7 @@ static void gpmint_indicate_mechs(void)
+         goto done;
+     }
+-    for (i = 0; i < res->mechs.mechs_len; i++) {
++    for (unsigned i = 0; i < res->mechs.mechs_len; i++) {
+         mi = &res->mechs.mechs_val[i];
+         gi = &global_mechs.info[i];
+@@ -222,7 +220,7 @@ static void gpmint_indicate_mechs(void)
+         goto done;
+     }
+-    for (i = 0; i < res->mech_attr_descs.mech_attr_descs_len; i++) {
++    for (unsigned i = 0; i < res->mech_attr_descs.mech_attr_descs_len; i++) {
+         ma = &res->mech_attr_descs.mech_attr_descs_val[i];
+         ga = &global_mechs.desc[i];
+@@ -249,7 +247,7 @@ static void gpmint_indicate_mechs(void)
+ done:
+     if (ret || ret_maj) {
+-        for (i = 0; i < global_mechs.desc_len; i++) {
++        for (unsigned i = 0; i < global_mechs.desc_len; i++) {
+             ga = &global_mechs.desc[i];
+             gss_release_oid(&discard, &ga->attr);
+             gss_release_buffer(&discard, ga->name);
+@@ -258,7 +256,7 @@ done:
+         }
+         free(global_mechs.desc);
+         global_mechs.desc = NULL;
+-        for (i = 0; i < global_mechs.info_len; i++) {
++        for (unsigned i = 0; i < global_mechs.info_len; i++) {
+             gi = &global_mechs.info[i];
+             gss_release_oid(&discard, &gi->mech);
+             gss_release_oid_set(&discard, &gi->name_types);
+@@ -335,7 +333,6 @@ OM_uint32 gpm_inquire_names_for_mech(OM_uint32 *minor_status,
+ {
+     uint32_t ret_min;
+     uint32_t ret_maj;
+-    int i;
+     if (!minor_status) {
+         return GSS_S_CALL_INACCESSIBLE_WRITE;
+@@ -351,7 +348,7 @@ OM_uint32 gpm_inquire_names_for_mech(OM_uint32 *minor_status,
+         return GSS_S_FAILURE;
+     }
+-    for (i = 0; i < global_mechs.info_len; i++) {
++    for (unsigned i = 0; i < global_mechs.info_len; i++) {
+         if (!gpm_equal_oids(global_mechs.info[i].mech, mech_type)) {
+             continue;
+         }
+@@ -375,7 +372,6 @@ OM_uint32 gpm_inquire_mechs_for_name(OM_uint32 *minor_status,
+     uint32_t discard;
+     gss_OID name_type = GSS_C_NO_OID;
+     int present;
+-    int i;
+     if (!minor_status) {
+         return GSS_S_CALL_INACCESSIBLE_WRITE;
+@@ -402,7 +398,7 @@ OM_uint32 gpm_inquire_mechs_for_name(OM_uint32 *minor_status,
+         goto done;
+     }
+-    for (i = 0; i < global_mechs.info_len; i++) {
++    for (unsigned i = 0; i < global_mechs.info_len; i++) {
+         ret_maj = gss_test_oid_set_member(&ret_min, name_type,
+                                           global_mechs.info[i].name_types,
+                                           &present);
+@@ -439,7 +435,6 @@ OM_uint32 gpm_inquire_attrs_for_mech(OM_uint32 *minor_status,
+     uint32_t ret_min;
+     uint32_t ret_maj;
+     uint32_t discard;
+-    int i;
+     if (!minor_status) {
+         return GSS_S_CALL_INACCESSIBLE_WRITE;
+@@ -451,7 +446,7 @@ OM_uint32 gpm_inquire_attrs_for_mech(OM_uint32 *minor_status,
+         return GSS_S_FAILURE;
+     }
+-    for (i = 0; i < global_mechs.info_len; i++) {
++    for (unsigned i = 0; i < global_mechs.info_len; i++) {
+         if (!gpm_equal_oids(global_mechs.info[i].mech, mech)) {
+             continue;
+         }
+@@ -495,7 +490,6 @@ OM_uint32 gpm_inquire_saslname_for_mech(OM_uint32 *minor_status,
+     uint32_t ret_min;
+     uint32_t ret_maj;
+     uint32_t discard;
+-    int i;
+     if (!minor_status) {
+         return GSS_S_CALL_INACCESSIBLE_WRITE;
+@@ -511,7 +505,7 @@ OM_uint32 gpm_inquire_saslname_for_mech(OM_uint32 *minor_status,
+         return GSS_S_FAILURE;
+     }
+-    for (i = 0; i < global_mechs.info_len; i++) {
++    for (unsigned i = 0; i < global_mechs.info_len; i++) {
+         if (!gpm_equal_oids(global_mechs.info[i].mech, desired_mech)) {
+             continue;
+         }
+@@ -554,7 +548,6 @@ OM_uint32 gpm_display_mech_attr(OM_uint32 *minor_status,
+     uint32_t ret_min;
+     uint32_t ret_maj;
+     uint32_t discard;
+-    int i;
+     if (!minor_status) {
+         return GSS_S_CALL_INACCESSIBLE_WRITE;
+@@ -570,7 +563,7 @@ OM_uint32 gpm_display_mech_attr(OM_uint32 *minor_status,
+         return GSS_S_FAILURE;
+     }
+-    for (i = 0; i < global_mechs.desc_len; i++) {
++    for (unsigned i = 0; i < global_mechs.desc_len; i++) {
+         if (!gpm_equal_oids(global_mechs.desc[i].attr, mech_attr)) {
+             continue;
+         }
+@@ -614,7 +607,6 @@ OM_uint32 gpm_indicate_mechs_by_attrs(OM_uint32 *minor_status,
+     uint32_t ret_maj;
+     uint32_t discard;
+     int present;
+-    int i, j;
+     if (!minor_status) {
+         return GSS_S_CALL_INACCESSIBLE_WRITE;
+@@ -636,8 +628,9 @@ OM_uint32 gpm_indicate_mechs_by_attrs(OM_uint32 *minor_status,
+         return ret_maj;
+     }
+-    for (i = 0; i < global_mechs.info_len; i++) {
++    for (unsigned i = 0; i < global_mechs.info_len; i++) {
+         if (desired_mech_attrs != GSS_C_NO_OID_SET) {
++            unsigned j;
+             for (j = 0; j < desired_mech_attrs->count; j++) {
+                 ret_maj = gss_test_oid_set_member(&ret_min,
+                                             &desired_mech_attrs->elements[j],
+@@ -657,6 +650,7 @@ OM_uint32 gpm_indicate_mechs_by_attrs(OM_uint32 *minor_status,
+             }
+         }
+         if (except_mech_attrs != GSS_C_NO_OID_SET) {
++            unsigned j;
+             for (j = 0; j < except_mech_attrs->count; j++) {
+                 ret_maj = gss_test_oid_set_member(&ret_min,
+                                             &except_mech_attrs->elements[j],
+@@ -676,6 +670,7 @@ OM_uint32 gpm_indicate_mechs_by_attrs(OM_uint32 *minor_status,
+             }
+         }
+         if (critical_mech_attrs != GSS_C_NO_OID_SET) {
++            unsigned j;
+             for (j = 0; j < critical_mech_attrs->count; j++) {
+                 ret_maj = gss_test_oid_set_member(&ret_min,
+                                     &critical_mech_attrs->elements[j],
+diff --git a/proxy/src/gp_common.h b/proxy/src/gp_common.h
+index edc23b4..4f76e58 100644
+--- a/proxy/src/gp_common.h
++++ b/proxy/src/gp_common.h
+@@ -104,9 +104,8 @@ union gp_rpc_res {
+ #define gp_options_find(res, opts, name, len) \
+ do { \
+     struct gssx_option *_v; \
+-    int _o; \
+     res = NULL; \
+-    for (_o = 0; _o < opts.options_len; _o++) { \
++    for (unsigned _o = 0; _o < opts.options_len; _o++) { \
+         _v = &opts.options_val[_o]; \
+         if (gp_option_name_match(_v, name, len)) { \
+             res = _v; \
+diff --git a/proxy/src/gp_config.c b/proxy/src/gp_config.c
+index 5c1ca02..a671333 100644
+--- a/proxy/src/gp_config.c
++++ b/proxy/src/gp_config.c
+@@ -57,11 +57,9 @@ static void free_str_array(const char ***a, int *count)
+ void free_cred_store_elements(gss_key_value_set_desc *cs)
+ {
+-    int i;
+-
+     if (!cs->elements) return;
+-    for (i = 0; i < cs->count; i++) {
++    for (unsigned i = 0; i < cs->count; i++) {
+         safefree(cs->elements[i].key);
+         safefree(cs->elements[i].value);
+     }
+@@ -146,7 +144,7 @@ static int get_krb5_mech_cfg(struct gp_service *svc,
+                                      &count, &strings);
+     if (ret == 0) {
+         const char *p;
+-        size_t len;
++        ssize_t len;
+         char *key;
+         svc->krb5.store.elements =
+@@ -698,7 +696,6 @@ struct gp_creds_handle *gp_service_get_creds_handle(struct gp_service *svc)
+ void free_config(struct gp_config **cfg)
+ {
+     struct gp_config *config = *cfg;
+-    uint32_t i;
+     if (!config) {
+         return;
+@@ -709,7 +706,7 @@ void free_config(struct gp_config **cfg)
+     free(config->socket_name);
+     free(config->proxy_user);
+-    for (i=0; i < config->num_svcs; i++) {
++    for (int i = 0; i < config->num_svcs; i++) {
+         gp_service_free(config->svcs[i]);
+         safefree(config->svcs[i]);
+     }
+diff --git a/proxy/src/gp_conv.c b/proxy/src/gp_conv.c
+index 71d6d9d..b874b06 100644
+--- a/proxy/src/gp_conv.c
++++ b/proxy/src/gp_conv.c
+@@ -599,7 +599,6 @@ done:
+ int gp_conv_gssx_to_oid_set(gssx_OID_set *in, gss_OID_set *out)
+ {
+     gss_OID_set o;
+-    int i;
+     if (in->gssx_OID_set_len == 0) {
+         *out = GSS_C_NO_OID_SET;
+@@ -618,7 +617,7 @@ int gp_conv_gssx_to_oid_set(gssx_OID_set *in, gss_OID_set *out)
+         return ENOMEM;
+     }
+-    for (i = 0; i < o->count; i++) {
++    for (size_t i = 0; i < o->count; i++) {
+         o->elements[i].elements =
+                         gp_memdup(in->gssx_OID_set_val[i].octet_string_val,
+                                   in->gssx_OID_set_val[i].octet_string_len);
+@@ -641,7 +640,6 @@ int gp_conv_gssx_to_oid_set(gssx_OID_set *in, gss_OID_set *out)
+ int gp_conv_oid_set_to_gssx(gss_OID_set in, gssx_OID_set *out)
+ {
+     int ret;
+-    int i;
+     if (in->count == 0) {
+         return 0;
+@@ -653,7 +651,7 @@ int gp_conv_oid_set_to_gssx(gss_OID_set in, gssx_OID_set *out)
+         return ENOMEM;
+     }
+-    for (i = 0; i < in->count; i++) {
++    for (size_t i = 0; i < in->count; i++) {
+         ret = gp_conv_octet_string(in->elements[i].length,
+                                    in->elements[i].elements,
+                                    &out->gssx_OID_set_val[i]);
+diff --git a/proxy/src/gp_creds.c b/proxy/src/gp_creds.c
+index 6570b06..e05ad01 100644
+--- a/proxy/src/gp_creds.c
++++ b/proxy/src/gp_creds.c
+@@ -312,7 +312,6 @@ static int gp_get_cred_environment(struct gp_call_ctx *gpcall,
+     int k_num = -1;
+     int ck_num = -1;
+     int cc_num = -1;
+-    int d;
+     memset(cs, 0, sizeof(gss_key_value_set_desc));
+@@ -419,7 +418,7 @@ static int gp_get_cred_environment(struct gp_call_ctx *gpcall,
+         ret = ENOMEM;
+         goto done;
+     }
+-    for (d = 0; d < svc->krb5.store.count; d++) {
++    for (unsigned d = 0; d < svc->krb5.store.count; d++) {
+         if (strcmp(svc->krb5.store.elements[d].key, "client_keytab") == 0) {
+             ck_num = cs->count;
+         } else if (strcmp(svc->krb5.store.elements[d].key, "keytab") == 0) {
+diff --git a/proxy/src/gp_export.c b/proxy/src/gp_export.c
+index 12b8d5f..3a927c9 100644
+--- a/proxy/src/gp_export.c
++++ b/proxy/src/gp_export.c
+@@ -288,7 +288,6 @@ uint32_t gp_export_gssx_cred(uint32_t *min, struct gp_call_ctx *gpcall,
+     uint32_t acceptor_lifetime = 0;
+     struct gssx_cred_element *el;
+     int ret;
+-    int i, j;
+     struct gp_creds_handle *handle = NULL;
+     gss_buffer_desc token = GSS_C_EMPTY_BUFFER;
+@@ -314,8 +313,7 @@ uint32_t gp_export_gssx_cred(uint32_t *min, struct gp_call_ctx *gpcall,
+     }
+     out->elements.elements_len = mechanisms->count;
+-    for (i = 0, j = 0; i < mechanisms->count; i++, j++) {
+-
++    for (unsigned i = 0, j = 0; i < mechanisms->count; i++, j++) {
+         el = &out->elements.elements_val[j];
+         ret_maj = gss_inquire_cred_by_mech(&ret_min, *in,
+@@ -399,11 +397,10 @@ static void gp_set_cred_options(gssx_cred *cred, gss_cred_id_t gss_cred)
+     krb5_enctype *ktypes;
+     bool no_ci_flags = false;
+     uint32_t maj, min;
+-    int i, j;
+-    for (i = 0; i < cred->elements.elements_len; i++) {
++    for (unsigned i = 0; i < cred->elements.elements_len; i++) {
+         ce = &cred->elements.elements_val[i];
+-        for (j = 0; j < ce->options.options_len; j++) {
++        for (unsigned j = 0; j < ce->options.options_len; j++) {
+             op = &ce->options.options_val[j];
+             if ((op->option.octet_string_len ==
+                     sizeof(KRB5_SET_ALLOWED_ENCTYPE)) &&
+diff --git a/proxy/src/gp_rpc_acquire_cred.c b/proxy/src/gp_rpc_acquire_cred.c
+index e9c7d56..fcb4fbe 100644
+--- a/proxy/src/gp_rpc_acquire_cred.c
++++ b/proxy/src/gp_rpc_acquire_cred.c
+@@ -20,7 +20,6 @@ int gp_acquire_cred(struct gp_call_ctx *gpcall,
+     gss_cred_id_t *add_out_cred = NULL;
+     int acquire_type = ACQ_NORMAL;
+     int ret;
+-    int i;
+     aca = &arg->acquire_cred;
+     acr = &res->acquire_cred;
+@@ -63,7 +62,7 @@ int gp_acquire_cred(struct gp_call_ctx *gpcall,
+             goto done;
+         }
+-        for (i = 0; i < desired_mechs->count; i++) {
++        for (unsigned i = 0; i < desired_mechs->count; i++) {
+             desired_mech = &desired_mechs->elements[i];
+             if (!gp_creds_allowed_mech(gpcall, desired_mech)) {
+@@ -93,7 +92,7 @@ int gp_acquire_cred(struct gp_call_ctx *gpcall,
+     cred_usage = gp_conv_gssx_to_cred_usage(aca->cred_usage);
+-    for (i = 0; i < use_mechs->count; i++) {
++    for (unsigned i = 0; i < use_mechs->count; i++) {
+         desired_mech = &use_mechs->elements[i];
+         /* this should really be folded into an extended
+          * gss_add_cred in gssapi that can accept a set of URIs
+diff --git a/proxy/src/gp_rpc_debug.c b/proxy/src/gp_rpc_debug.c
+index 2e2c050..a814448 100644
+--- a/proxy/src/gp_rpc_debug.c
++++ b/proxy/src/gp_rpc_debug.c
+@@ -19,7 +19,7 @@ void gpdbg_octet_string(octet_string *x)
+         }
+         fprintf(stderr, "... ] ");
+     } else {
+-        for (int i = 0; i < x->octet_string_len; i++) {
++        for (unsigned i = 0; i < x->octet_string_len; i++) {
+             fprintf(stderr, "%x", x->octet_string_val[i]);
+         }
+         fprintf(stderr, " ] ");
+@@ -55,7 +55,7 @@ void gpdbg_gssx_OID(gssx_OID *x)
+ void gpdbg_gssx_OID_set(gssx_OID_set *x)
+ {
+     gp_debug_printf("{ ");
+-    for (int i = 0; i < x->gssx_OID_set_len; i++) {
++    for (unsigned i = 0; i < x->gssx_OID_set_len; i++) {
+         gpdbg_gssx_OID(&x->gssx_OID_set_val[i]);
+     }
+     gp_debug_printf("} ");
+@@ -90,7 +90,7 @@ void gpdbg_gssx_option(gssx_option *x)
+ #define gpdbg_extensions(x) do { \
+     if ((x)->extensions.extensions_len > 0) { \
+         gp_debug_printf("[ "); \
+-        for (int i = 0; i < (x)->extensions.extensions_len; i++) { \
++        for (unsigned i = 0; i < (x)->extensions.extensions_len; i++) { \
+             gpdbg_gssx_option(&(x)->extensions.extensions_val[i]); \
+         } \
+         gp_debug_printf("] "); \
+@@ -100,7 +100,7 @@ void gpdbg_gssx_option(gssx_option *x)
+ #define gpdbg_options(x) do { \
+     if ((x)->options.options_len > 0) { \
+         gp_debug_printf("[ "); \
+-        for (int i = 0; i < (x)->options.options_len; i++) { \
++        for (unsigned i = 0; i < (x)->options.options_len; i++) { \
+             gpdbg_gssx_option(&(x)->options.options_val[i]); \
+         } \
+         gp_debug_printf("] "); \
+@@ -168,7 +168,7 @@ void gpdbg_gssx_call_ctx(gssx_call_ctx *x)
+ #define gpdbg_name_attributes(X) do { \
+     gp_debug_printf("[ "); \
+     if (x->name_attributes.name_attributes_len > 0) { \
+-        for (int i = 0; i < x->name_attributes.name_attributes_len; i++) { \
++        for (unsigned i = 0; i < x->name_attributes.name_attributes_len; i++) { \
+             gpdbg_gssx_name_attr( \
+                 &x->name_attributes.name_attributes_val[i]); \
+         } \
+@@ -209,7 +209,7 @@ void gpdbg_gssx_cred(gssx_cred *x)
+     gp_debug_printf("{ ");
+     gpdbg_gssx_name(&x->desired_name);
+     gp_debug_printf("[ ");
+-    for (int i = 0; i < x->elements.elements_len; i++) {
++    for (unsigned i = 0; i < x->elements.elements_len; i++) {
+         gpdbg_gssx_cred_element(&x->elements.elements_val[i]);
+     }
+     gp_debug_printf("] ");
+@@ -289,17 +289,17 @@ void gpdbg_gssx_res_indicate_mechs(gssx_res_indicate_mechs *x)
+     gp_debug_printf("    GSSX_RES_INDICATE_MECHS( status: ");
+     gpdbg_gssx_status(&x->status);
+     gp_debug_printf("mechs: [ ");
+-    for (int i = 0; i < x->mechs.mechs_len; i++) {
++    for (unsigned i = 0; i < x->mechs.mechs_len; i++) {
+         gpdbg_gssx_mech_info(&x->mechs.mechs_val[i]);
+     }
+     gp_debug_printf("] ");
+     gp_debug_printf("mech_attr_descs: [ ");
+-    for (int i = 0; i < x->mech_attr_descs.mech_attr_descs_len; i++) {
++    for (unsigned i = 0; i < x->mech_attr_descs.mech_attr_descs_len; i++) {
+         gpdbg_gssx_mech_attr(&x->mech_attr_descs.mech_attr_descs_val[i]);
+     }
+     gp_debug_printf("] ");
+     gp_debug_printf("supported_extensions: [ ");
+-    for (int i = 0;
++    for (unsigned i = 0;
+          i < x->supported_extensions.supported_extensions_len; i++) {
+         gpdbg_gssx_buffer(
+             &x->supported_extensions.supported_extensions_val[i]);
+@@ -602,7 +602,7 @@ void gpdbg_gssx_arg_wrap(gssx_arg_wrap *x)
+     gp_debug_printf("conf_req: ");
+     gp_debug_printf("%d ", (int)x->conf_req);
+     gp_debug_printf("message_buffer: [ ");
+-    for (int i = 0; i < x->message_buffer.message_buffer_len; i++) {
++    for (unsigned i = 0; i < x->message_buffer.message_buffer_len; i++) {
+         gpdbg_octet_string(&x->message_buffer.message_buffer_val[i]);
+     }
+     gp_debug_printf("] ");
+@@ -618,7 +618,7 @@ void gpdbg_gssx_res_wrap(gssx_res_wrap *x)
+     gp_debug_printf("context_handle: ");
+     GPRPCDEBUG(gssx_ctx, x->context_handle);
+     gp_debug_printf("token_buffer: [ ");
+-    for (int i = 0; i < x->token_buffer.token_buffer_len; i++) {
++    for (unsigned i = 0; i < x->token_buffer.token_buffer_len; i++) {
+         gpdbg_octet_string(&x->token_buffer.token_buffer_val[i]);
+     }
+     gp_debug_printf("] ");
+@@ -640,7 +640,7 @@ void gpdbg_gssx_arg_unwrap(gssx_arg_unwrap *x)
+     gp_debug_printf("context_handle: ");
+     gpdbg_gssx_ctx(&x->context_handle);
+     gp_debug_printf("token_buffer: [ ");
+-    for (int i = 0; i < x->token_buffer.token_buffer_len; i++) {
++    for (unsigned i = 0; i < x->token_buffer.token_buffer_len; i++) {
+         gpdbg_octet_string(&x->token_buffer.token_buffer_val[i]);
+     }
+     gp_debug_printf("] ");
+@@ -656,7 +656,7 @@ void gpdbg_gssx_res_unwrap(gssx_res_unwrap *x)
+     gp_debug_printf("context_handle: ");
+     GPRPCDEBUG(gssx_ctx, x->context_handle);
+     gp_debug_printf("message_buffer: [ ");
+-    for (int i = 0; i < x->message_buffer.message_buffer_len; i++) {
++    for (unsigned i = 0; i < x->message_buffer.message_buffer_len; i++) {
+         gpdbg_octet_string(&x->message_buffer.message_buffer_val[i]);
+     }
+     gp_debug_printf("] ");
+diff --git a/proxy/src/gp_rpc_indicate_mechs.c b/proxy/src/gp_rpc_indicate_mechs.c
+index 8abbc7f..6ae6756 100644
+--- a/proxy/src/gp_rpc_indicate_mechs.c
++++ b/proxy/src/gp_rpc_indicate_mechs.c
+@@ -25,8 +25,7 @@ int gp_indicate_mechs(struct gp_call_ctx *gpcall UNUSED,
+     uint32_t ret_maj;
+     uint32_t ret_min;
+     int present;
+-    int h, i, j;
+-    int ret;
++     int ret;
+     ima = &arg->indicate_mechs;
+     imr = &res->indicate_mechs;
+@@ -53,8 +52,7 @@ int gp_indicate_mechs(struct gp_call_ctx *gpcall UNUSED,
+     }
+     imr->mechs.mechs_len = mech_set->count;
+-    for (i = 0, h = 0; i < mech_set->count; i++, h++) {
+-
++    for (unsigned i = 0, h = 0; i < mech_set->count; i++, h++) {
+         mi = &imr->mechs.mechs_val[h];
+         ret = gp_conv_oid_to_gssx(&mech_set->elements[i], &mi->mech);
+@@ -104,8 +102,7 @@ int gp_indicate_mechs(struct gp_call_ctx *gpcall UNUSED,
+             ret_min = ret;
+             goto done;
+         }
+-        for (j = 0; j < mech_attrs->count; j++) {
+-
++        for (unsigned j = 0; j < mech_attrs->count; j++) {
+             ret_maj = gss_test_oid_set_member(&ret_min,
+                                               &mech_attrs->elements[j],
+                                               attr_set,
+@@ -136,8 +133,7 @@ int gp_indicate_mechs(struct gp_call_ctx *gpcall UNUSED,
+             goto done;
+         }
+-        for (j = 0; j < known_mech_attrs->count; j++) {
+-
++        for (unsigned j = 0; j < known_mech_attrs->count; j++) {
+             ret_maj = gss_test_oid_set_member(&ret_min,
+                                               &known_mech_attrs->elements[j],
+                                               attr_set,
+@@ -205,8 +201,7 @@ int gp_indicate_mechs(struct gp_call_ctx *gpcall UNUSED,
+     }
+     imr->mech_attr_descs.mech_attr_descs_len = attr_set->count;
+-    for (i = 0; i < attr_set->count; i++) {
+-
++    for (unsigned i = 0; i < attr_set->count; i++) {
+         ma = &imr->mech_attr_descs.mech_attr_descs_val[i];
+         ret = gp_conv_oid_to_gssx(&attr_set->elements[i], &ma->attr);
+diff --git a/proxy/src/gp_socket.c b/proxy/src/gp_socket.c
+index 829ff21..17ecf7c 100644
+--- a/proxy/src/gp_socket.c
++++ b/proxy/src/gp_socket.c
+@@ -303,7 +303,7 @@ static void gp_socket_read(verto_ctx *vctx, verto_ev *ev)
+     struct gp_buffer *rbuf;
+     uint32_t size;
+     bool header = false;
+-    size_t rn;
++    ssize_t rn;
+     int ret;
+     int fd;
+@@ -487,7 +487,7 @@ static void gp_socket_write(verto_ctx *vctx, verto_ev *ev)
+         return;
+     }
+     if (vecs == 2) {
+-        if (wn < sizeof(size)) {
++        if (wn < (ssize_t) sizeof(size)) {
+             /* don't bother trying to handle sockets that can't
+              * buffer even 4 bytes */
+             gp_conn_free(wbuf->conn);
+diff --git a/proxy/src/gp_util.c b/proxy/src/gp_util.c
+index ca83eb3..f158b84 100644
+--- a/proxy/src/gp_util.c
++++ b/proxy/src/gp_util.c
+@@ -109,7 +109,7 @@ char *gp_strerror(int errnum)
+ ssize_t gp_safe_read(int fd, void *buf, size_t count)
+ {
+     char *b = (char *)buf;
+-    ssize_t len = 0;
++    size_t len = 0;
+     ssize_t ret;
+     do {
+@@ -128,7 +128,7 @@ ssize_t gp_safe_read(int fd, void *buf, size_t count)
+ ssize_t gp_safe_write(int fd, const void *buf, size_t count)
+ {
+     const char *b = (const char *)buf;
+-    ssize_t len = 0;
++    size_t len = 0;
+     ssize_t ret;
+     do {
+diff --git a/proxy/tests/t_utils.c b/proxy/tests/t_utils.c
+index 6af9a16..36f7bd1 100644
+--- a/proxy/tests/t_utils.c
++++ b/proxy/tests/t_utils.c
+@@ -8,7 +8,7 @@
+ int t_send_buffer(int fd, char *buf, uint32_t len)
+ {
+     uint32_t size;
+-    size_t wn;
++    ssize_t wn;
+     size_t pos;
+     size = htonl(len);
+@@ -36,7 +36,7 @@ int t_send_buffer(int fd, char *buf, uint32_t len)
+ int t_recv_buffer(int fd, char *buf, uint32_t *len)
+ {
+     uint32_t size;
+-    size_t rn;
++    ssize_t rn;
+     size_t pos;
+     rn = read(fd, &size, sizeof(uint32_t));
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-most-memory-leaks.patch b/meta-stx/recipes-security/gssproxy/files/Fix-most-memory-leaks.patch
new file mode 100644 (file)
index 0000000..f4a83d3
--- /dev/null
@@ -0,0 +1,250 @@
+From 9f9ab1e13c72b7c1fd06b6ba085ba2853bb9c3ca Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <ascheel@redhat.com>
+Date: Thu, 29 Jun 2017 10:59:46 -0400
+Subject: [PATCH] Fix most memory leaks
+
+Signed-off-by: Alexander Scheel <ascheel@redhat.com>
+[rharwood@redhat.com: commit message, whitespace]
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #203
+Related: #176
+(cherry picked from commit 470cf4d745d57f0597124a35b2faf86ba1107bb5)
+[rharwood@redhat.com: backport around missing program support]
+---
+ proxy/src/gp_config.c            |  1 +
+ proxy/src/gp_creds.c             |  2 ++
+ proxy/src/gp_export.c            |  3 ++-
+ proxy/src/gp_rpc_acquire_cred.c  | 17 ++++++++-----
+ proxy/src/gssproxy.c             | 42 +++++++++++++++++++++++---------
+ proxy/src/mechglue/gpp_context.c |  2 ++
+ proxy/tests/t_acquire.c          |  3 +++
+ 7 files changed, 51 insertions(+), 19 deletions(-)
+
+diff --git a/proxy/src/gp_config.c b/proxy/src/gp_config.c
+index a671333..b4ab90c 100644
+--- a/proxy/src/gp_config.c
++++ b/proxy/src/gp_config.c
+@@ -75,6 +75,7 @@ static void gp_service_free(struct gp_service *svc)
+         free_cred_store_elements(&svc->krb5.store);
+         gp_free_creds_handle(&svc->krb5.creds_handle);
+     }
++    free(svc->socket);
+     SELINUX_context_free(svc->selinux_ctx);
+     memset(svc, 0, sizeof(struct gp_service));
+ }
+diff --git a/proxy/src/gp_creds.c b/proxy/src/gp_creds.c
+index fdc6bdf..2cb4ce7 100644
+--- a/proxy/src/gp_creds.c
++++ b/proxy/src/gp_creds.c
+@@ -1049,6 +1049,8 @@ uint32_t gp_count_tickets(uint32_t *min, gss_cred_id_t cred, uint32_t *ccsum)
+             goto done;
+         }
++        krb5_free_cred_contents(context, &creds);
++
+         /* TODO: Should we do a real checksum over all creds->ticket data and
+          * flags in future ? */
+         (*ccsum)++;
+diff --git a/proxy/src/gp_export.c b/proxy/src/gp_export.c
+index 4e081df..ab08bb7 100644
+--- a/proxy/src/gp_export.c
++++ b/proxy/src/gp_export.c
+@@ -47,7 +47,7 @@ uint32_t gp_init_creds_with_keytab(uint32_t *min, const char *svc_name,
+     krb5_keytab ktid = NULL;
+     krb5_kt_cursor cursor;
+     krb5_keytab_entry entry;
+-    krb5_enctype *permitted;
++    krb5_enctype *permitted = NULL;
+     uint32_t ret_maj = 0;
+     uint32_t ret_min = 0;
+     int ret;
+@@ -127,6 +127,7 @@ uint32_t gp_init_creds_with_keytab(uint32_t *min, const char *svc_name,
+     ret_maj = GSS_S_COMPLETE;
+ done:
++    krb5_free_enctypes(handle->context, permitted);
+     if (ktid) {
+         (void)krb5_kt_close(handle->context, ktid);
+     }
+diff --git a/proxy/src/gp_rpc_acquire_cred.c b/proxy/src/gp_rpc_acquire_cred.c
+index fcb4fbe..7ddb427 100644
+--- a/proxy/src/gp_rpc_acquire_cred.c
++++ b/proxy/src/gp_rpc_acquire_cred.c
+@@ -130,17 +130,18 @@ int gp_acquire_cred(struct gp_call_ctx *gpcall,
+         }
+     }
+-    acr->output_cred_handle = calloc(1, sizeof(gssx_cred));
+-    if (!acr->output_cred_handle) {
+-        ret_maj = GSS_S_FAILURE;
+-        ret_min = ENOMEM;
+-        goto done;
+-    }
+     if (out_cred == in_cred) {
+         acr->output_cred_handle = aca->input_cred_handle;
+         aca->input_cred_handle = NULL;
+     } else {
++        acr->output_cred_handle = calloc(1, sizeof(gssx_cred));
++        if (!acr->output_cred_handle) {
++            ret_maj = GSS_S_FAILURE;
++            ret_min = ENOMEM;
++            goto done;
++        }
++
+         ret_maj = gp_export_gssx_cred(&ret_min, gpcall,
+                                       &out_cred, acr->output_cred_handle);
+         if (ret_maj) {
+@@ -154,6 +155,10 @@ done:
+     GPRPCDEBUG(gssx_res_acquire_cred, acr);
++    if (add_out_cred != &in_cred && add_out_cred != &out_cred)
++        gss_release_cred(&ret_min, add_out_cred);
++    if (in_cred != out_cred)
++        gss_release_cred(&ret_min, &in_cred);
+     gss_release_cred(&ret_min, &out_cred);
+     gss_release_oid_set(&ret_min, &use_mechs);
+     gss_release_oid_set(&ret_min, &desired_mechs);
+diff --git a/proxy/src/gssproxy.c b/proxy/src/gssproxy.c
+index a020218..5c5937d 100644
+--- a/proxy/src/gssproxy.c
++++ b/proxy/src/gssproxy.c
+@@ -157,7 +157,7 @@ int main(int argc, const char *argv[])
+     verto_ctx *vctx;
+     verto_ev *ev;
+     int wait_fd;
+-    int ret;
++    int ret = -1;
+     struct poptOption long_options[] = {
+         POPT_AUTOHELP
+@@ -187,13 +187,17 @@ int main(int argc, const char *argv[])
+             fprintf(stderr, "\nInvalid option %s: %s\n\n",
+                     poptBadOption(pc, 0), poptStrerror(opt));
+             poptPrintUsage(pc, stderr, 0);
+-            return 1;
++
++            ret = 1;
++            goto cleanup;
+         }
+     }
+     if (opt_version) {
+         puts(VERSION""DISTRO_VERSION""PRERELEASE_VERSION);
+-        return 0;
++        poptFreeContext(pc);
++        ret = 0;
++        goto cleanup;
+     }
+     if (opt_debug || opt_debug_level > 0) {
+@@ -204,7 +208,8 @@ int main(int argc, const char *argv[])
+     if (opt_daemon && opt_interactive) {
+         fprintf(stderr, "Option -i|--interactive is not allowed together with -D|--daemon\n");
+         poptPrintUsage(pc, stderr, 0);
+-        return 1;
++        ret = 0;
++        goto cleanup;
+     }
+     if (opt_interactive) {
+@@ -218,7 +223,8 @@ int main(int argc, const char *argv[])
+                                 opt_config_socket,
+                                 opt_daemon);
+     if (!gpctx->config) {
+-        exit(EXIT_FAILURE);
++        ret = EXIT_FAILURE;
++        goto cleanup;
+     }
+     init_server(gpctx->config->daemonize, &wait_fd);
+@@ -229,7 +235,8 @@ int main(int argc, const char *argv[])
+     if (!vctx) {
+         fprintf(stderr, "Failed to initialize event loop. "
+                         "Is there at least one libverto backend installed?\n");
+-        return 1;
++        ret = 1;
++        goto cleanup;
+     }
+     gpctx->vctx = vctx;
+@@ -237,12 +244,13 @@ int main(int argc, const char *argv[])
+     ev = verto_add_signal(vctx, VERTO_EV_FLAG_PERSIST, hup_handler, SIGHUP);
+     if (!ev) {
+         fprintf(stderr, "Failed to register SIGHUP handler with verto!\n");
+-        return 1;
++        ret = 1;
++        goto cleanup;
+     }
+     ret = init_sockets(vctx, NULL);
+     if (ret != 0) {
+-        return ret;
++        goto cleanup;
+     }
+     /* We need to tell nfsd that GSS-Proxy is available before it starts,
+@@ -256,12 +264,14 @@ int main(int argc, const char *argv[])
+     ret = drop_privs(gpctx->config);
+     if (ret) {
+-        exit(EXIT_FAILURE);
++        ret = EXIT_FAILURE;
++        goto cleanup;
+     }
+     ret = gp_workers_init(gpctx);
+     if (ret) {
+-        exit(EXIT_FAILURE);
++        ret = EXIT_FAILURE;
++        goto cleanup;
+     }
+     verto_run(vctx);
+@@ -271,9 +281,17 @@ int main(int argc, const char *argv[])
+     fini_server();
+-    poptFreeContext(pc);
+     free_config(&gpctx->config);
++    free(gpctx);
+-    return 0;
++    ret = 0;
++
++cleanup:
++    poptFreeContext(pc);
++    free(opt_config_file);
++    free(opt_config_dir);
++    free(opt_config_socket);
++
++    return ret;
+ }
+diff --git a/proxy/src/mechglue/gpp_context.c b/proxy/src/mechglue/gpp_context.c
+index 2f41e4f..69e69e0 100644
+--- a/proxy/src/mechglue/gpp_context.c
++++ b/proxy/src/mechglue/gpp_context.c
+@@ -362,6 +362,8 @@ OM_uint32 gssi_delete_sec_context(OM_uint32 *minor_status,
+         }
+     }
++    free(ctx);
++
+     return rmaj;
+ }
+diff --git a/proxy/tests/t_acquire.c b/proxy/tests/t_acquire.c
+index 2bb7706..5334565 100644
+--- a/proxy/tests/t_acquire.c
++++ b/proxy/tests/t_acquire.c
+@@ -132,5 +132,8 @@ done:
+     gss_release_buffer(&ret_min, &in_token);
+     gss_release_buffer(&ret_min, &out_token);
+     gss_release_cred(&ret_min, &cred_handle);
++    gss_release_name(&ret_min, &target_name);
++    gss_delete_sec_context(&ret_min, &init_ctx, GSS_C_NO_BUFFER);
++    gss_delete_sec_context(&ret_min, &accept_ctx, GSS_C_NO_BUFFER);
+     return ret;
+ }
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-potential-free-of-non-heap-address.patch b/meta-stx/recipes-security/gssproxy/files/Fix-potential-free-of-non-heap-address.patch
new file mode 100644 (file)
index 0000000..699307b
--- /dev/null
@@ -0,0 +1,28 @@
+From e087470af1a51b58fbac434ff2e30bc3f2a1f9ac Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Mon, 11 Sep 2017 10:52:03 -0400
+Subject: [PATCH] Fix potential free of non-heap address
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #211
+(cherry picked from commit 068f4174001c3ea4ae7913fb37210fec84abf1df)
+---
+ proxy/src/client/gpm_common.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index dba23a6..c65c69d 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -553,8 +553,8 @@ static int gpm_send_recv_loop(struct gpm_ctx *gpmctx, char *send_buffer,
+             ret = gpm_retry_socket(gpmctx);
+             /* Free buffer and set it to NULL to prevent free(xdr_reply_ctx) */
+-            free(recv_buffer);
+-            recv_buffer = NULL;
++            free(*recv_buffer);
++            *recv_buffer = NULL;
+             if (ret != 0)
+                 return ret;
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-segfault-when-no-config-files-are-present.patch b/meta-stx/recipes-security/gssproxy/files/Fix-segfault-when-no-config-files-are-present.patch
new file mode 100644 (file)
index 0000000..f41fbea
--- /dev/null
@@ -0,0 +1,39 @@
+From 76954aa028a897021a9bdcb0a1b5249e2652f7b6 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Tue, 16 May 2017 14:16:23 -0400
+Subject: [PATCH] Fix segfault when no config files are present
+
+Resolves: rhbz#1451255
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #185
+(cherry picked from commit df434333de34a13440857b511a4c60fbc6a71a5c)
+---
+ proxy/src/gp_config.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/proxy/src/gp_config.c b/proxy/src/gp_config.c
+index b4ab90c..8fd60a3 100644
+--- a/proxy/src/gp_config.c
++++ b/proxy/src/gp_config.c
+@@ -844,16 +844,17 @@ int gp_config_init(const char *config_file, const char *config_dir,
+     if (config_file) {
+         ret = gp_config_from_file(config_file, ini_config, collision_flags);
+-        if (ret == ENOENT) {
+-            GPDEBUG("Expected config file %s but did not find it.\n",
++        if (ret) {
++            GPDEBUG("Error when trying to read config file %s.\n",
+                     config_file);
+-        } else if (ret) {
+             return ret;
+         }
+     }
+     if (config_dir) {
+         ret = gp_config_from_dir(config_dir, &ini_config, collision_flags);
+         if (ret) {
++            GPDEBUG("Error when trying to read config directory %s.\n",
++                    config_dir);
+             return ret;
+         }
+     }
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-silent-crash-with-duplicate-config-sections.patch b/meta-stx/recipes-security/gssproxy/files/Fix-silent-crash-with-duplicate-config-sections.patch
new file mode 100644 (file)
index 0000000..5496f74
--- /dev/null
@@ -0,0 +1,220 @@
+From caec174b203206185b6075c0e822c6f45070dd87 Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <ascheel@redhat.com>
+Date: Wed, 9 Aug 2017 15:00:26 -0400
+Subject: [PATCH] Fix silent crash with duplicate config sections
+
+Signed-off-by: Alexander Scheel <ascheel@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Resolves: #194
+Merges: #202
+(cherry picked from commit c0d85387fc38f9554d601ec2ddb111031a694387)
+---
+ proxy/configure.ac    | 125 ++++++++++++++++++++++++++++++++++++++++++
+ proxy/src/gp_config.c |  27 ++++-----
+ 2 files changed, 137 insertions(+), 15 deletions(-)
+
+diff --git a/proxy/configure.ac b/proxy/configure.ac
+index c52dbb6..9e01f7d 100644
+--- a/proxy/configure.ac
++++ b/proxy/configure.ac
+@@ -107,6 +107,131 @@ fi
+ AC_SUBST(INI_LIBS)
+ AC_SUBST(INI_CFLAGS)
++AC_CHECK_LIB(ref_array, ref_array_destroy, [],
++             [AC_MSG_WARN([ref_array library must support ref_array_destroy])],
++             [$INI_CONFIG_LIBS])
++
++AC_RUN_IFELSE([AC_LANG_SOURCE([[
++/* See: https://pagure.io/SSSD/ding-libs/pull-request/3172 */
++#include <linux/limits.h>
++#include <string.h>
++#include <errno.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <stdint.h>
++#include <ini_configobj.h>
++#include <ini_config.h>
++
++static int write_to_file(char *path, char *text)
++{
++    FILE *f = fopen(path, "w");
++    int bytes = 0;
++    if (f == NULL)
++        return 1;
++
++    bytes = fprintf(f, "%s", text);
++    if (bytes != strlen(text))
++        return 1;
++
++    return fclose(f);
++}
++
++int main(void)
++{
++    char base_path[PATH_MAX];
++    char augment_path[PATH_MAX];
++
++    char config_base[] =
++        "[section]\n"
++        "key1 = first\n"
++        "key2 = exists\n";
++
++    char config_augment[] =
++        "[section]\n"
++        "key1 = augment\n"
++        "key3 = exists\n";
++
++    char *builddir;
++
++    struct ini_cfgobj *in_cfg, *result_cfg;
++    struct ini_cfgfile *file_ctx;
++
++    uint32_t merge_flags = INI_MS_DETECT | INI_MS_PRESERVE;
++
++    int ret;
++
++    builddir = getenv("builddir");
++    if (builddir == NULL) {
++        builddir = strdup(".");
++    }
++
++    snprintf(base_path, PATH_MAX, "%s/tmp_augment_base.conf", builddir);
++    snprintf(augment_path, PATH_MAX, "%s/tmp_augment_augment.conf", builddir);
++
++    ret = write_to_file(base_path, config_base);
++    if (ret != 0) {
++        ret = 1;
++        goto cleanup;
++    }
++
++    ret = write_to_file(augment_path, config_augment);
++    if (ret != 0) {
++        goto cleanup;
++    }
++
++    /* Match only augment.conf */
++    const char *m_patterns[] = { "^tmp_augment_augment.conf$", NULL };
++
++     /* Match all sections */
++    const char *m_sections[] = { ".*", NULL };
++
++    /* Create config collection */
++    ret = ini_config_create(&in_cfg);
++    if (ret != EOK)
++        goto cleanup;
++
++    /* Open base.conf */
++    ret = ini_config_file_open(base_path, 0, &file_ctx);
++    if (ret != EOK)
++        goto cleanup;
++
++    /* Seed in_cfg with base.conf */
++    ret = ini_config_parse(file_ctx, 1, 0, 0, in_cfg);
++    if (ret != EOK)
++        goto cleanup;
++
++    /* Update base.conf with augment.conf */
++    ret = ini_config_augment(in_cfg,
++                             builddir,
++                             m_patterns,
++                             m_sections,
++                             NULL,
++                             INI_STOP_ON_NONE,
++                             0,
++                             INI_PARSE_NOSPACE|INI_PARSE_NOTAB,
++                             merge_flags,
++                             &result_cfg,
++                             NULL,
++                             NULL);
++    /* We always expect EEXIST due to DETECT being set. */
++    if (ret != EEXIST)
++        goto cleanup;
++
++    ret = 0;
++
++cleanup:
++    remove(base_path);
++    remove(augment_path);
++
++    /* Per autoconf guidelines */
++    if (ret != 0)
++        ret = 1;
++
++    return ret;
++}
++]])]
++,, [AC_MSG_ERROR(["ini_config library must support extended INI_MS_DETECT. See: https://pagure.io/SSSD/ding-libs/pull-request/3172"])])
++
+ AX_PTHREAD(,[AC_MSG_ERROR([Could not find Pthreads support])])
+ LIBS="$PTHREAD_LIBS $LIBS"
+diff --git a/proxy/src/gp_config.c b/proxy/src/gp_config.c
+index 07f7c8d..cd057a0 100644
+--- a/proxy/src/gp_config.c
++++ b/proxy/src/gp_config.c
+@@ -728,7 +728,7 @@ static int gp_config_from_file(const char *config_file,
+                                0, /* metadata_flags, FIXME */
+                                &file_ctx);
+     if (ret) {
+-        GPDEBUG("Failed to open config file: %d (%s)\n",
++        GPERROR("Failed to open config file: %d (%s)\n",
+                 ret, gp_strerror(ret));
+         ini_config_destroy(ini_config);
+         return ret;
+@@ -742,7 +742,7 @@ static int gp_config_from_file(const char *config_file,
+     if (ret) {
+         char **errors = NULL;
+         /* we had a parsing failure */
+-        GPDEBUG("Failed to parse config file: %d (%s)\n",
++        GPERROR("Failed to parse config file: %d (%s)\n",
+                 ret, gp_strerror(ret));
+         if (ini_config_error_count(ini_config)) {
+             ini_config_get_errors(ini_config, &errors);
+@@ -791,26 +791,25 @@ static int gp_config_from_dir(const char *config_dir,
+                              INI_STOP_ON_ANY, /* error_level */
+                              collision_flags,
+                              INI_PARSE_NOWRAP,
+-                             /* do not allow colliding sections with the same
+-                              * name in different files */
+-                             INI_MS_ERROR,
++                             /* allow sections with the same name in
++                              * different files, but log warnings */
++                             INI_MS_DETECT | INI_MS_PRESERVE,
+                              &result_cfg,
+                              &error_list,
+                              NULL);
+-    if (ret) {
++    if (error_list) {
+         uint32_t len;
+-
+-        if (!error_list) {
+-            GPAUDIT("Error when reading config directory number: %d\n", ret);
+-            return ret;
+-        }
+-
+         len = ref_array_len(error_list);
+         for (uint32_t i = 0; i < len; i++) {
+             /* libini has an unfixable bug where error strings are (char **) */
+             GPAUDIT("Error when reading config directory: %s\n",
+                     *(char **)ref_array_get(error_list, i, NULL));
+         }
++        ref_array_destroy(error_list);
++    }
++
++    if (ret && ret != EEXIST) {
++        GPERROR("Error when reading config directory number: %d\n", ret);
+         ref_array_destroy(error_list);
+         return ret;
+@@ -821,9 +820,7 @@ static int gp_config_from_dir(const char *config_dir,
+         ini_config_destroy(*ini_config);
+         *ini_config = result_cfg;
+     }
+-    if (error_list) {
+-        ref_array_destroy(error_list);
+-    }
++
+     return 0;
+ }
diff --git a/meta-stx/recipes-security/gssproxy/files/Fix-unused-variables.patch b/meta-stx/recipes-security/gssproxy/files/Fix-unused-variables.patch
new file mode 100644 (file)
index 0000000..069b942
--- /dev/null
@@ -0,0 +1,503 @@
+From a3c9d87924455448cf3bcb20d34f1bd4e6b915d8 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 15 Mar 2017 13:52:36 -0400
+Subject: [PATCH] Fix unused variables
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #173
+(cherry picked from commit e72d1fa53df8af55b47639ed01f9f0bafa7a2ca8)
+---
+ proxy/src/client/gpm_common.c            |  1 +
+ proxy/src/client/gpm_display_status.c    |  2 +-
+ proxy/src/client/gpm_release_handle.c    |  2 +-
+ proxy/src/gp_common.h                    |  1 +
+ proxy/src/gp_config.c                    |  8 ++------
+ proxy/src/gp_conv.c                      |  4 ++--
+ proxy/src/gp_conv.h                      |  3 +--
+ proxy/src/gp_creds.c                     |  7 +++----
+ proxy/src/gp_init.c                      |  2 +-
+ proxy/src/gp_rpc_accept_sec_context.c    |  3 +--
+ proxy/src/gp_rpc_acquire_cred.c          |  3 +--
+ proxy/src/gp_rpc_get_mic.c               |  4 ++--
+ proxy/src/gp_rpc_import_and_canon_name.c |  5 ++---
+ proxy/src/gp_rpc_indicate_mechs.c        |  5 ++---
+ proxy/src/gp_rpc_init_sec_context.c      |  3 +--
+ proxy/src/gp_rpc_process.c               | 21 ++++-----------------
+ proxy/src/gp_rpc_process.h               |  6 ++++++
+ proxy/src/gp_rpc_release_handle.c        |  5 ++---
+ proxy/src/gp_rpc_unwrap.c                |  5 ++---
+ proxy/src/gp_rpc_verify_mic.c            |  5 ++---
+ proxy/src/gp_rpc_wrap.c                  |  4 ++--
+ proxy/src/gp_rpc_wrap_size_limit.c       |  5 ++---
+ proxy/src/gp_socket.c                    |  2 +-
+ proxy/src/gssproxy.c                     |  2 +-
+ 24 files changed, 44 insertions(+), 64 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index 0a54dbc..030765a 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -320,6 +320,7 @@ static void gpm_release_ctx(struct gpm_ctx *gpmctx)
+ OM_uint32 gpm_release_buffer(OM_uint32 *minor_status,
+                              gss_buffer_t buffer)
+ {
++    *minor_status = 0;
+     if (buffer != GSS_C_NO_BUFFER) {
+         if (buffer->value) {
+             free(buffer->value);
+diff --git a/proxy/src/client/gpm_display_status.c b/proxy/src/client/gpm_display_status.c
+index 1f8d755..bbb546f 100644
+--- a/proxy/src/client/gpm_display_status.c
++++ b/proxy/src/client/gpm_display_status.c
+@@ -43,7 +43,7 @@ void gpm_save_internal_status(uint32_t err, char *err_str)
+ OM_uint32 gpm_display_status(OM_uint32 *minor_status,
+                              OM_uint32 status_value,
+                              int status_type,
+-                             const gss_OID mech_type,
++                             const gss_OID mech_type UNUSED,
+                              OM_uint32 *message_context,
+                              gss_buffer_t status_string)
+ {
+diff --git a/proxy/src/client/gpm_release_handle.c b/proxy/src/client/gpm_release_handle.c
+index 7a6aaed..8f49ee9 100644
+--- a/proxy/src/client/gpm_release_handle.c
++++ b/proxy/src/client/gpm_release_handle.c
+@@ -58,7 +58,7 @@ done:
+ OM_uint32 gpm_delete_sec_context(OM_uint32 *minor_status,
+                                  gssx_ctx **context_handle,
+-                                 gss_buffer_t output_token)
++                                 gss_buffer_t output_token UNUSED)
+ {
+     union gp_rpc_arg uarg;
+     union gp_rpc_res ures;
+diff --git a/proxy/src/gp_common.h b/proxy/src/gp_common.h
+index 36fd843..edc23b4 100644
+--- a/proxy/src/gp_common.h
++++ b/proxy/src/gp_common.h
+@@ -8,6 +8,7 @@
+ #include "gp_log.h"
+ #define no_const(ptr) ((void *)((uintptr_t)(ptr)))
++#define UNUSED  __attribute__((unused))
+ /* add element to list head */
+ #define LIST_ADD(list, elem) do { \
+diff --git a/proxy/src/gp_config.c b/proxy/src/gp_config.c
+index 1b833fd..5c1ca02 100644
+--- a/proxy/src/gp_config.c
++++ b/proxy/src/gp_config.c
+@@ -720,7 +720,6 @@ void free_config(struct gp_config **cfg)
+ }
+ static int gp_config_from_file(const char *config_file,
+-                               struct gp_ini_context *ctx,
+                                struct ini_cfgobj *ini_config,
+                                const uint32_t collision_flags)
+ {
+@@ -764,7 +763,6 @@ static int gp_config_from_file(const char *config_file,
+ }
+ static int gp_config_from_dir(const char *config_dir,
+-                              struct gp_ini_context *ctx,
+                               struct ini_cfgobj **ini_config,
+                               const uint32_t collision_flags)
+ {
+@@ -847,8 +845,7 @@ int gp_config_init(const char *config_file, const char *config_dir,
+     }
+     if (config_file) {
+-        ret = gp_config_from_file(config_file, ctx, ini_config,
+-                                  collision_flags);
++        ret = gp_config_from_file(config_file, ini_config, collision_flags);
+         if (ret == ENOENT) {
+             GPDEBUG("Expected config file %s but did not find it.\n",
+                     config_file);
+@@ -857,8 +854,7 @@ int gp_config_init(const char *config_file, const char *config_dir,
+         }
+     }
+     if (config_dir) {
+-        ret = gp_config_from_dir(config_dir, ctx, &ini_config,
+-                                 collision_flags);
++        ret = gp_config_from_dir(config_dir, &ini_config, collision_flags);
+         if (ret) {
+             return ret;
+         }
+diff --git a/proxy/src/gp_conv.c b/proxy/src/gp_conv.c
+index 6aa66a8..71d6d9d 100644
+--- a/proxy/src/gp_conv.c
++++ b/proxy/src/gp_conv.c
+@@ -6,6 +6,7 @@
+ #include <stdbool.h>
+ #include <errno.h>
+ #include "gp_conv.h"
++#include "src/gp_common.h"
+ void *gp_memdup(void *in, size_t len)
+ {
+@@ -488,8 +489,7 @@ done:
+     return ret_maj;
+ }
+-int gp_conv_status_to_gssx(struct gssx_call_ctx *call_ctx,
+-                           uint32_t ret_maj, uint32_t ret_min,
++int gp_conv_status_to_gssx(uint32_t ret_maj, uint32_t ret_min,
+                            gss_OID mech, struct gssx_status *status)
+ {
+     int ret;
+diff --git a/proxy/src/gp_conv.h b/proxy/src/gp_conv.h
+index e247dbd..699b301 100644
+--- a/proxy/src/gp_conv.h
++++ b/proxy/src/gp_conv.h
+@@ -39,8 +39,7 @@ uint32_t gp_conv_name_to_gssx_alloc(uint32_t *min,
+                                     gss_name_t in, gssx_name **out);
+ uint32_t gp_conv_gssx_to_name(uint32_t *min, gssx_name *in, gss_name_t *out);
+-int gp_conv_status_to_gssx(struct gssx_call_ctx *call_ctx,
+-                           uint32_t ret_maj, uint32_t ret_min,
++int gp_conv_status_to_gssx(uint32_t ret_maj, uint32_t ret_min,
+                            gss_OID mech, struct gssx_status *status);
+ int gp_copy_utf8string(utf8string *in, utf8string *out);
+diff --git a/proxy/src/gp_creds.c b/proxy/src/gp_creds.c
+index 7d89b06..6570b06 100644
+--- a/proxy/src/gp_creds.c
++++ b/proxy/src/gp_creds.c
+@@ -252,7 +252,6 @@ done:
+ static int ensure_segregated_ccache(struct gp_call_ctx *gpcall,
+                                     int cc_num,
+-                                    struct gp_service *svc,
+                                     gss_key_value_set_desc *cs)
+ {
+     int ret;
+@@ -482,7 +481,7 @@ static int gp_get_cred_environment(struct gp_call_ctx *gpcall,
+         }
+     }
+-    ret = ensure_segregated_ccache(gpcall, cc_num, svc, cs);
++    ret = ensure_segregated_ccache(gpcall, cc_num, cs);
+     if (ret != 0) {
+         goto done;
+     }
+@@ -587,8 +586,8 @@ uint32_t gp_add_krb5_creds(uint32_t *min,
+                            gss_cred_id_t in_cred,
+                            gssx_name *desired_name,
+                            gss_cred_usage_t cred_usage,
+-                           uint32_t initiator_time_req,
+-                           uint32_t acceptor_time_req,
++                           uint32_t initiator_time_req UNUSED,
++                           uint32_t acceptor_time_req UNUSED,
+                            gss_cred_id_t *output_cred_handle,
+                            gss_OID_set *actual_mechs,
+                            uint32_t *initiator_time_rec,
+diff --git a/proxy/src/gp_init.c b/proxy/src/gp_init.c
+index d367f92..e69934d 100644
+--- a/proxy/src/gp_init.c
++++ b/proxy/src/gp_init.c
+@@ -96,7 +96,7 @@ void fini_server(void)
+     closelog();
+ }
+-static void break_loop(verto_ctx *vctx, verto_ev *ev)
++static void break_loop(verto_ctx *vctx, verto_ev *ev UNUSED)
+ {
+     GPDEBUG("Exiting after receiving a signal\n");
+     verto_break(vctx);
+diff --git a/proxy/src/gp_rpc_accept_sec_context.c b/proxy/src/gp_rpc_accept_sec_context.c
+index 22a4cf7..ae4de55 100644
+--- a/proxy/src/gp_rpc_accept_sec_context.c
++++ b/proxy/src/gp_rpc_accept_sec_context.c
+@@ -152,8 +152,7 @@ done:
+         ret_maj = acpt_maj;
+         ret_min = acpt_min;
+     }
+-    ret = gp_conv_status_to_gssx(&asca->call_ctx,
+-                                 ret_maj, ret_min, oid,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min, oid,
+                                  &ascr->status);
+     GPRPCDEBUG(gssx_res_accept_sec_context, ascr);
+diff --git a/proxy/src/gp_rpc_acquire_cred.c b/proxy/src/gp_rpc_acquire_cred.c
+index 9a55937..e9c7d56 100644
+--- a/proxy/src/gp_rpc_acquire_cred.c
++++ b/proxy/src/gp_rpc_acquire_cred.c
+@@ -150,8 +150,7 @@ int gp_acquire_cred(struct gp_call_ctx *gpcall,
+     }
+ done:
+-    ret = gp_conv_status_to_gssx(&aca->call_ctx,
+-                                 ret_maj, ret_min, desired_mech,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min, desired_mech,
+                                  &acr->status);
+     GPRPCDEBUG(gssx_res_acquire_cred, acr);
+diff --git a/proxy/src/gp_rpc_get_mic.c b/proxy/src/gp_rpc_get_mic.c
+index 1d9a1fe..dfba77e 100644
+--- a/proxy/src/gp_rpc_get_mic.c
++++ b/proxy/src/gp_rpc_get_mic.c
+@@ -3,7 +3,7 @@
+ #include "gp_rpc_process.h"
+ #include <gssapi/gssapi.h>
+-int gp_get_mic(struct gp_call_ctx *gpcall,
++int gp_get_mic(struct gp_call_ctx *gpcall UNUSED,
+                union gp_rpc_arg *arg,
+                union gp_rpc_res *res)
+ {
+@@ -79,7 +79,7 @@ int gp_get_mic(struct gp_call_ctx *gpcall,
+     ret_min = 0;
+ done:
+-    ret = gp_conv_status_to_gssx(&gma->call_ctx, ret_maj, ret_min,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min,
+                                  GSS_C_NO_OID, &gmr->status);
+     GPRPCDEBUG(gssx_res_get_mic, gmr);
+     gss_release_buffer(&ret_min, &message_token);
+diff --git a/proxy/src/gp_rpc_import_and_canon_name.c b/proxy/src/gp_rpc_import_and_canon_name.c
+index 3d67f40..e7b8e63 100644
+--- a/proxy/src/gp_rpc_import_and_canon_name.c
++++ b/proxy/src/gp_rpc_import_and_canon_name.c
+@@ -8,7 +8,7 @@
+  * I am not kidding, if you hav not read it, go back and do it now, or do not
+  * touch this function */
+-int gp_import_and_canon_name(struct gp_call_ctx *gpcall,
++int gp_import_and_canon_name(struct gp_call_ctx *gpcall UNUSED,
+                              union gp_rpc_arg *arg,
+                              union gp_rpc_res *res)
+ {
+@@ -64,8 +64,7 @@ int gp_import_and_canon_name(struct gp_call_ctx *gpcall,
+     /* TODO: icna->name_attributes */
+ done:
+-    ret = gp_conv_status_to_gssx(&icna->call_ctx,
+-                                 ret_maj, ret_min, mech,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min, mech,
+                                  &icnr->status);
+     GPRPCDEBUG(gssx_res_import_and_canon_name, icnr);
+diff --git a/proxy/src/gp_rpc_indicate_mechs.c b/proxy/src/gp_rpc_indicate_mechs.c
+index c24b926..8abbc7f 100644
+--- a/proxy/src/gp_rpc_indicate_mechs.c
++++ b/proxy/src/gp_rpc_indicate_mechs.c
+@@ -3,7 +3,7 @@
+ #include "gp_rpc_process.h"
+ #include "gp_debug.h"
+-int gp_indicate_mechs(struct gp_call_ctx *gpcall,
++int gp_indicate_mechs(struct gp_call_ctx *gpcall UNUSED,
+                       union gp_rpc_arg *arg,
+                       union gp_rpc_res *res)
+ {
+@@ -251,8 +251,7 @@ int gp_indicate_mechs(struct gp_call_ctx *gpcall,
+     }
+ done:
+-    ret = gp_conv_status_to_gssx(&ima->call_ctx,
+-                                 ret_maj, ret_min, GSS_C_NO_OID,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min, GSS_C_NO_OID,
+                                  &imr->status);
+     GPRPCDEBUG(gssx_res_indicate_mechs, imr);
+diff --git a/proxy/src/gp_rpc_init_sec_context.c b/proxy/src/gp_rpc_init_sec_context.c
+index 413e2ec..e4af495 100644
+--- a/proxy/src/gp_rpc_init_sec_context.c
++++ b/proxy/src/gp_rpc_init_sec_context.c
+@@ -187,8 +187,7 @@ done:
+         ret_maj = init_maj;
+         ret_min = init_min;
+     }
+-    ret = gp_conv_status_to_gssx(&isca->call_ctx,
+-                                 ret_maj, ret_min, mech_type,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min, mech_type,
+                                  &iscr->status);
+     GPRPCDEBUG(gssx_res_init_sec_context, iscr);
+diff --git a/proxy/src/gp_rpc_process.c b/proxy/src/gp_rpc_process.c
+index d1a0232..0ea17f0 100644
+--- a/proxy/src/gp_rpc_process.c
++++ b/proxy/src/gp_rpc_process.c
+@@ -396,20 +396,7 @@ int gp_rpc_process_call(struct gp_call_ctx *gpcall,
+     return ret;
+ }
+-int gp_get_call_context(gp_exec_std_args)
+-{
+-    return 0;
+-}
+-int gp_export_cred(gp_exec_std_args)
+-{
+-    return 0;
+-}
+-int gp_import_cred(gp_exec_std_args)
+-{
+-    return 0;
+-}
+-
+-int gp_store_cred(gp_exec_std_args)
+-{
+-    return 0;
+-}
++GP_EXEC_UNUSED_FUNC(gp_get_call_context);
++GP_EXEC_UNUSED_FUNC(gp_export_cred);
++GP_EXEC_UNUSED_FUNC(gp_import_cred);
++GP_EXEC_UNUSED_FUNC(gp_store_cred);
+diff --git a/proxy/src/gp_rpc_process.h b/proxy/src/gp_rpc_process.h
+index eb02c95..da27795 100644
+--- a/proxy/src/gp_rpc_process.h
++++ b/proxy/src/gp_rpc_process.h
+@@ -24,6 +24,12 @@ struct gp_service;
+                          union gp_rpc_arg *arg, \
+                          union gp_rpc_res *res
++#define GP_EXEC_UNUSED_FUNC(name)               \
++    int name(struct gp_call_ctx *gpcall UNUSED, \
++             union gp_rpc_arg *arg UNUSED,      \
++             union gp_rpc_res *res UNUSED)      \
++    { return 0; }
++
+ int gp_indicate_mechs(gp_exec_std_args);
+ int gp_get_call_context(gp_exec_std_args);
+ int gp_import_and_canon_name(gp_exec_std_args);
+diff --git a/proxy/src/gp_rpc_release_handle.c b/proxy/src/gp_rpc_release_handle.c
+index 4ffdfb9..c8ba8f2 100644
+--- a/proxy/src/gp_rpc_release_handle.c
++++ b/proxy/src/gp_rpc_release_handle.c
+@@ -2,7 +2,7 @@
+ #include "gp_rpc_process.h"
+-int gp_release_handle(struct gp_call_ctx *gpcall,
++int gp_release_handle(struct gp_call_ctx *gpcall UNUSED,
+                       union gp_rpc_arg *arg,
+                       union gp_rpc_res *res)
+ {
+@@ -35,8 +35,7 @@ int gp_release_handle(struct gp_call_ctx *gpcall,
+         break;
+     }
+-    ret = gp_conv_status_to_gssx(&rha->call_ctx,
+-                                 ret_maj, ret_min, GSS_C_NO_OID,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min, GSS_C_NO_OID,
+                                  &rhr->status);
+     GPRPCDEBUG(gssx_res_release_handle, rhr);
+diff --git a/proxy/src/gp_rpc_unwrap.c b/proxy/src/gp_rpc_unwrap.c
+index bc052cb..fad8cfe 100644
+--- a/proxy/src/gp_rpc_unwrap.c
++++ b/proxy/src/gp_rpc_unwrap.c
+@@ -3,7 +3,7 @@
+ #include "gp_rpc_process.h"
+ #include <gssapi/gssapi.h>
+-int gp_unwrap(struct gp_call_ctx *gpcall,
++int gp_unwrap(struct gp_call_ctx *gpcall UNUSED,
+               union gp_rpc_arg *arg,
+               union gp_rpc_res *res)
+ {
+@@ -106,8 +106,7 @@ int gp_unwrap(struct gp_call_ctx *gpcall,
+     ret_min = 0;
+ done:
+-    ret = gp_conv_status_to_gssx(&uwa->call_ctx,
+-                                 ret_maj, ret_min,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min,
+                                  GSS_C_NO_OID,
+                                  &uwr->status);
+     GPRPCDEBUG(gssx_res_unwrap, uwr);
+diff --git a/proxy/src/gp_rpc_verify_mic.c b/proxy/src/gp_rpc_verify_mic.c
+index d2920d2..6da6dac 100644
+--- a/proxy/src/gp_rpc_verify_mic.c
++++ b/proxy/src/gp_rpc_verify_mic.c
+@@ -3,7 +3,7 @@
+ #include "gp_rpc_process.h"
+ #include <gssapi/gssapi.h>
+-int gp_verify_mic(struct gp_call_ctx *gpcall,
++int gp_verify_mic(struct gp_call_ctx *gpcall UNUSED,
+                   union gp_rpc_arg *arg,
+                   union gp_rpc_res *res)
+ {
+@@ -74,8 +74,7 @@ int gp_verify_mic(struct gp_call_ctx *gpcall,
+     ret_min = 0;
+ done:
+-    ret = gp_conv_status_to_gssx(&vma->call_ctx,
+-                                 ret_maj, ret_min,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min,
+                                  GSS_C_NO_OID,
+                                  &vmr->status);
+     GPRPCDEBUG(gssx_res_verify_mic, vmr);
+diff --git a/proxy/src/gp_rpc_wrap.c b/proxy/src/gp_rpc_wrap.c
+index d5c950e..ae20bdb 100644
+--- a/proxy/src/gp_rpc_wrap.c
++++ b/proxy/src/gp_rpc_wrap.c
+@@ -3,7 +3,7 @@
+ #include "gp_rpc_process.h"
+ #include <gssapi/gssapi.h>
+-int gp_wrap(struct gp_call_ctx *gpcall,
++int gp_wrap(struct gp_call_ctx *gpcall UNUSED,
+             union gp_rpc_arg *arg,
+             union gp_rpc_res *res)
+ {
+@@ -105,7 +105,7 @@ int gp_wrap(struct gp_call_ctx *gpcall,
+     ret_min = 0;
+ done:
+-    ret = gp_conv_status_to_gssx(&wa->call_ctx, ret_maj, ret_min,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min,
+                                  GSS_C_NO_OID, &wr->status);
+     GPRPCDEBUG(gssx_res_wrap, wr);
+     gss_release_buffer(&ret_min, &output_message_buffer);
+diff --git a/proxy/src/gp_rpc_wrap_size_limit.c b/proxy/src/gp_rpc_wrap_size_limit.c
+index 355113c..cab6826 100644
+--- a/proxy/src/gp_rpc_wrap_size_limit.c
++++ b/proxy/src/gp_rpc_wrap_size_limit.c
+@@ -3,7 +3,7 @@
+ #include "gp_rpc_process.h"
+ #include <gssapi/gssapi.h>
+-int gp_wrap_size_limit(struct gp_call_ctx *gpcall,
++int gp_wrap_size_limit(struct gp_call_ctx *gpcall UNUSED,
+                        union gp_rpc_arg *arg,
+                        union gp_rpc_res *res)
+ {
+@@ -51,8 +51,7 @@ int gp_wrap_size_limit(struct gp_call_ctx *gpcall,
+     ret_min = 0;
+ done:
+-    ret = gp_conv_status_to_gssx(&wsla->call_ctx,
+-                                 ret_maj, ret_min,
++    ret = gp_conv_status_to_gssx(ret_maj, ret_min,
+                                  GSS_C_NO_OID,
+                                  &wslr->status);
+     GPRPCDEBUG(gssx_res_wrap_size_limit, wslr);
+diff --git a/proxy/src/gp_socket.c b/proxy/src/gp_socket.c
+index 62d7dbc..829ff21 100644
+--- a/proxy/src/gp_socket.c
++++ b/proxy/src/gp_socket.c
+@@ -146,7 +146,7 @@ static int set_fd_flags(int fd, int flags)
+     return 0;
+ }
+-void free_unix_socket(verto_ctx *ctx, verto_ev *ev)
++void free_unix_socket(verto_ctx *ctx UNUSED, verto_ev *ev)
+ {
+     struct gp_sock_ctx *sock_ctx = NULL;
+     sock_ctx = verto_get_private(ev);
+diff --git a/proxy/src/gssproxy.c b/proxy/src/gssproxy.c
+index 561188e..a020218 100644
+--- a/proxy/src/gssproxy.c
++++ b/proxy/src/gssproxy.c
+@@ -119,7 +119,7 @@ static int init_sockets(verto_ctx *vctx, struct gp_config *old_config)
+     return 0;
+ }
+-static void hup_handler(verto_ctx *vctx, verto_ev *ev)
++static void hup_handler(verto_ctx *vctx, verto_ev *ev UNUSED)
+ {
+     int ret;
+     struct gp_config *new_config, *old_config;
diff --git a/meta-stx/recipes-security/gssproxy/files/Handle-outdated-encrypted-ccaches.patch b/meta-stx/recipes-security/gssproxy/files/Handle-outdated-encrypted-ccaches.patch
new file mode 100644 (file)
index 0000000..c9c35d9
--- /dev/null
@@ -0,0 +1,121 @@
+From 24d776205605f3c113fdc2cb356d4c28b8033676 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Fri, 15 Sep 2017 18:07:28 -0400
+Subject: [PATCH] Handle outdated encrypted ccaches
+
+When the encrypting keytab changes, all credentials that it was used
+to encrypt must be re-created.  Otherwise, we log obtuse messages and
+fail to do what the user wants.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #214
+(cherry picked from commit 657d3c8339309dd8e2bfa4ee10f005e0f0c055e8)
+---
+ proxy/src/gp_export.c                 | 11 ++++++-----
+ proxy/src/gp_rpc_accept_sec_context.c | 28 +++++++++++++++++++++++++++
+ proxy/src/gp_rpc_init_sec_context.c   |  2 +-
+ 3 files changed, 35 insertions(+), 6 deletions(-)
+
+diff --git a/proxy/src/gp_export.c b/proxy/src/gp_export.c
+index ab08bb7..0c39045 100644
+--- a/proxy/src/gp_export.c
++++ b/proxy/src/gp_export.c
+@@ -268,7 +268,7 @@ static int gp_decrypt_buffer(krb5_context context, krb5_keyblock *key,
+                          &enc_handle,
+                          &data_out);
+     if (ret) {
+-        return EINVAL;
++        return ret;
+     }
+     *len = data_out.length;
+@@ -446,8 +446,8 @@ uint32_t gp_import_gssx_cred(uint32_t *min, struct gp_call_ctx *gpcall,
+ {
+     gss_buffer_desc token = GSS_C_EMPTY_BUFFER;
+     struct gp_creds_handle *handle = NULL;
+-    uint32_t ret_maj;
+-    uint32_t ret_min;
++    uint32_t ret_maj = GSS_S_COMPLETE;
++    uint32_t ret_min = 0;
+     int ret;
+     handle = gp_service_get_creds_handle(gpcall->service);
+@@ -469,8 +469,9 @@ uint32_t gp_import_gssx_cred(uint32_t *min, struct gp_call_ctx *gpcall,
+                             &cred->cred_handle_reference,
+                             &token.length, token.value);
+     if (ret) {
+-        ret_maj = GSS_S_FAILURE;
+-        ret_min = ENOENT;
++        /* Allow for re-issuance of the keytab. */
++        GPDEBUG("Stored ccache failed to decrypt; treating as empty\n");
++        *out = GSS_C_NO_CREDENTIAL;
+         goto done;
+     }
+diff --git a/proxy/src/gp_rpc_accept_sec_context.c b/proxy/src/gp_rpc_accept_sec_context.c
+index ae4de55..2cdc94b 100644
+--- a/proxy/src/gp_rpc_accept_sec_context.c
++++ b/proxy/src/gp_rpc_accept_sec_context.c
+@@ -25,6 +25,13 @@ int gp_accept_sec_context(struct gp_call_ctx *gpcall,
+     int exp_creds_type;
+     uint32_t acpt_maj;
+     uint32_t acpt_min;
++    struct gp_cred_check_handle gcch = {
++        .ctx = gpcall,
++        .options.options_len = arg->accept_sec_context.options.options_len,
++        .options.options_val = arg->accept_sec_context.options.options_val,
++    };
++    uint32_t gccn_before = 0;
++    uint32_t gccn_after = 0;
+     int ret;
+     asca = &arg->accept_sec_context;
+@@ -52,6 +59,8 @@ int gp_accept_sec_context(struct gp_call_ctx *gpcall,
+         if (ret_maj) {
+             goto done;
+         }
++
++        gccn_before = gp_check_sync_creds(&gcch, ach);
+     }
+     if (ach == GSS_C_NO_CREDENTIAL) {
+@@ -146,6 +155,25 @@ int gp_accept_sec_context(struct gp_call_ctx *gpcall,
+                                               src_name, oid,
+                                               &ascr->options.options_len,
+                                               &ascr->options.options_val);
++    if (ret_maj) {
++        goto done;
++    }
++
++    gccn_after = gp_check_sync_creds(&gcch, ach);
++
++    if (gccn_before != gccn_after) {
++        /* export creds back to client for sync up */
++        ret_maj = gp_export_sync_creds(&ret_min, gpcall, &ach,
++                                       &ascr->options.options_val,
++                                       &ascr->options.options_len);
++        if (ret_maj) {
++            /* not fatal, log and continue */
++            GPDEBUG("Failed to export sync creds (%d: %d)",
++                    (int)ret_maj, (int)ret_min);
++        }
++    }
++
++    ret_maj = GSS_S_COMPLETE;
+ done:
+     if (ret_maj == GSS_S_COMPLETE) {
+diff --git a/proxy/src/gp_rpc_init_sec_context.c b/proxy/src/gp_rpc_init_sec_context.c
+index e4af495..f362dbc 100644
+--- a/proxy/src/gp_rpc_init_sec_context.c
++++ b/proxy/src/gp_rpc_init_sec_context.c
+@@ -91,7 +91,7 @@ int gp_init_sec_context(struct gp_call_ctx *gpcall,
+         gp_conv_gssx_to_buffer(isca->input_token, &ibuf);
+     }
+-    if (!isca->cred_handle) {
++    if (!ich) {
+         if (gss_oid_equal(mech_type, gss_mech_krb5)) {
+             ret_maj = gp_add_krb5_creds(&ret_min, gpcall,
+                                         ACQ_NORMAL, NULL, NULL,
diff --git a/meta-stx/recipes-security/gssproxy/files/Include-header-for-writev.patch b/meta-stx/recipes-security/gssproxy/files/Include-header-for-writev.patch
new file mode 100644 (file)
index 0000000..1b6c36e
--- /dev/null
@@ -0,0 +1,49 @@
+From 26b5ff6b802b6a24b23ea774b0305f6f2031d4da Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 17 May 2017 12:21:37 -0400
+Subject: [PATCH] Include header for writev()
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #186
+(cherry picked from commit c8c5e8d2b2154d1006633634478a24bfa0b04b4d)
+---
+ proxy/src/gp_socket.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/proxy/src/gp_socket.c b/proxy/src/gp_socket.c
+index 17ecf7c..29b6a44 100644
+--- a/proxy/src/gp_socket.c
++++ b/proxy/src/gp_socket.c
+@@ -1,19 +1,22 @@
+ /* Copyright (C) 2011,2015 the GSS-PROXY contributors, see COPYING for license */
+ #include "config.h"
+-#include <stdlib.h>
+-#include <unistd.h>
+-#include <fcntl.h>
+-#include <sys/types.h>
+-#include <sys/stat.h>
+-#include <sys/socket.h>
+-#include <sys/un.h>
+-#include <errno.h>
+-#include <netinet/in.h>
++
+ #include "gp_proxy.h"
+ #include "gp_creds.h"
+ #include "gp_selinux.h"
++#include <errno.h>
++#include <fcntl.h>
++#include <netinet/in.h>
++#include <stdlib.h>
++#include <sys/socket.h>
++#include <sys/stat.h>
++#include <sys/types.h>
++#include <sys/uio.h>
++#include <sys/un.h>
++#include <unistd.h>
++
+ #define FRAGMENT_BIT (1 << 31)
+ struct unix_sock_conn {
diff --git a/meta-stx/recipes-security/gssproxy/files/Make-proc-file-failure-loud-but-nonfatal.patch b/meta-stx/recipes-security/gssproxy/files/Make-proc-file-failure-loud-but-nonfatal.patch
new file mode 100644 (file)
index 0000000..18a58df
--- /dev/null
@@ -0,0 +1,75 @@
+From 938bd1adc15342e8ebed3d4e135d862e362a619e Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Thu, 25 May 2017 13:06:17 -0400
+Subject: [PATCH] Make proc file failure loud but nonfatal
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Resolves: #190
+(cherry picked from commit 4f60bf02a1a68cbb26251e764357b753f80790f3)
+---
+ proxy/src/gp_init.c | 34 +++++++++++++++-------------------
+ 1 file changed, 15 insertions(+), 19 deletions(-)
+
+diff --git a/proxy/src/gp_init.c b/proxy/src/gp_init.c
+index bb7ba6b..d367f92 100644
+--- a/proxy/src/gp_init.c
++++ b/proxy/src/gp_init.c
+@@ -144,11 +144,11 @@ void init_proc_nfsd(struct gp_config *cfg)
+ {
+     char buf[] = "1";
+     bool enabled = false;
+-    int fd, i, ret;
++    int fd, ret;
+     /* check first if any service enabled kernel support */
+-    for (i = 0; i < cfg->num_svcs; i++) {
+-        if (cfg->svcs[i]->kernel_nfsd == true) {
++    for (int i = 0; i < cfg->num_svcs; i++) {
++        if (cfg->svcs[i]->kernel_nfsd) {
+             enabled = true;
+             break;
+         }
+@@ -161,30 +161,26 @@ void init_proc_nfsd(struct gp_config *cfg)
+     fd = open(LINUX_PROC_USE_GSS_PROXY_FILE, O_RDWR);
+     if (fd == -1) {
+         ret = errno;
+-        fprintf(stderr, "GSS-Proxy is not supported by this kernel since "
+-                "file %s could not be found: %d (%s)\n",
+-                LINUX_PROC_USE_GSS_PROXY_FILE,
+-                ret, gp_strerror(ret));
+-        exit(1);
++        GPDEBUG("Kernel doesn't support GSS-Proxy (can't open %s: %d (%s))\n",
++                LINUX_PROC_USE_GSS_PROXY_FILE, ret, gp_strerror(ret));
++        goto fail;
+     }
+     ret = write(fd, buf, 1);
+     if (ret != 1) {
+         ret = errno;
+-        fprintf(stderr, "Failed to write to %s: %d (%s)\n",
+-                LINUX_PROC_USE_GSS_PROXY_FILE,
+-                ret, gp_strerror(ret));
+-        exit(1);
++        GPDEBUG("Failed to write to %s: %d (%s)\n",
++                LINUX_PROC_USE_GSS_PROXY_FILE, ret, gp_strerror(ret));
+     }
+-    ret = close(fd);
+-    if (ret == -1) {
+-        ret = errno;
+-        fprintf(stderr, "Failed to close %s: %d (%s)\n",
+-                LINUX_PROC_USE_GSS_PROXY_FILE,
+-                ret, gp_strerror(ret));
+-        exit(1);
++    close(fd);
++    if (ret != 0) {
++        goto fail;
+     }
++
++    return;
++fail:
++    GPDEBUG("Problem with kernel communication!  NFS server will not work\n");
+ }
+ void write_pid(void)
diff --git a/meta-stx/recipes-security/gssproxy/files/Only-empty-FILE-ccaches-when-storing-remote-creds.patch b/meta-stx/recipes-security/gssproxy/files/Only-empty-FILE-ccaches-when-storing-remote-creds.patch
new file mode 100644 (file)
index 0000000..06edf09
--- /dev/null
@@ -0,0 +1,55 @@
+From 1fa33903be640f8d22757d21da294e70f0812698 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Tue, 10 Oct 2017 18:00:45 -0400
+Subject: [PATCH] Only empty FILE ccaches when storing remote creds
+
+This mitigates issues when services share a ccache between two
+processes.  We cannot fix this for FILE ccaches without introducing
+other issues.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #216
+(cherry picked from commit d09e87f47a21dd250bfd7a9c59a5932b5c995057)
+---
+ proxy/src/mechglue/gpp_creds.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/proxy/src/mechglue/gpp_creds.c b/proxy/src/mechglue/gpp_creds.c
+index 9fe9bd1..6bdff45 100644
+--- a/proxy/src/mechglue/gpp_creds.c
++++ b/proxy/src/mechglue/gpp_creds.c
+@@ -147,6 +147,7 @@ uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
+     char cred_name[creds->desired_name.display_name.octet_string_len + 1];
+     XDR xdrctx;
+     bool xdrok;
++    const char *cc_type;
+     *min = 0;
+@@ -193,13 +194,20 @@ uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
+     }
+     cred.ticket.length = xdr_getpos(&xdrctx);
+-    /* Always initialize and destroy any existing contents to avoid pileup of
+-     * entries */
+-    ret = krb5_cc_initialize(ctx, ccache, cred.client);
+-    if (ret == 0) {
+-        ret = krb5_cc_store_cred(ctx, ccache, &cred);
++    cc_type = krb5_cc_get_type(ctx, ccache);
++    if (strcmp(cc_type, "FILE") == 0) {
++        /* FILE ccaches don't handle updates properly: if they have the same
++         * principal name, they are blackholed.  We either have to change the
++         * name (at which point the file grows forever) or flash the cache on
++         * every update. */
++        ret = krb5_cc_initialize(ctx, ccache, cred.client);
++        if (ret != 0) {
++            goto done;
++        }
+     }
++    ret = krb5_cc_store_cred(ctx, ccache, &cred);
++
+ done:
+     if (ctx) {
+         krb5_free_cred_contents(ctx, &cred);
diff --git a/meta-stx/recipes-security/gssproxy/files/Prevent-uninitialized-read-in-error-path-of-XDR-cont.patch b/meta-stx/recipes-security/gssproxy/files/Prevent-uninitialized-read-in-error-path-of-XDR-cont.patch
new file mode 100644 (file)
index 0000000..e922183
--- /dev/null
@@ -0,0 +1,28 @@
+From 8a7fc8e280e31fd8c277adc4f74cc26c341a71b4 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Tue, 12 Sep 2017 12:40:27 -0400
+Subject: [PATCH] Prevent uninitialized read in error path of XDR contexts
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #211
+(cherry picked from commit 8ba0f42f06bc7d0ed68cb2eb3ef2794fc860ac2d)
+---
+ proxy/src/client/gpm_common.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index c65c69d..d0f99d6 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -656,8 +656,8 @@ int gpm_make_call(int proc, union gp_rpc_arg *arg, union gp_rpc_res *res)
+ {
+     struct gpm_ctx *gpmctx;
+     gp_rpc_msg msg;
+-    XDR xdr_call_ctx;
+-    XDR xdr_reply_ctx;
++    XDR xdr_call_ctx = {0};
++    XDR xdr_reply_ctx = {0};
+     char *send_buffer = NULL;
+     char *recv_buffer = NULL;
+     uint32_t send_length;
diff --git a/meta-stx/recipes-security/gssproxy/files/Properly-initialize-ccaches-before-storing-into-them.patch b/meta-stx/recipes-security/gssproxy/files/Properly-initialize-ccaches-before-storing-into-them.patch
new file mode 100644 (file)
index 0000000..617c6e2
--- /dev/null
@@ -0,0 +1,38 @@
+From 2d91093925c8546d68f9314546353226b4f41569 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Tue, 5 Dec 2017 13:14:29 -0500
+Subject: [PATCH] Properly initialize ccaches before storing into them
+
+krb5_cc_new_unique() doesn't initialize ccaches, which results in the
+krb5 libraries being aware of their presence within the collection but
+being unable to manipulate them.
+
+This is transparent to most gssproxy consumers because we just
+re-fetch the ccache on error.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #223
+(cherry picked from commit be7df45b6a56631033de387d28a2c06b7658c36a)
+---
+ proxy/src/mechglue/gpp_creds.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/proxy/src/mechglue/gpp_creds.c b/proxy/src/mechglue/gpp_creds.c
+index 187ada7..f8ab320 100644
+--- a/proxy/src/mechglue/gpp_creds.c
++++ b/proxy/src/mechglue/gpp_creds.c
+@@ -247,6 +247,13 @@ uint32_t gpp_store_remote_creds(uint32_t *min, bool store_as_default_cred,
+         ret = krb5_cc_new_unique(ctx, cc_type, NULL, &ccache);
+         free(cc_type);
++        if (ret)
++            goto done;
++
++        /* krb5_cc_new_unique() doesn't initialize, and we need to initialize
++         * before storing into the ccache.  Note that this will only clobber
++         * the ccache handle, not the whole collection. */
++        ret = krb5_cc_initialize(ctx, ccache, cred.client);
+     }
+     if (ret)
+         goto done;
diff --git a/meta-stx/recipes-security/gssproxy/files/Properly-locate-credentials-in-collection-caches-in-.patch b/meta-stx/recipes-security/gssproxy/files/Properly-locate-credentials-in-collection-caches-in-.patch
new file mode 100644 (file)
index 0000000..f1099e8
--- /dev/null
@@ -0,0 +1,147 @@
+From 5fa4e2d5d484df17ebd9a585a6dfdf4522320426 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Mon, 20 Nov 2017 14:09:04 -0500
+Subject: [PATCH] Properly locate credentials in collection caches in mechglue
+
+Previously, we would just put the credentials in the default cache for
+a collection type, which lead to some mysterious failures.
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #221
+(cherry picked from commit 670240a6cd4d5e2ecf13e481621098693cdbaa89)
+---
+ proxy/src/mechglue/gpp_creds.c  | 81 +++++++++++++++++++++++----------
+ proxy/src/mechglue/gss_plugin.h |  2 +-
+ 2 files changed, 59 insertions(+), 24 deletions(-)
+
+diff --git a/proxy/src/mechglue/gpp_creds.c b/proxy/src/mechglue/gpp_creds.c
+index 3ebd726..187ada7 100644
+--- a/proxy/src/mechglue/gpp_creds.c
++++ b/proxy/src/mechglue/gpp_creds.c
+@@ -170,7 +170,16 @@ static krb5_error_code gpp_construct_cred(gssx_cred *creds, krb5_context ctx,
+     return 0;
+ }
+-uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
++/* Store creds from remote in a local ccache, updating where possible.
++ *
++ * If store_as_default_cred is true, the cred is made default for its
++ * collection, if there is one.  Note that if the ccache is not of a
++ * collection type, the creds will overwrite the ccache.
++ *
++ * If no "ccache" entry is specified in cred_store, the default ccache for a
++ * new context will be used.
++ */
++uint32_t gpp_store_remote_creds(uint32_t *min, bool store_as_default_cred,
+                                 gss_const_key_value_set_t cred_store,
+                                 gssx_cred *creds)
+ {
+@@ -179,7 +188,7 @@ uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
+     krb5_creds cred;
+     krb5_error_code ret;
+     char cred_name[creds->desired_name.display_name.octet_string_len + 1];
+-    const char *cc_type;
++    const char *cc_name;
+     *min = 0;
+@@ -191,38 +200,64 @@ uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
+         goto done;
+     }
+-    if (cred_store) {
+-        for (unsigned i = 0; i < cred_store->count; i++) {
+-            if (strcmp(cred_store->elements[i].key, "ccache") == 0) {
+-                ret = krb5_cc_resolve(ctx, cred_store->elements[i].value,
+-                                      &ccache);
+-                if (ret) goto done;
+-                break;
+-            }
++    for (unsigned i = 0; cred_store && i < cred_store->count; i++) {
++        if (strcmp(cred_store->elements[i].key, "ccache") == 0) {
++            /* krb5 creates new ccaches based off the default name. */
++            ret = krb5_cc_set_default_name(ctx,
++                                           cred_store->elements[i].value);
++            if (ret)
++                goto done;
++
++            break;
+         }
+     }
+-    if (!ccache) {
+-        if (!default_creds) {
+-            ret = ENOMEDIUM;
+-            goto done;
+-        }
+-        ret = krb5_cc_default(ctx, &ccache);
+-        if (ret) goto done;
+-    }
+-    cc_type = krb5_cc_get_type(ctx, ccache);
+-    if (strcmp(cc_type, "FILE") == 0) {
++    cc_name = krb5_cc_default_name(ctx);
++    if (strncmp(cc_name, "FILE:", 5) == 0 || !strchr(cc_name, ':')) {
+         /* FILE ccaches don't handle updates properly: if they have the same
+          * principal name, they are blackholed.  We either have to change the
+          * name (at which point the file grows forever) or flash the cache on
+          * every update. */
+-        ret = krb5_cc_initialize(ctx, ccache, cred.client);
+-        if (ret != 0) {
++        ret = krb5_cc_default(ctx, &ccache);
++        if (ret)
+             goto done;
+-        }
++
++        ret = krb5_cc_initialize(ctx, ccache, cred.client);
++        if (ret != 0)
++            goto done;
++
++        ret = krb5_cc_store_cred(ctx, ccache, &cred);
++        goto done;
+     }
++    ret = krb5_cc_cache_match(ctx, cred.client, &ccache);
++    if (ret == KRB5_CC_NOTFOUND) {
++        /* A new ccache within the collection whose name is based off the
++         * default_name for the context.  krb5_cc_new_unique only accepts the
++         * leading component of a name as a type. */
++        char *cc_type;
++        const char *p;
++
++        p = strchr(cc_name, ':'); /* can't be FILE here */
++        cc_type = strndup(cc_name, p - cc_name);
++        if (!cc_type) {
++            ret = ENOMEM;
++            goto done;
++        }
++
++        ret = krb5_cc_new_unique(ctx, cc_type, NULL, &ccache);
++        free(cc_type);
++    }
++    if (ret)
++        goto done;
++
+     ret = krb5_cc_store_cred(ctx, ccache, &cred);
++    if (ret)
++        goto done;
++
++    if (store_as_default_cred) {
++        ret = krb5_cc_switch(ctx, ccache);
++    }
+ done:
+     if (ctx) {
+diff --git a/proxy/src/mechglue/gss_plugin.h b/proxy/src/mechglue/gss_plugin.h
+index 333d63c..c0e8870 100644
+--- a/proxy/src/mechglue/gss_plugin.h
++++ b/proxy/src/mechglue/gss_plugin.h
+@@ -76,7 +76,7 @@ uint32_t gpp_cred_handle_init(uint32_t *min, bool defcred, const char *ccache,
+                               struct gpp_cred_handle **out_handle);
+ uint32_t gpp_cred_handle_free(uint32_t *min, struct gpp_cred_handle *handle);
+ bool gpp_creds_are_equal(gssx_cred *a, gssx_cred *b);
+-uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
++uint32_t gpp_store_remote_creds(uint32_t *min, bool store_as_default_cred,
+                                 gss_const_key_value_set_t cred_store,
+                                 gssx_cred *creds);
diff --git a/meta-stx/recipes-security/gssproxy/files/Properly-renew-expired-credentials.patch b/meta-stx/recipes-security/gssproxy/files/Properly-renew-expired-credentials.patch
new file mode 100644 (file)
index 0000000..a4b3005
--- /dev/null
@@ -0,0 +1,75 @@
+From fc748ba83eb29f10fd44b6572b04709fa27dc587 Mon Sep 17 00:00:00 2001
+From: Simo Sorce <simo@redhat.com>
+Date: Mon, 13 Mar 2017 08:06:12 -0400
+Subject: [PATCH] Properly renew expired credentials
+
+When a caller imports expired credentials, we aim to actually renew them
+if we can. However due to incorrect checks and not clearing of the
+ret_maj variable after checks we end up returning an error instead.
+
+Also fix mechglue to also save and properly report the first call errors
+when both remote and local fail.
+
+Resolves: #170
+
+Signed-off-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+(cherry picked from commit dc462321226f59ceaab0d3db47446a694a8ecba2)
+---
+ proxy/src/gp_creds.c                  | 14 +++++++++-----
+ proxy/src/mechglue/gpp_acquire_cred.c |  5 +++++
+ 2 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/proxy/src/gp_creds.c b/proxy/src/gp_creds.c
+index 5d84904..171a724 100644
+--- a/proxy/src/gp_creds.c
++++ b/proxy/src/gp_creds.c
+@@ -629,8 +629,12 @@ uint32_t gp_add_krb5_creds(uint32_t *min,
+         ret_maj = gp_check_cred(&ret_min, in_cred, desired_name, cred_usage);
+         if (ret_maj == GSS_S_COMPLETE) {
+             return GSS_S_COMPLETE;
+-        } else if (ret_maj != GSS_S_CREDENTIALS_EXPIRED &&
+-                   ret_maj != GSS_S_NO_CRED) {
++        } else if (ret_maj == GSS_S_CREDENTIALS_EXPIRED ||
++                   ret_maj == GSS_S_NO_CRED) {
++            /* continue and try to obtain new creds */
++            ret_maj = 0;
++            ret_min = 0;
++        } else {
+             *min = ret_min;
+             return GSS_S_CRED_UNAVAIL;
+         }
+@@ -639,14 +643,14 @@ uint32_t gp_add_krb5_creds(uint32_t *min,
+     if (acquire_type == ACQ_NORMAL) {
+         ret_min = gp_get_cred_environment(gpcall, desired_name, &req_name,
+                                           &cred_usage, &cred_store);
++        if (ret_min) {
++            ret_maj = GSS_S_CRED_UNAVAIL;
++        }
+     } else if (desired_name) {
+         ret_maj = gp_conv_gssx_to_name(&ret_min, desired_name, &req_name);
+     }
+     if (ret_maj) {
+         goto done;
+-    } else if (ret_min) {
+-        ret_maj = GSS_S_CRED_UNAVAIL;
+-        goto done;
+     }
+     if (!try_impersonate(gpcall->service, cred_usage, acquire_type)) {
+diff --git a/proxy/src/mechglue/gpp_acquire_cred.c b/proxy/src/mechglue/gpp_acquire_cred.c
+index d876699..514fdd1 100644
+--- a/proxy/src/mechglue/gpp_acquire_cred.c
++++ b/proxy/src/mechglue/gpp_acquire_cred.c
+@@ -186,6 +186,11 @@ OM_uint32 gssi_acquire_cred_from(OM_uint32 *minor_status,
+     }
+     if (behavior == GPP_REMOTE_FIRST) {
++        if (maj != GSS_S_COMPLETE) {
++            /* save errors */
++            tmaj = maj;
++            tmin = min;
++        }
+         /* So remote failed, but we can fallback to local, try that */
+         maj = acquire_local(&min, NULL, name,
+                             time_req, desired_mechs, cred_usage, cred_store,
diff --git a/meta-stx/recipes-security/gssproxy/files/Remove-gpm_release_ctx-to-fix-double-unlock.patch b/meta-stx/recipes-security/gssproxy/files/Remove-gpm_release_ctx-to-fix-double-unlock.patch
new file mode 100644 (file)
index 0000000..cce478c
--- /dev/null
@@ -0,0 +1,37 @@
+From 9e2bdfeee30331254d21eaf9e9c000fb9e642fe9 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Thu, 23 Mar 2017 13:42:55 -0400
+Subject: [PATCH] Remove gpm_release_ctx() to fix double unlock
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #173
+(cherry picked from commit b50a863b20649b80cc44c88aa325c6c3220af61b)
+---
+ proxy/src/client/gpm_common.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index 8c96986..69f4741 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -312,11 +312,6 @@ static struct gpm_ctx *gpm_get_ctx(void)
+     return &gpm_global_ctx;
+ }
+-static void gpm_release_ctx(struct gpm_ctx *gpmctx)
+-{
+-    gpm_release_sock(gpmctx);
+-}
+-
+ OM_uint32 gpm_release_buffer(OM_uint32 *minor_status,
+                              gss_buffer_t buffer)
+ {
+@@ -503,7 +498,6 @@ done:
+     xdr_free((xdrproc_t)xdr_gp_rpc_msg, (char *)&msg);
+     xdr_destroy(&xdr_call_ctx);
+     xdr_destroy(&xdr_reply_ctx);
+-    gpm_release_ctx(gpmctx);
+     return ret;
+ }
diff --git a/meta-stx/recipes-security/gssproxy/files/Separate-cred-and-ccache-manipulation-in-gpp_store_r.patch b/meta-stx/recipes-security/gssproxy/files/Separate-cred-and-ccache-manipulation-in-gpp_store_r.patch
new file mode 100644 (file)
index 0000000..dbc8dd4
--- /dev/null
@@ -0,0 +1,107 @@
+From 1451b65fec69ff35e029b4770dcb4927ba57060a Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Fri, 17 Nov 2017 13:53:37 -0500
+Subject: [PATCH] Separate cred and ccache manipulation in
+ gpp_store_remote_creds()
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+(cherry picked from commit 221b553bfb4082085d05b40da9a04c1f7e4af533)
+---
+ proxy/src/mechglue/gpp_creds.c | 62 +++++++++++++++++++++-------------
+ 1 file changed, 39 insertions(+), 23 deletions(-)
+
+diff --git a/proxy/src/mechglue/gpp_creds.c b/proxy/src/mechglue/gpp_creds.c
+index 6bdff45..3ebd726 100644
+--- a/proxy/src/mechglue/gpp_creds.c
++++ b/proxy/src/mechglue/gpp_creds.c
+@@ -136,6 +136,40 @@ bool gpp_creds_are_equal(gssx_cred *a, gssx_cred *b)
+     return true;
+ }
++static krb5_error_code gpp_construct_cred(gssx_cred *creds, krb5_context ctx,
++                                          krb5_creds *cred, char *cred_name)
++{
++    XDR xdrctx;
++    bool xdrok;
++    krb5_error_code ret = 0;
++
++    memset(cred, 0, sizeof(*cred));
++
++    memcpy(cred_name, creds->desired_name.display_name.octet_string_val,
++           creds->desired_name.display_name.octet_string_len);
++    cred_name[creds->desired_name.display_name.octet_string_len] = '\0';
++
++    ret = krb5_parse_name(ctx, cred_name, &cred->client);
++    if (ret) {
++        return ret;
++    }
++
++    ret = krb5_parse_name(ctx, GPKRB_SRV_NAME, &cred->server);
++    if (ret) {
++        return ret;
++    }
++
++    cred->ticket.data = malloc(GPKRB_MAX_CRED_SIZE);
++    xdrmem_create(&xdrctx, cred->ticket.data, GPKRB_MAX_CRED_SIZE,
++                  XDR_ENCODE);
++    xdrok = xdr_gssx_cred(&xdrctx, creds);
++    if (!xdrok) {
++        return ENOSPC;
++    }
++    cred->ticket.length = xdr_getpos(&xdrctx);
++    return 0;
++}
++
+ uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
+                                 gss_const_key_value_set_t cred_store,
+                                 gssx_cred *creds)
+@@ -145,17 +179,18 @@ uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
+     krb5_creds cred;
+     krb5_error_code ret;
+     char cred_name[creds->desired_name.display_name.octet_string_len + 1];
+-    XDR xdrctx;
+-    bool xdrok;
+     const char *cc_type;
+     *min = 0;
+-    memset(&cred, 0, sizeof(cred));
+-
+     ret = krb5_init_context(&ctx);
+     if (ret) return ret;
++    ret = gpp_construct_cred(creds, ctx, &cred, cred_name);
++    if (ret) {
++        goto done;
++    }
++
+     if (cred_store) {
+         for (unsigned i = 0; i < cred_store->count; i++) {
+             if (strcmp(cred_store->elements[i].key, "ccache") == 0) {
+@@ -175,25 +210,6 @@ uint32_t gpp_store_remote_creds(uint32_t *min, bool default_creds,
+         if (ret) goto done;
+     }
+-    memcpy(cred_name, creds->desired_name.display_name.octet_string_val,
+-           creds->desired_name.display_name.octet_string_len);
+-    cred_name[creds->desired_name.display_name.octet_string_len] = '\0';
+-
+-    ret = krb5_parse_name(ctx, cred_name, &cred.client);
+-    if (ret) goto done;
+-
+-    ret = krb5_parse_name(ctx, GPKRB_SRV_NAME, &cred.server);
+-    if (ret) goto done;
+-
+-    cred.ticket.data = malloc(GPKRB_MAX_CRED_SIZE);
+-    xdrmem_create(&xdrctx, cred.ticket.data, GPKRB_MAX_CRED_SIZE, XDR_ENCODE);
+-    xdrok = xdr_gssx_cred(&xdrctx, creds);
+-    if (!xdrok) {
+-        ret = ENOSPC;
+-        goto done;
+-    }
+-    cred.ticket.length = xdr_getpos(&xdrctx);
+-
+     cc_type = krb5_cc_get_type(ctx, ccache);
+     if (strcmp(cc_type, "FILE") == 0) {
+         /* FILE ccaches don't handle updates properly: if they have the same
diff --git a/meta-stx/recipes-security/gssproxy/files/Simplify-setting-NONBLOCK-on-socket.patch b/meta-stx/recipes-security/gssproxy/files/Simplify-setting-NONBLOCK-on-socket.patch
new file mode 100644 (file)
index 0000000..1762d69
--- /dev/null
@@ -0,0 +1,53 @@
+From 6d12deeb19cc2e231463427db27f8755649450d1 Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <alexander.m.scheel@gmail.com>
+Date: Thu, 14 Sep 2017 10:57:12 -0500
+Subject: [PATCH] Simplify setting NONBLOCK on socket
+
+Signed-off-by: Alexander Scheel <alexander.m.scheel@gmail.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #213
+[rharwood@redhat.com: fixup commit message]
+(cherry picked from commit ec808ee6a5e6746ed35acc865f253425701be352)
+---
+ proxy/src/client/gpm_common.c | 15 +--------------
+ 1 file changed, 1 insertion(+), 14 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index d0f99d6..7d1158e 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -80,7 +80,6 @@ static int gpm_open_socket(struct gpm_ctx *gpmctx)
+     struct sockaddr_un addr = {0};
+     char name[PATH_MAX];
+     int ret;
+-    unsigned flags;
+     int fd = -1;
+     ret = get_pipe_name(name);
+@@ -92,24 +91,12 @@ static int gpm_open_socket(struct gpm_ctx *gpmctx)
+     strncpy(addr.sun_path, name, sizeof(addr.sun_path)-1);
+     addr.sun_path[sizeof(addr.sun_path)-1] = '\0';
+-    fd = socket(AF_UNIX, SOCK_STREAM, 0);
++    fd = socket(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0);
+     if (fd == -1) {
+         ret = errno;
+         goto done;
+     }
+-    ret = fcntl(fd, F_GETFD, &flags);
+-    if (ret != 0) {
+-        ret = errno;
+-        goto done;
+-    }
+-
+-    ret = fcntl(fd, F_SETFD, flags | O_NONBLOCK);
+-    if (ret != 0) {
+-        ret = errno;
+-        goto done;
+-    }
+-
+     ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
+     if (ret == -1) {
+         ret = errno;
diff --git a/meta-stx/recipes-security/gssproxy/files/Tolerate-NULL-pointers-in-gp_same.patch b/meta-stx/recipes-security/gssproxy/files/Tolerate-NULL-pointers-in-gp_same.patch
new file mode 100644 (file)
index 0000000..9f2a147
--- /dev/null
@@ -0,0 +1,31 @@
+From 05a2677920f0240ea302e67d699546665687dd14 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Tue, 13 Jun 2017 14:22:44 -0400
+Subject: [PATCH] Tolerate NULL pointers in gp_same
+
+Fixes potential NULL derefs of program names
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #195
+(cherry picked from commit afe4c2fe6f7f939df914959dda11131bd80ccec6)
+---
+ proxy/src/gp_util.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/proxy/src/gp_util.c b/proxy/src/gp_util.c
+index f158b84..5442992 100644
+--- a/proxy/src/gp_util.c
++++ b/proxy/src/gp_util.c
+@@ -12,10 +12,9 @@
+ bool gp_same(const char *a, const char *b)
+ {
+-    if ((a == b) || strcmp(a, b) == 0) {
++    if (a == b || (a && b && strcmp(a, b) == 0)) {
+         return true;
+     }
+-
+     return false;
+ }
diff --git a/meta-stx/recipes-security/gssproxy/files/Turn-on-Wextra.patch b/meta-stx/recipes-security/gssproxy/files/Turn-on-Wextra.patch
new file mode 100644 (file)
index 0000000..bfec8e4
--- /dev/null
@@ -0,0 +1,26 @@
+From a50ea0aa3dfd39ab4a3c39dde35c12fc51fe40d5 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 15 Mar 2017 13:28:26 -0400
+Subject: [PATCH] Turn on -Wextra
+
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+Merges: #173
+(cherry picked from commit 85bc3d794efa52aba4c32f6109e7e7741521ec5f)
+---
+ proxy/Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/proxy/Makefile.am b/proxy/Makefile.am
+index e1fbac1..5cd2255 100644
+--- a/proxy/Makefile.am
++++ b/proxy/Makefile.am
+@@ -44,7 +44,7 @@ AM_CFLAGS += -Wall -Wshadow -Wstrict-prototypes -Wpointer-arith \
+     -Wcast-qual -Wcast-align -Wwrite-strings \
+     -fstrict-aliasing -Wstrict-aliasing -Werror=strict-aliasing \
+     -Werror-implicit-function-declaration \
+-    -Werror=format-security
++    -Werror=format-security -Wextra
+ if BUILD_HARDENING
+     AM_CPPFLAGS += -D_FORTIFY_SOURCE=2 -Wdate-time
+     AM_CFLAGS += -fPIE -fstack-protector-strong
diff --git a/meta-stx/recipes-security/gssproxy/files/Update-systemd-file.patch b/meta-stx/recipes-security/gssproxy/files/Update-systemd-file.patch
new file mode 100644 (file)
index 0000000..6b2345b
--- /dev/null
@@ -0,0 +1,35 @@
+From 90d7a614b3eb451f0067dfacf0f0b6f41eb00180 Mon Sep 17 00:00:00 2001
+From: Robbie Harwood <rharwood@redhat.com>
+Date: Wed, 26 Apr 2017 21:02:47 -0400
+Subject: [PATCH] Update systemd file
+
+Add `reload` capability, and remove dependency on nfs-utils.
+
+Closes: #127
+Signed-off-by: Robbie Harwood <rharwood@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+(cherry picked from commit c7e8b4066575508a91a38bb6a44694c8a171f0c5)
+---
+ proxy/systemd/gssproxy.service.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/proxy/systemd/gssproxy.service.in b/proxy/systemd/gssproxy.service.in
+index dae39ee..f50f526 100644
+--- a/proxy/systemd/gssproxy.service.in
++++ b/proxy/systemd/gssproxy.service.in
+@@ -3,7 +3,6 @@ Description=GSSAPI Proxy Daemon
+ # GSSPROXY will not be started until syslog is
+ After=syslog.target
+ Before=nfs-secure.service nfs-secure-server.service
+-Requires=proc-fs-nfsd.mount
+ [Service]
+ Environment=KRB5RCACHEDIR=/var/lib/gssproxy/rcache
+@@ -12,6 +11,7 @@ ExecStart=@sbindir@/gssproxy -D
+ # consult systemd.service(5) for more details
+ Type=forking
+ PIDFile=@localstatedir@/run/gssproxy.pid
++ExecReload=/bin/kill -HUP $MAINPID
+ [Install]
+ WantedBy=multi-user.target
diff --git a/meta-stx/recipes-security/gssproxy/files/client-Switch-to-non-blocking-sockets.patch b/meta-stx/recipes-security/gssproxy/files/client-Switch-to-non-blocking-sockets.patch
new file mode 100644 (file)
index 0000000..2d0b34b
--- /dev/null
@@ -0,0 +1,485 @@
+From 1962e6128a4d86a7c54977577e1e4224cadbb5f7 Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <ascheel@redhat.com>
+Date: Wed, 2 Aug 2017 15:11:49 -0400
+Subject: [PATCH] [client] Switch to non-blocking sockets
+
+Switch the gssproxy client library to non-blocking sockets, allowing
+for timeout and retry operations.  The client will automatically retry
+both send() and recv() operations three times on ETIMEDOUT.  If the
+combined send() and recv() hit the three time limit, ETIMEDOUT will be
+exposed to the caller in the minor status.
+
+Signed-off-by: Alexander Scheel <ascheel@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+[rharwood@redhat.com: commit message cleanups, rebased]
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+(cherry picked from commit d035646c8feb0b78f0c157580ca02c46cd00dd7e)
+---
+ proxy/src/client/gpm_common.c | 317 +++++++++++++++++++++++++++++++---
+ 1 file changed, 295 insertions(+), 22 deletions(-)
+
+diff --git a/proxy/src/client/gpm_common.c b/proxy/src/client/gpm_common.c
+index 2133618..dba23a6 100644
+--- a/proxy/src/client/gpm_common.c
++++ b/proxy/src/client/gpm_common.c
+@@ -7,9 +7,15 @@
+ #include <stdlib.h>
+ #include <time.h>
+ #include <pthread.h>
++#include <sys/epoll.h>
++#include <fcntl.h>
++#include <sys/timerfd.h>
+ #define FRAGMENT_BIT (1 << 31)
++#define RESPONSE_TIMEOUT 15
++#define MAX_TIMEOUT_RETRY 3
++
+ struct gpm_ctx {
+     pthread_mutex_t lock;
+     int fd;
+@@ -20,6 +26,9 @@ struct gpm_ctx {
+     gid_t gid;
+     int next_xid;
++
++    int epollfd;
++    int timerfd;
+ };
+ /* a single global struct is not particularly efficient,
+@@ -39,6 +48,8 @@ static void gpm_init_once(void)
+     pthread_mutex_init(&gpm_global_ctx.lock, &attr);
+     gpm_global_ctx.fd = -1;
++    gpm_global_ctx.epollfd = -1;
++    gpm_global_ctx.timerfd = -1;
+     seedp = time(NULL) + getpid() + pthread_self();
+     gpm_global_ctx.next_xid = rand_r(&seedp);
+@@ -69,6 +80,7 @@ static int gpm_open_socket(struct gpm_ctx *gpmctx)
+     struct sockaddr_un addr = {0};
+     char name[PATH_MAX];
+     int ret;
++    unsigned flags;
+     int fd = -1;
+     ret = get_pipe_name(name);
+@@ -86,6 +98,18 @@ static int gpm_open_socket(struct gpm_ctx *gpmctx)
+         goto done;
+     }
++    ret = fcntl(fd, F_GETFD, &flags);
++    if (ret != 0) {
++        ret = errno;
++        goto done;
++    }
++
++    ret = fcntl(fd, F_SETFD, flags | O_NONBLOCK);
++    if (ret != 0) {
++        ret = errno;
++        goto done;
++    }
++
+     ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
+     if (ret == -1) {
+         ret = errno;
+@@ -163,6 +187,158 @@ static int gpm_release_sock(struct gpm_ctx *gpmctx)
+     return pthread_mutex_unlock(&gpmctx->lock);
+ }
++static void gpm_timer_close(struct gpm_ctx *gpmctx) {
++    if (gpmctx->timerfd < 0) {
++        return;
++    }
++
++    close(gpmctx->timerfd);
++    gpmctx->timerfd = -1;
++}
++
++static int gpm_timer_setup(struct gpm_ctx *gpmctx, int timeout_seconds) {
++    int ret;
++    struct itimerspec its;
++
++    if (gpmctx->timerfd >= 0) {
++        gpm_timer_close(gpmctx);
++    }
++
++    gpmctx->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC);
++    if (gpmctx->timerfd < 0) {
++        return errno;
++    }
++
++    its.it_interval.tv_sec = timeout_seconds;
++    its.it_interval.tv_nsec = 0;
++    its.it_value.tv_sec = timeout_seconds;
++    its.it_value.tv_nsec = 0;
++
++    ret = timerfd_settime(gpmctx->timerfd, 0, &its, NULL);
++    if (ret) {
++        ret = errno;
++        gpm_timer_close(gpmctx);
++        return ret;
++    }
++
++    return 0;
++}
++
++static void gpm_epoll_close(struct gpm_ctx *gpmctx) {
++    if (gpmctx->epollfd < 0) {
++        return;
++    }
++
++    close(gpmctx->epollfd);
++    gpmctx->epollfd = -1;
++}
++
++static int gpm_epoll_setup(struct gpm_ctx *gpmctx) {
++    struct epoll_event ev;
++    int ret;
++
++    if (gpmctx->epollfd >= 0) {
++        gpm_epoll_close(gpmctx);
++    }
++
++    gpmctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
++    if (gpmctx->epollfd == -1) {
++        return errno;
++    }
++
++    /* Add timer */
++    ev.events = EPOLLIN;
++    ev.data.fd = gpmctx->timerfd;
++    ret = epoll_ctl(gpmctx->epollfd, EPOLL_CTL_ADD, gpmctx->timerfd, &ev);
++    if (ret == -1) {
++        ret = errno;
++        gpm_epoll_close(gpmctx);
++        return ret;
++    }
++
++    return ret;
++}
++
++static int gpm_epoll_wait(struct gpm_ctx *gpmctx, uint32_t event_flags) {
++    int ret;
++    int epoll_ret;
++    struct epoll_event ev;
++    struct epoll_event events[2];
++    uint64_t timer_read;
++
++    if (gpmctx->epollfd < 0) {
++        ret = gpm_epoll_setup(gpmctx);
++        if (ret)
++            return ret;
++    }
++
++    ev.events = event_flags;
++    ev.data.fd = gpmctx->fd;
++    epoll_ret = epoll_ctl(gpmctx->epollfd, EPOLL_CTL_ADD, gpmctx->fd, &ev);
++    if (epoll_ret == -1) {
++        ret = errno;
++        gpm_epoll_close(gpmctx);
++        return ret;
++    }
++
++    do {
++        epoll_ret = epoll_wait(gpmctx->epollfd, events, 2, -1);
++    } while (epoll_ret < 0 && errno == EINTR);
++
++    if (epoll_ret < 0) {
++        /* Error while waiting that isn't EINTR */
++        ret = errno;
++        gpm_epoll_close(gpmctx);
++    } else if (epoll_ret == 0) {
++        /* Shouldn't happen as timeout == -1; treat it like a timeout
++         * occurred. */
++        ret = ETIMEDOUT;
++        gpm_epoll_close(gpmctx);
++    } else if (epoll_ret == 1 && events[0].data.fd == gpmctx->timerfd) {
++        /* Got an event which is only our timer */
++        ret = read(gpmctx->timerfd, &timer_read, sizeof(uint64_t));
++        if (ret == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
++            /* In the case when reading from the timer failed, don't hide the
++             * timer error behind ETIMEDOUT such that it isn't retried */
++            ret = errno;
++        } else {
++            /* If ret == 0, then we definitely timed out. Else, if ret == -1
++             * and errno == EAGAIN or errno == EWOULDBLOCK, we're in a weird
++             * edge case where epoll thinks the timer can be read, but it
++             * is blocking more; treat it like a TIMEOUT and retry, as
++             * nothing around us would handle EAGAIN from timer and retry
++             * it. */
++            ret = ETIMEDOUT;
++        }
++        gpm_epoll_close(gpmctx);
++    } else {
++        /* If ret == 2, then we ignore the timerfd; that way if the next
++         * operation cannot be performed immediately, we timeout and retry.
++         * If ret == 1 and data.fd == gpmctx->fd, return 0. */
++        ret = 0;
++    }
++
++    epoll_ret = epoll_ctl(gpmctx->epollfd, EPOLL_CTL_DEL, gpmctx->fd, NULL);
++    if (epoll_ret == -1) {
++        /* If we previously had an error, expose that error instead of
++         * clobbering it with errno; else if no error, then assume it is
++         * better to notify of the error deleting the event than it is
++         * to continue. */
++        if (ret == 0)
++            ret = errno;
++        gpm_epoll_close(gpmctx);
++    }
++
++    return ret;
++}
++
++static int gpm_retry_socket(struct gpm_ctx *gpmctx)
++{
++    gpm_epoll_close(gpmctx);
++    gpm_close_socket(gpmctx);
++    return gpm_open_socket(gpmctx);
++}
++
+ /* must be called after the lock has been grabbed */
+ static int gpm_send_buffer(struct gpm_ctx *gpmctx,
+                            char *buffer, uint32_t length)
+@@ -183,8 +359,13 @@ static int gpm_send_buffer(struct gpm_ctx *gpmctx,
+     retry = false;
+     do {
+         do {
++            ret = gpm_epoll_wait(gpmctx, EPOLLOUT);
++            if (ret != 0) {
++                goto done;
++            }
++
+             ret = 0;
+-            wn = send(gpmctx->fd, &size, sizeof(uint32_t), MSG_NOSIGNAL);
++            wn = write(gpmctx->fd, &size, sizeof(uint32_t));
+             if (wn == -1) {
+                 ret = errno;
+             }
+@@ -192,8 +373,7 @@ static int gpm_send_buffer(struct gpm_ctx *gpmctx,
+         if (wn != 4) {
+             /* reopen and retry once */
+             if (retry == false) {
+-                gpm_close_socket(gpmctx);
+-                ret = gpm_open_socket(gpmctx);
++                ret = gpm_retry_socket(gpmctx);
+                 if (ret == 0) {
+                     retry = true;
+                     continue;
+@@ -208,9 +388,14 @@ static int gpm_send_buffer(struct gpm_ctx *gpmctx,
+     pos = 0;
+     while (length > pos) {
+-        wn = send(gpmctx->fd, buffer + pos, length - pos, MSG_NOSIGNAL);
++        ret = gpm_epoll_wait(gpmctx, EPOLLOUT);
++        if (ret) {
++            goto done;
++        }
++
++        wn = write(gpmctx->fd, buffer + pos, length - pos);
+         if (wn == -1) {
+-            if (errno == EINTR) {
++            if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
+                 continue;
+             }
+             ret = errno;
+@@ -231,7 +416,7 @@ done:
+ /* must be called after the lock has been grabbed */
+ static int gpm_recv_buffer(struct gpm_ctx *gpmctx,
+-                           char *buffer, uint32_t *length)
++                           char **buffer, uint32_t *length)
+ {
+     uint32_t size;
+     ssize_t rn;
+@@ -239,6 +424,11 @@ static int gpm_recv_buffer(struct gpm_ctx *gpmctx,
+     int ret;
+     do {
++        ret = gpm_epoll_wait(gpmctx, EPOLLIN);
++        if (ret) {
++            goto done;
++        }
++
+         ret = 0;
+         rn = read(gpmctx->fd, &size, sizeof(uint32_t));
+         if (rn == -1) {
+@@ -258,11 +448,22 @@ static int gpm_recv_buffer(struct gpm_ctx *gpmctx,
+         goto done;
+     }
++    *buffer = malloc(*length);
++    if (*buffer == NULL) {
++        ret = ENOMEM;
++        goto done;
++    }
++
+     pos = 0;
+     while (*length > pos) {
+-        rn = read(gpmctx->fd, buffer + pos, *length - pos);
++        ret = gpm_epoll_wait(gpmctx, EPOLLIN);
++        if (ret) {
++            goto done;
++        }
++
++        rn = read(gpmctx->fd, *buffer + pos, *length - pos);
+         if (rn == -1) {
+-            if (errno == EINTR) {
++            if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) {
+                 continue;
+             }
+             ret = errno;
+@@ -281,6 +482,7 @@ done:
+     if (ret) {
+         /* on errors we can only close the fd and return */
+         gpm_close_socket(gpmctx);
++        gpm_epoll_close(gpmctx);
+     }
+     return ret;
+ }
+@@ -309,6 +511,63 @@ static struct gpm_ctx *gpm_get_ctx(void)
+     return &gpm_global_ctx;
+ }
++static int gpm_send_recv_loop(struct gpm_ctx *gpmctx, char *send_buffer,
++                              uint32_t send_length, char** recv_buffer,
++                              uint32_t *recv_length)
++{
++    int ret;
++    int retry_count;
++
++    /* setup timer */
++    ret = gpm_timer_setup(gpmctx, RESPONSE_TIMEOUT);
++    if (ret)
++        return ret;
++
++    for (retry_count = 0; retry_count < MAX_TIMEOUT_RETRY; retry_count++) {
++        /* send to proxy */
++        ret = gpm_send_buffer(gpmctx, send_buffer, send_length);
++
++        if (ret == 0) {
++            /* No error, continue to recv */
++        } else if (ret == ETIMEDOUT) {
++            /* Close and reopen socket before trying again */
++            ret = gpm_retry_socket(gpmctx);
++            if (ret != 0)
++                return ret;
++            ret = ETIMEDOUT;
++
++            /* RETRY entire send */
++            continue;
++        } else {
++            /* Other error */
++            return ret;
++        }
++
++        /* receive answer */
++        ret = gpm_recv_buffer(gpmctx, recv_buffer, recv_length);
++        if (ret == 0) {
++            /* No error */
++            break;
++        } else if (ret == ETIMEDOUT) {
++            /* Close and reopen socket before trying again */
++            ret = gpm_retry_socket(gpmctx);
++
++            /* Free buffer and set it to NULL to prevent free(xdr_reply_ctx) */
++            free(recv_buffer);
++            recv_buffer = NULL;
++
++            if (ret != 0)
++                return ret;
++            ret = ETIMEDOUT;
++        } else {
++            /* Other error */
++            return ret;
++        }
++    }
++
++    return ret;
++}
++
+ OM_uint32 gpm_release_buffer(OM_uint32 *minor_status,
+                              gss_buffer_t buffer)
+ {
+@@ -399,15 +658,20 @@ int gpm_make_call(int proc, union gp_rpc_arg *arg, union gp_rpc_res *res)
+     gp_rpc_msg msg;
+     XDR xdr_call_ctx;
+     XDR xdr_reply_ctx;
+-    char buffer[MAX_RPC_SIZE];
+-    uint32_t length;
++    char *send_buffer = NULL;
++    char *recv_buffer = NULL;
++    uint32_t send_length;
++    uint32_t recv_length;
+     uint32_t xid;
+     bool xdrok;
+     bool sockgrab = false;
+     int ret;
+-    xdrmem_create(&xdr_call_ctx, buffer, MAX_RPC_SIZE, XDR_ENCODE);
+-    xdrmem_create(&xdr_reply_ctx, buffer, MAX_RPC_SIZE, XDR_DECODE);
++    send_buffer = malloc(MAX_RPC_SIZE);
++    if (send_buffer == NULL)
++        return ENOMEM;
++
++    xdrmem_create(&xdr_call_ctx, send_buffer, MAX_RPC_SIZE, XDR_ENCODE);
+     memset(&msg, 0, sizeof(gp_rpc_msg));
+     msg.header.type = GP_RPC_CALL;
+@@ -450,22 +714,22 @@ int gpm_make_call(int proc, union gp_rpc_arg *arg, union gp_rpc_res *res)
+         goto done;
+     }
+-    /* send to proxy */
+-    ret = gpm_send_buffer(gpmctx, buffer, xdr_getpos(&xdr_call_ctx));
+-    if (ret) {
+-        goto done;
+-    }
++    /* set send_length */
++    send_length = xdr_getpos(&xdr_call_ctx);
+-    /* receive answer */
+-    ret = gpm_recv_buffer(gpmctx, buffer, &length);
+-    if (ret) {
++    /* Send request, receive response with timeout */
++    ret = gpm_send_recv_loop(gpmctx, send_buffer, send_length, &recv_buffer,
++                             &recv_length);
++    if (ret)
+         goto done;
+-    }
+     /* release the lock */
+     gpm_release_sock(gpmctx);
+     sockgrab = false;
++    /* Create the reply context */
++    xdrmem_create(&xdr_reply_ctx, recv_buffer, recv_length, XDR_DECODE);
++
+     /* decode header */
+     memset(&msg, 0, sizeof(gp_rpc_msg));
+     xdrok = xdr_gp_rpc_msg(&xdr_reply_ctx, &msg);
+@@ -489,12 +753,21 @@ int gpm_make_call(int proc, union gp_rpc_arg *arg, union gp_rpc_res *res)
+     }
+ done:
++    gpm_timer_close(gpmctx);
++    gpm_epoll_close(gpmctx);
++
+     if (sockgrab) {
+         gpm_release_sock(gpmctx);
+     }
+     xdr_free((xdrproc_t)xdr_gp_rpc_msg, (char *)&msg);
+     xdr_destroy(&xdr_call_ctx);
+-    xdr_destroy(&xdr_reply_ctx);
++
++    if (recv_buffer != NULL)
++        xdr_destroy(&xdr_reply_ctx);
++
++    free(send_buffer);
++    free(recv_buffer);
++
+     return ret;
+ }
diff --git a/meta-stx/recipes-security/gssproxy/files/server-Add-detailed-request-logging.patch b/meta-stx/recipes-security/gssproxy/files/server-Add-detailed-request-logging.patch
new file mode 100644 (file)
index 0000000..778c755
--- /dev/null
@@ -0,0 +1,123 @@
+From f413cc257c6c1e60090c72163152ae7fd2180c41 Mon Sep 17 00:00:00 2001
+From: Alexander Scheel <ascheel@redhat.com>
+Date: Fri, 4 Aug 2017 16:09:20 -0400
+Subject: [PATCH] [server] Add detailed request logging
+
+Add request logging to track requests through gssproxy.  Requests are
+logged as they are read, processed, handled, and replies sent.  These
+are identified by buffer memory address and size.
+
+Signed-off-by: Alexander Scheel <ascheel@redhat.com>
+Reviewed-by: Simo Sorce <simo@redhat.com>
+[rharwood@redhat.com: commit message cleanups, rebase]
+Reviewed-by: Robbie Harwood <rharwood@redhat.com>
+Merges: #205
+(cherry picked from commit 4097dafad3f276c3cf7b1255fe0540e16d59ae03)
+---
+ proxy/src/gp_rpc_process.c |  6 ++++++
+ proxy/src/gp_socket.c      | 12 ++++++++++++
+ proxy/src/gp_workers.c     |  5 +++++
+ 3 files changed, 23 insertions(+)
+
+diff --git a/proxy/src/gp_rpc_process.c b/proxy/src/gp_rpc_process.c
+index 0ea17f0..eaffc55 100644
+--- a/proxy/src/gp_rpc_process.c
++++ b/proxy/src/gp_rpc_process.c
+@@ -372,9 +372,12 @@ int gp_rpc_process_call(struct gp_call_ctx *gpcall,
+     xdrmem_create(&xdr_reply_ctx, reply_buffer, MAX_RPC_SIZE, XDR_ENCODE);
+     /* decode request */
++    GPDEBUGN(3, "[status] Processing request [%p (%zu)]\n", inbuf, inlen);
+     ret = gp_rpc_decode_call(&xdr_call_ctx, &xid, &proc, &arg, &acc, &rej);
+     if (!ret) {
+         /* execute request */
++        GPDEBUGN(3, "[status] Executing request %d (%s) from [%p (%zu)]\n",
++                 proc, gp_rpc_procname(proc), inbuf, inlen);
+         ret = gp_rpc_execute(gpcall, proc, &arg, &res);
+         if (ret) {
+             acc = GP_RPC_SYSTEM_ERR;
+@@ -388,6 +391,9 @@ int gp_rpc_process_call(struct gp_call_ctx *gpcall,
+         /* return encoded buffer */
+         ret = gp_rpc_return_buffer(&xdr_reply_ctx,
+                                    reply_buffer, outbuf, outlen);
++        GPDEBUGN(3, "[status] Returned buffer %d (%s) from [%p (%zu)]: "
++                 "[%p (%zu)]\n", proc, gp_rpc_procname(proc), inbuf, inlen,
++                 *outbuf, *outlen);
+     }
+     /* free resources */
+     gp_rpc_free_xdrs(proc, &arg, &res);
+diff --git a/proxy/src/gp_socket.c b/proxy/src/gp_socket.c
+index 5064e51..8675a0e 100644
+--- a/proxy/src/gp_socket.c
++++ b/proxy/src/gp_socket.c
+@@ -441,6 +441,8 @@ void gp_socket_send_data(verto_ctx *vctx, struct gp_conn *conn,
+     wbuf = calloc(1, sizeof(struct gp_buffer));
+     if (!wbuf) {
++        GPDEBUGN(3, "[status] OOM in gp_socket_send_data: %p (%zu)\n",
++                 buffer, buflen);
+         /* too bad, must kill the client connection now */
+         gp_conn_free(conn);
+         return;
+@@ -467,6 +469,8 @@ static void gp_socket_write(verto_ctx *vctx, verto_ev *ev)
+     vecs = 0;
++    GPDEBUGN(3, "[status] Sending data: %p (%zu)\n", wbuf->data, wbuf->size);
++
+     if (wbuf->pos == 0) {
+         /* first write, send the buffer size as packet header */
+         size = wbuf->size | FRAGMENT_BIT;
+@@ -489,6 +493,9 @@ static void gp_socket_write(verto_ctx *vctx, verto_ev *ev)
+             gp_socket_schedule_write(vctx, wbuf);
+         } else {
+             /* error on socket, close and release it */
++            GPDEBUGN(3, "[status] Error %d in gp_socket_write on writing for "
++                     "[%p (%zu:%zu)]\n", errno, wbuf->data, wbuf->pos,
++                     wbuf->size);
+             gp_conn_free(wbuf->conn);
+             gp_buffer_free(wbuf);
+         }
+@@ -498,6 +505,8 @@ static void gp_socket_write(verto_ctx *vctx, verto_ev *ev)
+         if (wn < (ssize_t) sizeof(size)) {
+             /* don't bother trying to handle sockets that can't
+              * buffer even 4 bytes */
++            GPDEBUGN(3, "[status] Sending data [%p (%zu)]: failed with short "
++                     "write of %d\n", wbuf->data, wbuf->size, wn);
+             gp_conn_free(wbuf->conn);
+             gp_buffer_free(wbuf);
+             return;
+@@ -505,6 +514,9 @@ static void gp_socket_write(verto_ctx *vctx, verto_ev *ev)
+         wn -= sizeof(size);
+     }
++    GPDEBUGN(3, "[status] Sending data [%p (%zu)]: successful write of %d\n",
++             wbuf->data, wbuf->size, wn);
++
+     wbuf->pos += wn;
+     if (wbuf->size > wbuf->pos) {
+         /* short write, reschedule */
+diff --git a/proxy/src/gp_workers.c b/proxy/src/gp_workers.c
+index d37e57c..2a33c21 100644
+--- a/proxy/src/gp_workers.c
++++ b/proxy/src/gp_workers.c
+@@ -319,6 +319,7 @@ static void gp_handle_reply(verto_ctx *vctx, verto_ev *ev)
+             break;
+         case GP_QUERY_OUT:
++            GPDEBUGN(3, "[status] Handling query reply: %p (%zu)\n", q->buffer, q->buflen);
+             gp_socket_send_data(vctx, q->conn, q->buffer, q->buflen);
+             gp_query_free(q, false);
+             break;
+@@ -381,7 +382,11 @@ static void *gp_worker_main(void *pvt)
+         gp_debug_set_conn_id(gp_conn_get_cid(q->conn));
+         /* handle the client request */
++        GPDEBUGN(3, "[status] Handling query input: %p (%zu)\n", q->buffer,
++                 q->buflen);
+         gp_handle_query(t->pool, q);
++        GPDEBUGN(3 ,"[status] Handling query output: %p (%zu)\n", q->buffer,
++                 q->buflen);
+         /* now get lock on main queue, to play with the reply list */
+         /* ======> POOL LOCK */
diff --git a/meta-stx/recipes-security/gssproxy/gssproxy_0.7.0.bb b/meta-stx/recipes-security/gssproxy/gssproxy_0.7.0.bb
new file mode 100644 (file)
index 0000000..ed26073
--- /dev/null
@@ -0,0 +1,89 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "provides a daemon to manage access to GSSAPI credentials"
+HOMEPAGE = "https://pagure.io/gssproxy"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://COPYING;md5=a9ac3d0a983ebc781f7aa7173499e2e5"
+
+DEPENDS += "popt ding-libs keyutils libverto krb5 libxslt-native libxml2-native \
+            libselinux libpthread-stubs gettext-native \
+            "
+RDEPENDS_${PN} += "libinih popt libverto-libevent libverto-tevent keyutils \
+                   libgssapi-krb5 libgssrpc libk5crypto libkadm5clnt-mit \
+                   libkadm5srv-mit libkdb5 libkrad libkrb5 libkrb5support \
+                   libxslt libxslt-bin libxml2-utils libxml2 \
+                   "
+
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI = "https://releases.pagure.org/gssproxy/gssproxy-${PV}.tar.gz \
+      file://Properly-renew-expired-credentials.patch;striplevel=2 \
+      file://Change-impersonator-check-code.patch;striplevel=2 \
+      file://Allow-connection-to-self-when-impersonator-set.patch;striplevel=2 \
+      file://Make-proc-file-failure-loud-but-nonfatal.patch;striplevel=2 \
+      file://Turn-on-Wextra.patch;striplevel=2 \
+      file://Fix-unused-variables.patch;striplevel=2 \
+      file://Fix-mismatched-sign-comparisons.patch;striplevel=2 \
+      file://Fix-error-checking-on-get_impersonator_fallback.patch;striplevel=2 \
+      file://Remove-gpm_release_ctx-to-fix-double-unlock.patch;striplevel=2 \
+      file://Appease-gcc-7-s-fallthrough-detection.patch;striplevel=2 \
+      file://Fix-memory-leak.patch;striplevel=2 \
+      file://Fix-most-memory-leaks.patch;striplevel=2 \
+      file://Fix-segfault-when-no-config-files-are-present.patch;striplevel=2 \
+      file://Update-systemd-file.patch;striplevel=2 \
+      file://Fix-error-handling-in-gp_config_from_dir.patch;striplevel=2 \
+      file://Do-not-call-gpm_grab_sock-twice.patch;striplevel=2 \
+      file://Only-empty-FILE-ccaches-when-storing-remote-creds.patch;striplevel=2 \
+      file://Handle-outdated-encrypted-ccaches.patch;striplevel=2 \
+      file://Separate-cred-and-ccache-manipulation-in-gpp_store_r.patch;striplevel=2 \
+      file://Properly-locate-credentials-in-collection-caches-in-.patch;striplevel=2 \
+      file://Properly-initialize-ccaches-before-storing-into-them.patch;striplevel=2 \
+      file://Include-header-for-writev.patch;striplevel=2 \
+      file://Tolerate-NULL-pointers-in-gp_same.patch;striplevel=2 \
+      file://Add-Client-ID-to-debug-messages.patch;striplevel=2 \
+      file://client-Switch-to-non-blocking-sockets.patch;striplevel=2 \
+      file://server-Add-detailed-request-logging.patch;striplevel=2 \
+      file://Fix-potential-free-of-non-heap-address.patch;striplevel=2 \
+      file://Prevent-uninitialized-read-in-error-path-of-XDR-cont.patch;striplevel=2 \
+      file://Simplify-setting-NONBLOCK-on-socket.patch;striplevel=2 \
+      file://Fix-handling-of-non-EPOLLIN-EPOLLOUT-events.patch;striplevel=2 \
+      file://Fix-error-handling-in-gpm_send_buffer-gpm_recv_buffe.patch;striplevel=2 \
+      file://Emit-debug-on-queue-errors.patch;striplevel=2 \
+      file://Conditionally-reload-kernel-interface-on-SIGHUP.patch;striplevel=2 \
+      file://Don-t-leak-mech_type-when-CONTINUE_NEEDED-from-init_.patch;striplevel=2 \
+      file://Always-use-the-encype-we-selected.patch;striplevel=2 \
+      file://Clarify-debug-and-debug_level-in-man-pages.patch;striplevel=2 \
+      file://Always-choose-highest-requested-debug-level.patch;striplevel=2 \
+      "
+
+SRC_URI[md5sum] = "1837acb5766ffbc6fcc70ecaa72e285f"
+
+inherit autotools systemd pkgconfig
+
+SYSTEMD_SERVICE_${PN} = "gssproxy.service"
+
+EXTRA_OECONF="--with-pubconf-path=${sysconfdir}/gssproxy \
+              --with-initscript=systemd --disable-static \
+              --disable-rpath --with-gpp-default-behavior=REMOTE_FIRST \
+              --with-systemdunitdir=${systemd_system_unitdir} \
+              --with-manpages=no \
+              "
+
+CFLAGS += "-fPIE -fstack-protector-all"
+LDFLAGS += "-fPIE -pie -Wl,-z,now"
+
diff --git a/meta-stx/recipes-security/gssproxy/gssproxy_0.7.0.bbappend b/meta-stx/recipes-security/gssproxy/gssproxy_0.7.0.bbappend
new file mode 100644 (file)
index 0000000..fa2d4d1
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append () {
+       cp ${B}/examples/24-nfs-server.conf \
+               ${B}/examples/99-nfs-client.conf \
+		${B}/examples/gssproxy.conf ${D}${sysconfdir}/gssproxy
+}
diff --git a/meta-stx/recipes-security/krb5/files/soname_majversion.diff b/meta-stx/recipes-security/krb5/files/soname_majversion.diff
new file mode 100644 (file)
index 0000000..466b8fa
--- /dev/null
@@ -0,0 +1,11 @@
+--- src/util/verto/Makefile.in 2018-05-03 10:34:47.000000000 -0400
++++ src/util/verto/Makefile.in.new     2019-08-14 06:05:03.779775294 -0400
+@@ -3,7 +3,7 @@
+ RELDIR=../util/verto
+ LIBBASE=verto
+-LIBMAJOR=0
++LIBMAJOR=1
+ LIBMINOR=0
+ LOCALINCLUDES=-I$(srcdir) -I.
diff --git a/meta-stx/recipes-security/krb5/krb5_%.bbappend b/meta-stx/recipes-security/krb5/krb5_%.bbappend
new file mode 100644 (file)
index 0000000..8e63fa3
--- /dev/null
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI_append = "file://soname_majversion.diff \
+                 "
+DEPENDS += "libverto"
+
+PACKAGES_remove = "libverto"
+
+RDEPENDS_${PN} += "\
+        libverto \
+"
+RDEPENDS_krb5-admin-server += "\
+        libverto \
+"
+RDEPENDS_krb5-kdc += "\
+        libverto \
+"
diff --git a/meta-stx/recipes-security/libtomcrypt/libtomcrypt_1.18.2.bb b/meta-stx/recipes-security/libtomcrypt/libtomcrypt_1.18.2.bb
new file mode 100644 (file)
index 0000000..5688fff
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "A comprehensive, portable cryptographic toolkit"
+DESCRIPTION = " \
+  A comprehensive, modular and portable cryptographic toolkit that provides \
+  developers with a vast array of well known published block ciphers, one-way \
+  hash functions, chaining modes, pseudo-random number generators, public key \
+  cryptography and a plethora of other routines. Designed from the ground up to \
+  be very simple to use. It has a modular and standard API that allows new ciphers, \
+  hashes and PRNGs to be added or removed without change to the overall end application. \
+  It features easy to use functions and a complete user manual which has many source \
+  snippet examples. \
+"
+HOMEPAGE = "https://github.com/libtom/libtomcrypt"
+
+LICENSE = "PD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=71baacc459522324ef3e2b9e052e8180"
+
+DEPENDS = "libtool-cross"
+
+SRC_URI = "https://github.com/libtom/libtomcrypt/releases/download/v${PV}/crypt-${PV}.tar.xz"
+
+SRC_URI[md5sum] = "e8d22351b7c95bef423c1cd02dcf836d"
+SRC_URI[sha256sum] = "96ad4c3b8336050993c5bc2cf6c057484f2b0f9f763448151567fbab5e767b84"
+
+EXTRA_OEMAKE = " \
+       LIBTOOL=${STAGING_BINDIR_CROSS}/${HOST_SYS}-libtool \
+       LIBPATH=${libdir} \
+       INCPATH=${includedir} \
+       -f makefile.shared \
+       "
+
+do_compile() {
+       oe_runmake library
+}
+
+do_install() {
+       oe_runmake install DESTDIR=${D}
+}
diff --git a/meta-stx/recipes-support/boost/boost_1.69.0.bbappend b/meta-stx/recipes-support/boost/boost_1.69.0.bbappend
new file mode 100644 (file)
index 0000000..ae528be
--- /dev/null
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DEP_PYTHON = "\
+    python \
+    python-native \
+    python-numpy-native \
+    python3 \
+    python3-native \
+    python3-numpy-native \
+"
+
+PACKAGECONFIG = "locale python"
+PACKAGECONFIG[python] = ",,${DEP_PYTHON}"
+
+BJAM_OPTS += "${@bb.utils.contains('BOOST_LIBS', 'python', 'python=${PYTHON_BASEVERSION},2.7', '', d)}"
+
+do_configure_append () {
+    if ${@bb.utils.contains('BOOST_LIBS', 'python', 'true', 'false', d)}; then
+        echo "using python : 2.7 : ${STAGING_BINDIR_NATIVE}/python-native/python : ${STAGING_INCDIR}/python2.7 : ${STAGING_LIBDIR}/python2.7 ;" >> ${WORKDIR}/user-config.jam
+        sed -i -e 's|${STAGING_DIR_HOST}${bindir}/python3|${STAGING_BINDIR_NATIVE}/python3-native/python3|' ${WORKDIR}/user-config.jam
+    fi
+}
+
+PACKAGES += "${PN}-python3"
+
+FILES_${PN}-python = " \
+    ${libdir}/libboost_python2*.so.* \
+    ${libdir}/libboost_numpy2*.so.* \
+"
+FILES_${PN}-python3 = " \
+    ${libdir}/libboost_python3*.so.* \
+    ${libdir}/libboost_numpy3*.so.* \
+"
diff --git a/meta-stx/recipes-support/cluster-glue/cluster-glue/0001-don-t-compile-doc-and-Error-Fix.patch b/meta-stx/recipes-support/cluster-glue/cluster-glue/0001-don-t-compile-doc-and-Error-Fix.patch
new file mode 100644 (file)
index 0000000..dd762b8
--- /dev/null
@@ -0,0 +1,75 @@
+From 86924cbbd595cde81584d74f6c93bcc4a080fcdd Mon Sep 17 00:00:00 2001
+From: Li xin <lixin.fnst@cn.fujitsu.com>
+Date: Tue, 18 Aug 2015 17:43:29 +0900
+Subject: [PATCH] don't compile doc and Error Fix
+
+| help2man: can't get `--help' info from ../../lrm/admin/lrmadmin
+| make[2]: *** [lrmadmin.8] Error 1
+
+| make[2]: a2x: Command not found
+| /usr/bin/xsltproc \
+|       --xinclude \
+|       http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl ha_logd.xml
+| make[2]: *** [hb_report.8] Error 127
+
+Upstream-Status: pending
+
+Signed-off-by: Li Xin <lixin.fnst@cn.fujitsu.com>
+---
+ Makefile.am           |  2 +-
+ configure.ac          |  6 ++++++
+ lrm/admin/Makefile.am | 14 +++++++-------
+ 3 files changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 93dbaf6..2f4d03b 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -23,7 +23,7 @@ MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure DRF/config-h.in \
+                         DRF/stamp-h.in libtool.m4 ltdl.m4 libltdl.tar
+ SUBDIRS                       = include $(LIBLTDL_DIR) replace lib lrm logd \
+-                        hb_report doc config
++                        hb_report config
+ install-exec-local:
+       $(INSTALL) -d $(DESTDIR)/$(HA_COREDIR)
+diff --git a/configure.ac b/configure.ac
+index 36bcf12..809fae6 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1437,3 +1437,9 @@ AC_MSG_RESULT([  CFLAGS                   = ${CFLAGS}])
+ AC_MSG_RESULT([  Libraries                = ${LIBS}])
+ AC_MSG_RESULT([  Stack Libraries          = ${CLUSTERLIBS}])
++if test "$TARGET_PYTHON"; then
++      TARGET_PYTHON=$TARGET_PYTHON
++else
++      TARGET_PYTHON=$PYTHON
++fi
++      AC_SUBST(TARGET_PYTHON)
+diff --git a/lrm/admin/Makefile.am b/lrm/admin/Makefile.am
+index a92cd72..de525ea 100644
+--- a/lrm/admin/Makefile.am
++++ b/lrm/admin/Makefile.am
+@@ -31,10 +31,10 @@ lrmadmin_LDFLAGS   =       $(COMMONLIBS)
+ lrmadmin_LDADD = $(top_builddir)/lib/$(LRM_DIR)/liblrm.la
+ lrmadmin_DEPENDENCIES = $(top_builddir)/lib/$(LRM_DIR)/liblrm.la
+-if BUILD_HELP
+-man8_MANS =   $(sbin_PROGRAMS:%=%.8)
+-%.8:  %
+-      echo Creating $@
+-      chmod a+x $<
+-      help2man --output $@ --no-info --section 8 --name "Part of the Linux-HA project" $(top_builddir)/lrm/admin/$<
+-endif
++#if BUILD_HELP
++#man8_MANS =  $(sbin_PROGRAMS:%=%.8)
++#%.8: %
++#     echo Creating $@
++#     chmod a+x $<
++#     help2man --output $@ --no-info --section 8 --name "Part of the Linux-HA project" $(top_builddir)/lrm/admin/$<
++#endif
+-- 
+1.8.4.2
+
diff --git a/meta-stx/recipes-support/cluster-glue/cluster-glue/0001-ribcl.py.in-Warning-Fix.patch b/meta-stx/recipes-support/cluster-glue/cluster-glue/0001-ribcl.py.in-Warning-Fix.patch
new file mode 100644 (file)
index 0000000..31010d4
--- /dev/null
@@ -0,0 +1,34 @@
+From a2c66927b75547cee1db1340a67449ded0812df3 Mon Sep 17 00:00:00 2001
+From: Li xin <lixin.fnst@cn.fujitsu.com>
+Date: Wed, 19 Aug 2015 11:13:46 +0900
+Subject: [PATCH] ribcl.py.in: Warning Fix
+
+WARNING: QA Issue: /usr/lib/stonith/plugins/stonith2/ribcl.py_cluster-glue-plugin-stonith2-ribcl
+contained in package cluster-glue-plugin-stonith2-ribcl requires /usr/local/bin/python,
+but no providers found in its RDEPENDS [file-rdeps]
+
+Add target python handling
+Allow the build to specify a path for the python instead of reusing
+the PYTHON variable which can lead to inconsistencies if we are cross
+compiling.
+
+Upstream-Status: pending
+
+Signed-off-by: Li Xin <lixin.fnst@cn.fujitsu.com>
+---
+ lib/plugins/stonith/ribcl.py.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/plugins/stonith/ribcl.py.in b/lib/plugins/stonith/ribcl.py.in
+index 14e070c..01205e3 100644
+--- a/lib/plugins/stonith/ribcl.py.in
++++ b/lib/plugins/stonith/ribcl.py.in
+@@ -1,4 +1,4 @@
+-#!@PYTHON@
++#!@TARGET_PYTHON@
+ #
+-- 
+1.8.4.2
+
diff --git a/meta-stx/recipes-support/cluster-glue/cluster-glue/kill-stack-protector.patch b/meta-stx/recipes-support/cluster-glue/cluster-glue/kill-stack-protector.patch
new file mode 100644 (file)
index 0000000..25a21f4
--- /dev/null
@@ -0,0 +1,12 @@
+Index: Reusable-Cluster-Components-glue-1.0.3/configure.ac
+===================================================================
+--- Reusable-Cluster-Components-glue-1.0.3.orig/configure.ac   2010-04-23 12:35:52.000000000 +0400
++++ Reusable-Cluster-Components-glue-1.0.3/configure.ac        2010-04-23 12:36:00.000000000 +0400
+@@ -1135,7 +1135,6 @@
+       # We had to eliminate -Wnested-externs because of libtool changes
+         EXTRA_FLAGS="-fgnu89-inline
+-              -fstack-protector-all
+               -Wall
+               -Waggregate-return
+               -Wbad-function-cast 
diff --git a/meta-stx/recipes-support/cluster-glue/cluster-glue/tmpfiles b/meta-stx/recipes-support/cluster-glue/cluster-glue/tmpfiles
new file mode 100644 (file)
index 0000000..b683b28
--- /dev/null
@@ -0,0 +1,8 @@
+d /var/lib/heartbeat 0755 root root -
+d /var/lib/heartbeat/pengine 0750 hacluster haclient -
+d /var/lib/heartbeat/cores 0755 hacluster haclient -
+d /var/lib/heartbeat/cores/hacluster 0700 hacluster haclient -
+d /var/lib/heartbeat/cores/root 0700 root root -
+d /var/lib/heartbeat/cores/nobody 0700 nobody nogroup -
+d /var/run/heartbeat 0755 root root -
+
diff --git a/meta-stx/recipes-support/cluster-glue/cluster-glue/volatiles b/meta-stx/recipes-support/cluster-glue/cluster-glue/volatiles
new file mode 100644 (file)
index 0000000..d6f0c87
--- /dev/null
@@ -0,0 +1,7 @@
+d root root 0755 /var/lib/heartbeat none
+d hacluster haclient 0750 /var/lib/heartbeat/pengine none
+d hacluster haclient 0755 /var/lib/heartbeat/cores none
+d hacluster haclient 0700 /var/lib/heartbeat/cores/hacluster none
+d root root 0700 /var/lib/heartbeat/cores/root none
+d nobody nogroup 0700 /var/lib/heartbeat/cores/nobody none
+d root root 0755  /var/run/heartbeat none
diff --git a/meta-stx/recipes-support/cluster-glue/cluster-glue_1.0.12.bb b/meta-stx/recipes-support/cluster-glue/cluster-glue_1.0.12.bb
new file mode 100644 (file)
index 0000000..33d4951
--- /dev/null
@@ -0,0 +1,152 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Cluster Glue is a set of libraries, tools and utilities suitable for \
+the Heartbeat/Pacemaker cluster stack. In essence, Glue is everything that \
+is not the cluster messaging layer (Heartbeat), nor the cluster resource manager \
+(Pacemaker), nor a Resource Agent."
+HOMEPAGE = "http://clusterlabs.org/"
+LICENSE = "GPLv2 & LGPLv2.1"
+LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe \
+                    file://COPYING.LIB;md5=243b725d71bb5df4a1e5920b344b86ad \
+"
+
+DEPENDS = "libxml2 libtool glib-2.0 bzip2 util-linux net-snmp openhpi"
+
+SRC_URI = " \
+    http://hg.linux-ha.org/glue/archive/glue-${PV}.tar.bz2 \
+    file://0001-don-t-compile-doc-and-Error-Fix.patch \
+    file://0001-ribcl.py.in-Warning-Fix.patch \
+    file://volatiles \
+    file://tmpfiles \
+"
+SRC_URI_append_libc-uclibc = " file://kill-stack-protector.patch"
+
+SRC_URI[md5sum] = "ec620466d6f23affa3b074b72bca7870"
+SRC_URI[sha256sum] = "feba102fa1e24b6be2005089ebe362b82d6567af60005cf371679b1b44ec503f"
+
+inherit autotools useradd pkgconfig systemd
+
+SYSTEMD_SERVICE_${PN} = "logd.service"
+SYSTEMD_AUTO_ENABLE = "disable"
+
+HA_USER = "hacluster"
+HA_GROUP = "haclient"
+
+S = "${WORKDIR}/Reusable-Cluster-Components-glue--glue-${PV}"
+
+PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'systemd', d)}"
+PACKAGECONFIG[systemd] = "--with-systemdsystemunitdir=${systemd_system_unitdir},--without-systemdsystemunitdir,systemd"
+
+EXTRA_OECONF = "--with-daemon-user=${HA_USER} \
+                --with-daemon-group=${HA_GROUP} \
+                --disable-fatal-warnings \
+               "
+
+CACHED_CONFIGUREVARS="ac_cv_path_XML2CONFIG=0"
+
+USERADD_PACKAGES = "${PN}"
+USERADD_PARAM_${PN} = "--home-dir=${localstatedir}/lib/heartbeat/cores/${HA_USER} \
+                       -g ${HA_GROUP} -r -s ${sbindir}/nologin -c 'cluster user' ${HA_USER} \
+                      "
+GROUPADD_PARAM_${PN} = "-r ${HA_GROUP}"
+
+do_configure_prepend() {
+    ln -sf ${PKG_CONFIG_SYSROOT_DIR}/usr/include/libxml2/libxml ${PKG_CONFIG_SYSROOT_DIR}/usr/include/libxml
+}
+
+do_install_append() {
+       install -d ${D}${sysconfdir}/default/volatiles
+       install -m 0644 ${WORKDIR}/volatiles ${D}${sysconfdir}/default/volatiles/04_cluster-glue
+       install -d ${D}${sysconfdir}/tmpfiles.d
+       install -m 0644 ${WORKDIR}/tmpfiles ${D}${sysconfdir}/tmpfiles.d/${PN}.conf
+}
+
+pkg_postinst_${PN} () {
+       if [ -z "$D" ]; then
+               if type systemd-tmpfiles >/dev/null; then
+                       systemd-tmpfiles --create
+               elif [ -e ${sysconfdir}/init.d/populate-volatile.sh ]; then
+                       ${sysconfdir}/init.d/populate-volatile.sh update
+               fi
+       fi
+}
+
+PACKAGES += "\
+        ${PN}-plugin-test \
+        ${PN}-plugin-test-dbg \
+        ${PN}-plugin-test-staticdev \
+        ${PN}-plugin-stonith2 \
+        ${PN}-plugin-stonith2-dbg \
+        ${PN}-plugin-stonith2-staticdev \
+        ${PN}-plugin-stonith2-ribcl \
+        ${PN}-plugin-stonith-external \
+        ${PN}-plugin-raexec \
+        ${PN}-plugin-raexec-dbg \
+        ${PN}-plugin-raexec-staticdev \
+        ${PN}-plugin-interfacemgr \
+        ${PN}-plugin-interfacemgr-dbg \
+        ${PN}-plugin-interfacemgr-staticdev \
+        ${PN}-lrmtest \
+         ${PN}-plugin-compress \
+         ${PN}-plugin-compress-dbg \
+         ${PN}-plugin-compress-staticdev \
+        "
+
+FILES_${PN} = "${sysconfdir} /var ${libdir}/lib*.so.* ${sbindir} ${datadir}/cluster-glue/*sh ${datadir}/cluster-glue/*pl\
+       ${libdir}/heartbeat/transient-test.sh \
+       ${libdir}/heartbeat/logtest \
+       ${libdir}/heartbeat/ipctransientserver \
+       ${libdir}/heartbeat/base64_md5_test \
+       ${libdir}/heartbeat/ipctest \
+       ${libdir}/heartbeat/ipctransientclient \
+       ${libdir}/heartbeat/ha_logd \
+       ${libdir}/heartbeat/lrmd \
+       ${systemd_unitdir} \
+       "
+
+FILES_${PN}-dbg += "${libdir}/heartbeat/.debug/ \
+                   "
+FILES_${PN}-plugin-compress = "${libdir}/heartbeat/plugins/compress/*.so"
+FILES_${PN}-plugin-compress-staticdev = "${libdir}/heartbeat/plugins/compress/*.*a"
+FILES_${PN}-plugin-compress-dbg = "${libdir}/heartbeat/plugins/compress/.debug/"
+
+FILES_${PN}-plugin-test = "${libdir}/heartbeat/plugins/test/test.so"
+FILES_${PN}-plugin-test-staticdev = "${libdir}/heartbeat/plugins/test/test.*a"
+FILES_${PN}-plugin-test-dbg = "${libdir}/heartbeat/plugins/test/.debug/"
+FILES_${PN}-plugin-stonith2 = " \
+       ${libdir}/stonith/plugins/xen0-ha-dom0-stonith-helper \
+       ${libdir}/stonith/plugins/stonith2/*.so \
+       "
+FILES_${PN}-plugin-stonith2-ribcl = "${libdir}/stonith/plugins/stonith2/ribcl.py"
+
+FILES_${PN}-plugin-stonith2-dbg = "${libdir}/stonith/plugins/stonith2/.debug/"
+FILES_${PN}-plugin-stonith2-staticdev = "${libdir}/stonith/plugins/stonith2/*.*a"
+
+FILES_${PN}-plugin-stonith-external = "${libdir}/stonith/plugins/external/"
+FILES_${PN}-plugin-raexec = "${libdir}/heartbeat/plugins/RAExec/*.so"
+FILES_${PN}-plugin-raexec-staticdev = "${libdir}/heartbeat/plugins/RAExec/*.*a"
+FILES_${PN}-plugin-raexec-dbg = "${libdir}/heartbeat/plugins/RAExec/.debug/"
+
+FILES_${PN}-plugin-interfacemgr = "${libdir}/heartbeat/plugins/InterfaceMgr/generic.so"
+FILES_${PN}-plugin-interfacemgr-staticdev = "${libdir}/heartbeat/plugins/InterfaceMgr/generic.*a"
+FILES_${PN}-plugin-interfacemgr-dbg = "${libdir}/heartbeat/plugins/InterfaceMgr/.debug/"
+
+FILES_${PN}-lrmtest = "${datadir}/cluster-glue/lrmtest/"
+
+RDEPENDS_${PN} += "perl"
+RDEPENDS_${PN}-plugin-stonith2 += "bash"
+RDEPENDS_${PN}-plugin-stonith-external += "bash python perl"
+RDEPENDS_${PN}-plugin-stonith2-ribcl += "python"
diff --git a/meta-stx/recipes-support/cluster-resource-agents/resource-agents/01-disable-doc-build.patch b/meta-stx/recipes-support/cluster-resource-agents/resource-agents/01-disable-doc-build.patch
new file mode 100644 (file)
index 0000000..3fce743
--- /dev/null
@@ -0,0 +1,23 @@
+disable doc build
+
+Signed-off-by: Bian Naimeng <biannm@cn.fujitsu.com>
+---
+ Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 1769c6e..d66b2ae 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -37,7 +37,7 @@ RGMANAGER = with
+ endif
+ if BUILD_LINUX_HA
+-SUBDIRS       += include heartbeat tools ldirectord doc
++SUBDIRS       += include heartbeat tools ldirectord
+ LINUX_HA = without
+ else
+ LINUX_HA = with
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/cluster-resource-agents/resource-agents/02-set-OCF_ROOT_DIR-to-libdir-ocf.patch b/meta-stx/recipes-support/cluster-resource-agents/resource-agents/02-set-OCF_ROOT_DIR-to-libdir-ocf.patch
new file mode 100644 (file)
index 0000000..4a8ac64
--- /dev/null
@@ -0,0 +1,23 @@
+Set OCF_ROOT_DIR to ${libdir}/ocf
+
+Signed-off-by: Bian Naimeng <biannm@cn.fujitsu.com>
+---
+ configure.ac | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index bb45717..3326b53 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -406,7 +406,7 @@ dnl Variables needed for substitution
+ AC_CHECK_HEADERS(heartbeat/glue_config.h)
+ if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then
+-  OCF_ROOT_DIR=`extract_header_define heartbeat/glue_config.h OCF_ROOT_DIR`
++  OCF_ROOT_DIR="${libdir}/ocf"
+ else
+   enable_libnet=no
+ fi
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/cluster-resource-agents/resource-agents/03-fix-header-defs-lookup.patch b/meta-stx/recipes-support/cluster-resource-agents/resource-agents/03-fix-header-defs-lookup.patch
new file mode 100644 (file)
index 0000000..f1d3924
--- /dev/null
@@ -0,0 +1,26 @@
+fix header defs lookup
+
+Signed-off-by: Bian Naimeng <biannm@cn.fujitsu.com>
+---
+ configure.ac | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index 3326b53..8b5706a 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -140,9 +140,8 @@ extract_header_define() {
+         Cfile=$srcdir/extract_define.$2.${$}
+         printf "#include <stdio.h>\n" > ${Cfile}.c
+         printf "#include <%s>\n" $1 >> ${Cfile}.c
+-        printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c
+-        $CC $CFLAGS ${Cfile}.c -o ${Cfile}
+-        value=`${Cfile}`
++        printf "\"%s\":%s\n" $2 $2 >> ${Cfile}.c
++        value=`$CC $CFLAGS -E ${Cfile}.c | grep \"$2\" | cut -f 2 -d ':'`
+         AC_MSG_RESULT($value)
+         printf $value
+         rm -f ${Cfile}.c ${Cfile}
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/cluster-resource-agents/resource-agents/fix-install-sh-not-found.patch b/meta-stx/recipes-support/cluster-resource-agents/resource-agents/fix-install-sh-not-found.patch
new file mode 100644 (file)
index 0000000..15c055b
--- /dev/null
@@ -0,0 +1,60 @@
+configure.ac: fix install-sh not found
+
+Fix configure.ac to cope with new autoconf.
+Recent autoconfs generate a bad configure when AM_INIT_AUTOMAKE is
+called as late as it was, ending up thinking that the am_aux_dir is pwd
+at the start of the build. Move it up to under AC_INIT to fix that.
+
+Author: William Grant <wgrant@ubuntu.com>
+
+Upstream-Status: Pending
+
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=724116
+
+Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
+---
+ configure.ac | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index e8e2a7b..f3c8b9f 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -23,13 +23,20 @@ AC_INIT([resource-agents],
+       m4_esyscmd([make/git-version-gen .tarball-version]),
+       [to_be_defined@foobar.org])
++AC_CONFIG_AUX_DIR(.)
++
++dnl
++dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz])
++dnl
++
++AM_INIT_AUTOMAKE([1.10.1 foreign dist-bzip2])
++
+ AC_USE_SYSTEM_EXTENSIONS
+ CRM_DTD_VERSION="1.0"
+ PKG_FEATURES=""
+-AC_CONFIG_AUX_DIR(.)
+ AC_CANONICAL_HOST
+ dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below)
+@@ -76,12 +83,6 @@ AC_ARG_WITH(pkg-name,
+       [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])])
+ AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"])
+-dnl 
+-dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz])
+-dnl
+-
+-AM_INIT_AUTOMAKE([1.10.1 foreign dist-bzip2])
+-
+ AC_DEFINE_UNQUOTED(AGENTS_VERSION, "$PACKAGE_VERSION", Current agents version)
+ CC_IN_CONFIGURE=yes
+-- 
+1.8.1.2
+
diff --git a/meta-stx/recipes-support/cluster-resource-agents/resource-agents/fs.sh-fix-builds-when-srcdir-and-builddir-are-sepera.patch b/meta-stx/recipes-support/cluster-resource-agents/resource-agents/fs.sh-fix-builds-when-srcdir-and-builddir-are-sepera.patch
new file mode 100644 (file)
index 0000000..3247927
--- /dev/null
@@ -0,0 +1,39 @@
+From 5b7a84dbb9d06112c4b2804223163d91ba8ab786 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Thu, 1 Jun 2017 10:34:38 +0800
+Subject: [PATCH] fs.sh: fix builds when srcdir and builddir are seperated
+
+It fails to find fs.sh.in when srddir and builddir are not the same:
+
+make[5]: Entering directory '/path/to/builddir/rgmanager/src/resources'
+cat fs.sh.in | sed \
+    -e 's#@''LOGDIR@#/var/log/cluster#g' \
+    > fs.sh.out
+    cat: fs.sh.in: No such file or directorychmod +x fs.sh.out
+    mv fs.sh.out fs.sh
+
+Add abs_srcdir to fix this.
+
+Upstream-Status: Submitted [https://github.com/ClusterLabs/resource-agents/pull/986]
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ rgmanager/src/resources/Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/rgmanager/src/resources/Makefile.am b/rgmanager/src/resources/Makefile.am
+index de88c69..30b3be9 100644
+--- a/rgmanager/src/resources/Makefile.am
++++ b/rgmanager/src/resources/Makefile.am
+@@ -68,7 +68,7 @@ rngdir                       = ${CLUSTERDATA}/relaxng
+ rng_DATA              = $(DTD) $(XSL) $(RESRNG)
+ $(TARGET):
+-      cat $@.in | sed \
++      cat $(abs_srcdir)/$@.in | sed \
+               -e 's#@''LOGDIR@#${LOGDIR}#g' \
+       > $@.out
+       chmod +x $@.out
+-- 
+2.11.0
+
diff --git a/meta-stx/recipes-support/cluster-resource-agents/resource-agents_4.0.1.bb b/meta-stx/recipes-support/cluster-resource-agents/resource-agents_4.0.1.bb
new file mode 100644 (file)
index 0000000..abeb819
--- /dev/null
@@ -0,0 +1,119 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "OCF resource agents for use by compatible cluster managers"
+DESCRIPTION = "A set of scripts to interface with several services \
+to operate in a High Availability environment for both Pacemaker and \
+rgmanager service managers."
+HOMEPAGE = "http://sources.redhat.com/cluster/wiki/"
+
+LICENSE = "GPLv2+ & LGPLv2+ & GPLv3"
+LICENSE_${PN} = "GPLv2+ & LGPLv2+"
+LICENSE_${PN}-dev = "GPLv2+ & LGPLv2+"
+LICENSE_${PN}-staticdev = "GPLv2+ & LGPLv2+"
+LICENSE_${PN}-dbg = "GPLv2+ & LGPLv2+"
+LICENSE_${PN}-doc = "GPLv2+ & LGPLv2+"
+LICENSE_${PN}-extra = "GPLv3"
+LICENSE_${PN}-extra-dbg = "GPLv3"
+LICENSE_ldirectord = "GPLv2+"
+
+SRC_URI = "https://codeload.github.com/ClusterLabs/resource-agents/tar.gz/v${PV};downloadfilename=${BPN}-${PV}.tar.gz \
+           file://01-disable-doc-build.patch \
+           file://02-set-OCF_ROOT_DIR-to-libdir-ocf.patch \
+           file://03-fix-header-defs-lookup.patch \
+           file://fix-install-sh-not-found.patch \
+           file://fs.sh-fix-builds-when-srcdir-and-builddir-are-sepera.patch \
+          "
+
+SRC_URI[md5sum] = "8530431861e659d4ce2f04afcc4efc03"
+SRC_URI[sha256sum] = "863f83c724bad3a8bcff12b9c8712406c43e010041868826cad7b78fd8cfb9fb"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe \
+                    file://COPYING.LGPL;md5=4fbd65380cdd255951079008b364516c \
+                    file://COPYING.GPLv3;md5=d32239bcb673463ab874e80d47fae504"
+
+DEPENDS = "cluster-glue"
+
+# There are many tools and scripts that need bash and perl.
+# lvm.sh requires: lvm2
+# ip.sh requires: ethtool iproute2 iputils-arping
+# fs.sh requires: e2fsprogs-e2fsck util-linux quota
+# netfs.sh requires: procps util-linux nfs-utils
+RDEPENDS_${PN} += "bash perl lvm2 \
+    ethtool iproute2 iputils-arping \
+    e2fsprogs-e2fsck util-linux quota \
+    procps nfs-utils \
+"
+
+inherit autotools systemd pkgconfig
+
+EXTRA_OECONF += "--disable-fatal-warnings \
+                 --with-rsctmpdir=/var/run/heartbeat/rsctmp"
+
+do_install_append() {
+    rm -rf "${D}${localstatedir}/run"
+    rmdir --ignore-fail-on-non-empty "${D}${localstatedir}"
+}
+
+# tickle_tcp is published under GPLv3, we just split it into ${PN}-extra,
+# and it's required by portblock, so move portblock into ${PN}-extra together.
+PACKAGES_prepend  = "${PN}-extra ${PN}-extra-dbg ldirectord "
+FILES_${PN}-extra = "${libdir}/resource-agents/heartbeat/tickle_tcp \
+                     ${libdir}/ocf/resource.d/heartbeat/portblock \
+                     ${datadir}/resource-agents/ocft/configs/portblock \
+                    "
+FILES_${PN}-extra-dbg += "${libdir}/resource-agents/heartbeat/.debug/tickle_tcp"
+
+FILES_ldirectord = " \
+        ${sbindir}/ldirectord \
+        ${sysconfdir}/ha.d/resource.d/ldirectord \
+        ${sysconfdir}/init.d/ldirectord \
+        ${sysconfdir}/logrotate.d/ldirectord \
+        ${libdir}/ocf/resource.d/heartbeat/ldirectord \
+        "
+FILES_ldirectord-doc = "${mandir}/man8/ldirectord.8*"
+
+RDEPENDS_ldirectord += " \
+        ipvsadm \
+        libdbi-perl \
+        libdigest-hmac-perl \
+        libmailtools-perl \
+        libnet-dns-perl \
+        libsocket6-perl \
+        libwww-perl \
+        perl \
+        perl-module-getopt-long \
+        perl-module-net-ftp \
+        perl-module-net-smtp \
+        perl-module-pod-usage \
+        perl-module-posix \
+        perl-module-socket \
+        perl-module-strict \
+        perl-module-sys-hostname \
+        perl-module-sys-syslog \
+        perl-module-vars \
+        "
+
+SYSTEMD_PACKAGES = "ldirectord"
+SYSTEMD_SERVICE_ldirectord += "ldirectord.service"
+
+FILES_${PN} += "${datadir}/cluster/* \
+                ${libdir}/ocf/resource.d/heartbeat/ \
+                ${libdir}/ocf/lib/heartbeat/* \
+                ${libdir}/ocf/resource.d/redhat \
+                "
+
+FILES_${PN}-dbg += "${libdir}/ocf/resource.d/heartbeat/.debug \
+                    ${libdir}/resource-agents/heartbeat/.debug "
diff --git a/meta-stx/recipes-support/cluster-resource-agents/resource-agents_4.0.1.bbappend b/meta-stx/recipes-support/cluster-resource-agents/resource-agents_4.0.1.bbappend
new file mode 100644 (file)
index 0000000..a91b686
--- /dev/null
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit update-alternatives
+ALTERNATIVE_PRIORITY = "70"
+ALTERNATIVE_${PN} = "drbd.sh"
+ALTERNATIVE_LINK_NAME[drbd.sh] = "${datadir}/cluster/drbd.sh"
+
+do_install_append() {
+       mv ${D}${datadir}/cluster/drbd.sh ${D}${datadir}/cluster/drbd.sh.${PN}
+}
diff --git a/meta-stx/recipes-support/deltarpm/deltarpm_git.bb b/meta-stx/recipes-support/deltarpm/deltarpm_git.bb
new file mode 100644 (file)
index 0000000..e3ff1a2
--- /dev/null
@@ -0,0 +1,50 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Makedeltarpm creates a deltarpm from two rpms. The deltarpm can \
+               later be used to recreate the new rpm from either filesystem data \
+               or the old rpm. Use the -v option to make makedeltarpm more verbose \
+               about its work (use it twice to make it even more verbose).\
+               "
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE.BSD;md5=574af071cf0d60a71cb781844bbe2d76"
+
+SRCREV = "c5e0ca7482e2cfea5e4d902ffe488e0a71ed3e67"
+# SRCREV = "8660d976f5d2b73adf1088d67341be9c3646f2f2"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/rpm-software-management/deltarpm.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+               file://0001-Makefile-patch-fix-build-errors.patch \
+               "
+
+DEPENDS += " python perl bzip2 rpm zlib python xz zstd"
+
+do_compile_append () {
+       cd ${S}
+       oe_runmake -e DESTDIR=${D} bindir=${bindir} mandir=${mandir} python
+
+}
+
+do_install () {
+       cd ${S}
+       oe_runmake -e DESTDIR=${D} bindir=${bindir} mandir=${mandir} install
+}
+
+FILES_${PN}_append += " \
+       /usr/lib/python2.7/site-packages/deltarpm.py \
+       /usr/lib/python2.7/site-packages/_deltarpmmodule.so \
+       "
diff --git a/meta-stx/recipes-support/deltarpm/files/0001-Makefile-patch-fix-build-errors.patch b/meta-stx/recipes-support/deltarpm/files/0001-Makefile-patch-fix-build-errors.patch
new file mode 100644 (file)
index 0000000..44cc956
--- /dev/null
@@ -0,0 +1,97 @@
+From 0904c124156935c17f29a74e9ba7b49c9dddb56b Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Mon, 26 Aug 2019 13:05:32 -0700
+Subject: [PATCH] Makefile patch fix build errors
+
+---
+ Makefile                        | 35 ++++++++++-----------------------
+ zlib-1.2.2.f-rsyncable/Makefile |  6 +++---
+ 2 files changed, 13 insertions(+), 28 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index e6d4609..bce1b0d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -8,10 +8,10 @@ zlibbundled=$(zlibdir)/libz.a
+ zlibldflags=$(zlibbundled)
+ zlibcppflags=-I$(zlibdir)
+ pylibprefix=/
+-CFLAGS = -fPIC -O2 -Wall -g -DWITH_ZSTD=1
+-CPPFLAGS = -fPIC -DDELTARPM_64BIT -DBSDIFF_NO_SUF -DRPMDUMPHEADER=\"$(rpmdumpheader)\" $(zlibcppflags)
++CFLAGS += -fPIC -O2 -Wall -g -DWITH_ZSTD=1
++CPPFLAGS += -fPIC -DDELTARPM_64BIT -DBSDIFF_NO_SUF -DRPMDUMPHEADER=\"$(rpmdumpheader)\" $(zlibcppflags)
+ LDLIBS = -lbz2 $(zlibldflags) -llzma -lzstd
+-LDFLAGS =
++LDFLAGS ?=
+ PYTHONS = python python3
+ all: makedeltarpm applydeltarpm rpmdumpheader makedeltaiso applydeltaiso combinedeltarpm fragiso
+@@ -38,17 +38,10 @@ applydeltaiso: applydeltaiso.o util.o md5.o cfile.o $(zlibbundled)
+ fragiso: fragiso.o util.o md5.o rpmhead.o cfile.o $(zlibbundled)
+ _deltarpmmodule.so: readdeltarpm.o rpmhead.o util.o md5.o cfile.o $(zlibbundled)
+-      for PY in $(PYTHONS) ; do \
+-              if [ -x /usr/bin/$$PY-config ] && [ -x /usr/bin/$$PY ]; then \
+-                      PYVER=`$$PY -c 'from distutils import sysconfig ; print(sysconfig.get_python_version())'`; \
+-                      PYCFLAGS=`$$PY-config --cflags`; \
+-                      if [ ! -f "python$$PYVER/$@" ]; then \
+-                              mkdir -p python$$PYVER ;\
+-                              $(CC) $(CFLAGS) $$PYCFLAGS $(zlibcppflags) -fPIC -c -o python$$PYVER/deltarpmmodule.o deltarpmmodule.c ;\
+-                              $(CC) $(LDFLAGS) -o python$$PYVER/$@ python$$PYVER/deltarpmmodule.o $^ -shared -Wl,-soname,_deltarpmmodule.so $(LDLIBS); \
+-                      fi; \
+-              fi; \
+-      done
++      mkdir -p python2.7
++      $(CC) $(CFLAGS) -I=/usr/include/python2.7 $(zlibcppflags) -fPIC -c -o python2.7/_deltarpmmodule.o deltarpmmodule.c
++      $(CC) $(LDFLAGS) -o python2.7/_deltarpmmodule.so python2.7/_deltarpmmodule.o $^ \
++              -shared -Wl,-soname,_deltarpmmodule.so $(LDLIBS)
+ $(zlibbundled):
+       cd $(zlibdir) ; make CFLAGS="-fPIC $(CFLAGS)" libz.a
+@@ -76,17 +69,9 @@ install:
+       install -m 644 applydeltaiso.8 $(DESTDIR)$(mandir)/man8
+       install -m 644 fragiso.8 $(DESTDIR)$(mandir)/man8
+       install -m 644 drpmsync.8 $(DESTDIR)$(mandir)/man8
+-      for PY in $(PYTHONS) ; do \
+-              if [ -x /usr/bin/$$PY ]; then \
+-                        PYLIB=`$$PY -c 'from distutils import sysconfig ; print(sysconfig.get_python_lib(1))'` ; \
+-                      PYVER=`$$PY -c 'from distutils import sysconfig ; print(sysconfig.get_python_version())'` ; \
+-                      if [ -e python$$PYVER/_deltarpmmodule.so ]; then \
+-                              mkdir -p $(DESTDIR)$(pylibprefix)$$PYLIB ; \
+-                              install -m 755 python$$PYVER/_deltarpmmodule.so $(DESTDIR)$(pylibprefix)$$PYLIB ; \
+-                              install -m 644 deltarpm.py $(DESTDIR)$(pylibprefix)$$PYLIB ; \
+-                      fi; \
+-              fi; \
+-      done
++      mkdir -p $(DESTDIR)/$(libdir)/python2.7/site-packages/
++      install -m 755 python2.7/_deltarpmmodule.so $(DESTDIR)/$(libdir)/python2.7/site-packages/
++      install -m 644 deltarpm.py $(DESTDIR)/$(libdir)/python2.7/site-packages/
+ .PHONY: clean install perl
+diff --git a/zlib-1.2.2.f-rsyncable/Makefile b/zlib-1.2.2.f-rsyncable/Makefile
+index 29f53de..23b5daf 100644
+--- a/zlib-1.2.2.f-rsyncable/Makefile
++++ b/zlib-1.2.2.f-rsyncable/Makefile
+@@ -16,15 +16,15 @@
+ # To install in $HOME instead of /usr/local, use:
+ #    make install prefix=$HOME
+-CC=cc
++CC?=cc
+-CFLAGS=-O
++CFLAGS?=-O
+ #CFLAGS=-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7
+ #CFLAGS=-g -DDEBUG
+ #CFLAGS=-O3 -Wall -Wwrite-strings -Wpointer-arith -Wconversion \
+ #           -Wstrict-prototypes -Wmissing-prototypes
+-LDFLAGS=libz.a
++LDFLAGS+=libz.a
+ LDSHARED=$(CC)
+ CPP=$(CC) -E
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0001-skip_wait_con_int_on_simplex.patch b/meta-stx/recipes-support/drbd/drbd-utils/0001-skip_wait_con_int_on_simplex.patch
new file mode 100644 (file)
index 0000000..bb43fa4
--- /dev/null
@@ -0,0 +1,16 @@
+diff --git a/scripts/drbd b/scripts/drbd
+index f45f258..7640097 100755
+--- a/scripts/drbd
++++ b/scripts/drbd
+@@ -160,7 +160,10 @@ case "$1" in
+       done
+       [ -d /var/lock/subsys ] && touch /var/lock/subsys/drbd  # for RedHat
+-      $DRBDADM wait-con-int # User interruptible version of wait-connect all
++
++      if [ ! -e /etc/platform/simplex ] ; then # Skip if simplex
++          $DRBDADM wait-con-int # User interruptible version of wait-connect all
++      fi
+       $DRBDADM sh-b-pri all # Become primary if configured
+       log_end_msg 0
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0002-drbd-conditional-crm-dependency.patch b/meta-stx/recipes-support/drbd/drbd-utils/0002-drbd-conditional-crm-dependency.patch
new file mode 100644 (file)
index 0000000..f423dfc
--- /dev/null
@@ -0,0 +1,26 @@
+diff --git a/scripts/drbd.ocf b/scripts/drbd.ocf
+index 2105209..0aa9702 100644
+--- a/scripts/drbd.ocf
++++ b/scripts/drbd.ocf
+@@ -217,13 +217,17 @@ do_drbdadm() {
+ }
+ set_master_score() {
+-      # Use quiet mode (-Q) to quench logging. Actual score updates
+-      # will get logged by attrd anyway
+-      do_cmd ${HA_SBIN_DIR}/crm_master -Q -l reboot -v $1
++      if [ -x ${HA_SBIN_DIR}/crm_master ]; then
++              # Use quiet mode (-Q) to quench logging. Actual score updates
++              # will get logged by attrd anyway
++              do_cmd ${HA_SBIN_DIR}/crm_master -Q -l reboot -v $1
++      fi
+ }
+ remove_master_score() {
+-      do_cmd ${HA_SBIN_DIR}/crm_master -l reboot -D
++      if [ -x ${HA_SBIN_DIR}/crm_master ]; then
++              do_cmd ${HA_SBIN_DIR}/crm_master -l reboot -D
++      fi
+ }
+ _sh_status_process() {
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0003-drbd_report_condition.patch b/meta-stx/recipes-support/drbd/drbd-utils/0003-drbd_report_condition.patch
new file mode 100644 (file)
index 0000000..7103dc9
--- /dev/null
@@ -0,0 +1,387 @@
+---
+ scripts/drbd     |    1 
+ scripts/drbd.ocf |  259 ++++++++++++++++++++++---------------------------------
+ 2 files changed, 109 insertions(+), 151 deletions(-)
+
+--- a/scripts/drbd.ocf
++++ b/scripts/drbd.ocf
+@@ -5,6 +5,8 @@
+ #
+ # Copyright (c) 2009 LINBIT HA-Solutions GmbH,
+ # Copyright (c) 2009 Florian Haas, Lars Ellenberg
++# Copyright (c) 2014 Wind River Systems, Inc. All rights reserved.
++#
+ # Based on the Heartbeat drbd OCF Resource Agent by Lars Marowsky-Bree
+ # (though it turned out to be an almost complete rewrite)
+ #
+@@ -216,20 +218,6 @@ do_drbdadm() {
+       return $ret
+ }
+-set_master_score() {
+-      if [ -x ${HA_SBIN_DIR}/crm_master ]; then
+-              # Use quiet mode (-Q) to quench logging. Actual score updates
+-              # will get logged by attrd anyway
+-              do_cmd ${HA_SBIN_DIR}/crm_master -Q -l reboot -v $1
+-      fi
+-}
+-
+-remove_master_score() {
+-      if [ -x ${HA_SBIN_DIR}/crm_master ]; then
+-              do_cmd ${HA_SBIN_DIR}/crm_master -l reboot -D
+-      fi
+-}
+-
+ _sh_status_process() {
+       # _volume not present should not happen,
+       # but may help make this agent work even if it talks to drbd 8.3.
+@@ -242,6 +230,7 @@ _sh_status_process() {
+       DRBD_DSTATE_LOCAL[$_volume]=${_disk:-Unconfigured}
+       DRBD_DSTATE_REMOTE[$_volume]=${_pdsk:-DUnknown}
+ }
++
+ drbd_set_status_variables() {
+       # drbdsetup sh-status prints these values to stdout,
+       # and then prints _sh_status_process.
+@@ -322,119 +311,9 @@ maybe_outdate_self()
+       ocf_log notice "outdating $DRBD_RESOURCE: according to OCF_RESKEY_CRM_meta_notify_master_uname, '$host' is still master"
+       do_drbdadm outdate $DRBD_RESOURCE
+-      # on some pacemaker versions, -INFINITY may cause resource instance stop/start.
+-      # But in this case that is ok, it may even clear the replication link
+-      # problem.
+-      set_master_score -INFINITY
+-
+       return 0
+ }
+-drbd_update_master_score() {
+-      # NOTE
+-      # there may be constraint scores from rules on role=Master,
+-      # that in some ways can add to the node attribute based master score we
+-      # specify below. If you think you want to add personal preferences,
+-      # in case the scores given by this RA do not suffice, this is the
+-      # value space you can work with:
+-      # -INFINITY: Do not promote. Really. Won't work anyways.
+-              # Too bad, at least with current (Oktober 2009) Pacemaker,
+-              # negative master scores cause instance stop; restart cycle :(
+-      # missing, zero: Do not promote.
+-      #        I think my data is not good enough.
+-      #        Though, of course, you may try, and it might even work.
+-      #     5: please, do not promote, unless this is your only option.
+-      #    10: promotion is probably a bad idea, our local data is no good,
+-      #        you'd probably run into severe performance problems, and risk
+-      #        application crashes or blocking IO in case you lose the
+-      #        replication connection.
+-      #  1000: Ok to be promoted, we have good data locally (though we don't
+-      #        know about the peer, so possibly it has even better data?).
+-      #        You sould use the crm-fence-peer.sh handler or similar
+-      #        mechanism to avoid data divergence.
+-      # 10000: Please promote me/keep me Primary.
+-      #        I'm confident that my data is as good as it gets.
+-      #
+-      # For multi volume, we need to compare who is "better" a bit more sophisticated.
+-      # The ${XXX[*]//UpToDate}, without being in double quotes, results in a single space,
+-      # if all are UpToDate.
+-      : == DEBUG == ${DRBD_ROLE_LOCAL[*]}/${DRBD_DSTATE_LOCAL[*]//UpToDate/ }/${DRBD_DSTATE_REMOTE[*]//UpToDate/ }/ ==
+-      case ${DRBD_ROLE_LOCAL[*]}/${DRBD_DSTATE_LOCAL[*]//UpToDate/ }/${DRBD_DSTATE_REMOTE[*]//UpToDate/ }/ in
+-      *Primary*/\ /*/)
+-              # I am Primary, all local disks are UpToDate
+-              set_master_score 10000
+-              ;;
+-      */\ /*DUnknown*/)
+-              # all local disks are UpToDate,
+-              # but I'm not Primary,
+-              # and I'm not sure about the peer's disk state(s).
+-              # We may need to outdate ourselves?
+-              # But if we outdate in a MONITOR, and are disconnected
+-              # secondary because of a hard primary crash, before CRM noticed
+-              # that there is no more master, we'd make us utterly useless!
+-              # Trust that the primary will also notice the disconnect,
+-              # and will place an appropriate fencing constraint via
+-              # its fence-peer handler callback.
+-              set_master_score  1000
+-              ;;
+-      */\ /*/)
+-              # We know something about our peer, which means that either the
+-              # replication link is established, or it was not even
+-              # consistent last time we talked to each other.
+-              # Also all our local disks are UpToDate, which means even if we are
+-              # currently synchronizing, we do so as SyncSource.
+-              set_master_score 10000
+-              ;;
+-
+-      */*/\ /)
+-              # At least one of our local disks is not up to date.
+-              # But our peer is ALL OK.
+-              # We can expect to have access to useful
+-              # data, but must expect degraded performance.
+-              set_master_score 10
+-              ;;
+-      */*Attaching*/*/|\
+-      */*Negotiating*/*/)
+-              # some transitional state.
+-              # just don't do anything
+-              : ;;
+-
+-      Unconfigured*|\
+-      */*Diskless*/*/|\
+-      */*Failed*/*/|\
+-      */*Inconsistent*/*/|\
+-      */*Outdated*/*/)
+-              # ALWAYS put the cluster in MAINTENANCE MODE
+-              # if you add a volume to a live replication group,
+-              # because the new volume will typically come up as Inconsistent
+-              # the first time, which would cause a monitor to revoke the
+-              # master score!
+-              #
+-              # At least some of our local disks are not really useable.
+-              # Our peer is not all good either (or some previous case block
+-              # would have matched).  We have no access to useful data.
+-              # DRBD would refuse to be promoted, anyways.
+-              #
+-              # set_master_score -INFINITY
+-              # Too bad, at least with current (Oktober 2009) Pacemaker,
+-              # negative master scores cause instance stop; restart cycle :(
+-              # Hope that this will suffice.
+-              remove_master_score
+-              ;;
+-      *)
+-              # All local disks seem to be Consistent.
+-              # They _may_ be up to date, or not.
+-              # We hope that fencing mechanisms have put constraints in
+-              # place, so we won't be promoted with stale data.
+-              # But in case this was a cluster crash,
+-              # at least allow _someone_ to be promoted.
+-              set_master_score 5
+-              ;;
+-      esac
+-
+-      return $OCF_SUCCESS
+-}
+-
+ is_drbd_enabled() {
+       test -f /proc/drbd
+ }
+@@ -488,7 +367,103 @@ drbd_status() {
+       return $rc
+ }
+-# I'm sorry, but there is no $OCF_DEGRADED_MASTER or similar yet.
++drbd_condition() {
++    local status
++    local rc
++
++    status=$1
++    rc=$status
++
++    if [ $status -ne $OCF_SUCCESS -a $status -ne $OCF_RUNNING_MASTER ]
++    then
++        return $rc
++    fi
++
++    drbd_set_status_variables
++
++    ocf_log info "${OCF_RESKEY_drbd_resource} ${DRBD_ROLE_LOCAL}/${DRBD_DSTATE_LOCAL}/${DRBD_DSTATE_REMOTE} ${DRBD_CSTATE}"
++
++    case "${DRBD_DSTATE_LOCAL}" in
++        UpToDate)
++            case "${DRBD_CSTATE}" in
++                StandAlone)
++                    rc=$OCF_DATA_STANDALONE
++                    ocf_log info "${OCF_RESKEY_drbd_resource} standalone, attempting to reconnect."
++                    do_drbdadm connect ${OCF_RESKEY_drbd_resource}
++                    ;;
++                StartingSyncT | WFBitMapT | WFSyncUUID | SyncTarget | \
++                PausedSyncT)
++                    rc=$OCF_DATA_SYNC
++                    #drbd-overview | grep -A 1 drbd-cgcs | grep sync\'ed | cut -f2,3 -d' '
++                    ocf_log info "${OCF_RESKEY_drbd_resource} syncing"
++                    ;;
++                *)
++                    ;;
++             esac
++            ;;
++        Consistent)
++            case "${DRBD_CSTATE}" in
++                StandAlone)
++                    rc=$OCF_DATA_STANDALONE
++                    ocf_log info "${OCF_RESKEY_drbd_resource} standalone, attempting to reconnect"
++                    do_drbdadm connect ${OCF_RESKEY_drbd_resource}
++                    ;;
++                *)
++                    rc=$OCF_DATA_CONSISTENT
++                    ocf_log info "${OCF_RESKEY_drbd_resource} consistent"
++                    ;;
++            esac
++            ;;
++        Outdated)
++            rc=$OCF_DATA_OUTDATED
++            ocf_log info "${OCF_RESKEY_drbd_resource} outdated"
++            ;;
++        *)
++            case "${DRBD_CSTATE}" in
++                StandAlone)
++                    rc=$OCF_DATA_STANDALONE
++                    ocf_log info "${OCF_RESKEY_drbd_resource} standalone"
++                    ;;
++                StartingSyncT | WFBitMapT | WFSyncUUID | SyncTarget | \
++                PausedSyncT)
++                    rc=$OCF_DATA_SYNC
++                    ocf_log info "${OCF_RESKEY_drbd_resource} sync"
++                    ;;
++                *)
++                    rc=$OCF_DATA_INCONSISTENT
++                    ocf_log info "${OCF_RESKEY_drbd_resource} inconsistent"
++                    ;;
++            esac
++            ;;
++    esac
++
++    if [ $status -eq $OCF_RUNNING_MASTER ]
++    then
++        if [ $rc -eq $OCF_DATA_INCONSISTENT ]
++        then
++            rc=$OCF_RUNNING_MASTER_DATA_INCONSISTENT
++
++        elif [ $rc -eq $OCF_DATA_OUTDATED ]
++        then
++            rc=$OCF_RUNNING_MASTER_DATA_OUTDATED
++
++        elif [ $rc -eq $OCF_DATA_CONSISTENT ]
++        then
++            rc=$OCF_RUNNING_MASTER_DATA_CONSISTENT
++
++        elif [ $rc -eq $OCF_DATA_SYNC ]
++        then
++            rc=$OCF_RUNNING_MASTER_DATA_SYNC
++
++        elif [ $rc -eq $OCF_DATA_STANDALONE ]
++        then
++            rc=$OCF_RUNNING_MASTER_DATA_STANDALONE
++        fi
++    fi
++
++    return $rc
++}
++
+ drbd_monitor() {
+       local status
+@@ -501,7 +476,8 @@ drbd_monitor() {
+       drbd_status
+       status=$?
+-      drbd_update_master_score
++      drbd_condition $status
++      status=$?
+       return $status
+ }
+@@ -578,7 +554,8 @@ drbd_start() {
+                       # "running" already, anyways, right?
+                       figure_out_drbd_peer_uname
+                       do_drbdadm $DRBD_TO_PEER adjust $DRBD_RESOURCE
+-                      rc=$OCF_SUCCESS
++                      drbd_condition $OCF_SUCCESS
++            rc=$?
+                       break
+                       ;;
+               $OCF_NOT_RUNNING)
+@@ -606,9 +583,6 @@ drbd_start() {
+               $first_try || sleep 1
+               first_try=false
+       done
+-      # in case someone does not configure monitor,
+-      # we must at least call it once after start.
+-      drbd_update_master_score
+       return $rc
+ }
+@@ -642,7 +616,8 @@ drbd_promote() {
+                       break
+                       ;;
+               $OCF_RUNNING_MASTER)
+-                      rc=$OCF_SUCCESS
++                      drbd_condition $OCF_SUCCESS
++            rc=$?
+                       break
+               esac
+               $first_try || sleep 1
+@@ -666,7 +641,8 @@ drbd_demote() {
+               status=$?
+               case "$status" in
+               $OCF_SUCCESS)
+-                      rc=$OCF_SUCCESS
++                      drbd_condition $OCF_SUCCESS
++            rc=$?
+                       break
+                       ;;
+               $OCF_NOT_RUNNING)
+@@ -718,14 +694,9 @@ drbd_stop() {
+       # outdate myself in drbd on-disk meta data.
+       maybe_outdate_self
+-      # do not let old master scores laying around.
+-      # they may confuse crm if this node was set to standby.
+-      remove_master_score
+-
+       return $rc
+ }
+-
+ drbd_notify() {
+       local n_type=$OCF_RESKEY_CRM_meta_notify_type
+       local n_op=$OCF_RESKEY_CRM_meta_notify_operation
+@@ -760,7 +731,6 @@ drbd_notify() {
+               # After something has been done is a good time to
+               # recheck our status:
+               drbd_set_status_variables
+-              drbd_update_master_score
+               : == DEBUG == ${DRBD_DSTATE_REMOTE[*]} ==
+               case ${DRBD_DSTATE_REMOTE[*]} in
+@@ -793,17 +763,6 @@ ls_stat_is_block_maj_147() {
+       [[ $1 = b* ]] && [[ $5 == 147,* ]]
+ }
+-check_crm_feature_set()
+-{
+-      set -- ${OCF_RESKEY_crm_feature_set//[!0-9]/ }
+-      local a=${1:-0} b=${2:-0} c=${3:-0}
+-      
+-      (( a > 3 )) ||
+-      (( a == 3 && b > 0 )) ||
+-      (( a == 3 && b == 0 && c > 0 )) ||
+-      ocf_log warn "You may be disappointed: This RA is intended for pacemaker 1.0 or better!"
+-}
+-
+ drbd_validate_all () {
+       DRBDADM="drbdadm"
+       DRBDSETUP="drbdsetup"
+@@ -821,7 +780,6 @@ drbd_validate_all () {
+       if (( $DRBDADM_VERSION_CODE >= 0x080400 )); then
+               DRBD_HAS_MULTI_VOLUME=true
+       fi
+-      check_crm_feature_set
+       # Check clone and M/S options.
+       meta_expect clone-max -le 2
+@@ -890,7 +848,6 @@ drbd_validate_all () {
+                       # hm. probably misconfigured constraint somewhere.
+                       # sorry. don't retry anywhere.
+                       ocf_log err "DRBD resource ${DRBD_RESOURCE} not found in configuration file ${OCF_RESKEY_drbdconf}."
+-                      remove_master_score
+                       return $OCF_ERR_INSTALLED
+               fi
+       fi
+--- a/scripts/drbd
++++ b/scripts/drbd
+@@ -4,6 +4,7 @@
+ # description: Loads and unloads the drbd module
+ #
+ # Copyright 2001-2010 LINBIT
++# Copyright (c) 2014 Wind River Systems, Inc. All rights reserved.
+ #
+ # Philipp Reisner, Lars Ellenberg
+ #
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0004-drbdadm-ipaddr-change.patch b/meta-stx/recipes-support/drbd/drbd-utils/0004-drbdadm-ipaddr-change.patch
new file mode 100644 (file)
index 0000000..a2bb2a2
--- /dev/null
@@ -0,0 +1,132 @@
+Index: git/user/drbdadm_adjust.c
+===================================================================
+--- git.orig/user/drbdadm_adjust.c
++++ git/user/drbdadm_adjust.c
+@@ -157,6 +157,7 @@ static int opts_equal(struct context_def
+ static int addr_equal(struct d_resource* conf, struct d_resource* running)
+ {
+       int equal;
++    char *peer_addr, *peer_af, *peer_port;
+       if (conf->peer == NULL && running->peer == NULL) return 1;
+       if (running->peer == NULL) return 0;
+@@ -165,16 +166,29 @@ static int addr_equal(struct d_resource*
+               !strcmp(conf->me->port,           running->me->port) &&
+               !strcmp(conf->me->address_family, running->me->address_family);
+-      if(conf->me->proxy)
+-              equal = equal &&
+-                      !strcmp(conf->me->proxy->inside_addr, running->peer->address) &&
+-                      !strcmp(conf->me->proxy->inside_port, running->peer->port) &&
+-                      !strcmp(conf->me->proxy->inside_af,   running->peer->address_family);
+-      else
+-              equal = equal && conf->peer &&
+-                      !strcmp(conf->peer->address,        running->peer->address) &&
+-                      !strcmp(conf->peer->port,           running->peer->port) &&
+-                      !strcmp(conf->peer->address_family, running->peer->address_family);
++       if(conf->me->proxy) {
++               peer_addr = conf->me->proxy->inside_addr;
++               peer_port = conf->me->proxy->inside_port;
++               peer_af = conf->me->proxy->inside_af;
++       } else {
++               peer_addr = conf->peer ? conf->peer->address : "";
++               peer_port = conf->peer ? conf->peer->port : "";
++               peer_af = conf->peer ? conf->peer->address_family : "";
++       }
++
++       equal = equal && conf->peer &&
++               !strcmp(peer_addr, running->peer->address) &&
++               !strcmp(peer_port, running->peer->port) &&
++               !strcmp(peer_af, running->peer->address_family);
++
++       if (verbose > 2 && !equal)
++               fprintf(stderr, "Network addresses differ:\n"
++                       "\trunning: %s:%s:%s -- %s:%s:%s\n"
++                       "\t config: %s:%s:%s -- %s:%s:%s\n",
++                       running->me->address_family, running->me->address, running->me->port,
++                       running->peer->address_family, running->peer->address, running->peer->port,
++                       conf->me->address_family, conf->me->address, conf->me->port,
++                       peer_af, peer_addr, peer_port);
+       return equal;
+ }
+@@ -690,8 +704,7 @@ int adm_adjust(struct cfg_ctx *ctx)
+       if (ctx->res->me->proxy && can_do_proxy)
+               do_connect |= proxy_reconf(ctx, running);
+-      if (do_connect && running)
+-              do_disconnect = running->net_options != NULL;
++    do_disconnect = do_connect && running && (running->peer || running->net_options);
+       if (do_res_options)
+               schedule_deferred_cmd(adm_set_default_res_options, ctx, "resource-options", CFG_RESOURCE);
+@@ -716,8 +729,12 @@ int adm_adjust(struct cfg_ctx *ctx)
+       }
+       if (do_connect) {
+-              if (do_disconnect && ctx->res->peer)
+-                      schedule_deferred_cmd(adm_disconnect, ctx, "disconnect", CFG_NET_PREREQ);
++        /* "disconnect" specifying the end-point addresses currently in-use,
++         * before "connect"ing with the addresses currently in-config-file. */
++        if (do_disconnect) {
++                struct cfg_ctx tmp_ctx = { .res = running, .vol = vol, };
++                schedule_deferred_cmd(adm_disconnect, &tmp_ctx, "disconnect", CFG_NET_PREREQ);
++        }
+               schedule_deferred_cmd(adm_connect, ctx, "connect", CFG_NET);
+               do_net_options = 0;
+       }
+Index: git/user/legacy/drbdadm_adjust.c
+===================================================================
+--- git.orig/user/legacy/drbdadm_adjust.c
++++ git/user/legacy/drbdadm_adjust.c
+@@ -133,6 +133,7 @@ static int opts_equal(struct d_option* c
+ static int addr_equal(struct d_resource* conf, struct d_resource* running)
+ {
+       int equal;
++    char *peer_addr, *peer_af, *peer_port;
+       if (conf->peer == NULL && running->peer == NULL) return 1;
+       if (running->peer == NULL) return 0;
+@@ -141,18 +142,31 @@ static int addr_equal(struct d_resource*
+               !strcmp(conf->me->port,           running->me->port) &&
+               !strcmp(conf->me->address_family, running->me->address_family);
+-      if(conf->me->proxy)
+-              equal = equal &&
+-                      !strcmp(conf->me->proxy->inside_addr, running->peer->address) &&
+-                      !strcmp(conf->me->proxy->inside_port, running->peer->port) &&
+-                      !strcmp(conf->me->proxy->inside_af,   running->peer->address_family);
+-      else
+-              equal = equal && conf->peer &&
+-                      !strcmp(conf->peer->address,        running->peer->address) &&
+-                      !strcmp(conf->peer->port,           running->peer->port) &&
+-                      !strcmp(conf->peer->address_family, running->peer->address_family);
++    if(conf->me->proxy) {
++            peer_addr = conf->me->proxy->inside_addr;
++            peer_port = conf->me->proxy->inside_port;
++            peer_af = conf->me->proxy->inside_af;
++    } else {
++            peer_addr = conf->peer ? conf->peer->address : "";
++            peer_port = conf->peer ? conf->peer->port : "";
++            peer_af = conf->peer ? conf->peer->address_family : "";
++    }
++
++    equal = equal && conf->peer &&
++            !strcmp(peer_addr, running->peer->address) &&
++            !strcmp(peer_port, running->peer->port) &&
++            !strcmp(peer_af, running->peer->address_family);
++
++    if (verbose > 2 && !equal)
++            fprintf(stderr, "Network addresses differ:\n"
++                    "\trunning: %s:%s:%s -- %s:%s:%s\n"
++                    "\t config: %s:%s:%s -- %s:%s:%s\n",
++                    running->me->address_family, running->me->address, running->me->port,
++                    running->peer->address_family, running->peer->address, running->peer->port,
++                    conf->me->address_family, conf->me->address, conf->me->port,
++                    peer_af, peer_addr, peer_port);
+-      return equal;
++    return equal;
+ }
+ static int proto_equal(struct d_resource* conf, struct d_resource* running)
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0005-drbd_reconnect_standby_standalone.patch b/meta-stx/recipes-support/drbd/drbd-utils/0005-drbd_reconnect_standby_standalone.patch
new file mode 100644 (file)
index 0000000..30444c7
--- /dev/null
@@ -0,0 +1,34 @@
+Index: git/scripts/drbd.ocf
+===================================================================
+--- git.orig/scripts/drbd.ocf
++++ git/scripts/drbd.ocf
+@@ -418,6 +418,29 @@ drbd_condition() {
+             rc=$OCF_DATA_OUTDATED
+             ocf_log info "${OCF_RESKEY_drbd_resource} outdated"
+             ;;
++        Inconsistent)
++            case "${DRBD_CSTATE}" in
++                StandAlone)
++                    rc=$OCF_DATA_STANDALONE
++                    if [ $status -eq $OCF_SUCCESS ]
++                    then
++                        ocf_log info "${OCF_RESKEY_drbd_resource} standby standalone, attempting to reconnect."
++                        do_drbdadm connect ${OCF_RESKEY_drbd_resource}
++                    else
++                        ocf_log info "${OCF_RESKEY_drbd_resource} standalone"
++                    fi
++                    ;;
++                StartingSyncT | WFBitMapT | WFSyncUUID | SyncTarget | \
++                PausedSyncT)
++                    rc=$OCF_DATA_SYNC
++                    ocf_log info "${OCF_RESKEY_drbd_resource} sync"
++                    ;;
++                *)
++                    rc=$OCF_DATA_INCONSISTENT
++                    ocf_log info "${OCF_RESKEY_drbd_resource} inconsistent"
++                    ;;
++            esac
++            ;;
+         *)
+             case "${DRBD_CSTATE}" in
+                 StandAlone)
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0006-avoid-kernel-userspace-version-check.patch b/meta-stx/recipes-support/drbd/drbd-utils/0006-avoid-kernel-userspace-version-check.patch
new file mode 100644 (file)
index 0000000..8cd3cc7
--- /dev/null
@@ -0,0 +1,55 @@
+From ea19e3020367cfaf6da20dd690433ee72a24120c Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Mon, 2 May 2016 15:17:54 -0400
+Subject: [PATCH 1/1] Avoid kernel/userspace version check
+
+---
+ user/drbdadm_usage_cnt.c | 32 +-------------------------------
+ 1 file changed, 1 insertion(+), 31 deletions(-)
+
+diff --git a/user/drbdadm_usage_cnt.c b/user/drbdadm_usage_cnt.c
+index ff6d5c8..c6cb4ad 100644
+--- a/user/drbdadm_usage_cnt.c
++++ b/user/drbdadm_usage_cnt.c
+@@ -244,37 +244,7 @@ static int vcs_ver_cmp(struct vcs_rel *rev1, struct vcs_rel *rev2)
+ void warn_on_version_mismatch(void)
+ {
+-      char *msg;
+-      int cmp;
+-
+-      /* get the kernel module version from /proc/drbd */
+-      vcs_get_current();
+-
+-      /* get the userland version from REL_VERSION */
+-      vcs_get_userland();
+-
+-      cmp = vcs_ver_cmp(&userland_version, &current_vcs_rel);
+-      /* no message if equal */
+-      if (cmp == 0)
+-              return;
+-      if (cmp > 0xffff || cmp < -0xffff)       /* major version differs! */
+-              msg = "mixing different major numbers will not work!";
+-      else if (cmp < 0)               /* userland is older. always warn. */
+-              msg = "you should upgrade your drbd tools!";
+-      else if (cmp & 0xff00)          /* userland is newer minor version */
+-              msg = "please don't mix different DRBD series.";
+-      else            /* userland is newer, but only differ in sublevel. */
+-              msg = "preferably kernel and userland versions should match.";
+-
+-      fprintf(stderr, "DRBD module version: %u.%u.%u\n"
+-                      "   userland version: %u.%u.%u\n%s\n",
+-                      current_vcs_rel.version.major,
+-                      current_vcs_rel.version.minor,
+-                      current_vcs_rel.version.sublvl,
+-                      userland_version.version.major,
+-                      userland_version.version.minor,
+-                      userland_version.version.sublvl,
+-                      msg);
++      return;
+ }
+ void add_lib_drbd_to_path(void)
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0007-Update-OCF-to-attempt-connect-in-certain-states.patch b/meta-stx/recipes-support/drbd/drbd-utils/0007-Update-OCF-to-attempt-connect-in-certain-states.patch
new file mode 100644 (file)
index 0000000..9ff4001
--- /dev/null
@@ -0,0 +1,40 @@
+From 5677e262d5b3f5ecc114f1aace4ffd77a7772282 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Tue, 21 Feb 2017 12:37:02 -0500
+Subject: [PATCH] Update OCF to attempt connect in certain states
+
+---
+ scripts/drbd.ocf | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/scripts/drbd.ocf b/scripts/drbd.ocf
+index 0e26ea9..84332b0 100644
+--- a/scripts/drbd.ocf
++++ b/scripts/drbd.ocf
+@@ -415,8 +415,21 @@ drbd_condition() {
+             esac
+             ;;
+         Outdated)
+-            rc=$OCF_DATA_OUTDATED
+-            ocf_log info "${OCF_RESKEY_drbd_resource} outdated"
++            case "${DRBD_CSTATE}" in
++                StandAlone)
++                    rc=$OCF_DATA_STANDALONE
++                    if [ $status -eq $OCF_SUCCESS ]
++                    then
++                        ocf_log info "${OCF_RESKEY_drbd_resource} outdated standalone, attempting to reconnect."
++                        do_drbdadm -- --discard-my-data connect ${OCF_RESKEY_drbd_resource}
++                    else
++                        ocf_log info "${OCF_RESKEY_drbd_resource} outdated"
++                    fi
++                    ;;
++                *)
++                    rc=$OCF_DATA_OUTDATED
++                    ocf_log info "${OCF_RESKEY_drbd_resource} outdated"
++            esac
+             ;;
+         Inconsistent)
+             case "${DRBD_CSTATE}" in
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0008-Increase-short-cmd-timeout-to-15-secs.patch b/meta-stx/recipes-support/drbd/drbd-utils/0008-Increase-short-cmd-timeout-to-15-secs.patch
new file mode 100644 (file)
index 0000000..097e975
--- /dev/null
@@ -0,0 +1,25 @@
+From 100b44d99b0bcbac92abd2122becbfd88d155e09 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Wed, 22 Nov 2017 20:45:28 -0500
+Subject: [PATCH] Increase short cmd timeout to 15 secs
+
+---
+ user/drbdadm_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/user/drbdadm_main.c b/user/drbdadm_main.c
+index b89e91a..19c5a44 100644
+--- a/user/drbdadm_main.c
++++ b/user/drbdadm_main.c
+@@ -1467,7 +1467,7 @@ void m__system(char **argv, int flags, const char *res_name, pid_t *kid, int *fd
+               alarm_raised = 0;
+               switch (flags & SLEEPS_MASK) {
+               case SLEEPS_SHORT:
+-                      timeout = 5;
++                      timeout = 15;
+                       break;
+               case SLEEPS_LONG:
+                       timeout = COMM_TIMEOUT + 1;
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch b/meta-stx/recipes-support/drbd/drbd-utils/0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch
new file mode 100644 (file)
index 0000000..ac8f414
--- /dev/null
@@ -0,0 +1,45 @@
+From 017157d21a56410811384a43d0b0cbba6444baeb Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Wed, 6 Feb 2019 01:19:59 -0500
+Subject: [PATCH] Check for mounted device before demoting Primary DRBD
+ resource
+
+Update the OCF script to check for a mounted device when demoting
+a resource that's in the Primary state. The state change will fail
+if it is still in use, otherwise.
+
+Signed-off-by: Don Penney <don.penney@windriver.com>
+---
+ scripts/drbd.ocf | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/scripts/drbd.ocf b/scripts/drbd.ocf
+index e03bf6d..95da11a 100644
+--- a/scripts/drbd.ocf
++++ b/scripts/drbd.ocf
+@@ -720,7 +720,21 @@ drbd_stop() {
+                       ;;
+               $OCF_RUNNING_MASTER)
+                       ocf_log warn "$DRBD_RESOURCE still Primary, demoting."
+-                      do_drbdadm secondary  $DRBD_RESOURCE
++                      found=no
++                      for dev in ${DRBD_DEVICES[@]} ""; do
++                              cat /proc/mounts | grep -q "^${dev} "
++                              if [ $? -eq 0 ]; then
++                                      ocf_log warn "${DRBD_RESOURCE} is still mounted via $dev"
++                                      found=yes
++                                      break
++                              fi
++                      done
++                      if [ "${found}" = "yes" ]; then
++                              ocf_log warn "Waiting to drop $DRBD_RESOURCE"
++                      else
++                              ocf_log warn "Dropping $DRBD_RESOURCE to Secondary"
++                              do_drbdadm secondary  $DRBD_RESOURCE
++                      fi
+               esac
+               $first_try || sleep 1
+               first_try=false
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0010-Include-sysmacros-for-major-minor-macros.patch b/meta-stx/recipes-support/drbd/drbd-utils/0010-Include-sysmacros-for-major-minor-macros.patch
new file mode 100644 (file)
index 0000000..9e3eed6
--- /dev/null
@@ -0,0 +1,37 @@
+From 424eef1074c56ce5137bf0b2718711ca706280e7 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Sat, 26 Oct 2019 02:06:18 -0700
+Subject: [PATCH] Include sysmacros for major minor macros.
+
+---
+ user/drbdadm_adjust.c        | 1 +
+ user/legacy/drbdadm_adjust.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/user/drbdadm_adjust.c b/user/drbdadm_adjust.c
+index efa09a7f..09c38944 100644
+--- a/user/drbdadm_adjust.c
++++ b/user/drbdadm_adjust.c
+@@ -39,6 +39,7 @@
+ #include <stdlib.h>
+ #include <stdarg.h>
+ #include <stdbool.h>
++#include <sys/sysmacros.h>
+ #include "drbdadm.h"
+ #include "drbdtool_common.h"
+diff --git a/user/legacy/drbdadm_adjust.c b/user/legacy/drbdadm_adjust.c
+index 0de5b620..b4d783d4 100644
+--- a/user/legacy/drbdadm_adjust.c
++++ b/user/legacy/drbdadm_adjust.c
+@@ -36,6 +36,7 @@
+ #include <errno.h>
+ #include <stdlib.h>
+ #include <stdarg.h>
++#include <sys/sysmacros.h>
+ #include "drbdadm.h"
+ #include "drbdtool_common.h"
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/0011-Disable-documentation.patch b/meta-stx/recipes-support/drbd/drbd-utils/0011-Disable-documentation.patch
new file mode 100644 (file)
index 0000000..c660b49
--- /dev/null
@@ -0,0 +1,33 @@
+From 27ef15d12cc63432332212fbf8c5cc4d77a29de9 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Sat, 26 Oct 2019 14:17:48 +0000
+Subject: [PATCH] disable documentation
+
+---
+ Makefile.in | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/Makefile.in b/Makefile.in
+index fd6a7443..348389e5 100644
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -56,7 +56,7 @@ KDIR ?= /lib/modules/$(KVER)/build
+ # and not in e.g. dash. I'm too lazy to fix it to be compatible.
+ SHELL=/bin/bash
+-SUBDIRS     = user scripts documentation drbd
++SUBDIRS     = user scripts drbd
+ REL_VERSION := $(shell $(SED) -ne '/^\#define REL_VERSION/{s/^[^"]*"\([^ "]*\).*/\1/;p;q;}' drbd/linux/drbd_config.h)
+ ifdef FORCE
+@@ -181,7 +181,6 @@ drbd/drbd_buildtag.c:
+ .filelist:
+       @$(GIT) ls-files | sed '$(if $(PRESERVE_DEBIAN),,/^debian/d);s#^#drbd-$(DIST_VERSION)/#' > .filelist
+       @[ -s .filelist ] # assert there is something in .filelist now
+-      @find documentation -name "[^.]*.[58]" -o -name "*.html" | \
+       sed "s/^/drbd-$(DIST_VERSION)\//"              >> .filelist ; \
+       echo drbd-$(DIST_VERSION)/drbd_config.h        >> .filelist ; \
+       echo drbd-$(DIST_VERSION)/drbd/drbd_buildtag.c >> .filelist ; \
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils/drbd.service b/meta-stx/recipes-support/drbd/drbd-utils/drbd.service
new file mode 100644 (file)
index 0000000..052501e
--- /dev/null
@@ -0,0 +1,17 @@
+[Unit]
+Description=Control drbd resources.
+After=network.target sshd.service
+
+[Service]
+Type=forking
+Restart=no
+KillMode=process
+RemainAfterExit=yes
+ExecStart=/etc/init.d/drbd start
+ExecStop=/etc/init.d/drbd stop
+ExecReload=/etc/init.d/drbd reload
+TimeoutSec=5min
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/meta-stx/recipes-support/drbd/drbd-utils_8.4.3.bb b/meta-stx/recipes-support/drbd/drbd-utils_8.4.3.bb
new file mode 100644 (file)
index 0000000..e04c8f6
--- /dev/null
@@ -0,0 +1,54 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Distributed block device driver for Linux"
+
+DESCRIPTION = " DRBD, developed by LINBIT, is a software that allows RAID 1 functionality over \
+       TCP/IP and RDMA for GNU/Linux. DRBD is a block device which is designed to build high \
+       availability clusters and software defined storage by providing a virtual shared device \
+       which keeps disks in nodes synchronised using TCP/IP or RDMA. This simulates RAID 1 but \
+       avoids the use of uncommon hardware (shared SCSI buses or Fibre Channel)."
+HOMEPAGE = "http://www.drbd.org/"
+SECTION = "admin"
+LICENSE = "GPLv2+"
+LIC_FILES_CHKSUM = "file://COPYING;md5=5574c6965ae5f583e55880e397fbb018"
+
+# SRCREV = "89a294209144b68adb3ee85a73221f964d3ee515"
+SRCREV = "136c0e42691aed4a4607c79969de87cb8410285c"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+PV = "8.4.3rc1"
+
+SRC_URI = "git://github.com/LINBIT/drbd-8.4.git;name=git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+
+# https://www.linbit.com/downloads/drbd/8.4/archive/
+
+inherit autotools
+
+DEPENDS += " \
+       linux-libc-headers \
+       glibc \
+       "
+# UPSTREAM_CHECK_URI = "https://github.com/LINBIT/drbd-utils/releases"
+
+inherit autotools-brokensep
+
+RDEPENDS_${PN} += "bash perl-module-getopt-long perl-module-exporter perl-module-constant perl-module-overloading perl-module-exporter-heavy"
+
+do_install_append() {
+       rm -rf ${D}/var/lock
+}
diff --git a/meta-stx/recipes-support/drbd/drbd-utils_8.4.3.bbappend b/meta-stx/recipes-support/drbd/drbd-utils_8.4.3.bbappend
new file mode 100644 (file)
index 0000000..5cce906
--- /dev/null
@@ -0,0 +1,102 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_append := ":${THISDIR}/${PN}:"
+
+
+SRC_URI += " \
+       file://0001-skip_wait_con_int_on_simplex.patch \
+       file://0002-drbd-conditional-crm-dependency.patch \
+       file://0003-drbd_report_condition.patch \
+       file://0004-drbdadm-ipaddr-change.patch \
+       file://0005-drbd_reconnect_standby_standalone.patch \
+       file://0006-avoid-kernel-userspace-version-check.patch \
+       file://0007-Update-OCF-to-attempt-connect-in-certain-states.patch \
+       file://0008-Increase-short-cmd-timeout-to-15-secs.patch \
+       file://0009-Check-for-mounted-device-before-demoting-Primary-DRB.patch \
+       file://0010-Include-sysmacros-for-major-minor-macros.patch \
+       file://0011-Disable-documentation.patch \
+       file://drbd.service \
+       "
+
+EXTRA_OECONF = " \
+               --with-utils                    \
+                --with-initdir=/etc/init.d     \
+               --without-km                    \
+                --with-pacemaker               \
+                --with-rgmanager               \
+                --with-bashcompletion          \
+               --with-udev                     \
+               --with-heartbeat                \
+                --with-distro debian           \
+               "
+
+FILES_${PN} = "\
+       /var/lib/drbd \
+       /run \
+       ${base_sbindir}/drbdsetup \
+       ${base_sbindir}/drbdadm \
+       ${base_sbindir}/drbdmeta \
+       ${nonarch_base_libdir}/drbd/drbdsetup-83 \
+       ${nonarch_base_libdir}/drbd/drbdadm-83 \
+       ${sysconfdir}/init.d/drbd \
+       ${sysconfdir}/drbd.conf \
+       ${sysconfdir}/xen \
+       ${sysconfdir}/drbd.d \
+       ${sbindir}/drbd-overview \
+       ${libdir}/drbd/outdate-peer.sh \
+       ${libdir}/drbd/snapshot-resync-target-lvm.sh \
+       ${libdir}/drbd/unsnapshot-resync-target-lvm.sh \
+       ${libdir}/drbd/notify-out-of-sync.sh \
+       ${libdir}/drbd/notify-split-brain.sh \
+       ${libdir}/drbd/notify-emergency-reboot.sh \
+       ${libdir}/drbd/notify-emergency-shutdown.sh \
+       ${libdir}/drbd/notify-io-error.sh \
+       ${libdir}/drbd/notify-pri-lost-after-sb.sh \
+       ${libdir}/drbd/notify-pri-lost.sh \
+       ${libdir}/drbd/notify-pri-on-incon-degr.sh \
+       ${libdir}/drbd/notify.sh \
+       ${libdir}/drbd/rhcs_fence \
+       ${sysconfdir}/udev/rules.d/65-drbd.rules \
+       ${libdir}/drbd/crm-fence-peer.sh \
+       ${libdir}/drbd/crm-unfence-peer.sh \
+       ${libdir}/drbd/stonith_admin-fence-peer.sh \
+       ${libdir}/ocf/resource.d/linbit/drbd \
+       ${datadir}/cluster/drbd.sh.drbd-utils \
+       ${datadir}/cluster/drbd.metadata \
+       ${sysconfdir}/ha.d/resource.d/drbddisk \
+       ${sysconfdir}/ha.d/resource.d/drbdupper \
+       ${sysconfdir}/bash_completion.d/drbdadm* \
+       ${systemd_system_unitdir}/drbd.service \
+       "
+
+inherit update-alternatives
+ALTERNATIVE_PRIORITY = "80"
+ALTERNATIVE_${PN} = "drbd.sh"
+ALTERNATIVE_LINK_NAME[drbd.sh] = "${datadir}/cluster/drbd.sh"
+
+do_install_append() {
+       mv ${D}${datadir}/cluster/drbd.sh ${D}${datadir}/cluster/drbd.sh.drbd-utils
+       install -d -m 755 ${D}/${systemd_system_unitdir}
+       install -p -D -m 644 ${WORKDIR}/drbd.service ${D}/${systemd_system_unitdir}
+}
+
+#inherit systemd
+#SYSTEMD_PACKAGES += "${PN}"
+#SYSTEMD_SERVICE_${PN} = "drbd.service"
+
+pkg_postinst_ontarget_drbd-utils() {
+       ${base_bindir}/systemctl enable drbd.service
+}
diff --git a/meta-stx/recipes-support/eventlog/eventlog_git.bb b/meta-stx/recipes-support/eventlog/eventlog_git.bb
new file mode 100644 (file)
index 0000000..11b7e82
--- /dev/null
@@ -0,0 +1,40 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "syslog replacement"
+DESCRIPTION = " \
+       The EventLog library aims to be a replacement of the simple syslog() API \
+       provided on UNIX systems. The major difference between EventLog and syslog \
+       is that EventLog tries to add structure to messages. \
+        \
+       Where you had a simple non-structrured string in syslog() you have a \
+       combination of description and tag/value pairs. \
+        \
+       EventLog provides an interface to build, format and output an event record. \
+       The exact format and output method can be customized by the administrator \
+       via a configuration file \
+         "
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://COPYING;md5=b8ba8e77bcda9a53fac0fe39fe957767"
+
+SRCREV = "a5c19163ba131f79452c6dfe4e31c2b4ce4be741"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/balabit/eventlog.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit autotools pkgconfig
diff --git a/meta-stx/recipes-support/facter/facter_2.5.0.bbappend b/meta-stx/recipes-support/facter/facter_2.5.0.bbappend
new file mode 100644 (file)
index 0000000..0e5f9ec
--- /dev/null
@@ -0,0 +1,28 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_append := ":${THISDIR}/files:"
+
+inherit openssl10
+DEPENDS_append = " openssl"
+
+SRC_URI += " \
+       file://0001-ps.patch \
+       file://0002-personality.patch \
+       file://0003-centos_remove-net-commands-that-can-timeout.patch;striplevel=2 \
+       file://0004-centos_fix-ipv6-regex.patch \
+       file://0005-Hardcode-ipaddress-fact-to-localhost.patch \
+       file://0006-facter-updates-for-poky-stx.patch \
+       "
diff --git a/meta-stx/recipes-support/facter/files/0001-ps.patch b/meta-stx/recipes-support/facter/files/0001-ps.patch
new file mode 100644 (file)
index 0000000..699327f
--- /dev/null
@@ -0,0 +1,29 @@
+diff --git a/lib/facter/ps.rb b/lib/facter/ps.rb
+index 9163519..02117ac 100644
+--- a/lib/facter/ps.rb
++++ b/lib/facter/ps.rb
+@@ -12,7 +12,7 @@
+ #
+ Facter.add(:ps) do
+-  setcode do 'ps -ef' end
++  setcode do 'ps -efww' end
+ end
+ Facter.add(:ps) do
+diff --git a/spec/unit/ps_spec.rb b/spec/unit/ps_spec.rb
+index 2e25a4b..20c61af 100755
+--- a/spec/unit/ps_spec.rb
++++ b/spec/unit/ps_spec.rb
+@@ -27,9 +27,9 @@ describe "ps facts" do
+     'RedHat',
+     'Debian',
+   ].each do |os|
+-    it "should return gnu/linux style ps -ef on operatingsystem #{os}" do
++    it "should return gnu/linux style ps -efww on operatingsystem #{os}" do
+       Facter.fact(:operatingsystem).stubs(:value).returns os
+-      Facter.fact(:ps).value.should == 'ps -ef'
++      Facter.fact(:ps).value.should == 'ps -efww'
+     end
+   end
diff --git a/meta-stx/recipes-support/facter/files/0002-personality.patch b/meta-stx/recipes-support/facter/files/0002-personality.patch
new file mode 100644 (file)
index 0000000..a1b6b5c
--- /dev/null
@@ -0,0 +1,93 @@
+---
+ lib/facter/personality.rb |   21 ++++++++++++++++++
+ lib/facter/subfunction.rb |   61 ++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 82 insertions(+)
+
+--- /dev/null
++++ b/lib/facter/personality.rb
+@@ -0,0 +1,21 @@
++#
++# personality.rb
++#
++# This fact gives the personality of this node.
++#
++require 'facter/util/file_read'
++
++Facter.add('personality') do
++  confine :kernel => :linux
++
++  setcode do
++    if release = Facter::Util::FileRead.read('/etc/platform/platform.conf')
++      if match = release.match(/^nodetype\=(.*)/)
++        match[1]
++      end
++    end
++  end
++end
++
++# vim: set ts=2 sw=2 et :
++# encoding: utf-8
+--- /dev/null
++++ b/lib/facter/subfunction.rb
+@@ -0,0 +1,61 @@
++#
++# subfunction.rb
++#
++# This fact gives the subfunction of this node.
++#
++require 'facter/util/file_read'
++
++Facter.add('subfunction') do
++  confine :kernel => :linux
++
++  setcode do
++    if release = Facter::Util::FileRead.read('/etc/platform/platform.conf')
++      if match = release.match(/^subfunction\=(.*)/)
++        match[1]
++      end
++    end
++  end
++end
++
++Facter.add('is_worker_subfunction') do
++  confine :kernel => :linux
++
++  setcode do
++    if release = Facter::Util::FileRead.read('/etc/platform/platform.conf')
++      match = release.match(/^subfunction\=.*worker/) ? true : false
++    end
++  end
++end
++
++Facter.add('is_controller_subfunction') do
++  confine :kernel => :linux
++
++  setcode do
++    if release = Facter::Util::FileRead.read('/etc/platform/platform.conf')
++      match = release.match(/^subfunction\=.*controller/) ? true : false
++    end
++  end
++end
++
++Facter.add('is_storage_subfunction') do
++  confine :kernel => :linux
++
++  setcode do
++    if release = Facter::Util::FileRead.read('/etc/platform/platform.conf')
++      match = release.match(/^subfunction\=.*storage/) ? true : false
++    end
++  end
++end
++
++Facter.add('is_lowlatency_subfunction') do
++  confine :kernel => :linux
++
++  setcode do
++    if release = Facter::Util::FileRead.read('/etc/platform/platform.conf')
++      match = release.match(/^subfunction\=.*lowlatency/) ? true : false
++    end
++  end
++end
++
++# vim: set ts=2 sw=2 et :
++# encoding: utf-8
diff --git a/meta-stx/recipes-support/facter/files/0003-centos_remove-net-commands-that-can-timeout.patch b/meta-stx/recipes-support/facter/files/0003-centos_remove-net-commands-that-can-timeout.patch
new file mode 100644 (file)
index 0000000..a17672f
--- /dev/null
@@ -0,0 +1,55 @@
+---
+ facter-2.4.4/lib/facter/domain.rb   |   24 +++++++++++++-----------
+ facter-2.4.4/lib/facter/uniqueid.rb |    9 +++++----
+ 2 files changed, 18 insertions(+), 15 deletions(-)
+
+--- a/facter-2.4.4/lib/facter/domain.rb
++++ b/facter-2.4.4/lib/facter/domain.rb
+@@ -33,22 +33,24 @@ Facter.add(:domain) do
+     full_hostname = 'hostname -f 2> /dev/null'
+     can_do_hostname_f = Regexp.union /Linux/i, /FreeBSD/i, /Darwin/i
+-    hostname_command = if Facter.value(:kernel) =~ can_do_hostname_f
+-                         full_hostname
+-                       elsif Facter.value(:kernel) == "windows"
+-                         windows_hostname
+-                       else
+-                         basic_hostname
+-                       end
++    # Because hostname -f doesn't work for IPv6, don't use that flag
++    # hostname_command = if Facter.value(:kernel) =~ can_do_hostname_f
++    #                      full_hostname
++    #                   elsif Facter.value(:kernel) == "windows"
++    #                     windows_hostname
++    #                   else
++    #                     basic_hostname
++    #                   end
++    hostname_command = basic_hostname
+     if name = Facter::Core::Execution.exec(hostname_command) \
+       and name =~ /.*?\.(.+$)/
+       return_value = $1
+-    elsif Facter.value(:kernel) != "windows" and domain = Facter::Core::Execution.exec('dnsdomainname 2> /dev/null') \
+-      and domain =~ /.+/
+-
+-      return_value = domain
++    # elsif Facter.value(:kernel) != "windows" and domain = Facter::Core::Execution.exec('dnsdomainname 2> /dev/null') \
++    #  and domain =~ /.+/
++    #
++    #  return_value = domain
+     elsif FileTest.exists?("/etc/resolv.conf")
+       domain = nil
+       search = nil
+--- a/facter-2.4.4/lib/facter/uniqueid.rb
++++ b/facter-2.4.4/lib/facter/uniqueid.rb
+@@ -1,4 +1,5 @@
+-Facter.add(:uniqueid) do
+-  setcode 'hostid'
+-  confine :kernel => %w{SunOS Linux AIX GNU/kFreeBSD}
+-end
++# hostid does not work with IPv6, and is not needed for Titanium cloud, so remove
++# Facter.add(:uniqueid) do
++#   setcode 'hostid'
++#   confine :kernel => %w{SunOS Linux AIX GNU/kFreeBSD}
++# end
diff --git a/meta-stx/recipes-support/facter/files/0004-centos_fix-ipv6-regex.patch b/meta-stx/recipes-support/facter/files/0004-centos_fix-ipv6-regex.patch
new file mode 100644 (file)
index 0000000..1704f25
--- /dev/null
@@ -0,0 +1,12 @@
+diff -Nurpd a/lib/facter/util/ip.rb b/lib/facter/util/ip.rb
+--- a/lib/facter/util/ip.rb    2019-12-26 10:22:04.591601094 -0800
++++ b/lib/facter/util/ip.rb    2019-12-26 10:24:10.639781276 -0800
+@@ -6,7 +6,7 @@ module Facter::Util::IP
+   REGEX_MAP = {
+     :linux => {
+       :ipaddress  => /inet (?:addr:)?([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/,
+-      :ipaddress6 => /inet6 (?:addr: )?((?!(?:fe80|::1))(?>[0-9,a-f,A-F]*\:{1,2})+[0-9,a-f,A-F]{0,4})/,
++      :ipaddress6 => /inet6 (?:addr: )?((?!(fe80|\:\:1))(?>[0-9,a-f,A-F]*\:{1,2})+[0-9,a-f,A-F]{0,4})/,
+       :macaddress => /(?:ether|HWaddr)\s+((\w{1,2}:){5,}\w{1,2})/,
+       :netmask  => /(?:Mask:|netmask )([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/,
+       :mtu  => /MTU:?\s*(\d+)/i
diff --git a/meta-stx/recipes-support/facter/files/0004-centos_fix-ipv6-regex.patch.bak b/meta-stx/recipes-support/facter/files/0004-centos_fix-ipv6-regex.patch.bak
new file mode 100644 (file)
index 0000000..4701209
--- /dev/null
@@ -0,0 +1,15 @@
+---
+ facter-2.4.4/lib/facter/util/ip.rb |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/facter-2.4.4/lib/facter/util/ip.rb
++++ b/facter-2.4.4/lib/facter/util/ip.rb
+@@ -6,7 +6,7 @@ module Facter::Util::IP
+   REGEX_MAP = {
+     :linux => {
+       :ipaddress  => /inet (?:addr:)?([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/,
+-      :ipaddress6 => /inet6 (?:addr: )?((?![fe80|::1])(?>[0-9,a-f,A-F]*\:{1,2})+[0-9,a-f,A-F]{0,4})/,
++      :ipaddress6 => /inet6 (?:addr: )?((?!(fe80|\:\:1))(?>[0-9,a-f,A-F]*\:{1,2})+[0-9,a-f,A-F]{0,4})/,
+       :macaddress => /(?:ether|HWaddr)\s+((\w{1,2}:){5,}\w{1,2})/,
+       :netmask  => /(?:Mask:|netmask )([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/,
+       :mtu  => /MTU:?\s*(\d+)/i
diff --git a/meta-stx/recipes-support/facter/files/0005-Hardcode-ipaddress-fact-to-localhost.patch b/meta-stx/recipes-support/facter/files/0005-Hardcode-ipaddress-fact-to-localhost.patch
new file mode 100644 (file)
index 0000000..3af562c
--- /dev/null
@@ -0,0 +1,188 @@
+From af1818469ed789bad373e6c0f8d29669acc39669 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Thu, 26 Oct 2017 10:44:20 -0400
+Subject: [PATCH] Hardcode ipaddress fact to localhost
+
+---
+ lib/facter/ipaddress.rb | 163 +-----------------------------------------------
+ 1 file changed, 2 insertions(+), 161 deletions(-)
+
+diff --git a/lib/facter/ipaddress.rb b/lib/facter/ipaddress.rb
+index 6179a4d..4c54791 100644
+--- a/lib/facter/ipaddress.rb
++++ b/lib/facter/ipaddress.rb
+@@ -1,169 +1,10 @@
+ # Fact: ipaddress
+ #
+-# Purpose: Return the main IP address for a host.
++# To avoid potential timeouts with this fact, just return 127.0.0.1 always
+ #
+-# Resolution:
+-#   On the Unixes does an ifconfig, and returns the first non 127.0.0.0/8
+-#   subnetted IP it finds.
+-#   On Windows, it attempts to use the socket library and resolve the machine's
+-#   hostname via DNS.
+-#
+-#   On LDAP based hosts it tries to use either the win32/resolv library to
+-#   resolve the hostname to an IP address, or on Unix, it uses the resolv
+-#   library.
+-#
+-#   As a fall back for undefined systems, it tries to run the "host" command to
+-#   resolve the machine's hostname using the system DNS.
+-#
+-# Caveats:
+-#   DNS resolution relies on working DNS infrastructure and resolvers on the
+-#   host system.
+-#   The ifconfig parsing purely takes the first IP address it finds without any
+-#   checking this is a useful IP address.
+-#
+-
+-require 'facter/util/ip'
+-
+-Facter.add(:ipaddress) do
+-  confine :kernel => :linux
+-  setcode do
+-    ip = nil
+-    output = Facter::Util::IP.exec_ifconfig(["2>/dev/null"])
+-    if output
+-      regexp = /inet (?:addr:)?([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/
+-      output.split("\n").each do |line|
+-        match = regexp.match(line)
+-        if match and not /^127\./.match(match[1])
+-          ip = match[1]
+-          break
+-        end
+-      end
+-    end
+-    ip
+-  end
+-end
+-
+-Facter.add(:ipaddress) do
+-  confine :kernel => %w{FreeBSD OpenBSD Darwin DragonFly}
+-  setcode do
+-    ip = nil
+-    output = Facter::Util::IP.exec_ifconfig
+-
+-    output.split(/^\S/).each do |str|
+-      if str =~ /inet ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/
+-        tmp = $1
+-        unless tmp =~ /^127\./
+-          ip = tmp
+-          break
+-        end
+-      end
+-    end
+-
+-    ip
+-  end
+-end
+ Facter.add(:ipaddress) do
+-  confine :kernel => %w{NetBSD SunOS}
+-  setcode do
+-    ip = nil
+-    output = Facter::Util::IP.exec_ifconfig(["-a"])
+-
+-    output.split(/^\S/).each do |str|
+-      if str =~ /inet ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/
+-        tmp = $1
+-        unless tmp =~ /^127\./ or tmp == "0.0.0.0"
+-          ip = tmp
+-          break
+-        end
+-      end
+-    end
+-
+-    ip
+-  end
+-end
+-
+-Facter.add(:ipaddress) do
+-  confine :kernel => %w{AIX}
+-  setcode do
+-    ip = nil
+-
+-    default_interface = Facter::Util::IP.exec_netstat(["-rn | grep default | awk '{ print $6 }'"])
+-    output = Facter::Util::IP.exec_ifconfig([default_interface])
+-
+-    output.split(/^\S/).each do |str|
+-      if str =~ /inet ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)/
+-        ip = $1
+-      end
+-    end
+-
+-    ip
+-  end
+-end
+-
+-Facter.add(:ipaddress) do
+-  confine :kernel => %w{windows}
+-  setcode do
+-    require 'facter/util/ip/windows'
+-    ipaddr = nil
+-
+-    adapters = Facter::Util::IP::Windows.get_preferred_ipv4_adapters
+-    adapters.find do |nic|
+-      nic.IPAddress.any? do |addr|
+-        ipaddr = addr if Facter::Util::IP::Windows.valid_ipv4_address?(addr)
+-        ipaddr
+-      end
+-    end
+-
+-    ipaddr
+-  end
+-end
+-
+-Facter.add(:ipaddress, :timeout => 2) do
+-  setcode do
+-    if Facter.value(:kernel) == 'windows'
+-      require 'win32/resolv'
+-    else
+-      require 'resolv'
+-    end
+-
+-    begin
+-      if hostname = Facter.value(:hostname)
+-        if Facter.value(:kernel) == 'windows'
+-          ip = Win32::Resolv.get_resolv_info.last[0]
+-        else
+-          ip = Resolv.getaddress(hostname)
+-        end
+-        unless ip == "127.0.0.1"
+-          ip
+-        end
+-      else
+-        nil
+-      end
+-    rescue Resolv::ResolvError
+-      nil
+-    rescue NoMethodError # i think this is a bug in resolv.rb?
+-      nil
+-    end
+-  end
+-end
+-
+-Facter.add(:ipaddress, :timeout => 2) do
+   setcode do
+-    if hostname = Facter.value(:hostname)
+-      # we need Hostname to exist for this to work
+-      host = nil
+-      if host = Facter::Core::Execution.execute("host #{hostname}")
+-        list = host.chomp.split(/\s/)
+-        if defined? list[-1] and
+-          list[-1] =~ /[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+/
+-          list[-1]
+-        end
+-      else
+-        nil
+-      end
+-    else
+-      nil
+-    end
++    "127.0.0.1"
+   end
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/facter/files/0006-facter-updates-for-poky-stx.patch b/meta-stx/recipes-support/facter/files/0006-facter-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..90c2568
--- /dev/null
@@ -0,0 +1,44 @@
+diff --git a/lib/facter/architecture.rb b/lib/facter/architecture.rb
+index 72c5755..14ec620 100644
+--- a/lib/facter/architecture.rb
++++ b/lib/facter/architecture.rb
+@@ -31,7 +31,7 @@ Facter.add(:architecture) do
+       end
+     when "x86_64"
+       case Facter.value(:operatingsystem)
+-      when "Debian", "Gentoo", "GNU/kFreeBSD", "Ubuntu"
++      when "Debian", "Gentoo", "GNU/kFreeBSD", "Ubuntu", "poky-stx"
+         "amd64"
+       else
+         model
+diff --git a/lib/facter/operatingsystem/linux.rb b/lib/facter/operatingsystem/linux.rb
+index de7913d..797698a 100644
+--- a/lib/facter/operatingsystem/linux.rb
++++ b/lib/facter/operatingsystem/linux.rb
+@@ -13,6 +13,8 @@ module Facter
+             @operatingsystem ||= "Ubuntu"
+           elsif lsbdistid == "LinuxMint"
+             @operatingsystem ||= "LinuxMint"
++          elsif lsbdistid == "poky-stx"
++            @operatingsystem ||= "poky-stx"
+           else
+             @operatingsystem ||= get_operatingsystem_with_release_files
+           end
+@@ -27,7 +29,7 @@ module Facter
+              "CloudLinux", "PSBM", "OracleLinux", "OVS", "OEL", "Amazon",
+              "XenServer", "VirtuozzoLinux"
+           "RedHat"
+-        when "LinuxMint", "Ubuntu", "Debian"
++        when "LinuxMint", "Ubuntu", "Debian", "poky-stx"
+           "Debian"
+         when "SLES", "SLED", "OpenSuSE", "SuSE"
+           "Suse"
+@@ -74,6 +76,8 @@ module Facter
+           get_ubuntu_release_with_release_file
+         when "VMwareESX"
+           get_vmwareESX_release_with_release_file
++        when "poky-stx"
++          get_lsbdistrelease
+         else
+           Facter.value(:kernelrelease)
+         end
diff --git a/meta-stx/recipes-support/hardlink/hardlink_0.3.0.bb b/meta-stx/recipes-support/hardlink/hardlink_0.3.0.bb
new file mode 100644 (file)
index 0000000..00cd204
--- /dev/null
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Hardlink is a tool which replaces multiple copies of a file with hardlinks"
+
+HOMEPAGE = "https://jak-linux.org/projects/hardlink"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://hardlink.1;md5=c7a9a3326f28fa218d9af0251a5e1ae4"
+
+SRC_URI = "https://jak-linux.org/projects/hardlink/hardlink_0.3.0.tar.xz"
+SRC_URI[md5sum] = "72f1a460adb6874c151deab766e434ad"
+SRC_URI[sha256sum] = "e8c93dfcb24aeb44a75281ed73757cb862cc63b225d565db1c270af9dbb7300f"
+
+DEPENDS = " attr"
+inherit pkgconfig
+
+do_install() {
+       cd ${S}
+       oe_runmake -e DESTDIR=${D} BINDIR=${bindir} MANDIR=${datadir} install
+}
+
+FILES_${PN}-doc = "${datadir}/"
diff --git a/meta-stx/recipes-support/hiera/hiera_%.bbappend b/meta-stx/recipes-support/hiera/hiera_%.bbappend
new file mode 100644 (file)
index 0000000..940866e
--- /dev/null
@@ -0,0 +1,17 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit openssl10
+DEPENDS_append = " openssl"
diff --git a/meta-stx/recipes-support/ldapscripts/files/allow-anonymous-bind-for-ldap-search.patch b/meta-stx/recipes-support/ldapscripts/files/allow-anonymous-bind-for-ldap-search.patch
new file mode 100644 (file)
index 0000000..e2e0129
--- /dev/null
@@ -0,0 +1,38 @@
+From bee43b9f75ee7a2cee0391319528264014d775f7 Mon Sep 17 00:00:00 2001
+From: Kam Nasim <kam.nasim@windriver.com>
+Date: Mon, 16 Apr 2018 14:58:03 -0400
+Subject: [PATCH] ldapscripts - allow anonymous bind for ldap search
+
+---
+ lib/runtime | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/lib/runtime b/lib/runtime
+index 012ac95..18acf3f 100644
+--- a/lib/runtime
++++ b/lib/runtime
+@@ -197,8 +197,11 @@ _ldapsearch () {
+   elif [ -n "$BINDPWDFILE" ]
+   then
+     $LDAPSEARCHBIN $LDAPBINOPTS $LDAPSEARCHOPTS -y "$BINDPWDFILE" -D "$BINDDN" -b "${1:-$SUFFIX}" -xH "$SERVER" -s sub -LLL "${2:-(objectclass=*)}" "${3:-*}" 2>>"$LOGFILE" 
+-  else
++  elif [ -n "$BINDPWD" ]
++  then
+     $LDAPSEARCHBIN $LDAPBINOPTS $LDAPSEARCHOPTS -w "$BINDPWD" -D "$BINDDN" -b "${1:-$SUFFIX}" -xH "$SERVER" -s sub -LLL "${2:-(objectclass=*)}" "${3:-*}" 2>>"$LOGFILE" 
++  else
++    $LDAPSEARCHBIN $LDAPBINOPTS $LDAPSEARCHOPTS -D "$BINDDN" -b "${1:-$SUFFIX}" -xH "$SERVER" -s sub -LLL "${2:-(objectclass=*)}" "${3:-*}" 2>>"$LOGFILE" 
+   fi
+ }
+@@ -785,7 +788,7 @@ then
+     then
+       warn_log "Warning : using command-line passwords, ldapscripts may not be safe"
+     else
+-      end_die "Unable to read password file $BINDPWDFILE, exiting..."
++      warn_log "Warning: Unable to read password file $BINDPWDFILE, binding anonymously..."
+     fi
+   fi
+ fi
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldap-user-setup-support.patch b/meta-stx/recipes-support/ldapscripts/files/ldap-user-setup-support.patch
new file mode 100644 (file)
index 0000000..f2b723e
--- /dev/null
@@ -0,0 +1,354 @@
+---
+ Makefile                 |   5 +-
+ man/man1/ldapusersetup.1 |  60 +++++++++++
+ sbin/ldapusersetup       | 254 +++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 317 insertions(+), 2 deletions(-)
+ create mode 100644 man/man1/ldapusersetup.1
+ create mode 100644 sbin/ldapusersetup
+
+diff --git a/sbin/ldapusersetup b/sbin/ldapusersetup
+new file mode 100644
+index 0000000..27d12dc
+--- /dev/null
++++ b/sbin/ldapusersetup
+@@ -0,0 +1,254 @@
++#!/bin/sh
++
++#  ldapusersetup : interactive setup for adding users to LDAP
++
++#  Copyright (c) 2015 Wind River Systems, Inc.
++#
++#  This program is free software; you can redistribute it and/or
++#  modify it under the terms of the GNU General Public License
++#  as published by the Free Software Foundation; either version 2
++#  of the License, or (at your option) any later version.
++#
++#  This program is distributed in the hope that it will be useful,
++#  but WITHOUT ANY WARRANTY; without even the implied warranty of
++#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++#  GNU General Public License for more details.
++#
++#  You should have received a copy of the GNU General Public License
++#  along with this program; if not, write to the Free Software
++#  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++#  USA.
++
++if [ "$1" = "-h" ] || [ "$1" = "--help" ] || [ "$#" -eq 1 ]
++then
++  echo "Usage : $0 [-u <username | uid> <field> <value>]
++where accepted field(s) are as follows:
++--sudo                        : whether to add this user to sudoer list
++--secondgroup <grp>           : the secondary group to add this user to
++--passmax     <value>         : the shadowMax value for this user
++--passwarning <value>         : the shadowWarning value for this user"
++  exit 1
++fi
++
++# Source runtime file
++_RUNTIMEFILE="/usr/lib/ldapscripts/runtime"
++. "$_RUNTIMEFILE"
++
++# runtime defaults
++_DEFAULTGRP2="sys_protected"
++_BASHSHELL="/bin/bash"
++_DEFAULTSHADOWMAX="90"
++_DEFAULTSHADOWWARNING="2"
++_SHELL=""
++
++### Helper functions ###
++
++# Gets input from user and validates it.
++# Will only return if input meets validation
++# criteria otherwise will just sit there.
++#
++# Input : input string ($1), valid output options ($2)
++# Output: the validated input
++# Note  : the validation list must be an array
++LdapUserInput () {
++declare -a optionAry=("${!2}")
++while true; do
++    read -p "$1" _output
++    # convert to lower case
++    _output2=${_output,,}
++    # check if output is a valid option
++    if [[ "${optionAry[@]}" =~ "$_output2" ]]; then
++      break
++    else
++       echo "Invalid input \"$_output\". Allowed options: ${optionAry[@]}" >&2
++   fi
++done
++   echo "$_output2"
++}
++
++# Delete an ldap user if it exists
++# and exit with error
++# Input : username ($1), exit msg ($2)
++# Output : none
++LdapRollback() {
++  ldapdeleteuser "$1"
++  end_die "$2"
++}
++
++# Add an ldap user and exit on failure
++# Input : username ($1)
++# Output : none
++LdapAddUser() {
++  ldapadduser "$1" users
++  [ $? -eq 0 ] || end_die "Critical setup error: cannot add user"
++}
++
++# Replace Login Shell and call Rollback on failure
++# Input : username ($1), shell to set ($2)
++# Output : none
++LdapAddLoginShell () {
++  # Support bash only now.
++  _SHELL="$_BASHSHELL"
++  # Replace the login shell
++  ldapmodifyuser $1 replace loginShell $_SHELL &> /dev/null
++  [ $? -eq 0 ] || LdapRollback $1 "Critical setup error: cannot set login shell"
++}
++
++# Add user to sudoer list
++# Input : username ($1)
++# Output : true or false
++LdapAddSudo() {
++  ldapaddsudo "$1" 2> /dev/null
++  [ $? -eq 0 ] || \
++   echo_log "Non critical setup error: cannot add to sudoer list"
++}
++
++# Add user to a secondary user group
++# Input : username ($1), user group ($2)
++# Output : true or false
++LdapSecondaryGroup () {
++  _newGrp="$2"
++  [ -z "$2" ] && _newGrp=$_DEFAULTGRP2
++
++  ldapaddusertogroup $1 $_newGrp
++  [ $? -eq 0 ] || \
++   echo_log "Non critical setup error: cannot add $1 to $_newGrp"
++}
++
++# Update shadowMax for user
++# Input : username ($1), shadow Max value ($2)
++# Output : none
++LdapUpdateShadowMax () {
++  _newShadow="$2"
++  ! [[ "$2" =~ ^[0-9]+$ ]] || [ -z "$2" ] \
++   && _newShadow=$_DEFAULTSHADOWMAX
++
++  ldapmodifyuser $1 replace shadowMax $_newShadow
++  echo "Updating password expiry to $_newShadow days"
++}
++
++# Update shadowWarning for user
++# Input : username ($1), shadow Warning value ($2)
++# Output : none
++LdapUpdateShadowWarning () {
++  _newWarning="$2"
++  ! [[ "$2" =~ ^[0-9]+$ ]] || [ -z "$2" ] \
++   && _newWarning=$_DEFAULTSHADOWWARNING
++
++  ldapmodifyuser $1 replace shadowWarning $_newWarning
++  echo "Updating password warning to $_newWarning days"
++}
++
++# Since this setup script is meant to be a
++# wrapper on top of existing ldap scripts,
++# it shall invoke those... we could have achieved
++# loose coupling by not relying on helpers but
++# at the expense of massively redundant code
++# duplication.
++declare -a helper_scripts=("ldapadduser" "ldapaddsudo" "ldapmodifyuser" "ldapaddusertogroup" "$_BASHSHELL")
++
++# Do some quick sanity tests to make sure
++# helper scripts are present
++for src in "${helper_scripts[@]}"; do
++  if ! type "$src" &>/dev/null; then
++    end_die "Cannot locate $src. Update your PATH variable"
++  fi
++done
++
++if [ "$#" -eq 0 ]; then
++  # This setup collects all attributes
++  # interactively during runtime
++  echo -n "Enter username to add to LDAP: "
++  read _username
++  LdapAddUser "$_username"
++
++  # Replace the login shell. Only bash is supported now.
++  LdapAddLoginShell "$_username"
++
++  # Should sudo be activated for this user
++  echo -n "Add $_username to sudoer list? (yes/NO): "
++  read CONFIRM
++  CONFIRM=${CONFIRM,,}
++
++  if is_yes $CONFIRM
++  then
++    LdapAddSudo "$_username"
++  fi
++
++  # Add to secondary user group
++  shellInput="Add $_username to secondary user group? (yes/NO): "
++  options=( "yes", "no" )
++  CONFIRM=`LdapUserInput "$shellInput" options[@]`
++  if is_yes $CONFIRM
++  then
++    echo -n "Secondary group to add user to? [$_DEFAULTGRP2]: "
++    read _grp2
++    LdapSecondaryGroup $_username $_grp2
++  fi
++
++  # Set password expiry
++  echo -n "Enter days after which user password must \
++be changed [$_DEFAULTSHADOWMAX]: "
++  read _shadowMax
++  LdapUpdateShadowMax $_username $_shadowMax
++
++  # Set password warning
++  echo -n "Enter days before password is to expire that \
++user is warned [$_DEFAULTSHADOWWARNING]: "
++  read _shadowWarning
++  LdapUpdateShadowWarning $_username $_shadowWarning
++
++else
++  # we have to read command line option
++  while [[ $# > 1 ]]
++  do
++    key="$1"
++
++    case $key in
++      -u|--user) # compulsory
++      _username="$2"
++      shift
++      ;;
++      --sudo)      # optional
++      _sudo="yes"
++      ;;
++      --passmax) # optional
++      _shadowMax="$2"
++      shift
++      ;;
++      --passwarning) # optional
++      _shadowWarning="$2"
++      shift
++      ;;
++      --secondgroup) # optional
++        _grpConfirm="1"
++      _grp2="$2"
++      shift
++      ;;
++      *)
++
++      ;;
++    esac
++    shift
++  done
++
++  # Add LDAP user
++  [ -z "$_username" ] && end_die "No username argument specified"
++  LdapAddUser $_username
++
++  # Change Login Shell
++  LdapAddLoginShell $_username "$_loginshell"
++
++  # Add sudo if required
++  if is_yes $_sudo
++  then
++    LdapAddSudo "$_username"
++  fi
++
++  # Add secondary group if required
++  [ -z "$_grpConfirm" ] || LdapSecondaryGroup $_username $_grp2
++
++  # Password modifications
++  LdapUpdateShadowMax $_username $_shadowMax
++  LdapUpdateShadowWarning $_username $_shadowWarning
++fi
+diff --git a/Makefile b/Makefile
+index f81c272..6e5b193 100644
+--- a/Makefile
++++ b/Makefile
+@@ -41,12 +41,13 @@ SBINFILES =        ldapdeletemachine ldapmodifygroup ldapsetpasswd lsldap ldapadduser l
+                       ldapdeleteuser ldapsetprimarygroup ldapfinger ldapid ldapgid ldapmodifymachine \
+                       ldaprenamegroup ldapaddgroup ldapaddusertogroup ldapdeleteuserfromgroup \
+                       ldapinit ldapmodifyuser ldaprenamemachine ldapaddmachine ldapdeletegroup \
+-                      ldaprenameuser ldapmodifysudo ldapdeletesudo
++                      ldaprenameuser ldapmodifysudo ldapdeletesudo ldapusersetup
+ MAN1FILES =   ldapdeletemachine.1 ldapmodifymachine.1 ldaprenamemachine.1 ldapadduser.1 \
+                       ldapdeleteuserfromgroup.1 ldapfinger.1 ldapid.1 ldapgid.1 ldapmodifyuser.1 lsldap.1 \
+                       ldapaddusertogroup.1 ldaprenameuser.1 ldapinit.1 ldapsetpasswd.1 ldapaddgroup.1 \
+                       ldapdeletegroup.1 ldapsetprimarygroup.1 ldapmodifygroup.1 ldaprenamegroup.1 \
+-                      ldapaddmachine.1 ldapdeleteuser.1 ldapaddsudo.1 ldapmodifysudo.1 ldapdeletesudo.1
++                      ldapaddmachine.1 ldapdeleteuser.1 ldapaddsudo.1 ldapmodifysudo.1 \
++                      ldapdeletesudo.1 ldapusersetup.1
+ MAN5FILES = ldapscripts.5
+ TMPLFILES = ldapaddgroup.template.sample ldapaddmachine.template.sample \
+                       ldapadduser.template.sample
+diff --git a/man/man1/ldapusersetup.1 b/man/man1/ldapusersetup.1
+new file mode 100644
+index 0000000..9b3129b
+--- /dev/null
++++ b/man/man1/ldapusersetup.1
+@@ -0,0 +1,60 @@
++.\" Copyright (c) 2015 Wind River Systems, Inc.
++.\"
++.\" This program is free software; you can redistribute it and/or
++.\" modify it under the terms of the GNU General Public License
++.\" as published by the Free Software Foundation; either version 2
++.\" of the License, or (at your option) any later version.
++.\"
++.\" This program is distributed in the hope that it will be useful,
++.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
++.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++.\" GNU General Public License for more details.
++.\"
++.\" You should have received a copy of the GNU General Public License
++.\" along with this program; if not, write to the Free Software
++.\" Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++.\" USA.
++.\"
++.\" Kam Nasim
++.\" knasim@windriver.com
++.\"
++.TH ldapusersetup 1 "December 16, 2015"
++
++.SH NAME
++ldapusersetup \- wizard for adding an LDAP user to CGCS.
++
++.SH SYNOPSIS
++.B ldapusersetup
++
++.SH DESCRIPTION
++ldapusersetup interactively walks through the process of creating an LDAP user
++for access to CGCS services. The user is prompted for:
++- username
++- if a sudoEntry needs to be created
++- if a secondary user group needs to be added
++- user password expiry and warning configuration
++Alternatively, the user may provide these parameters as command line actions.
++Look at the OPTIONS section for more information.
++
++To delete the user and all its group associations, simply use ldapdeleteuser(1)
++
++.SH OPTIONS
++.TP
++.B [-u <username | uid> <field> <value>]
++The name or uid of the user to modify.
++The following fields are available as long format options:
++--sudo                  : whether to add this user to sudoer list
++--secondgroup <grp>     : the secondary group to add this user to
++--passmax     <value>   : the shadowMax value for this user
++--passwarning <value>   : the shadowWarning value for this user
++
++.SH "SEE ALSO"
++ldapdeleteuser(1), ldapaddgroup(1), ldapaddusertogroup(1), ldapmodifyuser(1), ldapscripts(5).
++
++.SH AVAILABILITY
++The ldapscripts are provided under the GNU General Public License v2 (see COPYING for more details).
++The latest version of the ldapscripts is available on :
++.B http://contribs.martymac.org
++
++.SH BUGS
++No bug known.
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldapaddgroup.template.cgcs b/meta-stx/recipes-support/ldapscripts/files/ldapaddgroup.template.cgcs
new file mode 100755 (executable)
index 0000000..b34c105
--- /dev/null
@@ -0,0 +1,5 @@
+dn: cn=<group>,<gsuffix>,<suffix>
+objectClass: posixGroup
+cn: <group>
+gidNumber: <gid>
+description: Group account
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldapaddsudo.template.cgcs b/meta-stx/recipes-support/ldapscripts/files/ldapaddsudo.template.cgcs
new file mode 100755 (executable)
index 0000000..f93170d
--- /dev/null
@@ -0,0 +1,10 @@
+dn: cn=<user>,ou=SUDOers,<suffix>
+objectClass: top
+objectClass: sudoRole
+cn: <user>
+sudoUser: <user>
+sudoHost: ALL
+sudoRunAsUser: ALL
+sudoCommand: ALL
+#sudoOrder: <default: 0, if multiple entries match, this entry with the highest sudoOrder is used>
+#sudoOption: <specify other sudo specific attributes here>
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldapadduser.template.cgcs b/meta-stx/recipes-support/ldapscripts/files/ldapadduser.template.cgcs
new file mode 100755 (executable)
index 0000000..29f3ccc
--- /dev/null
@@ -0,0 +1,16 @@
+dn: uid=<user>,<usuffix>,<suffix>
+objectClass: account
+objectClass: posixAccount
+objectClass: shadowAccount
+objectClass: top
+cn: <user>
+uid: <user>
+uidNumber: <uid>
+gidNumber: <gid>
+shadowMax: 99999
+shadowWarning: 7
+shadowLastChange: 0
+homeDirectory: <home>
+loginShell: <shell>
+gecos: <user>
+description: User account
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldapmodsudo.template.cgcs b/meta-stx/recipes-support/ldapscripts/files/ldapmodsudo.template.cgcs
new file mode 100755 (executable)
index 0000000..c79705f
--- /dev/null
@@ -0,0 +1,4 @@
+dn: cn=<user>,ou=SUDOers,<suffix>
+changeType: modify
+<action>: <field>
+<field>: <value>
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldapmoduser.template.cgcs b/meta-stx/recipes-support/ldapscripts/files/ldapmoduser.template.cgcs
new file mode 100755 (executable)
index 0000000..f192024
--- /dev/null
@@ -0,0 +1,4 @@
+dn: uid=<user>,<usuffix>,<suffix>
+changeType: modify
+<action>: <field>
+<field>: <value>
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldapscripts.conf.cgcs b/meta-stx/recipes-support/ldapscripts/files/ldapscripts.conf.cgcs
new file mode 100755 (executable)
index 0000000..9350dd3
--- /dev/null
@@ -0,0 +1,152 @@
+#  Copyright (C) 2005 Ganaël LAPLANCHE - Linagora
+#  Copyright (C) 2006-2013 Ganaël LAPLANCHE
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License
+#  as published by the Free Software Foundation; either version 2
+#  of the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software
+#  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+#  USA.
+
+# LDAP server
+SERVER="ldap://controller"
+
+# Suffixes
+SUFFIX="dc=cgcs,dc=local" # Global suffix
+GSUFFIX="ou=Group"        # Groups ou (just under $SUFFIX)
+USUFFIX="ou=People"       # Users ou (just under $SUFFIX)
+MSUFFIX="ou=Machines"     # Machines ou (just under $SUFFIX)
+
+# Authentication type
+# If empty, use simple authentication
+# Else, use the value as an SASL authentication mechanism
+SASLAUTH=""
+#SASLAUTH="GSSAPI"
+
+# Simple authentication parameters
+# The following BIND* parameters are ignored if SASLAUTH is set
+BINDDN="cn=ldapadmin,dc=cgcs,dc=local"
+# The following file contains the raw password of the BINDDN
+# Create it with something like : echo -n 'secret' > $BINDPWDFILE
+# WARNING !!!! Be careful not to make this file world-readable
+BINDPWDFILE="/usr/local/etc/ldapscripts/ldapscripts.passwd"
+# For older versions of OpenLDAP, it is still possible to use
+# unsecure command-line passwords by defining the following option
+# AND commenting the previous one (BINDPWDFILE takes precedence)
+#BINDPWD="secret"
+
+# Start with these IDs *if no entry found in LDAP*
+GIDSTART="10000" # Group ID
+UIDSTART="10000" # User ID
+MIDSTART="20000" # Machine ID
+
+# Group membership management
+# ObjectCLass used for groups
+# Possible values : posixGroup, groupOfNames, groupOfUniqueNames (case-sensitive !)
+# Warning : when using groupOf*, be sure to be compliant with RFC 2307bis (AUXILIARY posixGroup).
+# Also, do not mix posixGroup and groupOf* entries up in you directory as, within RFC 2307bis,
+# the former is a subset of the latter. The ldapscripts wouldn't cope well with this configuration.
+GCLASS="posixGroup"   # Leave "posixGroup" here if not sure !
+# When using  groupOfNames or groupOfUniqueNames, creating a group requires an initial
+# member. Specify it below, you will be able to remove it once groups are populated.
+#GDUMMYMEMBER="uid=dummy,$USUFFIX,$SUFFIX"
+
+# User properties
+USHELL="/bin/sh"
+UHOMES="/home/%u"     # You may use %u for username here
+CREATEHOMES="no"      # Create home directories and set rights ?
+HOMESKEL="/etc/skel"  # Directory where the skeleton files are located. Ignored if undefined or nonexistant.
+HOMEPERMS="700"       # Default permissions for home directories
+
+# User passwords generation
+# Command-line used to generate a password for added users.
+# You may use %u for username here ; special value "<ask>" will ask for a password interactively
+# WARNING    !!!! This is evaluated, everything specified here will be run !
+# WARNING(2) !!!! Some systems (Linux) use a blocking /dev/random (waiting for enough entropy).
+#                 In this case, consider using /dev/urandom instead.
+#PASSWORDGEN="cat /dev/random | LC_ALL=C tr -dc 'a-zA-Z0-9' | head -c8"
+#PASSWORDGEN="pwgen"
+#PASSWORDGEN="echo changeme"
+PASSWORDGEN="echo %u"
+#PASSWORDGEN="<ask>"
+
+# User passwords recording
+# you can keep trace of generated passwords setting PASSWORDFILE and RECORDPASSWORDS
+# (useful when performing a massive creation / net rpc vampire)
+# WARNING !!!! DO NOT FORGET TO DELETE THE GENERATED FILE WHEN DONE !
+# WARNING !!!! DO NOT FORGET TO TURN OFF RECORDING WHEN DONE !
+RECORDPASSWORDS="no"
+PASSWORDFILE="/var/log/ldapscripts_passwd.log"
+
+# Where to log
+LOGFILE="/var/log/ldapscripts.log"
+
+# Temporary folder
+TMPDIR="/tmp"
+
+# Various binaries used within the scripts
+# Warning : they also use uuencode, date, grep, sed, cut, which... 
+# Please check they are installed before using these scripts
+# Note that many of them should come with your OS
+
+# OpenLDAP client commands
+LDAPSEARCHBIN="/usr/bin/ldapsearch"
+LDAPADDBIN="/usr/bin/ldapadd"
+LDAPDELETEBIN="/usr/bin/ldapdelete"
+LDAPMODIFYBIN="/usr/bin/ldapmodify"
+LDAPMODRDNBIN="/usr/bin/ldapmodrdn"
+LDAPPASSWDBIN="/usr/bin/ldappasswd"
+
+# OpenLDAP client common additional options
+# This allows for adding more configuration options to the OpenLDAP clients, e.g. '-ZZ' to enforce TLS
+#LDAPBINOPTS="-ZZ"
+
+# OpenLDAP ldapsearch-specific additional options
+# The following option disables long-line wrapping (which makes the scripts bug
+# when handling long lines). The option was introduced in OpenLDAP 2.4.24, so
+# comment it if you are using OpenLDAP < 2.4.24.
+LDAPSEARCHOPTS="-o ldif-wrap=no"
+# And here is an example to activate paged results
+#LDAPSEARCHOPTS="-E pr=500/noprompt"
+
+# Character set conversion : $ICONVCHAR <-> UTF-8
+# Comment ICONVBIN to disable UTF-8 conversion
+# ICONVBIN="/usr/bin/iconv"
+# ICONVCHAR=""
+
+# Base64 decoding
+# Comment UUDECODEBIN to disable Base64 decoding
+#UUDECODEBIN="/usr/bin/uudecode"
+
+# Getent command to use - choose the ones used
+# on your system. Leave blank or comment for auto-guess.
+# GNU/Linux
+GETENTPWCMD="getent passwd"
+GETENTGRCMD="getent group"
+# FreeBSD
+#GETENTPWCMD="pw usershow"
+#GETENTGRCMD="pw groupshow"
+# Auto
+#GETENTPWCMD=""
+#GETENTGRCMD=""
+
+# You can specify custom LDIF templates here
+# Leave empty to use default templates
+# See *.template.sample for default templates
+#GTEMPLATE="/path/to/ldapaddgroup.template"
+#UTEMPLATE="/path/to/ldapadduser.template"
+#MTEMPLATE="/path/to/ldapaddmachine.template"
+GTEMPLATE="/usr/local/etc/ldapscripts/ldapaddgroup.template.cgcs"
+UTEMPLATE="/usr/local/etc/ldapscripts/ldapadduser.template.cgcs"
+UMTEMPLATE="/usr/local/etc/ldapscripts/ldapmoduser.template.cgcs"
+STEMPLATE="/usr/local/etc/ldapscripts/ldapaddsudo.template.cgcs"
+SMTEMPLATE="/usr/local/etc/ldapscripts/ldapmodsudo.template.cgcs"
+MTEMPLATE=""
diff --git a/meta-stx/recipes-support/ldapscripts/files/ldapscripts.passwd b/meta-stx/recipes-support/ldapscripts/files/ldapscripts.passwd
new file mode 100644 (file)
index 0000000..385336f
--- /dev/null
@@ -0,0 +1 @@
+_LDAPADMIN_PW_
diff --git a/meta-stx/recipes-support/ldapscripts/files/log_timestamp.patch b/meta-stx/recipes-support/ldapscripts/files/log_timestamp.patch
new file mode 100644 (file)
index 0000000..a521d0e
--- /dev/null
@@ -0,0 +1,15 @@
+---
+ lib/runtime |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/runtime
++++ b/lib/runtime
+@@ -863,7 +863,7 @@ fi
+ # Log command
+ if [ "$LOGTOFILE" = "yes" ]
+ then
+-  log_to_file "$(date '+%b %d %H:%M:%S') $(uname -n | sed 's|\..*$||') ldapscripts: $(basename "$0")($USER): $0 $*"
++  log_to_file "$(date '+%FT%T') $(uname -n | sed 's|\..*$||') ldapscripts: $(basename "$0")($USER): $0 $*"
+ fi
+ if [ "$LOGTOSYSLOG" = "yes" ]
+ then
diff --git a/meta-stx/recipes-support/ldapscripts/files/sudo-delete-support.patch b/meta-stx/recipes-support/ldapscripts/files/sudo-delete-support.patch
new file mode 100644 (file)
index 0000000..ed0d48e
--- /dev/null
@@ -0,0 +1,352 @@
+---
+ Makefile                  |    4 +--
+ lib/runtime               |   15 ++++++++++++
+ man/man1/ldapaddsudo.1    |   54 +++++++++++++++++++++++++++++++++++++++++++
+ man/man1/ldapdeletesudo.1 |   46 +++++++++++++++++++++++++++++++++++++
+ man/man1/ldapdeleteuser.1 |    5 ++--
+ man/man1/ldapmodifysudo.1 |   57 ++++++++++++++++++++++++++++++++++++++++++++++
+ man/man1/ldapmodifyuser.1 |   15 ++++++++---
+ sbin/ldapdeletesudo       |   38 ++++++++++++++++++++++++++++++
+ sbin/ldapdeleteuser       |    5 ++++
+ sbin/ldapmodifysudo       |    2 -
+ 10 files changed, 232 insertions(+), 9 deletions(-)
+
+--- a/sbin/ldapdeleteuser
++++ b/sbin/ldapdeleteuser
+@@ -46,6 +46,11 @@ _UDN="$_ENTRY"
+ # Delete entry
+ _ldapdelete "$_UDN" || end_die "Error deleting user $_UDN from LDAP"
+
++
++# Optionally, delete the sudoer entry if it exists
++_ldapdeletesudo $1
++[ $? -eq 2 ] && end_die "Found sudoEntry for user $_UDN but unable to delete"
++
+ # Finally, delete this user from all his secondary groups
+ case $GCLASS in
+   posixGroup)
+--- a/sbin/ldapmodifysudo
++++ b/sbin/ldapmodifysudo
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+-#  ldapmodifyuser : modifies a sudo entry in an LDAP directory
++#  ldapmodifysudo : modifies a sudo entry in an LDAP directory
+ #  Copyright (C) 2007-2013 Ganaël LAPLANCHE
+ #  Copyright (C) 2014 Stephen Crooks
+--- /dev/null
++++ b/sbin/ldapdeletesudo
+@@ -0,0 +1,38 @@
++#!/bin/sh
++
++#  ldapdeletesudo : deletes a sudoRole from LDAP
++
++#  Copyright (C) 2005 Ganaël LAPLANCHE - Linagora
++#  Copyright (C) 2006-2013 Ganaël LAPLANCHE
++#  Copyright (c) 2015 Wind River Systems, Inc.
++#
++#  This program is free software; you can redistribute it and/or
++#  modify it under the terms of the GNU General Public License
++#  as published by the Free Software Foundation; either version 2
++#  of the License, or (at your option) any later version.
++#
++#  This program is distributed in the hope that it will be useful,
++#  but WITHOUT ANY WARRANTY; without even the implied warranty of
++#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++#  GNU General Public License for more details.
++#
++#  You should have received a copy of the GNU General Public License
++#  along with this program; if not, write to the Free Software
++#  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++#  USA.
++
++if [ -z "$1" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]
++then
++  echo "Usage : $0 <username>"
++  exit 1
++fi
++
++# Source runtime file
++_RUNTIMEFILE="/usr/lib/ldapscripts/runtime"
++. "$_RUNTIMEFILE"
++
++# Username = first argument
++_ldapdeletesudo "$1"
++[ $? -eq 0 ] || end_die "Unable to locate or delete sudoUser entry for $1"
++
++end_ok "Successfully deleted sudoUser entry for $1 from LDAP"
+--- a/man/man1/ldapmodifyuser.1
++++ b/man/man1/ldapmodifyuser.1
+@@ -1,4 +1,5 @@
+ .\" Copyright (C) 2007-2017 Ganaël LAPLANCHE
++.\" Copyright (c) 2015 Wind River Systems, Inc.
+ .\"
+ .\" This program is free software; you can redistribute it and/or
+ .\" modify it under the terms of the GNU General Public License
+@@ -19,14 +20,14 @@
+ .\" ganael.laplanche@martymac.org
+ .\" http://contribs.martymac.org
+ .\"
+-.TH ldapmodifyuser 1 "August 22, 2007"
++.TH ldapmodifyuser 1 "December 8, 2015"
+ .SH NAME
+ ldapmodifyuser \- modifies a POSIX user account in LDAP interactively
+ .SH SYNOPSIS
+ .B ldapmodifyuser
+-.RB <username | uid>
++.RB <username | uid> [<add | replace | delete> <field> <value>]
+  
+ .SH DESCRIPTION
+ ldapmodifyuser first looks for the right entry to modify. Once found, the entry is presented and you
+@@ -34,13 +35,18 @@ are prompted to enter LDIF data to modif
+ The DN of the entry being modified is already specified : just begin with a changeType attribute or any
+ other one(s) of your choice (in this case, the defaut changeType is 'modify').
++Alternatively, if an optional "action" argument <add | replace | delete> is given, followed by a
++field - value pair then user will not be interactively prompted.
++
+ .SH OPTIONS
+ .TP
+-.B <username | uid>
++.B <username | uid> [<add | replace | delete> <field> <value>]
+ The name or uid of the user to modify.
++The optional "action" pertaining to this user entry.
++The field - value pair on which the action needs to be undertaken.
+ .SH "SEE ALSO"
+-ldapmodifygroup(1), ldapmodifymachine(1), ldapscripts(5).
++ldapmodifygroup(1), ldapmodifymachine(1), ldapmodifysudo(1), ldapscripts(5).
+ .SH AVAILABILITY
+ The ldapscripts are provided under the GNU General Public License v2 (see COPYING for more details).
+--- a/man/man1/ldapdeleteuser.1
++++ b/man/man1/ldapdeleteuser.1
+@@ -1,4 +1,5 @@
+ .\" Copyright (C) 2006-2017 Ganaël LAPLANCHE
++.\" Copyright (c) 2015 Wind River Systems, Inc.
+ .\"
+ .\" This program is free software; you can redistribute it and/or
+ .\" modify it under the terms of the GNU General Public License
+@@ -19,10 +20,10 @@
+ .\" ganael.laplanche@martymac.org
+ .\" http://contribs.martymac.org
+ .\"
+-.TH ldapdeleteuser 1 "January 1, 2006"
++.TH ldapdeleteuser 1 "December 8, 2015"
+ .SH NAME
+-ldapdeleteuser \- deletes a POSIX user account from LDAP.
++ldapdeleteuser \- deletes a POSIX user account, and its sudo entry, from LDAP.
+ .SH SYNOPSIS
+ .B ldapdeleteuser
+--- /dev/null
++++ b/man/man1/ldapaddsudo.1
+@@ -0,0 +1,54 @@
++.\" Copyright (C) 2006-2013 Ganaël LAPLANCHE
++.\" Copyright (c) 2015 Wind River Systems, Inc.
++.\"
++.\" This program is free software; you can redistribute it and/or
++.\" modify it under the terms of the GNU General Public License
++.\" as published by the Free Software Foundation; either version 2
++.\" of the License, or (at your option) any later version.
++.\"
++.\" This program is distributed in the hope that it will be useful,
++.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
++.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++.\" GNU General Public License for more details.
++.\"
++.\" You should have received a copy of the GNU General Public License
++.\" along with this program; if not, write to the Free Software
++.\" Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++.\" USA.
++.\"
++.\" Ganael Laplanche
++.\" ganael.laplanche@martymac.org
++.\" http://contribs.martymac.org
++.\"
++.TH ldapaddsudo 1 "December 8, 2015"
++
++.SH NAME
++ldapaddsudo \- adds a POSIX user account to the sudoer list in LDAP.
++
++.SH SYNOPSIS
++.B ldapaddsudo
++.RB <username>
++.RB <groupname | gid>
++.RB [uid]
++ 
++.SH OPTIONS
++.TP
++.B <username>
++The name of the user to add.
++.TP
++.B <groupname | gid>
++The group name or the gid of the user to add.
++.TP
++.B [uid]
++The uid of the user to add. Automatically computed if not specified.
++
++.SH "SEE ALSO"
++ldapadduser(1), ldapaddgroup(1), ldapaddmachine(1), ldapscripts(5).
++
++.SH AVAILABILITY
++The ldapscripts are provided under the GNU General Public License v2 (see COPYING for more details).
++The latest version of the ldapscripts is available on :
++.B http://contribs.martymac.org
++
++.SH BUGS
++No bug known.
+--- /dev/null
++++ b/man/man1/ldapmodifysudo.1
+@@ -0,0 +1,57 @@
++.\" Copyright (C) 2007-2013 Ganaël LAPLANCHE
++.\" Copyright (c) 2015 Wind River Systems, Inc.
++.\"
++.\" This program is free software; you can redistribute it and/or
++.\" modify it under the terms of the GNU General Public License
++.\" as published by the Free Software Foundation; either version 2
++.\" of the License, or (at your option) any later version.
++.\"
++.\" This program is distributed in the hope that it will be useful,
++.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
++.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++.\" GNU General Public License for more details.
++.\"
++.\" You should have received a copy of the GNU General Public License
++.\" along with this program; if not, write to the Free Software
++.\" Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++.\" USA.
++.\"
++.\" Ganael Laplanche
++.\" ganael.laplanche@martymac.org
++.\" http://contribs.martymac.org
++.\"
++.TH ldapmodifysudo 1 "December 8, 2015"
++
++.SH NAME
++ldapmodifysudo \- modifies the sudo entry of a POSIX user account in LDAP interactively
++
++.SH SYNOPSIS
++.B ldapmodifysudo
++.RB <username | uid> [<add | replace | delete> <field> <value>]
++ 
++.SH DESCRIPTION
++ldapmodifysudo first looks for the right entry to modify. Once found, the entry is presented and you
++are prompted to enter LDIF data to modify it as you would do using a standard LDIF file and ldapmodify(1).
++The DN of the entry being modified is already specified : just begin with a changeType attribute or any
++other one(s) of your choice (in this case, the defaut changeType is 'modify').
++
++Alternatively, if an optional "action" argument <add | replace | delete> is given, followed by a
++field - value pair then user will not be interactively prompted.
++
++.SH OPTIONS
++.TP
++.B <username | uid> [<add | replace | delete> <field> <value>]
++The name or uid of the user to modify.
++The optional "action" pertaining to this user entry.
++The field - value pair on which the action needs to be undertaken.
++
++.SH "SEE ALSO"
++ldapmodifygroup(1), ldapmodifymachine(1), ldapmodifyuser(1), ldapscripts(5).
++
++.SH AVAILABILITY
++The ldapscripts are provided under the GNU General Public License v2 (see COPYING for more details).
++The latest version of the ldapscripts is available on :
++.B http://contribs.martymac.org
++
++.SH BUGS
++No bug known.
+--- /dev/null
++++ b/man/man1/ldapdeletesudo.1
+@@ -0,0 +1,46 @@
++.\" Copyright (C) 2006-2013 Ganaël LAPLANCHE
++.\" Copyright (c) 2015 Wind River Systems, Inc.
++.\"
++.\" This program is free software; you can redistribute it and/or
++.\" modify it under the terms of the GNU General Public License
++.\" as published by the Free Software Foundation; either version 2
++.\" of the License, or (at your option) any later version.
++.\"
++.\" This program is distributed in the hope that it will be useful,
++.\" but WITHOUT ANY WARRANTY; without even the implied warranty of
++.\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++.\" GNU General Public License for more details.
++.\"
++.\" You should have received a copy of the GNU General Public License
++.\" along with this program; if not, write to the Free Software
++.\" Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++.\" USA.
++.\"
++.\" Ganael Laplanche
++.\" ganael.laplanche@martymac.org
++.\" http://contribs.martymac.org
++.\"
++.TH ldapdeletesudo 1 "December 8, 2015"
++
++.SH NAME
++ldapdeletesudo \- deletes a sudo entry, for a POSIX user account, in LDAP
++
++.SH SYNOPSIS
++.B ldapdeletesudo
++.RB <username | uid>
++ 
++.SH OPTIONS
++.TP
++.B <username | uid>
++The name or uid of the user to delete.
++
++.SH "SEE ALSO"
++ldapdeletegroup(1), ldapdeletemachine(1), ldapdeleteuser(1), ldapscripts(5).
++
++.SH AVAILABILITY
++The ldapscripts are provided under the GNU General Public License v2 (see COPYING for more details).
++The latest version of the ldapscripts is available on :
++.B http://contribs.martymac.org
++
++.SH BUGS
++No bug known.
+--- a/Makefile
++++ b/Makefile
+@@ -41,12 +41,12 @@ SBINFILES =        ldapdeletemachine ldapmodifygroup ldapsetpasswd lsldap ldapadduser |
+                       ldapdeleteuser ldapsetprimarygroup ldapfinger ldapid ldapgid ldapmodifymachine \
+                       ldaprenamegroup ldapaddgroup ldapaddusertogroup ldapdeleteuserfromgroup \
+                       ldapinit ldapmodifyuser ldaprenamemachine ldapaddmachine ldapdeletegroup \
+-                      ldaprenameuser ldapmodifysudo
++                      ldaprenameuser ldapmodifysudo ldapdeletesudo
+ MAN1FILES =   ldapdeletemachine.1 ldapmodifymachine.1 ldaprenamemachine.1 ldapadduser.1 \
+                       ldapdeleteuserfromgroup.1 ldapfinger.1 ldapid.1 ldapgid.1 ldapmodifyuser.1 lsldap.1 \
+                       ldapaddusertogroup.1 ldaprenameuser.1 ldapinit.1 ldapsetpasswd.1 ldapaddgroup.1 \
+                       ldapdeletegroup.1 ldapsetprimarygroup.1 ldapmodifygroup.1 ldaprenamegroup.1 \
+-                      ldapaddmachine.1 ldapdeleteuser.1
++                      ldapaddmachine.1 ldapdeleteuser.1 ldapaddsudo.1 ldapmodifysudo.1 ldapdeletesudo.1
+ MAN5FILES = ldapscripts.5
+ TMPLFILES = ldapaddgroup.template.sample ldapaddmachine.template.sample \
+                       ldapadduser.template.sample
+--- a/lib/runtime
++++ b/lib/runtime
+@@ -294,6 +294,21 @@ _ldapdelete () {
+   fi
+ }
++# Deletes a sudoUser entry in the LDAP directory
++# Input : POSIX username whose sudo entry to delete ($1)
++# Output: 0 on successful delete
++#         1 on being unable to find sudoUser
++#         2 on being unable to delete found sudoUser entry
++_ldapdeletesudo () {
++  [ -z "$1" ] && end_die "_ldapdeletesudo : missing argument"
++  # Find the entry
++  _findentry "$SUFFIX" "(&(objectClass=sudoRole)(|(cn=$1)(sudoUser=$1)))"
++  [ -z "$_ENTRY" ] && return 1
++
++  # Now delete that entry
++  _ldapdelete "$_ENTRY" || return 2
++}
++
+ # Extracts LDIF information from $0 (the current script itself)
+ # selecting lines beginning with $1 occurrences of '#'
+ # Input : depth ($1)
diff --git a/meta-stx/recipes-support/ldapscripts/files/sudo-support.patch b/meta-stx/recipes-support/ldapscripts/files/sudo-support.patch
new file mode 100644 (file)
index 0000000..76fff94
--- /dev/null
@@ -0,0 +1,289 @@
+Index: ldapscripts-2.0.8/sbin/ldapaddsudo
+===================================================================
+--- /dev/null
++++ ldapscripts-2.0.8/sbin/ldapaddsudo
+@@ -0,0 +1,63 @@
++#!/bin/sh
++
++#  ldapaddsudo : adds a sudoRole to LDAP
++
++#  Copyright (C) 2005 Ganaël LAPLANCHE - Linagora
++#  Copyright (C) 2006-2013 Ganaël LAPLANCHE
++#  Copyright (c) 2014 Wind River Systems, Inc.
++#
++#  This program is free software; you can redistribute it and/or
++#  modify it under the terms of the GNU General Public License
++#  as published by the Free Software Foundation; either version 2
++#  of the License, or (at your option) any later version.
++#
++#  This program is distributed in the hope that it will be useful,
++#  but WITHOUT ANY WARRANTY; without even the implied warranty of
++#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++#  GNU General Public License for more details.
++#
++#  You should have received a copy of the GNU General Public License
++#  along with this program; if not, write to the Free Software
++#  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++#  USA.
++
++if [ -z "$1" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]
++then
++  echo "Usage : $0 <username>"
++  exit 1
++fi
++
++# Source runtime file
++_RUNTIMEFILE="/usr/lib/ldapscripts/runtime"
++. "$_RUNTIMEFILE"
++
++# Username = first argument
++_USER="$1"
++
++# Use template if necessary
++if [ -n "$STEMPLATE" ] && [ -r "$STEMPLATE" ]
++then
++  _getldif="cat $STEMPLATE"
++else
++  _getldif="_extractldif 2"
++fi
++
++# Add sudo entry to LDAP
++$_getldif | _filterldif | _askattrs | _utf8encode | _ldapadd
++
++[ $? -eq 0 ] || end_die "Error adding user $_USER to LDAP"
++echo_log "Successfully added sudo access for user $_USER to LDAP"
++
++end_ok
++
++# Ldif template ##################################
++##dn: cn=<user>,ou=SUDOers,<usuffix>,<suffix>
++##objectClass: top
++##objectClass: sudoRole
++##cn: <user>
++##sudoUser: <user>
++##sudoHost: ALL
++##sudoRunAsUser: ALL
++##sudoCommand: ALL
++###sudoOrder: <default: 0, if multiple entries match, this entry with the highest sudoOrder is used>
++###sudoOption: <specify other sudo specific attributes here>
+Index: ldapscripts-2.0.8/sbin/ldapmodifyuser
+===================================================================
+--- ldapscripts-2.0.8.orig/sbin/ldapmodifyuser
++++ ldapscripts-2.0.8/sbin/ldapmodifyuser
+@@ -19,9 +19,11 @@
+ #  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
+ #  USA.
+-if [ -z "$1" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]
++if [ "$1" = "-h" ] || [ "$1" = "--help" ] || \
++   [[ "$2" != "add" && "$2" != "replace" && "$2" != "delete" ]] || \
++   [ "$#" -ne 4 ]
+ then
+-  echo "Usage : $0 <username | uid>"
++  echo "Usage : $0 <username | uid> [<add | replace | delete> <field> <value>]"
+   exit 1
+ fi
+@@ -33,21 +35,48 @@ _RUNTIMEFILE="/usr/lib/ldapscripts/runti
+ _findentry "$USUFFIX,$SUFFIX" "(&(objectClass=posixAccount)(|(uid=$1)(uidNumber=$1)))"
+ [ -z "$_ENTRY" ] && end_die "User $1 not found in LDAP"
+-# Allocate and create temp file
+-mktempf
+-echo "dn: $_ENTRY" > "$_TMPFILE" || end_die "Error writing to temporary file $_TMPFILE"
+-
+-# Display entry
+-echo "# About to modify the following entry :"
+-_ldapsearch "$_ENTRY"
+-
+-# Edit entry
+-echo "# Enter your modifications here, end with CTRL-D."
+-echo "dn: $_ENTRY"
+-cat >> "$_TMPFILE" || end_die "Error writing to temporary file $_TMPFILE"
++# Username = first argument
++_USER="$1"
++
++if [ "$#" -eq 1 ]
++then
++  # Allocate and create temp file
++  mktempf
++  echo "dn: $_ENTRY" > "$_TMPFILE" || end_die "Error writing to temporary file $_TMPFILE"
++
++  # Display entry
++  echo "# About to modify the following entry :"
++  _ldapsearch "$_ENTRY"
++
++  # Edit entry
++  echo "# Enter your modifications here, end with CTRL-D."
++  echo "dn: $_ENTRY"
++  cat >> "$_TMPFILE" || end_die "Error writing to temporary file $_TMPFILE"
++
++  # Send modifications
++  cat "$_TMPFILE" | _utf8encode | _ldapmodify
++else
++  # Action = second argument
++  _ACTION="$2"
++
++  # Field = third argument
++  _FIELD="$3"
++
++  # Value = fourth argument
++  _VALUE="$4"
++
++  # Use template if necessary
++  if [ -n "$UMTEMPLATE" ] && [ -r "$UMTEMPLATE" ]
++  then
++    _getldif="cat $UMTEMPLATE"
++  else
++    _getldif="_extractldif 2"
++  fi
++
++  # Modify user in LDAP
++  $_getldif | _filterldif | _utf8encode | _ldapmodify
++fi
+-# Send modifications
+-cat "$_TMPFILE" | _utf8encode | _ldapmodify
+ if [ $? -ne 0 ]
+ then
+   reltempf
+@@ -55,3 +84,9 @@ then
+ fi
+ reltempf
+ end_ok "Successfully modified user entry $_ENTRY in LDAP"
++
++# Ldif template ##################################
++##dn: uid=<user>,<usuffix>,<suffix>
++##changeType: modify
++##<action>: <field>
++##<field>: <value>
+Index: ldapscripts-2.0.8/lib/runtime
+===================================================================
+--- ldapscripts-2.0.8.orig/lib/runtime
++++ ldapscripts-2.0.8/lib/runtime
+@@ -344,6 +344,9 @@ s|<msuffix>|$MSUFFIX|g
+ s|<_msuffix>|$_MSUFFIX|g
+ s|<gsuffix>|$GSUFFIX|g
+ s|<_gsuffix>|$_GSUFFIX|g
++s|<action>|$_ACTION|g
++s|<field>|$_FIELD|g
++s|<value>|$_VALUE|g
+ EOF
+   # Use it
+Index: ldapscripts-2.0.8/Makefile
+===================================================================
+--- ldapscripts-2.0.8.orig/Makefile
++++ ldapscripts-2.0.8/Makefile
+@@ -37,11 +37,11 @@ LIBDIR = $(PREFIX)/lib/$(NAME)
+ RUNFILE = runtime
+ ETCFILE = ldapscripts.conf
+ PWDFILE = ldapscripts.passwd
+-SBINFILES =   ldapdeletemachine ldapmodifygroup ldapsetpasswd lsldap ldapadduser \
++SBINFILES =   ldapdeletemachine ldapmodifygroup ldapsetpasswd lsldap ldapadduser ldapaddsudo \
+                       ldapdeleteuser ldapsetprimarygroup ldapfinger ldapid ldapgid ldapmodifymachine \
+                       ldaprenamegroup ldapaddgroup ldapaddusertogroup ldapdeleteuserfromgroup \
+                       ldapinit ldapmodifyuser ldaprenamemachine ldapaddmachine ldapdeletegroup \
+-                      ldaprenameuser
++                      ldaprenameuser ldapmodifysudo
+ MAN1FILES =   ldapdeletemachine.1 ldapmodifymachine.1 ldaprenamemachine.1 ldapadduser.1 \
+                       ldapdeleteuserfromgroup.1 ldapfinger.1 ldapid.1 ldapgid.1 ldapmodifyuser.1 lsldap.1 \
+                       ldapaddusertogroup.1 ldaprenameuser.1 ldapinit.1 ldapsetpasswd.1 ldapaddgroup.1 \
+Index: ldapscripts-2.0.8/sbin/ldapmodifysudo
+===================================================================
+--- /dev/null
++++ ldapscripts-2.0.8/sbin/ldapmodifysudo
+@@ -0,0 +1,93 @@
++#!/bin/sh
++
++#  ldapmodifyuser : modifies a sudo entry in an LDAP directory
++
++#  Copyright (C) 2007-2013 Ganaël LAPLANCHE
++#  Copyright (C) 2014 Stephen Crooks
++#
++#  This program is free software; you can redistribute it and/or
++#  modify it under the terms of the GNU General Public License
++#  as published by the Free Software Foundation; either version 2
++#  of the License, or (at your option) any later version.
++#
++#  This program is distributed in the hope that it will be useful,
++#  but WITHOUT ANY WARRANTY; without even the implied warranty of
++#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++#  GNU General Public License for more details.
++#
++#  You should have received a copy of the GNU General Public License
++#  along with this program; if not, write to the Free Software
++#  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
++#  USA.
++
++if [ "$1" = "-h" ] || [ "$1" = "--help" ] || \
++   [[ "$2" != "add" && "$2" != "replace" && "$2" != "delete" ]] || \
++   [ "$#" -ne 4 ]
++then
++  echo "Usage : $0 <username | uid> [<add | replace | delete> <field> <value>]"
++  exit 1
++fi
++
++# Source runtime file
++_RUNTIMEFILE="/usr/lib/ldapscripts/runtime"
++. "$_RUNTIMEFILE"
++
++# Find username : $1 must exist in LDAP !
++_findentry "$SUFFIX" "(&(objectClass=sudoRole)(|(cn=$1)(sudoUser=$1)))"
++[ -z "$_ENTRY" ] && end_die "Sudo user $1 not found in LDAP"
++
++# Username = first argument
++_USER="$1"
++
++if [ "$#" -eq 1 ]
++then
++  # Allocate and create temp file
++  mktempf
++  echo "dn: $_ENTRY" > "$_TMPFILE" || end_die "Error writing to temporary file $_TMPFILE"
++
++  # Display entry
++  echo "# About to modify the following entry :"
++  _ldapsearch "$_ENTRY"
++
++  # Edit entry
++  echo "# Enter your modifications here, end with CTRL-D."
++  echo "dn: $_ENTRY"
++  cat >> "$_TMPFILE" || end_die "Error writing to temporary file $_TMPFILE"
++
++  # Send modifications
++  cat "$_TMPFILE" | _utf8encode | _ldapmodify
++else
++  # Action = second argument
++  _ACTION="$2"
++
++  # Field = third argument
++  _FIELD="$3"
++
++  # Value = fourth argument
++  _VALUE="$4"
++
++  # Use template if necessary
++  if [ -n "$SMTEMPLATE" ] && [ -r "$SMTEMPLATE" ]
++  then
++    _getldif="cat $SMTEMPLATE"
++  else
++    _getldif="_extractldif 2"
++  fi
++
++  # Modify user in LDAP
++  $_getldif | _filterldif | _utf8encode | _ldapmodify
++fi
++
++if [ $? -ne 0 ]
++then
++  reltempf
++  end_die "Error modifying sudo entry $_ENTRY in LDAP"
++fi
++reltempf
++end_ok "Successfully modified sudo entry $_ENTRY in LDAP"
++
++# Ldif template ##################################
++##dn: cn=<user>,ou=SUDOers,<suffix>
++##changeType: modify
++##<action>: <field>
++##<field>: <value>
diff --git a/meta-stx/recipes-support/ldapscripts/ldapscripts_2.0.8.bb b/meta-stx/recipes-support/ldapscripts/ldapscripts_2.0.8.bb
new file mode 100644 (file)
index 0000000..b73540b
--- /dev/null
@@ -0,0 +1,93 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = " \
+The ldapscripts are originally designed to be used within Samba 3.x's \
+smb.conf file. They allow to manipulate POSIX entries for users, groups \
+and machines in an LDAP directory. They are written in shell and need ldap \
+client commands to work correctly (ldapadd, ldapdelete, ldapmodify, \
+ldapsearch). Other scripts also are provided as simple tools to (manually) \
+query your LDAP directory : ldapfinger, ldapid, lsldap (...). \
+ \
+They are designed to be used under GNU/Linux or FreeBSD (any other \
+recent UNIX-like should also work) and require several binaries that should \
+come with your OS (uuencode, getent/pw, date, grep, sed, cut...). \
+ \
+Latest version available on http://contribs.martymac.org \
+"
+
+SUMMARY = "Shell scripts to manage POSIX accounts in LDAP"
+
+SECTION = "base"
+LICENSE = "GPLv2"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=393a5ca445f6965873eca0259a17f833"
+
+SRC_URI = "https://downloads.sourceforge.net/project/ldapscripts/ldapscripts/ldapscripts-2.0.8/ldapscripts-2.0.8.tgz"
+SRC_URI[md5sum] = "99a7222215eaea2c8bc790d0437f22ea"
+SRC_URI[sha256sum] = "7db3848501f257a10417c9bcfc0b70b76d0a8093eb993f2354925e156c3419ff"
+
+SRC_URI += " file://sudo-support.patch \
+             file://sudo-delete-support.patch \
+             file://log_timestamp.patch \
+             file://ldap-user-setup-support.patch \
+             file://allow-anonymous-bind-for-ldap-search.patch \
+             file://ldapscripts.conf.cgcs \
+       file://ldapadduser.template.cgcs \
+       file://ldapaddgroup.template.cgcs \
+       file://ldapmoduser.template.cgcs \
+       file://ldapaddsudo.template.cgcs \
+       file://ldapmodsudo.template.cgcs \
+       file://ldapscripts.passwd \
+"
+
+SOURCE1 = "${WORKDIR}/ldapscripts.conf.cgcs"
+SOURCE2 = "${WORKDIR}/ldapadduser.template.cgcs"
+SOURCE3 = "${WORKDIR}/ldapaddgroup.template.cgcs"
+SOURCE4 = "${WORKDIR}/ldapmoduser.template.cgcs"
+SOURCE5 = "${WORKDIR}/ldapaddsudo.template.cgcs"
+SOURCE6 = "${WORKDIR}/ldapmodsudo.template.cgcs"
+SOURCE7 = "${WORKDIR}/ldapscripts.passwd"
+
+do_configure () {
+       cd ${S}
+       oe_runmake -e configure
+}
+
+do_compile () {
+       :
+}
+
+do_install () {
+       cd ${S}
+       oe_runmake -e DESTDIR=${D} SBINDIR=${sbindir} \
+               MANDIR=${mandir} ETCDIR=${sysconfdir}/ldapscripts \
+               LIBDIR=${libdir} install
+
+       rm -Rf ${D}${mandir}/*
+       rm -f ${D}${sbindir}/*machine*
+	rm -f ${D}${sysconfdir}/ldapscripts/ldapaddmachine.template.sample
+       install -m 644 ${SOURCE1} ${D}${sysconfdir}/ldapscripts/ldapscripts.conf
+       install -m 644 ${SOURCE2} ${D}${sysconfdir}/ldapscripts/ldapadduser.template.cgcs
+       install -m 644 ${SOURCE3} ${D}${sysconfdir}/ldapscripts/ldapaddgroup.template.cgcs
+       install -m 644 ${SOURCE4} ${D}${sysconfdir}/ldapscripts/ldapmoduser.template.cgcs
+       install -m 644 ${SOURCE5} ${D}${sysconfdir}/ldapscripts/ldapaddsudo.template.cgcs
+       install -m 644 ${SOURCE6} ${D}${sysconfdir}/ldapscripts/ldapmodsudo.template.cgcs
+       install -m 600 ${SOURCE7} ${D}${sysconfdir}/ldapscripts/ldapscripts.passwd
+}
+
+FILES_${PN}_append = " ${libdir}/runtime \
+                       ${sysconfdir} \
+"
diff --git a/meta-stx/recipes-support/libtommath/libtommath_1.1.0.bb b/meta-stx/recipes-support/libtommath/libtommath_1.1.0.bb
new file mode 100644 (file)
index 0000000..1764fb9
--- /dev/null
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "A portable number theoretic multiple-precision integer library"
+DESCRIPTION = " \
+  A free open source portable number theoretic multiple-precision integer \
+  library written entirely in C. (phew!). The library is designed to provide \
+  a simple to work with API that provides fairly efficient routines that \
+  build out of the box without configuration. \
+"
+HOMEPAGE = "https://github.com/libtom/libtommath"
+
+LICENSE = "PD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=23e7e0a32e53a2b1d35f5fd9ef053402"
+
+DEPENDS = "libtool-cross"
+
+SRC_URI = "https://github.com/libtom/libtommath/releases/download/v${PV}/ltm-${PV}.tar.xz"
+
+SRC_URI[md5sum] = "b2da4488c9024976d36870132f4b8a42"
+SRC_URI[sha256sum] = "90466c88783d1fe9f5c2364a69f5479f10d73ed616011be6196f35f7f1537ead"
+
+EXTRA_OEMAKE = " \
+       LIBTOOL=${STAGING_BINDIR_CROSS}/${HOST_SYS}-libtool \
+       LIBPATH=${libdir} \
+       INCPATH=${includedir} \
+       -f makefile.shared \
+       "
+
+do_install() {
+       oe_runmake install DESTDIR=${D}
+}
diff --git a/meta-stx/recipes-support/libtpms/libtpms_git.bb b/meta-stx/recipes-support/libtpms/libtpms_git.bb
new file mode 100644 (file)
index 0000000..5d3642b
--- /dev/null
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "\
+               "
+
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=e73f0786a936da3814896df06ad225a9"
+
+SRCREV = "f74b7104c0124db56c123ed171f378e82bd207c2"
+PROTOCOL = "https"
+BRANCH = "stable-0.6.0"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/stefanberger/libtpms.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+DEPENDS += " tpm2-tss tpm2-tools tpm2-abrmd openssh openssl"
+
+inherit autotools
+
+EXTRA_OECONF += "--with-openssl --with-tpm2"
diff --git a/meta-stx/recipes-support/libverto/libverto_0.2.5.bb b/meta-stx/recipes-support/libverto/libverto_0.2.5.bb
new file mode 100644 (file)
index 0000000..41e5e1b
--- /dev/null
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Event loop abstraction for Libraries"
+DESCRIPTION = "Libverto exists to isolate libraries from the particular event loop \
+chosen by an application. Libverto provides an asynchronous \
+programming interface independent of any particular event loop and \
+allows applications to attach this interface to whatever event loop \
+they select."
+HOMEPAGE = "http://fedorahosted.net/libverto"
+
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://COPYING;md5=bc8917ab981cfa6161dc29319a4038d9"
+
+
+DEPENDS += "libevent libtevent"
+
+# fedorahosted tarball cannot be fetched completely, so switch to use other source
+# SRC_URI = "http://fedorahosted.org/releases/l/i/${PN}/${PN}-${PV}.tar.gz"
+SRC_URI = "https://github.com/latchset/libverto/releases/download/0.2.5/${PN}-${PV}.tar.gz \
+          "
+SRC_URI[md5sum] = "144fb8f00759ef8ad71c472333847f03"
+
+inherit autotools pkgconfig
+
+PACKAGECONFIG ??= "libevent tevent"
+PACKAGECONFIG[glib] = "--with-glib,--without-glib,glib-2.0"
+PACKAGECONFIG[libev] = "--with-libev,--without-libev,libev"
+PACKAGECONFIG[libevent] = "--with-libevent,--without-libevent,libevent"
+PACKAGECONFIG[tevent] = "--with-tevent,--without-tevent,libtevent"
+
+PACKAGES =+ "${PN}-libevent ${PN}-tevent"
+
+FILES_${PN}-libevent = "${libdir}/libverto-libevent${SOLIBS}"
+FILES_${PN}-tevent = "${libdir}/libverto-tevent${SOLIBS}"
+
+RPROVIDES_${PN}-libevent += "${PN}-module-base"
+RPROVIDES_${PN}-tevent += "${PN}-module-base"
+
diff --git a/meta-stx/recipes-support/memcached/files/memcached.sysconfig b/meta-stx/recipes-support/memcached/files/memcached.sysconfig
new file mode 100644 (file)
index 0000000..d065678
--- /dev/null
@@ -0,0 +1,5 @@
+PORT="11211"
+USER="memcached"
+MAXCONN="1024"
+CACHESIZE="64"
+OPTIONS="-l 127.0.0.1,::1"
diff --git a/meta-stx/recipes-support/memcached/memcached_%.bbappend b/meta-stx/recipes-support/memcached/memcached_%.bbappend
new file mode 100644 (file)
index 0000000..c39fdf0
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += "file://memcached.sysconfig"
+
+inherit useradd
+
+USERADD_PACKAGES = "${PN}"
+
+USERADD_PARAM_${PN} = "-r -g memcached -d /run/memcached -s /sbin/nologin -c 'Memcached daemon' memcached"
+GROUPADD_PARAM_${PN} = "-r memcached"
+
+do_install_append () {
+    install -d ${D}${sysconfdir}/sysconfig
+    install -m 0644 ${WORKDIR}/memcached.sysconfig ${D}${sysconfdir}/sysconfig/memcached
+}
diff --git a/meta-stx/recipes-support/mod-wsgi/mod-wsgi_git.bbappend b/meta-stx/recipes-support/mod-wsgi/mod-wsgi_git.bbappend
new file mode 100644 (file)
index 0000000..7e6b28c
--- /dev/null
@@ -0,0 +1,18 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append() {
+	sed -i -e 's|${libdir}|${libexecdir}|' ${D}${sysconfdir}/apache2/modules.d/wsgi.load
+}
diff --git a/meta-stx/recipes-support/nss-pam-ldapd/nss-pam-ldapd_%.bbappend b/meta-stx/recipes-support/nss-pam-ldapd/nss-pam-ldapd_%.bbappend
new file mode 100644 (file)
index 0000000..6ad9aeb
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit useradd
+
+USERADD_PACKAGES = "${PN}"
+
+USERADD_PARAM_${PN} = "-r -g ldap -c 'LDAP Client User' -u 65 -d / -s /sbin/nologin nslcd"
diff --git a/meta-stx/recipes-support/openldap/files/0021-openldap-and-stx-source-and-config-files.patch b/meta-stx/recipes-support/openldap/files/0021-openldap-and-stx-source-and-config-files.patch
new file mode 100644 (file)
index 0000000..14f74d0
--- /dev/null
@@ -0,0 +1,997 @@
+From 2adc9fa71e3a47542793e61c7794629fa9255a57 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Tue, 5 Nov 2019 14:49:06 -0800
+Subject: [PATCH] openldap and stx source and config files
+
+From stx 1901 openldap-2.4.44-21.el7_6.src.rpm
+---
+ stx-sources/ldap.conf                        |  18 +++
+ stx-sources/libexec-check-config.sh          |  91 ++++++++++++
+ stx-sources/libexec-convert-config.sh        |  79 ++++++++++
+ stx-sources/libexec-create-certdb.sh         |  70 +++++++++
+ stx-sources/libexec-functions                | 136 +++++++++++++++++
+ stx-sources/libexec-generate-server-cert.sh  | 118 +++++++++++++++
+ stx-sources/libexec-update-ppolicy-schema.sh | 142 ++++++++++++++++++
+ stx-sources/libexec-upgrade-db.sh            |  40 +++++
+ stx-sources/openldap.tmpfiles                |   3 +
+ stx-sources/slapd.ldif                       | 148 +++++++++++++++++++
+ stx-sources/slapd.service                    |  19 +++
+ stx-sources/slapd.sysconfig                  |  15 ++
+ stx-sources/slapd.tmpfiles                   |   2 +
+ 13 files changed, 881 insertions(+)
+ create mode 100644 stx-sources/ldap.conf
+ create mode 100755 stx-sources/libexec-check-config.sh
+ create mode 100755 stx-sources/libexec-convert-config.sh
+ create mode 100755 stx-sources/libexec-create-certdb.sh
+ create mode 100644 stx-sources/libexec-functions
+ create mode 100755 stx-sources/libexec-generate-server-cert.sh
+ create mode 100755 stx-sources/libexec-update-ppolicy-schema.sh
+ create mode 100755 stx-sources/libexec-upgrade-db.sh
+ create mode 100644 stx-sources/openldap.tmpfiles
+ create mode 100644 stx-sources/slapd.ldif
+ create mode 100644 stx-sources/slapd.service
+ create mode 100644 stx-sources/slapd.sysconfig
+ create mode 100644 stx-sources/slapd.tmpfiles
+
+diff --git a/stx-sources/ldap.conf b/stx-sources/ldap.conf
+new file mode 100644
+index 0000000..aa6f8fd
+--- /dev/null
++++ b/stx-sources/ldap.conf
+@@ -0,0 +1,18 @@
++#
++# LDAP Defaults
++#
++
++# See ldap.conf(5) for details
++# This file should be world readable but not world writable.
++
++#BASE dc=example,dc=com
++#URI  ldap://ldap.example.com ldap://ldap-master.example.com:666
++
++#SIZELIMIT    12
++#TIMELIMIT    15
++#DEREF                never
++
++TLS_CACERTDIR /etc/openldap/certs
++
++# Turning this off breaks GSSAPI used with krb5 when rdns = false
++SASL_NOCANON  on
+diff --git a/stx-sources/libexec-check-config.sh b/stx-sources/libexec-check-config.sh
+new file mode 100755
+index 0000000..87e377f
+--- /dev/null
++++ b/stx-sources/libexec-check-config.sh
+@@ -0,0 +1,91 @@
++#!/bin/sh
++# Author: Jan Vcelak <jvcelak@redhat.com>
++
++. /usr/libexec/openldap/functions
++
++function check_config_syntax()
++{
++      retcode=0
++      tmp_slaptest=`mktemp --tmpdir=/var/run/openldap`
++      run_as_ldap "/usr/sbin/slaptest $SLAPD_GLOBAL_OPTIONS -u" &>$tmp_slaptest
++      if [ $? -ne 0 ]; then
++              error "Checking configuration file failed:"
++              cat $tmp_slaptest >&2
++              retcode=1
++      fi
++      rm $tmp_slaptest
++      return $retcode
++}
++
++function check_certs_perms()
++{
++      retcode=0
++      for cert in `certificates`; do
++              run_as_ldap "/usr/bin/test -e \"$cert\""
++              if [ $? -ne 0 ]; then
++                      error "TLS certificate/key/DB '%s' was not found." "$cert"
++			retcode=1
++                      continue
++              fi
++              run_as_ldap "/usr/bin/test -r \"$cert\""
++              if [ $? -ne 0 ]; then
++                      error "TLS certificate/key/DB '%s' is not readable." "$cert"
++                      retcode=1
++              fi
++      done
++      return $retcode
++}
++
++function check_db_perms()
++{
++      retcode=0
++      for dbdir in `databases`; do
++              [ -d "$dbdir" ] || continue
++              for dbfile in `find ${dbdir} -maxdepth 1 -name "*.dbb" -or -name "*.gdbm" -or -name "*.bdb" -or -name "__db.*" -or -name "log.*" -or -name "alock"`; do
++                      run_as_ldap "/usr/bin/test -r \"$dbfile\" -a -w \"$dbfile\""
++                      if [ $? -ne 0 ]; then
++                              error "Read/write permissions for DB file '%s' are required." "$dbfile"
++                              retcode=1
++                      fi
++              done
++      done
++      return $retcode
++}
++
++function check_everything()
++{
++      retcode=0
++      check_config_syntax || retcode=1
++      # TODO: need support for Mozilla NSS, disabling temporarily
++      #check_certs_perms || retcode=1
++      check_db_perms || retcode=1
++      return $retcode
++}
++
++if [ `id -u` -ne 0 ]; then
++      error "You have to be root to run this script."
++      exit 4
++fi
++
++load_sysconfig
++
++if [ -n "$SLAPD_CONFIG_DIR" ]; then
++      if [ ! -d "$SLAPD_CONFIG_DIR" ]; then
++              error "Configuration directory '%s' does not exist." "$SLAPD_CONFIG_DIR"
++      else
++              check_everything
++              exit $?
++      fi
++fi
++
++if [ -n "$SLAPD_CONFIG_FILE" ]; then
++      if [ ! -f "$SLAPD_CONFIG_FILE" ]; then
++              error "Configuration file '%s' does not exist." "$SLAPD_CONFIG_FILE"
++      else
++              error "Warning: Usage of a configuration file is obsolete!"
++              check_everything
++              exit $?
++      fi
++fi
++
++exit 1
+diff --git a/stx-sources/libexec-convert-config.sh b/stx-sources/libexec-convert-config.sh
+new file mode 100755
+index 0000000..824c3b1
+--- /dev/null
++++ b/stx-sources/libexec-convert-config.sh
+@@ -0,0 +1,79 @@
++#!/bin/sh
++# Author: Jan Vcelak <jvcelak@redhat.com>
++
++. /usr/libexec/openldap/functions
++
++function help()
++{
++      error "usage: %s [-f config-file] [-F config-dir]\n" "`basename $0`"
++      exit 2
++}
++
++load_sysconfig
++
++while getopts :f:F: opt; do
++      case "$opt" in
++      f)
++              SLAPD_CONFIG_FILE="$OPTARG"
++              ;;
++      F)
++              SLAPD_CONFIG_DIR="$OPTARG"
++              ;;
++      *)
++              help
++              ;;
++      esac
++done
++shift $((OPTIND-1))
++[ -n "$1" ] && help
++
++# check source, target
++
++if [ ! -f "$SLAPD_CONFIG_FILE" ]; then
++      error "Source configuration file '%s' not found." "$SLAPD_CONFIG_FILE"
++      exit 1
++fi
++
++if grep -iq '^dn: cn=config$' "$SLAPD_CONFIG_FILE"; then
++      SLAPD_CONFIG_FILE_FORMAT=ldif
++else
++      SLAPD_CONFIG_FILE_FORMAT=conf
++fi
++
++if [ -d "$SLAPD_CONFIG_DIR" ]; then
++      if [ `find "$SLAPD_CONFIG_DIR" -maxdepth 0 -empty | wc -l` -eq 0 ]; then
++              error "Target configuration directory '%s' is not empty." "$SLAPD_CONFIG_DIR"
++              exit 1
++      fi
++fi
++
++# perform the conversion
++
++tmp_convert=`mktemp --tmpdir=/var/run/openldap`
++
++if [ `id -u` -eq 0 ]; then
++      install -d --owner $SLAPD_USER --group `id -g $SLAPD_USER` --mode 0750 "$SLAPD_CONFIG_DIR" &>>$tmp_convert
++      if [ $SLAPD_CONFIG_FILE_FORMAT = ldif ]; then
++              run_as_ldap "/usr/sbin/slapadd -F \"$SLAPD_CONFIG_DIR\" -n 0 -l \"$SLAPD_CONFIG_FILE\"" &>>$tmp_convert
++      else
++              run_as_ldap "/usr/sbin/slaptest -f \"$SLAPD_CONFIG_FILE\" -F \"$SLAPD_CONFIG_DIR\"" &>>$tmp_convert
++      fi
++      retcode=$?
++else
++      error "You are not root! Permission will not be set."
++      install -d --mode 0750 "$SLAPD_CONFIG_DIR" &>>$tmp_convert
++      if [ $SLAPD_CONFIG_FILE_FORMAT = ldif ]; then
++              /usr/sbin/slapadd -F "$SLAPD_CONFIG_DIR" -n 0 -l "$SLAPD_CONFIG_FILE" &>>$tmp_convert
++      else
++              /usr/sbin/slaptest -f "$SLAPD_CONFIG_FILE" -F "$SLAPD_CONFIG_DIR" &>>$tmp_convert
++      fi
++      retcode=$?
++fi
++
++if [ $retcode -ne 0 ]; then
++      error "Configuration conversion failed:"
++      cat $tmp_convert >&2
++fi
++
++rm $tmp_convert
++exit $retcode
+diff --git a/stx-sources/libexec-create-certdb.sh b/stx-sources/libexec-create-certdb.sh
+new file mode 100755
+index 0000000..2377fdd
+--- /dev/null
++++ b/stx-sources/libexec-create-certdb.sh
+@@ -0,0 +1,70 @@
++#!/bin/bash
++# Author: Jan Vcelak <jvcelak@redhat.com>
++
++set -e
++
++# default options
++
++CERTDB_DIR=/etc/openldap/certs
++
++# internals
++
++MODULE_CKBI="$(rpm --eval %{_libdir})/libnssckbi.so"
++RANDOM_SOURCE=/dev/urandom
++PASSWORD_BYTES=32
++
++# parse arguments
++
++usage() {
++      printf "usage: create-certdb.sh [-d certdb]\n" >&2
++      exit 1
++}
++
++while getopts "d:" opt; do
++      case "$opt" in
++      d)
++              CERTDB_DIR="$OPTARG"
++              ;;
++      \?)
++              usage
++              ;;
++      esac
++done
++
++[ "$OPTIND" -le "$#" ] && usage
++
++# verify target location
++
++if [ ! -d "$CERTDB_DIR" ]; then
++      printf "Directory '%s' does not exist.\n" "$CERTDB_DIR" >&2
++      exit 1
++fi
++
++if [ ! "$(find "$CERTDB_DIR"  -maxdepth 0 -empty | wc -l)" -eq 1 ]; then
++      printf "Directory '%s' is not empty.\n" "$CERTDB_DIR" >&2
++      exit 1
++fi
++
++# create the database
++
++printf "Creating certificate database in '%s'.\n" "$CERTDB_DIR" >&2
++
++PASSWORD_FILE="$CERTDB_DIR/password"
++OLD_UMASK="$(umask)"
++umask 0377
++dd if=$RANDOM_SOURCE bs=$PASSWORD_BYTES count=1 2>/dev/null | base64 > "$PASSWORD_FILE"
++umask "$OLD_UMASK"
++
++certutil -d "$CERTDB_DIR" -N -f "$PASSWORD_FILE" &>/dev/null
++
++# load module with builtin CA certificates
++
++echo | modutil -dbdir "$CERTDB_DIR" -add "Root Certs" -libfile "$MODULE_CKBI" &>/dev/null
++
++# tune permissions
++
++for dbfile in "$CERTDB_DIR"/*.db; do
++      chmod 0644 "$dbfile"
++done
++
++exit 0
+diff --git a/stx-sources/libexec-functions b/stx-sources/libexec-functions
+new file mode 100644
+index 0000000..98c8631
+--- /dev/null
++++ b/stx-sources/libexec-functions
+@@ -0,0 +1,136 @@
++# Author: Jan Vcelak <jvcelak@redhat.com>
++
++SLAPD_USER=
++SLAPD_CONFIG_FILE=
++SLAPD_CONFIG_DIR=
++SLAPD_CONFIG_CUSTOM=
++SLAPD_GLOBAL_OPTIONS=
++SLAPD_SYSCONFIG_FILE=
++
++function default_config()
++{
++      SLAPD_USER=ldap
++      SLAPD_CONFIG_FILE=/etc/openldap/slapd.conf
++      SLAPD_CONFIG_DIR=/etc/openldap/slapd.d
++      SLAPD_CONFIG_CUSTOM=
++      SLAPD_GLOBAL_OPTIONS=
++      SLAPD_SYSCONFIG_FILE=/etc/sysconfig/slapd
++}
++
++function parse_config_options()
++{
++      user=
++      config_file=
++      config_dir=
++      while getopts :u:f:F: opt; do
++              case "$opt" in
++              u)
++                      user="$OPTARG"
++                      ;;
++              f)
++                      config_file="$OPTARG"
++                      ;;
++              F)
++                      config_dir="$OPTARG"
++                      ;;
++              esac
++      done
++
++      unset OPTIND
++
++      if [ -n "$user" ]; then
++              SLAPD_USER="$user"
++      fi
++
++      if [ -n "$config_dir" ]; then
++              SLAPD_CONFIG_DIR="$config_dir"
++              SLAPD_CONFIG_FILE=
++              SLAPD_CONFIG_CUSTOM=1
++              SLAPD_GLOBAL_OPTIONS="-F '$config_dir'"
++      elif [ -n "$config_file" ]; then
++              SLAPD_CONFIG_DIR=
++              SLAPD_CONFIG_FILE="$config_file"
++              SLAPD_CONFIG_CUSTOM=1
++              SLAPD_GLOBAL_OPTIONS="-f '$config_file'"
++      fi
++}
++
++function uses_new_config()
++{
++      [ -n "$SLAPD_CONFIG_DIR" ]
++      return $?
++}
++
++function run_as_ldap()
++{
++      /sbin/runuser --shell /bin/sh --session-command "$1" "$SLAPD_USER"
++      return $?
++}
++
++function ldif_unbreak()
++{
++      sed ':a;N;s/\n //;ta;P;D'
++}
++
++function ldif_value()
++{
++      sed 's/^[^:]*: //'
++}
++
++function databases_new()
++{
++      slapcat $SLAPD_GLOBAL_OPTIONS -c \
++      -H 'ldap:///cn=config???(|(objectClass=olcBdbConfig)(objectClass=olcHdbConfig))' 2>/dev/null | \
++              ldif_unbreak | \
++              grep '^olcDbDirectory: ' | \
++              ldif_value
++}
++
++function databases_old()
++{
++	awk	'BEGIN { database="" }
++              $1 == "database" { database=$2 }
++              $1 == "directory" { if (database == "bdb" || database == "hdb") print $2}' \
++              "$SLAPD_CONFIG_FILE"
++}
++
++function certificates_new()
++{
++      slapcat $SLAPD_GLOBAL_OPTIONS -c -H 'ldap:///cn=config???(cn=config)' 2>/dev/null | \
++              ldif_unbreak | \
++              grep '^olcTLS\(CACertificateFile\|CACertificatePath\|CertificateFile\|CertificateKeyFile\): ' | \
++              ldif_value
++}
++
++function certificates_old()
++{
++      awk '$1 ~ "^TLS(CACertificate(File|Path)|CertificateFile|CertificateKeyFile)$" { print $2 } ' \
++              "$SLAPD_CONFIG_FILE"
++}
++
++function certificates()
++{
++      uses_new_config && certificates_new || certificates_old
++}
++
++function databases()
++{
++      uses_new_config && databases_new || databases_old
++}
++
++
++function error()
++{
++      format="$1\n"; shift
++      printf "$format" $@ >&2
++}
++
++function load_sysconfig()
++{
++      [ -r "$SLAPD_SYSCONFIG_FILE" ] || return
++
++      . "$SLAPD_SYSCONFIG_FILE"
++      [ -n "$SLAPD_OPTIONS" ] && parse_config_options $SLAPD_OPTIONS
++}
++
++default_config
+diff --git a/stx-sources/libexec-generate-server-cert.sh b/stx-sources/libexec-generate-server-cert.sh
+new file mode 100755
+index 0000000..e2f4974
+--- /dev/null
++++ b/stx-sources/libexec-generate-server-cert.sh
+@@ -0,0 +1,118 @@
++#!/bin/bash
++# Author: Jan Vcelak <jvcelak@redhat.com>
++
++set -e
++
++# default options
++
++CERTDB_DIR=/etc/openldap/certs
++CERT_NAME="OpenLDAP Server"
++PASSWORD_FILE=
++HOSTNAME_FQDN="$(hostname --fqdn)"
++ALT_NAMES=
++ONCE=0
++
++# internals
++
++RANDOM_SOURCE=/dev/urandom
++CERT_RANDOM_BYTES=256
++CERT_KEY_TYPE=rsa
++CERT_KEY_SIZE=1024
++CERT_VALID_MONTHS=12
++
++# parse arguments
++
++usage() {
++      printf "usage: generate-server-cert.sh [-d certdb-dir] [-n cert-name]\n" >&2
++      printf "                               [-p password-file] [-h hostnames]\n" >&2
++      printf "                               [-a dns-alt-names] [-o]\n" >&2
++      exit 1
++}
++
++while getopts "d:n:p:h:a:o" opt; do
++      case "$opt" in
++      d)
++              CERTDB_DIR="$OPTARG"
++              ;;
++      n)
++              CERT_NAME="$OPTARG"
++              ;;
++      p)
++              PASSWORD_FILE="$OPTARG"
++              ;;
++      h)
++              HOSTNAME_FQDN="$OPTARG"
++              ;;
++      a)
++              ALT_NAMES="$OPTARG"
++              ;;
++      o)
++              ONCE=1
++              ;;
++      \?)
++              usage
++              ;;
++      esac
++done
++
++[ "$OPTIND" -le "$#" ] && usage
++
++# generated options
++
++ONCE_FILE="$CERTDB_DIR/.slapd-leave"
++PASSWORD_FILE="${PASSWORD_FILE:-${CERTDB_DIR}/password}"
++ALT_NAMES="${ALT_NAMES:-${HOSTNAME_FQDN},localhost,localhost.localdomain}"
++
++# verify target location
++
++if [ "$ONCE" -eq 1 -a -f "$ONCE_FILE" ]; then
++      printf "Skipping certificate generating, '%s' exists.\n" "$ONCE_FILE" >&2
++      exit 0
++fi
++
++if ! certutil -d "$CERTDB_DIR" -U &>/dev/null; then
++      printf "Directory '%s' is not a valid certificate database.\n" "$CERTDB_DIR" >&2
++      exit 1
++fi
++
++printf "Creating new server certificate in '%s'.\n" "$CERTDB_DIR" >&2
++
++if [ ! -r "$PASSWORD_FILE" ]; then
++      printf "Password file '%s' is not readable.\n" "$PASSWORD_FILE" >&2
++      exit 1
++fi
++
++if certutil -d "$CERTDB_DIR" -L -a -n "$CERT_NAME" &>/dev/null; then
++      printf "Certificate '%s' already exists in the certificate database.\n" "$CERT_NAME" >&2
++      exit 1
++fi
++
++# generate server certificate (self signed)
++
++
++CERT_RANDOM=$(mktemp --tmpdir=/var/run/openldap)
++dd if=$RANDOM_SOURCE bs=$CERT_RANDOM_BYTES count=1 of=$CERT_RANDOM &>/dev/null
++
++certutil -d "$CERTDB_DIR" -f "$PASSWORD_FILE" -z "$CERT_RANDOM" \
++      -S -x -n "$CERT_NAME" \
++      -s "CN=$HOSTNAME_FQDN" \
++      -t TC,, \
++      -k $CERT_KEY_TYPE -g $CERT_KEY_SIZE \
++      -v $CERT_VALID_MONTHS \
++      -8 "$ALT_NAMES" \
++      &>/dev/null
++
++rm -f $CERT_RANDOM
++
++# tune permissions
++
++if [ "$(id -u)" -eq 0 ]; then
++      chgrp ldap "$PASSWORD_FILE"
++      chmod g+r "$PASSWORD_FILE"
++else
++      printf "WARNING: The server requires read permissions on the password file in order to\n" >&2
++      printf "         load it's private key from the certificate database.\n" >&2
++fi
++
++touch "$ONCE_FILE"
++exit 0
+diff --git a/stx-sources/libexec-update-ppolicy-schema.sh b/stx-sources/libexec-update-ppolicy-schema.sh
+new file mode 100755
+index 0000000..a853b27
+--- /dev/null
++++ b/stx-sources/libexec-update-ppolicy-schema.sh
+@@ -0,0 +1,142 @@
++#!/bin/bash
++# This script serves one purpose, to add a possibly missing attribute
++# to a ppolicy schema in a dynamic configuration of OpenLDAP. This
++# attribute was introduced in openldap-2.4.43 and slapd will not 
++# start without it later on.
++#
++# The script tries to update in a directory given as first parameter,
++# or in /etc/openldap/slapd.d implicitly.
++#
++# Author: Matus Honek <mhonek@redhat.com>
++# Bugzilla: #1487857
++
++function log {
++    echo "Update dynamic configuration: " $@
++    true
++}
++
++function iferr {
++    if [ $? -ne 0 ]; then
++      log "ERROR: " $@
++      true
++    else
++      false
++    fi
++}
++
++function update {
++    set -u
++    shopt -s extglob
++
++    ORIGINAL="${1:-/etc/openldap/slapd.d}"
++    ORIGINAL="${ORIGINAL%*(/)}"
++
++    ### check if necessary
++    grep -r "pwdMaxRecordedFail" "${ORIGINAL}/cn=config/cn=schema" >/dev/null
++    [ $? -eq 0 ] && log "Schemas look up to date. Ok. Quitting." && return 0
++
++    ### prep
++    log "Prepare environment."
++    
++    TEMPDIR=$(mktemp -d)
++    iferr "Could not create a temporary directory. Quitting." && return 1
++    DBDIR="${TEMPDIR}/db"
++    SUBDBDIR="${DBDIR}/cn=temporary"
++
++    mkdir "${DBDIR}"
++    iferr "Could not create temporary configuration directory. Quitting." && return 1
++    cp -r --no-target-directory "${ORIGINAL}" "${SUBDBDIR}"
++    iferr "Could not copy configuration. Quitting." && return 1
++    
++    pushd "$TEMPDIR" >/dev/null
++
++    cat > temp.conf <<EOF
++database ldif
++suffix cn=temporary
++directory db
++access to * by * manage
++EOF
++    
++    SOCKET="$(pwd)/socket"
++    LISTENER="ldapi://${SOCKET//\//%2F}"
++    CONN_PARAMS=("-Y" "EXTERNAL" "-H" "${LISTENER}")
++    
++    slapd -f temp.conf -h "$LISTENER" -d 0 >/dev/null 2>&1 &
++    SLAPDPID="$!"
++    sleep 2
++
++    ldapadd ${CONN_PARAMS[@]} -d 0 >/dev/null 2>&1 <<EOF
++dn: cn=temporary
++objectClass: olcGlobal
++cn: temporary
++EOF
++    iferr "Could not populate the temporary database. Quitting." && return 1
++    
++    ### update
++    log "Update with new pwdMaxRecordedFailure attribute."
++    FILTER="(&"
++    FILTER+="(olcObjectClasses=*'pwdPolicy'*)"
++    FILTER+="(!(olcObjectClasses=*'pwdPolicy'*'pwdMaxRecordedFailure'*))"
++    FILTER+="(!(olcAttributeTypes=*'pwdMaxRecordedFailure'*))"
++    FILTER+=")"
++    RES=$(ldapsearch ${CONN_PARAMS[@]} \
++                   -b cn=schema,cn=config,cn=temporary \
++                   -LLL \
++                   -o ldif-wrap=no \
++                   "$FILTER" \
++                   dn olcObjectClasses \
++                   2>/dev/null \
++            | sed '/^$/d')
++    DN=$(printf '%s' "$RES" | grep '^dn:')
++    OC=$(printf '%s' "$RES" | grep "^olcObjectClasses:.*'pwdPolicy'")
++    NEWOC="${OC//$ pwdSafeModify /$ pwdSafeModify $ pwdMaxRecordedFailure }"
++
++    test $(echo "$DN" | wc -l) = 1
++    iferr "Received more than one DN. Cannot continue. Quitting." && return 1
++    test "$NEWOC" != "$OC"
++    iferr "Updating pwdPolicy objectClass definition failed. Quitting." && return 1
++
++    ldapmodify ${CONN_PARAMS[@]} -d 0 >/dev/null 2>&1 <<EOF
++$DN
++changetype: modify
++add: olcAttributeTypes
++olcAttributeTypes: ( 1.3.6.1.4.1.42.2.27.8.1.30 NAME 'pwdMaxRecordedFailur
++ e' EQUALITY integerMatch ORDERING integerOrderingMatch  SYNTAX 1.3.6.1.4.1.
++ 1466.115.121.1.27 SINGLE-VALUE )
++-
++delete: olcObjectClasses
++$OC
++-
++add: olcObjectClasses
++$NEWOC
++EOF
++    iferr "Updating with new attribute failed. Quitting." && return 1
++
++    popd >/dev/null
++
++    ### apply
++    log "Apply changes."
++    cp -r --no-target-directory "$ORIGINAL" "$ORIGINAL~backup"
++    iferr "Backing up old configuration failed. Quitting." && return 1
++    cp -r --no-target-directory "$SUBDBDIR" "$ORIGINAL"
++    iferr "Applying new configuration failed. Quitting." && return 1
++    
++    ### clean up
++    log "Clean up."
++    kill "$SLAPDPID"
++    SLAPDPID=
++    rm -rf "$TEMPDIR"
++    TEMPDIR=
++}
++
++SLAPDPID=
++TEMPDIR=
++update "$1"
++if [ $? -ne 0 ]; then
++    log "Clean up."
++    echo "$SLAPDPID"
++    echo "$TEMPDIR"
++    kill "$SLAPDPID"
++    rm -rf "$TEMPDIR"
++fi
++log "Finished."
+diff --git a/stx-sources/libexec-upgrade-db.sh b/stx-sources/libexec-upgrade-db.sh
+new file mode 100755
+index 0000000..1543c80
+--- /dev/null
++++ b/stx-sources/libexec-upgrade-db.sh
+@@ -0,0 +1,40 @@
++#!/bin/sh
++# Author: Jan Vcelak <jvcelak@redhat.com>
++
++. /usr/libexec/openldap/functions
++
++if [ `id -u` -ne 0 ]; then
++      error "You have to be root to run this command."
++      exit 4
++fi
++
++load_sysconfig
++retcode=0
++
++for dbdir in `databases`; do
++      upgrade_log="$dbdir/db_upgrade.`date +%Y%m%d%H%M%S`.log"
++      bdb_files=`find "$dbdir" -maxdepth 1 -name "*.bdb" -printf '"%f" '`
++
++      # skip uninitialized database
++	[ -z "$bdb_files" ] && continue
++
++      printf "Updating '%s', logging into '%s'\n" "$dbdir" "$upgrade_log"
++
++      # perform the update
++      for command in \
++              "/usr/bin/db_recover -v -h \"$dbdir\"" \
++              "/usr/bin/db_upgrade -v -h \"$dbdir\" $bdb_files" \
++              "/usr/bin/db_checkpoint -v -h \"$dbdir\" -1" \
++      ; do
++		printf "Executing: %s\n" "$command" >>"$upgrade_log" 2>&1
++		run_as_ldap "$command" >>"$upgrade_log" 2>&1
++              result=$?
++              printf "Exit code: %d\n" $result >>"$upgrade_log"
++              if [ $result -ne 0 ]; then
++                      printf "Upgrade failed: %d\n" $result
++                      retcode=1
++              fi
++      done
++done
++
++exit $retcode
+diff --git a/stx-sources/openldap.tmpfiles b/stx-sources/openldap.tmpfiles
+new file mode 100644
+index 0000000..aa0e805
+--- /dev/null
++++ b/stx-sources/openldap.tmpfiles
+@@ -0,0 +1,3 @@
++# OpenLDAP TLSMC runtime directories
++x /tmp/openldap-tlsmc-*
++X /tmp/openldap-tlsmc-*
+diff --git a/stx-sources/slapd.ldif b/stx-sources/slapd.ldif
+new file mode 100644
+index 0000000..7b7f328
+--- /dev/null
++++ b/stx-sources/slapd.ldif
+@@ -0,0 +1,148 @@
++#
++# See slapd-config(5) for details on configuration options.
++# This file should NOT be world readable.
++#
++
++dn: cn=config
++objectClass: olcGlobal
++cn: config
++olcArgsFile: /var/run/openldap/slapd.args
++olcPidFile: /var/run/openldap/slapd.pid
++#
++# TLS settings
++#
++olcTLSCACertificatePath: /etc/openldap/certs
++olcTLSCertificateFile: "OpenLDAP Server"
++olcTLSCertificateKeyFile: /etc/openldap/certs/password
++#
++# Do not enable referrals until AFTER you have a working directory
++# service AND an understanding of referrals.
++#
++#olcReferral: ldap://root.openldap.org
++#
++# Sample security restrictions
++#     Require integrity protection (prevent hijacking)
++#     Require 112-bit (3DES or better) encryption for updates
++#     Require 64-bit encryption for simple bind
++#
++#olcSecurity: ssf=1 update_ssf=112 simple_bind=64
++
++
++#
++# Load dynamic backend modules:
++# - modulepath is architecture dependent value (32/64-bit system)
++# - back_sql.la backend requires openldap-servers-sql package
++# - dyngroup.la and dynlist.la cannot be used at the same time
++#
++
++#dn: cn=module,cn=config
++#objectClass: olcModuleList
++#cn: module
++#olcModulepath:       /usr/lib/openldap
++#olcModulepath:       /usr/lib64/openldap
++#olcModuleload: accesslog.la
++#olcModuleload: auditlog.la
++#olcModuleload: back_dnssrv.la
++#olcModuleload: back_ldap.la
++#olcModuleload: back_mdb.la
++#olcModuleload: back_meta.la
++#olcModuleload: back_null.la
++#olcModuleload: back_passwd.la
++#olcModuleload: back_relay.la
++#olcModuleload: back_shell.la
++#olcModuleload: back_sock.la
++#olcModuleload: collect.la
++#olcModuleload: constraint.la
++#olcModuleload: dds.la
++#olcModuleload: deref.la
++#olcModuleload: dyngroup.la
++#olcModuleload: dynlist.la
++#olcModuleload: memberof.la
++#olcModuleload: pcache.la
++#olcModuleload: ppolicy.la
++#olcModuleload: refint.la
++#olcModuleload: retcode.la
++#olcModuleload: rwm.la
++#olcModuleload: seqmod.la
++#olcModuleload: smbk5pwd.la
++#olcModuleload: sssvlv.la
++#olcModuleload: syncprov.la
++#olcModuleload: translucent.la
++#olcModuleload: unique.la
++#olcModuleload: valsort.la
++
++
++#
++# Schema settings
++#
++
++dn: cn=schema,cn=config
++objectClass: olcSchemaConfig
++cn: schema
++
++include: file:///etc/openldap/schema/core.ldif
++
++#
++# Frontend settings
++#
++
++dn: olcDatabase=frontend,cn=config
++objectClass: olcDatabaseConfig
++objectClass: olcFrontendConfig
++olcDatabase: frontend
++#
++# Sample global access control policy:
++#     Root DSE: allow anyone to read it
++#     Subschema (sub)entry DSE: allow anyone to read it
++#     Other DSEs:
++#             Allow self write access
++#             Allow authenticated users read access
++#             Allow anonymous users to authenticate
++#
++#olcAccess: to dn.base="" by * read
++#olcAccess: to dn.base="cn=Subschema" by * read
++#olcAccess: to *
++#     by self write
++#     by users read
++#     by anonymous auth
++#
++# if no access controls are present, the default policy
++# allows anyone and everyone to read anything but restricts
++# updates to rootdn.  (e.g., "access to * by * read")
++#
++# rootdn can always read and write EVERYTHING!
++#
++
++#
++# Configuration database
++#
++
++dn: olcDatabase=config,cn=config
++objectClass: olcDatabaseConfig
++olcDatabase: config
++olcAccess: to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,c
++ n=auth" manage by * none
++
++#
++# Server status monitoring
++#
++
++dn: olcDatabase=monitor,cn=config
++objectClass: olcDatabaseConfig
++olcDatabase: monitor
++olcAccess: to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,c
++ n=auth" read by dn.base="cn=Manager,dc=my-domain,dc=com" read by * none
++
++#
++# Backend database definitions
++#
++
++dn: olcDatabase=hdb,cn=config
++objectClass: olcDatabaseConfig
++objectClass: olcHdbConfig
++olcDatabase: hdb
++olcSuffix: dc=my-domain,dc=com
++olcRootDN: cn=Manager,dc=my-domain,dc=com
++olcDbDirectory:       /var/lib/ldap
++olcDbIndex: objectClass eq,pres
++olcDbIndex: ou,cn,mail,surname,givenname eq,pres,sub
+diff --git a/stx-sources/slapd.service b/stx-sources/slapd.service
+new file mode 100644
+index 0000000..8a3a722
+--- /dev/null
++++ b/stx-sources/slapd.service
+@@ -0,0 +1,19 @@
++[Unit]
++Description=OpenLDAP Server Daemon
++After=syslog.target network-online.target
++Documentation=man:slapd
++Documentation=man:slapd-config
++Documentation=man:slapd-hdb
++Documentation=man:slapd-mdb
++Documentation=file:///usr/share/doc/openldap-servers/guide.html
++
++[Service]
++Type=forking
++PIDFile=/var/run/openldap/slapd.pid
++Environment="SLAPD_URLS=ldap:/// ldapi:///" "SLAPD_OPTIONS="
++EnvironmentFile=/etc/sysconfig/slapd
++ExecStartPre=/usr/libexec/openldap/check-config.sh
++ExecStart=/usr/sbin/slapd -u ldap -h ${SLAPD_URLS} $SLAPD_OPTIONS
++
++[Install]
++WantedBy=multi-user.target
+diff --git a/stx-sources/slapd.sysconfig b/stx-sources/slapd.sysconfig
+new file mode 100644
+index 0000000..68091a5
+--- /dev/null
++++ b/stx-sources/slapd.sysconfig
+@@ -0,0 +1,15 @@
++# OpenLDAP server configuration
++# see 'man slapd' for additional information
++
++# Where the server will run (-h option)
++# - ldapi:/// is required for on-the-fly configuration using client tools
++#   (use SASL with EXTERNAL mechanism for authentication)
++# - default: ldapi:/// ldap:///
++# - example: ldapi:/// ldap://127.0.0.1/ ldap://10.0.0.1:1389/ ldaps:///
++SLAPD_URLS="ldapi:/// ldap:///"
++
++# Any custom options
++#SLAPD_OPTIONS=""
++
++# Keytab location for GSSAPI Kerberos authentication
++#KRB5_KTNAME="FILE:/etc/openldap/ldap.keytab"
+diff --git a/stx-sources/slapd.tmpfiles b/stx-sources/slapd.tmpfiles
+new file mode 100644
+index 0000000..56aa32e
+--- /dev/null
++++ b/stx-sources/slapd.tmpfiles
+@@ -0,0 +1,2 @@
++# openldap runtime directory for slapd.arg and slapd.pid
++d /var/run/openldap 0755 ldap ldap -
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/openldap/files/rootdn-should-not-bypass-ppolicy.patch b/meta-stx/recipes-support/openldap/files/rootdn-should-not-bypass-ppolicy.patch
new file mode 100644 (file)
index 0000000..797c8ad
--- /dev/null
@@ -0,0 +1,41 @@
+From 9456b0eee753d9fd368347b6974a2f6f8d941d4f Mon Sep 17 00:00:00 2001
+From: Kam Nasim <kam.nasim@windriver.com>
+Date: Tue, 11 Apr 2017 17:23:03 -0400
+Subject: [PATCH] rootdn should not bypass ppolicy
+
+---
+ servers/slapd/overlays/ppolicy.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/servers/slapd/overlays/ppolicy.c b/servers/slapd/overlays/ppolicy.c
+index b446deb..fa79872 100644
+--- a/servers/slapd/overlays/ppolicy.c
++++ b/servers/slapd/overlays/ppolicy.c
+@@ -1905,7 +1905,8 @@ ppolicy_modify( Operation *op, SlapReply *rs )
+               for(p=tl; p; p=p->next, hsize++); /* count history size */
+       }
+
+-      if (be_isroot( op )) goto do_modify;
++      /* WRS UPDATE: Run ppolicy for all user password modify ops */
++      //if (be_isroot( op )) goto do_modify;
+
+       /* NOTE: according to draft-behera-ldap-password-policy
+        * pwdAllowUserChange == FALSE must only prevent pwd changes
+@@ -2009,7 +2010,13 @@ ppolicy_modify( Operation *op, SlapReply *rs )
+       }
+
+       bv = newpw.bv_val ? &newpw : &addmod->sml_values[0];
+-      if (pp.pwdCheckQuality > 0) {
++
++      /* WRS UPDATE:
++       * If this is a rootDN op and this is the first password
++       * then bypass password policies as this is a new account
++       * creation
++       */
++      if (pp.pwdCheckQuality > 0 && !(be_isroot( op ) && !pa)) {
+
+               rc = check_password_quality( bv, &pp, &pErr, e, (char **)&txt );
+               if (rc != LDAP_SUCCESS) {
+--
+1.9.1
+
diff --git a/meta-stx/recipes-support/openldap/files/stx-slapd.service b/meta-stx/recipes-support/openldap/files/stx-slapd.service
new file mode 100644 (file)
index 0000000..1b219f2
--- /dev/null
@@ -0,0 +1,21 @@
+[Unit]
+Description=OpenLDAP Server Daemon
+After=syslog.target network-online.target
+Documentation=man:slapd
+Documentation=man:slapd-config
+Documentation=man:slapd-hdb
+Documentation=man:slapd-mdb
+Documentation=file:///usr/share/doc/openldap-servers/guide.html
+
+[Service]
+Type=forking
+RuntimeDirectory=openldap
+StateDirectory=openldap-data
+# PIDFile=/var/run/openldap/slapd.pid
+Environment="SLAPD_URLS=ldap:/// ldapi:///" "SLAPD_OPTIONS="
+EnvironmentFile=/etc/sysconfig/slapd
+ExecStartPre=/usr/libexec/openldap/check-config.sh
+ExecStart=/usr/sbin/slapd -u root -h ${SLAPD_URLS} $SLAPD_OPTIONS
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-support/openldap/openldap_%.bbappend b/meta-stx/recipes-support/openldap/openldap_%.bbappend
new file mode 100644 (file)
index 0000000..762a493
--- /dev/null
@@ -0,0 +1,175 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+       file://rootdn-should-not-bypass-ppolicy.patch \
+       file://0021-openldap-and-stx-source-and-config-files.patch \
+       file://stx-slapd.service \
+       "
+
+inherit pkgconfig useradd
+
+USERADD_PACKAGES = "${PN}"
+USERADD_PARAM_${PN} = "-r -g ldap -u 55 -d / -s /sbin/nologin -c 'OpenLDAP server' ldap"
+GROUPADD_PARAM_${PN} = "-r -g 55 ldap"
+
+PACKAGECONFIG_CONFARGS_remove = "--with-tls=gnutls "
+DEPENDS += " \
+       openssl \
+       glibc \
+       mariadb \
+       mariadb-native \
+       libtirpc \
+       "
+
+RDEPENDS_${PN}_append = " bash"
+
+# Do not remove libtool la files slapd.conf uses ppolicy.la 
+REMOVE_LIBTOOL_LA = "0"
+
+
+# Defaults:
+#      --enable-bdb=no
+#      --enable-hdb=no
+#      --enable-bdb=no
+#      --enable-monitor=mod 
+######
+# Stx :
+#      --enable-wrappers=yes
+#      --enable-moznss-compatibility=yes
+
+#################
+# TODO:
+#      mysql_config: native command missing
+
+EXTRA_OECONF += " \
+               --enable-syslog \
+               --enable-proctitle \
+               --enable-ipv6 \
+               --enable-local \
+               --enable-slapd \
+               --enable-dynacl \
+               --enable-aci \
+               --enable-cleartext \
+               --enable-crypt \
+               --enable-lmpasswd \
+               --enable-modules \
+               --enable-rewrite \
+               --enable-rlookups \
+               --disable-slp \
+               --enable-wrappers=no \
+               --enable-backends=mod \
+               --enable-bdb=yes \
+               --enable-hdb=yes \
+               --enable-mdb=yes \
+               --enable-monitor=yes \
+               --disable-ndb \
+               --enable-overlays=mod \
+               --disable-static \
+               --enable-shared \
+               --with-cyrus-sasl \
+               --without-fetch \
+               --with-tls=openssl \
+               "
+#      --enable-moznss-compatibility=no 
+# NEW:
+# --enable-lmpasswd 
+# --enable-slapi
+# --enable-wrappers
+# --enable-moznss-compatibility=yes
+
+do_configure_append () {
+   cd ${S}
+   ln -f -s ${S}/contrib/slapd-modules/smbk5pwd/smbk5pwd.c servers/slapd/overlays
+   ln -f -s ${S}/contrib/slapd-modules/allop/allop.c servers/slapd/overlays
+   ln -f -s ${S}/contrib/slapd-modules/passwd/sha2/sha2.c servers/slapd/overlays
+   ln -f -s ${S}/contrib/slapd-modules/passwd/sha2/sha2.h servers/slapd/overlays
+   ln -f -s ${S}/contrib/slapd-modules/passwd/sha2/slapd-sha2.c servers/slapd/overlays
+}
+
+
+# If liblmdb is needed, then patch the Makefile
+#do_compile_append () {
+#   cd ${S}/ltb-project-openldap-ppolicy-check-password-1.1
+#   oe_runmake
+#}
+
+do_install_append () {
+       
+       # For this we need to build ltb-project-openldap
+       #install -m 755 check_password.so.%{check_password_version} %{buildroot}%{_libdir}/openldap/
+
+       cd ${S}/stx-sources
+       install -m 0755 -d ${D}/var/run/openldap
+       install -m 0755 -d ${D}/${sysconfdir}/tmpfiles.d
+       install -m 0755 ${S}/stx-sources/slapd.tmpfiles ${D}/${sysconfdir}/tmpfiles.d/slapd.conf 
+       install -m 0755 ${S}/stx-sources/openldap.tmpfiles ${D}/${sysconfdir}/tmpfiles.d/openldap.conf 
+       install -m 0755 ${S}/stx-sources/ldap.conf ${D}/${sysconfdir}/tmpfiles.d/ldap.conf 
+
+       # The database directory MUST exist prior to running slapd AND
+       # should only be accessible by the slapd and slap tools.
+       # Mode 700 recommended.
+       echo "d /var/lib/openldap-data 0700 ldap ldap -" >> ${D}/${sysconfdir}/tmpfiles.d/slapd.conf
+
+       install -m 0644 libexec-functions ${D}/${libexecdir}/openldap/functions
+       install -m 0755 libexec-convert-config.sh ${D}/${libexecdir}/openldap/convert-config.sh
+       install -m 0755 libexec-check-config.sh ${D}/${libexecdir}/openldap/check-config.sh
+       install -m 0755 libexec-upgrade-db.sh ${D}/${libexecdir}/openldap/upgrade-db.sh
+
+       install -m 0755 libexec-create-certdb.sh ${D}/${libexecdir}/openldap/create-certdb.sh
+       install -m 0755 libexec-generate-server-cert.sh ${D}/${libexecdir}/openldap/generate-server-cert.sh
+       install -m 0755 libexec-update-ppolicy-schema.sh ${D}/${libexecdir}/openldap/update-ppolicy-schema.sh
+
+       install -m 0644  ${S}/../stx-slapd.service ${D}/${systemd_system_unitdir}/stx-slapd.service
+       install -m 0755 -d ${D}/${sysconfdir}/sysconfig
+       install -m 0644 slapd.sysconfig ${D}/${sysconfdir}/sysconfig/slapd
+       install -m 0755 -d ${D}/${datadir}/openldap-servers
+       install -m 0644 slapd.ldif ${D}/${datadir}/openldap-servers/slapd.ldif
+       install -m 0750 -d ${D}/${sysconfdir}/openldap/slapd.d
+       rm -rf ${D}/var/run
+
+       #cd ${S}/
+       #oe_runmake -e -C servers/slapd/overlays  DESTDIR=${D} install
+       sed -i -e 's:\(/sbin/runuser\):/usr\1:g' ${D}/usr/libexec/openldap/functions
+
+}
+
+#pkg_postinst_ontarget_libldap-2.4_append () {
+#      cp /usr/share/starlingx/slapd.service ${systemd_system_unitdir}/slapd.service
+#      chmod 644 ${systemd_system_unitdir}/slapd.service
+#      cp ${datadir}/starlingx/slapd.sysconfig ${sysconfdir}/sysconfig/slapd
+#      systemctl daemon-reload
+#      chmod 755 /etc/openldap
+#      chmod 755 /etc/openldap/slapd.d
+#}
+
+FILES_${PN}_append = " \
+       ${datadir}/openldap-servers/ \
+       ${libexecdir}/openldap/ \
+       ${sysconfdir}/sysconfig \
+       ${sysconfdir}/tmpfiles.d \
+       ${systemd_system_unitdir}/stx-slapd.service \
+       "
+
+# *.la are openldap modules, so re-define
+# to remove the *.la from -dev package
+FILES_${PN}-dev = " \
+       ${includedir} \
+       ${FILES_SOLIBSDEV} \
+       ${libdir}/*.la \
+       ${libexecdir}/openldap/*${SOLIBSDEV} \
+       "
diff --git a/meta-stx/recipes-support/openstack-barbican-api/files/LICENSE b/meta-stx/recipes-support/openstack-barbican-api/files/LICENSE
new file mode 100644 (file)
index 0000000..b3201ab
--- /dev/null
@@ -0,0 +1,204 @@
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/meta-stx/recipes-support/openstack-barbican-api/files/barbican-api-paste.ini b/meta-stx/recipes-support/openstack-barbican-api/files/barbican-api-paste.ini
new file mode 100644 (file)
index 0000000..979f2b4
--- /dev/null
@@ -0,0 +1,65 @@
+[composite:main]
+use = egg:Paste#urlmap
+/: barbican_version
+/v1: barbican-api-keystone
+
+# Use this pipeline for Barbican API - versions no authentication
+[pipeline:barbican_version]
+pipeline = cors http_proxy_to_wsgi versionapp
+
+# Use this pipeline for Barbican API - DEFAULT no authentication
+[pipeline:barbican_api]
+pipeline = cors http_proxy_to_wsgi unauthenticated-context apiapp
+
+#Use this pipeline to activate a repoze.profile middleware and HTTP port,
+#  to provide profiling information for the REST API processing.
+[pipeline:barbican-profile]
+pipeline = cors http_proxy_to_wsgi unauthenticated-context egg:Paste#cgitb egg:Paste#httpexceptions profile apiapp
+
+#Use this pipeline for keystone auth
+[pipeline:barbican-api-keystone]
+pipeline = cors http_proxy_to_wsgi authtoken context apiapp
+
+#Use this pipeline for keystone auth with audit feature
+[pipeline:barbican-api-keystone-audit]
+pipeline = http_proxy_to_wsgi authtoken context audit apiapp
+
+[app:apiapp]
+paste.app_factory = barbican.api.app:create_main_app
+
+[app:versionapp]
+paste.app_factory = barbican.api.app:create_version_app
+
+[filter:simple]
+paste.filter_factory = barbican.api.middleware.simple:SimpleFilter.factory
+
+[filter:unauthenticated-context]
+paste.filter_factory = barbican.api.middleware.context:UnauthenticatedContextMiddleware.factory
+
+[filter:context]
+paste.filter_factory = barbican.api.middleware.context:ContextMiddleware.factory
+
+[filter:audit]
+paste.filter_factory = keystonemiddleware.audit:filter_factory
+audit_map_file = /etc/barbican/api_audit_map.conf
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:profile]
+use = egg:repoze.profile
+log_filename = myapp.profile
+cachegrind_filename = cachegrind.out.myapp
+discard_first_request = true
+path = /__profile__
+flush_at_shutdown = true
+unwind = false
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = barbican
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory
+[server:main]
+use = egg:gunicorn#main
diff --git a/meta-stx/recipes-support/openstack-barbican-api/files/barbican.conf b/meta-stx/recipes-support/openstack-barbican-api/files/barbican.conf
new file mode 100644 (file)
index 0000000..ce70e37
--- /dev/null
@@ -0,0 +1,1411 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+[DEFAULT]
+
+#
+# From barbican.common.config
+#
+
+# Role used to identify an authenticated user as administrator.
+# (string value)
+#admin_role = admin
+
+# Allow unauthenticated users to access the API with read-only
+# privileges. This only applies when using ContextMiddleware. (boolean
+# value)
+#allow_anonymous_access = false
+
+# Maximum allowed http request size against the barbican-api. (integer
+# value)
+#max_allowed_request_size_in_bytes = 15000
+
+# Maximum allowed secret size in bytes. (integer value)
+#max_allowed_secret_in_bytes = 10000
+
+# Host name, for use in HATEOAS-style references Note: Typically this
+# would be the load balanced endpoint that clients would use to
+# communicate back with this service. If a deployment wants to derive
+# host from wsgi request instead then make this blank. Blank is needed
+# to override default config value which is 'http://localhost:9311'
+# (string value)
+#host_href = http://localhost:9311
+
+# SQLAlchemy connection string for the reference implementation
+# registry server. Any valid SQLAlchemy connection string is fine.
+# See:
+# http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine.
+# Note: For absolute addresses, use '////' slashes after 'sqlite:'.
+# (string value)
+#sql_connection = sqlite:///barbican.sqlite
+
+# Period in seconds after which SQLAlchemy should reestablish its
+# connection to the database. MySQL uses a default `wait_timeout` of 8
+# hours, after which it will drop idle connections. This can result in
+# 'MySQL Gone Away' exceptions. If you notice this, you can lower this
+# value to ensure that SQLAlchemy reconnects before MySQL can drop the
+# connection. (integer value)
+#sql_idle_timeout = 3600
+
+# Maximum number of database connection retries during startup. Set to
+# -1 to specify an infinite retry count. (integer value)
+#sql_max_retries = 60
+
+# Interval between retries of opening a SQL connection. (integer
+# value)
+#sql_retry_interval = 1
+
+# Create the Barbican database on service startup. (boolean value)
+#db_auto_create = true
+
+# Maximum page size for the 'limit' paging URL parameter. (integer
+# value)
+#max_limit_paging = 100
+
+# Default page size for the 'limit' paging URL parameter. (integer
+# value)
+#default_limit_paging = 10
+
+# Accepts a class imported from the sqlalchemy.pool module, and
+# handles the details of building the pool for you. If commented out,
+# SQLAlchemy will select based on the database dialect. Other options
+# are QueuePool (for SQLAlchemy-managed connections) and NullPool (to
+# disabled SQLAlchemy management of connections). See
+# http://docs.sqlalchemy.org/en/latest/core/pooling.html for more
+# details (string value)
+#sql_pool_class = QueuePool
+
+# Show SQLAlchemy pool-related debugging output in logs (sets DEBUG
+# log level output) if specified. (boolean value)
+#sql_pool_logging = false
+
+# Size of pool used by SQLAlchemy. This is the largest number of
+# connections that will be kept persistently in the pool. Can be set
+# to 0 to indicate no size limit. To disable pooling, use a NullPool
+# with sql_pool_class instead. Comment out to allow SQLAlchemy to
+# select the default. (integer value)
+#sql_pool_size = 5
+
+# The maximum overflow size of the pool used by SQLAlchemy. When the
+# number of checked-out connections reaches the size set in
+# sql_pool_size, additional connections will be returned up to this
+# limit. It follows then that the total number of simultaneous
+# connections the pool will allow is sql_pool_size +
+# sql_pool_max_overflow. Can be set to -1 to indicate no overflow
+# limit, so no limit will be placed on the total number of concurrent
+# connections. Comment out to allow SQLAlchemy to select the default.
+# (integer value)
+#sql_pool_max_overflow = 10
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port
+# number; <port> results in listening on the specified port number
+# (and not enabling backdoor if that port is in use); and
+# <start>:<end> results in listening on the smallest unused port
+# number within the specified range of port numbers.  The chosen port
+# is displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enable eventlet backdoor, using the provided path as a unix socket
+# that can receive connections. This option is mutually exclusive with
+# 'backdoor_port' in that only one should be provided. If both are
+# provided then the existence of this option overrides the usage of
+# that option. (string value)
+#backdoor_socket = <None>
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of
+# the default INFO level. (boolean value)
+# Note: This option can be changed without restarting.
+#debug = false
+
+# The name of a logging configuration file. This file is appended to
+# any existing logging configuration files. For details about logging
+# configuration files, see the Python logging module documentation.
+# Note that when logging configuration files are used then all logging
+# configuration is set in the configuration file and other logging
+# configuration options are ignored (for example, log-date-format).
+# (string value)
+# Note: This option can be changed without restarting.
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set.
+# (string value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default
+# is set, logging will go to stderr as defined by use_stderr. This
+# option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = <None>
+
+# (Optional) The base directory used for relative log_file  paths.
+# This option is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = <None>
+
+# Uses logging handler designed to watch file system. When log file is
+# moved or removed this handler will open a new log file with
+# specified path instantaneously. It makes sense only if log_file
+# option is specified and Linux platform is used. This option is
+# ignored if log_config_append is set. (boolean value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and
+# will be changed later to honor RFC5424. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_syslog = false
+
+# Enable journald for logging. If running in a systemd environment you
+# may wish to enable journal support. Doing so will use the journal
+# native protocol which includes structured metadata in addition to
+# log messages.This option is ignored if log_config_append is set.
+# (boolean value)
+#use_journal = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Use JSON formatting for logging. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_json = false
+
+# Log output to standard error. This option is ignored if
+# log_config_append is set. (boolean value)
+#use_stderr = false
+
+# Log output to Windows Event Log. (boolean value)
+#use_eventlog = false
+
+# The amount of time before the log files are rotated. This option is
+# ignored unless log_rotation_type is set to "interval". (integer
+# value)
+#log_rotate_interval = 1
+
+# Rotation interval type. The time of the last file change (or the
+# time when the service was started) is used when scheduling the next
+# rotation. (string value)
+# Possible values:
+# Seconds - <No description provided>
+# Minutes - <No description provided>
+# Hours - <No description provided>
+# Days - <No description provided>
+# Weekday - <No description provided>
+# Midnight - <No description provided>
+#log_rotate_interval_type = days
+
+# Maximum number of rotated log files. (integer value)
+#max_logfile_count = 30
+
+# Log file maximum size in MB. This option is ignored if
+# "log_rotation_type" is not set to "size". (integer value)
+#max_logfile_size_mb = 200
+
+# Log rotation type. (string value)
+# Possible values:
+# interval - Rotate logs at predefined time intervals.
+# size - Rotate logs once they reach a predefined size.
+# none - Do not rotate log files.
+#log_rotation_type = none
+
+# Format string to use for log messages with context. Used by
+# oslo_log.formatters.ContextFormatter (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined.
+# Used by oslo_log.formatters.ContextFormatter (string value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the
+# message is DEBUG. Used by oslo_log.formatters.ContextFormatter
+# (string value)
+#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. Used by
+# oslo_log.formatters.ContextFormatter (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. Used by
+# oslo_log.formatters.ContextFormatter (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is
+# ignored if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message.
+# (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message.
+# (string value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Interval, number of seconds, of log rate limiting. (integer value)
+#rate_limit_interval = 0
+
+# Maximum number of logged messages per rate_limit_interval. (integer
+# value)
+#rate_limit_burst = 0
+
+# Log level name used by rate limiting: CRITICAL, ERROR, INFO,
+# WARNING, DEBUG or empty string. Logs with level greater or equal to
+# rate_limit_except_level are not filtered. An empty string means that
+# all levels are filtered. (string value)
+#rate_limit_except_level = CRITICAL
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size = 30
+
+# The pool size limit for connections expiration policy (integer
+# value)
+#conn_pool_min_size = 2
+
+# The time-to-live in sec of idle connections in the pool (integer
+# value)
+#conn_pool_ttl = 1200
+
+# Size of executor thread pool when executor is threading or eventlet.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# The network address and optional user credentials for connecting to
+# the messaging backend, in URL format. The expected format is:
+#
+# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query
+#
+# Example: rabbit://rabbitmq:password@127.0.0.1:5672//
+#
+# For full details on the fields in the URL see the documentation of
+# oslo_messaging.TransportURL at
+# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
+# (string value)
+#transport_url = rabbit://
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = openstack
+
+#
+# From oslo.service.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should we run
+# them here? (boolean value)
+#run_external_periodic_tasks = true
+
+#
+# From oslo.service.wsgi
+#
+
+# File name for the paste.deploy config for api service (string value)
+#api_paste_config = api-paste.ini
+
+# A python format string that is used as the template to generate log
+# lines. The following values can be formatted into it: client_ip,
+# date_time, request_line, status_code, body_length, wall_seconds.
+# (string value)
+#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s  len: %(body_length)s time: %(wall_seconds).7f
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket.
+# Not supported on OS X. (integer value)
+#tcp_keepidle = 600
+
+# Size of the pool of greenthreads used by wsgi (integer value)
+#wsgi_default_pool_size = 100
+
+# Maximum line size of message headers to be accepted. max_header_line
+# may need to be increased when using large tokens (typically those
+# generated when keystone is configured to use PKI tokens with big
+# service catalogs). (integer value)
+#max_header_line = 16384
+
+# If False, closes the client socket connection explicitly. (boolean
+# value)
+#wsgi_keep_alive = true
+
+# Timeout for client connections' socket operations. If an incoming
+# connection is idle for this number of seconds it will be closed. A
+# value of '0' means wait forever. (integer value)
+#client_socket_timeout = 900
+
+
+[certificate]
+
+#
+# From barbican.certificate.plugin
+#
+
+# Extension namespace to search for plugins. (string value)
+#namespace = barbican.certificate.plugin
+
+# List of certificate plugins to load. (multi valued)
+#enabled_certificate_plugins = simple_certificate
+
+
+[certificate_event]
+
+#
+# From barbican.certificate.plugin
+#
+
+# Extension namespace to search for eventing plugins. (string value)
+#namespace = barbican.certificate.event.plugin
+
+# List of certificate plugins to load. (multi valued)
+#enabled_certificate_event_plugins = simple_certificate_event
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain
+# received in the requests "origin" header. Format:
+# "<protocol>://<host>[:<port>]", no trailing slash. Example:
+# https://horizon.example.com (list value)
+#allowed_origin = <None>
+
+# Indicate that the actual request can include user credentials
+# (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to
+# HTTP Simple Headers. (list value)
+#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Project-Id,X-Identity-Status,X-User-Id,X-Storage-Token,X-Domain-Id,X-User-Domain-Id,X-Project-Domain-Id,X-Roles
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request. (list
+# value)
+#allow_methods = GET,PUT,POST,DELETE,PATCH
+
+# Indicate which header field names may be used during the actual
+# request. (list value)
+#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Project-Id,X-Identity-Status,X-User-Id,X-Storage-Token,X-Domain-Id,X-User-Domain-Id,X-Project-Domain-Id,X-Roles
+
+
+[crypto]
+
+#
+# From barbican.plugin.crypto
+#
+
+# Extension namespace to search for plugins. (string value)
+#namespace = barbican.crypto.plugin
+
+# List of crypto plugins to load. (multi valued)
+#enabled_crypto_plugins = simple_crypto
+
+
+[dogtag_plugin]
+
+#
+# From barbican.plugin.dogtag
+#
+
+# Path to PEM file for authentication (string value)
+#pem_path = /etc/barbican/kra_admin_cert.pem
+
+# Hostname for the Dogtag instance (string value)
+#dogtag_host = localhost
+
+# Port for the Dogtag instance (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#dogtag_port = 8443
+
+# Path to the NSS certificate database (string value)
+#nss_db_path = /etc/barbican/alias
+
+# Password for the NSS certificate databases (string value)
+#nss_password = <None>
+
+# Profile for simple CMC requests (string value)
+#simple_cmc_profile = caOtherCert
+
+# List of automatically approved enrollment profiles (string value)
+#auto_approved_profiles = caServerCert
+
+# Time in days for CA entries to expire (string value)
+#ca_expiration_time = 1
+
+# Working directory for Dogtag plugin (string value)
+#plugin_working_dir = /etc/barbican/dogtag
+
+# User friendly plugin name (string value)
+#plugin_name = Dogtag KRA
+
+# Retries when storing or generating secrets (integer value)
+#retries = 3
+
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete "public" Identity API endpoint. This endpoint should not be
+# an "admin" endpoint, as it should be accessible by all end users.
+# Unauthenticated clients are redirected to this endpoint to
+# authenticate. Although this endpoint should ideally be unversioned,
+# client support in the wild varies. If you're using a versioned v2
+# endpoint here, then this should *not* be the same endpoint the
+# service user utilizes for validating tokens, because normal end
+# users may not be able to reach that endpoint. (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_uri
+#www_authenticate_uri = <None>
+
+# DEPRECATED: Complete "public" Identity API endpoint. This endpoint
+# should not be an "admin" endpoint, as it should be accessible by all
+# end users. Unauthenticated clients are redirected to this endpoint
+# to authenticate. Although this endpoint should ideally be
+# unversioned, client support in the wild varies. If you're using a
+# versioned v2 endpoint here, then this should *not* be the same
+# endpoint the service user utilizes for validating tokens, because
+# normal end users may not be able to reach that endpoint. This option
+# is deprecated in favor of www_authenticate_uri and will be removed
+# in the S release. (string value)
+# This option is deprecated for removal since Queens.
+# Its value may be silently ignored in the future.
+# Reason: The auth_uri option is deprecated in favor of
+# www_authenticate_uri and will be removed in the S release.
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but
+# delegate the authorization decision to downstream WSGI components.
+# (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server.
+# (integer value)
+#http_connect_timeout = <None>
+
+# How many times are we trying to reconnect when communicating with
+# Identity API Server. (integer value)
+#http_request_max_retries = 3
+
+# Request environment key where the Swift cache object is stored. When
+# auth_token middleware is deployed with a Swift cache, use this
+# option to have the middleware share a caching backend with swift.
+# Otherwise, use the ``memcached_servers`` option instead. (string
+# value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string
+# value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPs
+# connections. Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# DEPRECATED: Directory used to cache files related to PKI tokens.
+# This option has been deprecated in the Ocata release and will be
+# removed in the P release. (string value)
+# This option is deprecated for removal since Ocata.
+# Its value may be silently ignored in the future.
+# Reason: PKI token format is no longer supported.
+#signing_dir = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching.
+# If left undefined, tokens will instead be cached in-process. (list
+# value)
+# Deprecated group/name - [keystone_authtoken]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the
+# middleware caches previously-seen tokens for a configurable duration
+# (in seconds). Set to -1 to disable caching completely. (integer
+# value)
+#token_cache_time = 300
+
+# (Optional) If defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token data is
+# encrypted and authenticated in the cache. If the value is not one of
+# these options or empty, auth_token will raise an exception on
+# initialization. (string value)
+# Possible values:
+# None - <No description provided>
+# MAC - <No description provided>
+# ENCRYPT - <No description provided>
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This
+# string is used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead
+# before it is tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a
+# memcached server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held
+# unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a
+# memcached client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool.
+# The advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If
+# False, middleware will not ask for service catalog on token
+# validation and will not set the X-Service-Catalog header. (boolean
+# value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to:
+# "disabled" to not check token binding. "permissive" (default) to
+# validate binding information if the bind type is of a form known to
+# the server and ignore it if not. "strict" like "permissive" but if
+# the bind type is unknown the token will be rejected. "required" any
+# form of token binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string value)
+#enforce_token_bind = permissive
+
+# DEPRECATED: Hash algorithms to use for hashing PKI tokens. This may
+# be a single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will be tried
+# in the order given, so put the preferred one first for performance.
+# The result of the first hash will be stored in the cache. This will
+# typically be set to multiple values only while migrating from a less
+# secure algorithm to a more secure one. Once all the old tokens are
+# expired this option should be set to a single value for better
+# performance. (list value)
+# This option is deprecated for removal since Ocata.
+# Its value may be silently ignored in the future.
+# Reason: PKI token format is no longer supported.
+#hash_algorithms = md5
+
+# A choice of roles that must be present in a service token. Service
+# tokens are allowed to request that an expired token can be used and
+# so this check should tightly control that only actual services
+# should be sending this token. Roles here are applied as an ANY check
+# so any role in this list must be present. For backwards
+# compatibility reasons this currently only affects the allow_expired
+# check. (list value)
+#service_token_roles = service
+
+# For backwards compatibility reasons we must let valid service tokens
+# pass that don't pass the service_token_roles check as valid. Setting
+# this true will become the default in a future release and should be
+# enabled if possible. (boolean value)
+#service_token_roles_required = false
+
+# Authentication type to load (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string
+# value)
+#auth_section = <None>
+
+
+[keystone_notifications]
+
+#
+# From barbican.common.config
+#
+
+# True enables keystone notification listener  functionality. (boolean
+# value)
+#enable = false
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the transport_url
+# option. (string value)
+#control_exchange = keystone
+
+# Keystone notification queue topic name. This name needs to match one
+# of values mentioned in Keystone deployment's 'notification_topics'
+# configuration e.g.    notification_topics=notifications,
+# barbican_notificationsMultiple servers may listen on a topic and
+# messages will be dispatched to one of the servers in a round-robin
+# fashion. That's why Barbican service should have its own dedicated
+# notification queue so that it receives all of Keystone
+# notifications. (string value)
+#topic = notifications
+
+# True enables requeue feature in case of notification processing
+# error. Enable this only when underlying transport supports this
+# feature. (boolean value)
+#allow_requeue = false
+
+# Version of tasks invoked via notifications (string value)
+#version = 1.0
+
+# Define the number of max threads to be used for notification server
+# processing functionality. (integer value)
+#thread_pool_size = 10
+
+
+[kmip_plugin]
+
+#
+# From barbican.plugin.secret_store.kmip
+#
+
+# Username for authenticating with KMIP server (string value)
+#username = <None>
+
+# Password for authenticating with KMIP server (string value)
+#password = <None>
+
+# Address of the KMIP server (string value)
+#host = localhost
+
+# Port for the KMIP server (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = 5696
+
+# SSL version, maps to the module ssl's constants (string value)
+#ssl_version = PROTOCOL_TLSv1_2
+
+# File path to concatenated "certification authority" certificates
+# (string value)
+#ca_certs = <None>
+
+# File path to local client certificate (string value)
+#certfile = <None>
+
+# File path to local client certificate keyfile (string value)
+#keyfile = <None>
+
+# Only support PKCS#1 encoding of asymmetric keys (boolean value)
+#pkcs1_only = false
+
+# User friendly plugin name (string value)
+#plugin_name = KMIP HSM
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# Name for the AMQP container. must be globally unique. Defaults to a
+# generated UUID (string value)
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+#trace = false
+
+# Attempt to connect via SSL. If no other ssl-related parameters are
+# given, it will use the system's CA-bundle to verify the server's
+# certificate. (boolean value)
+#ssl = false
+
+# CA certificate PEM file used to verify the server's certificate
+# (string value)
+#ssl_ca_file =
+
+# Self-identifying certificate PEM file for client authentication
+# (string value)
+#ssl_cert_file =
+
+# Private key PEM file used to sign ssl_cert_file certificate
+# (optional) (string value)
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+#ssl_key_password = <None>
+
+# By default SSL checks that the name in the server's certificate
+# matches the hostname in the transport_url. In some configurations it
+# may be preferable to use the virtual hostname instead, for example
+# if the server uses the Server Name Indication TLS extension
+# (rfc6066) to provide a certificate per virtual host. Set
+# ssl_verify_vhost to True if the server's SSL certificate uses the
+# virtual host name instead of the DNS name. (boolean value)
+#ssl_verify_vhost = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string
+# value)
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+#sasl_config_name =
+
+# SASL realm to use if no realm present in username (string value)
+#sasl_default_realm =
+
+# Seconds to pause before attempting to re-connect. (integer value)
+# Minimum value: 1
+#connection_retry_interval = 1
+
+# Increase the connection_retry_interval by this many seconds after
+# each unsuccessful failover attempt. (integer value)
+# Minimum value: 0
+#connection_retry_backoff = 2
+
+# Maximum limit for connection_retry_interval +
+# connection_retry_backoff (integer value)
+# Minimum value: 1
+#connection_retry_interval_max = 30
+
+# Time to pause between re-connecting an AMQP 1.0 link that failed due
+# to a recoverable error. (integer value)
+# Minimum value: 1
+#link_retry_delay = 10
+
+# The maximum number of attempts to re-send a reply message which
+# failed due to a recoverable error. (integer value)
+# Minimum value: -1
+#default_reply_retry = 0
+
+# The deadline for an rpc reply message delivery. (integer value)
+# Minimum value: 5
+#default_reply_timeout = 30
+
+# The deadline for an rpc cast or call message delivery. Only used
+# when caller does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_send_timeout = 30
+
+# The deadline for a sent notification message delivery. Only used
+# when caller does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_notify_timeout = 30
+
+# The duration to schedule a purge of idle sender links. Detach link
+# after expiry. (integer value)
+# Minimum value: 1
+#default_sender_link_timeout = 600
+
+# Indicates the addressing mode used by the driver.
+# Permitted values:
+# 'legacy'   - use legacy non-routable addressing
+# 'routable' - use routable addresses
+# 'dynamic'  - use legacy addresses if the message bus does not
+# support routing otherwise use routable addressing (string value)
+#addressing_mode = dynamic
+
+# Enable virtual host support for those message buses that do not
+# natively support virtual hosting (such as qpidd). When set to true
+# the virtual host name will be added to all message bus addresses,
+# effectively creating a private 'subnet' per virtual host. Set to
+# False if the message bus supports virtual hosting using the
+# 'hostname' field in the AMQP 1.0 Open performative as the name of
+# the virtual host. (boolean value)
+#pseudo_vhost = true
+
+# address prefix used when sending to a specific server (string value)
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+#group_request_prefix = unicast
+
+# Address prefix for all generated RPC addresses (string value)
+#rpc_address_prefix = openstack.org/om/rpc
+
+# Address prefix for all generated Notification addresses (string
+# value)
+#notify_address_prefix = openstack.org/om/notify
+
+# Appended to the address prefix when sending a fanout message. Used
+# by the message bus to identify fanout messages. (string value)
+#multicast_address = multicast
+
+# Appended to the address prefix when sending to a particular
+# RPC/Notification server. Used by the message bus to identify
+# messages sent to a single destination. (string value)
+#unicast_address = unicast
+
+# Appended to the address prefix when sending to a group of consumers.
+# Used by the message bus to identify messages that should be
+# delivered in a round-robin fashion across consumers. (string value)
+#anycast_address = anycast
+
+# Exchange name used in notification addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_notification_exchange if set
+# else control_exchange if set
+# else 'notify' (string value)
+#default_notification_exchange = <None>
+
+# Exchange name used in RPC addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_rpc_exchange if set
+# else control_exchange if set
+# else 'rpc' (string value)
+#default_rpc_exchange = <None>
+
+# Window size for incoming RPC Reply messages. (integer value)
+# Minimum value: 1
+#reply_link_credit = 200
+
+# Window size for incoming RPC Request messages (integer value)
+# Minimum value: 1
+#rpc_server_credit = 100
+
+# Window size for incoming Notification messages (integer value)
+# Minimum value: 1
+#notify_server_credit = 100
+
+# Send messages of this type pre-settled.
+# Pre-settled messages will not receive acknowledgement
+# from the peer. Note well: pre-settled messages may be
+# silently discarded if the delivery fails.
+# Permitted values:
+# 'rpc-call' - send RPC Calls pre-settled
+# 'rpc-reply'- send RPC Replies pre-settled
+# 'rpc-cast' - Send RPC Casts pre-settled
+# 'notify'   - Send Notifications pre-settled
+#  (multi valued)
+#pre_settled = rpc-cast
+#pre_settled = rpc-reply
+
+
+[oslo_messaging_kafka]
+
+#
+# From oslo.messaging
+#
+
+# Max fetch bytes of Kafka consumer (integer value)
+#kafka_max_fetch_bytes = 1048576
+
+# Default timeout(s) for Kafka consumers (floating point value)
+#kafka_consumer_timeout = 1.0
+
+# DEPRECATED: Pool Size for Kafka Consumers (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#pool_size = 10
+
+# DEPRECATED: The pool size limit for connections expiration policy
+# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#conn_pool_min_size = 2
+
+# DEPRECATED: The time-to-live in sec of idle connections in the pool
+# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#conn_pool_ttl = 1200
+
+# Group id for Kafka consumer. Consumers in one group will coordinate
+# message consumption (string value)
+#consumer_group = oslo_messaging_consumer
+
+# Upper bound on the delay for KafkaProducer batching in seconds
+# (floating point value)
+#producer_batch_timeout = 0.0
+
+# Size of batch for the producer async send (integer value)
+#producer_batch_size = 16384
+
+# Enable asynchronous consumer commits (boolean value)
+#enable_auto_commit = false
+
+# The maximum number of records returned in a poll call (integer
+# value)
+#max_poll_records = 500
+
+# Protocol used to communicate with brokers (string value)
+# Possible values:
+# PLAINTEXT - <No description provided>
+# SASL_PLAINTEXT - <No description provided>
+# SSL - <No description provided>
+# SASL_SSL - <No description provided>
+#security_protocol = PLAINTEXT
+
+# Mechanism when security protocol is SASL (string value)
+#sasl_mechanism = PLAIN
+
+# CA certificate PEM file used to verify the server certificate
+# (string value)
+#ssl_cafile =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The Driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If
+# not set, we fall back to the same configuration used for RPC.
+# (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+# The maximum number of attempts to re-send a notification message
+# which failed to be delivered due to a recoverable error. 0 - No
+# retry, -1 - indefinite (integer value)
+#retry = -1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete = false
+
+# Connect over SSL. (boolean value)
+# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl
+#ssl = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are
+# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
+# available on some distributions. (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
+#ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
+#ssl_key_file =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
+#ssl_cert_file =
+
+# SSL certification authority file (valid only if SSL enabled).
+# (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
+#ssl_ca_file =
+
+# How long to wait before reconnecting in response to an AMQP consumer
+# cancel notification. (floating point value)
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression
+# will not be used. This option may not be available in future
+# versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait a missing client before abandoning to send it its
+# replies. This value should not be longer than rpc_response_timeout.
+# (integer value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we
+# are currently connected to becomes unavailable. Takes effect only if
+# more than one RabbitMQ node is provided in config. (string value)
+# Possible values:
+# round-robin - <No description provided>
+# shuffle - <No description provided>
+#kombu_failover_strategy = round-robin
+
+# The RabbitMQ login method. (string value)
+# Possible values:
+# PLAIN - <No description provided>
+# AMQPLAIN - <No description provided>
+# RABBIT-CR-DEMO - <No description provided>
+#rabbit_login_method = AMQPLAIN
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to backoff for between retries when connecting to RabbitMQ.
+# (integer value)
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30
+# seconds. (integer value)
+#rabbit_interval_max = 30
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. In RabbitMQ 3.0,
+# queue mirroring is no longer controlled by the x-ha-policy argument
+# when declaring a queue. If you just want to make sure that all
+# queues (except those with auto-generated names) are mirrored across
+# all nodes, run: "rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-
+# mode": "all"}' " (boolean value)
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL
+# (x-expires). Queues which are unused for the duration of the TTL are
+# automatically deleted. The parameter affects only reply and fanout
+# queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows
+# unlimited messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down
+# if heartbeat's keep-alive fails (0 disable the heartbeat).
+# EXPERIMENTAL (integer value)
+#heartbeat_timeout_threshold = 60
+
+# How often times during the heartbeat_timeout_threshold we check the
+# heartbeat. (integer value)
+#heartbeat_rate = 2
+
+
+[oslo_middleware]
+
+#
+# From oslo.middleware.http_proxy_to_wsgi
+#
+
+# Whether the application is behind a proxy or not. This determines if
+# the middleware should parse the headers or not. (boolean value)
+#enable_proxy_headers_parsing = false
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# This option controls whether or not to enforce scope when evaluating
+# policies. If ``True``, the scope of the token used in the request is
+# compared to the ``scope_types`` of the policy being enforced. If the
+# scopes do not match, an ``InvalidScope`` exception will be raised.
+# If ``False``, a message will be logged informing operators that
+# policies are being invoked with mismatching scope. (boolean value)
+#enforce_scope = false
+
+# The file that defines policies. (string value)
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string
+# value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be
+# relative to any directory in the search path defined by the
+# config_dir option, or absolute paths. The file defined by
+# policy_file must exist for these directories to be searched.
+# Missing or empty directories are ignored. (multi valued)
+#policy_dirs = policy.d
+
+# Content Type to send and receive data for REST based policy check
+# (string value)
+# Possible values:
+# application/x-www-form-urlencoded - <No description provided>
+# application/json - <No description provided>
+#remote_content_type = application/x-www-form-urlencoded
+
+# server identity verification for REST based policy check (boolean
+# value)
+#remote_ssl_verify_server_crt = false
+
+# Absolute path to ca cert file for REST based policy check (string
+# value)
+#remote_ssl_ca_crt_file = <None>
+
+# Absolute path to client cert for REST based policy check (string
+# value)
+#remote_ssl_client_crt_file = <None>
+
+# Absolute path client key file REST based policy check (string value)
+#remote_ssl_client_key_file = <None>
+
+
+[p11_crypto_plugin]
+
+#
+# From barbican.plugin.crypto.p11
+#
+
+# Path to vendor PKCS11 library (string value)
+#library_path = <None>
+
+# Password to login to PKCS11 session (string value)
+#login = <None>
+
+# Master KEK label (as stored in the HSM) (string value)
+#mkek_label = <None>
+
+# Master KEK length in bytes. (integer value)
+#mkek_length = <None>
+
+# Master HMAC Key label (as stored in the HSM) (string value)
+#hmac_label = <None>
+
+# HSM Slot ID (integer value)
+#slot_id = 1
+
+# Flag for Read/Write Sessions (boolean value)
+#rw_session = true
+
+# Project KEK length in bytes. (integer value)
+#pkek_length = 32
+
+# Project KEK Cache Time To Live, in seconds (integer value)
+#pkek_cache_ttl = 900
+
+# Project KEK Cache Item Limit (integer value)
+#pkek_cache_limit = 100
+
+# Secret encryption mechanism (string value)
+# Deprecated group/name - [p11_crypto_plugin]/algorithm
+#encryption_mechanism = CKM_AES_CBC
+
+# HMAC Key Type (string value)
+#hmac_key_type = CKK_AES
+
+# HMAC Key Generation Algorithm (string value)
+#hmac_keygen_mechanism = CKM_AES_KEY_GEN
+
+# HMAC key wrap mechanism (string value)
+#hmac_keywrap_mechanism = CKM_SHA256_HMAC
+
+# File to pull entropy for seeding RNG (string value)
+#seed_file =
+
+# Amount of data to read from file for seed (integer value)
+#seed_length = 32
+
+# User friendly plugin name (string value)
+#plugin_name = PKCS11 HSM
+
+# Generate IVs for CKM_AES_GCM mechanism. (boolean value)
+# Deprecated group/name - [p11_crypto_plugin]/generate_iv
+#aes_gcm_generate_iv = true
+
+# Always set CKA_SENSITIVE=CK_TRUE including CKA_EXTRACTABLE=CK_TRUE
+# keys. (boolean value)
+#always_set_cka_sensitive = true
+
+
+[queue]
+
+#
+# From barbican.common.config
+#
+
+# True enables queuing, False invokes workers synchronously (boolean
+# value)
+#enable = false
+
+# Queue namespace (string value)
+#namespace = barbican
+
+# Queue topic name (string value)
+#topic = barbican.workers
+
+# Version of tasks invoked via queue (string value)
+#version = 1.1
+
+# Server name for RPC task processing server (string value)
+#server_name = barbican.queue
+
+# Number of asynchronous worker processes (integer value)
+#asynchronous_workers = 1
+
+
+[quotas]
+
+#
+# From barbican.common.config
+#
+
+# Number of secrets allowed per project (integer value)
+#quota_secrets = -1
+
+# Number of orders allowed per project (integer value)
+#quota_orders = -1
+
+# Number of containers allowed per project (integer value)
+#quota_containers = -1
+
+# Number of consumers allowed per project (integer value)
+#quota_consumers = -1
+
+# Number of CAs allowed per project (integer value)
+#quota_cas = -1
+
+
+[retry_scheduler]
+
+#
+# From barbican.common.config
+#
+
+# Seconds (float) to wait before starting retry scheduler (floating
+# point value)
+#initial_delay_seconds = 10.0
+
+# Seconds (float) to wait between periodic schedule events (floating
+# point value)
+#periodic_interval_max_seconds = 10.0
+
+
+[secretstore]
+
+#
+# From barbican.plugin.secret_store
+#
+
+# Extension namespace to search for plugins. (string value)
+#namespace = barbican.secretstore.plugin
+
+# List of secret store plugins to load. (multi valued)
+#enabled_secretstore_plugins = store_crypto
+
+# Flag to enable multiple secret store plugin backend support. Default
+# is False (boolean value)
+#enable_multiple_secret_stores = false
+
+# List of suffix to use for looking up plugins which are supported
+# with multiple backend support. (list value)
+#stores_lookup_suffix = <None>
+
+
+[simple_crypto_plugin]
+
+#
+# From barbican.plugin.crypto.simple
+#
+
+# Key encryption key to be used by Simple Crypto Plugin (string value)
+#kek = dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=
+
+# User friendly plugin name (string value)
+#plugin_name = Software Only Crypto
+
+
+[snakeoil_ca_plugin]
+
+#
+# From barbican.certificate.plugin.snakeoil
+#
+
+# Path to CA certificate file (string value)
+#ca_cert_path = <None>
+
+# Path to CA certificate key file (string value)
+#ca_cert_key_path = <None>
+
+# Path to CA certificate chain file (string value)
+#ca_cert_chain_path = <None>
+
+# Path to CA chain pkcs7 file (string value)
+#ca_cert_pkcs7_path = <None>
+
+# Directory in which to store certs/keys for subcas (string value)
+#subca_cert_key_directory = /etc/barbican/snakeoil-cas
+
+
+[ssl]
+
+#
+# From oslo.service.sslutils
+#
+
+# CA certificate file to use to verify connecting clients. (string
+# value)
+# Deprecated group/name - [DEFAULT]/ssl_ca_file
+#ca_file = <None>
+
+# Certificate file to use when starting the server securely. (string
+# value)
+# Deprecated group/name - [DEFAULT]/ssl_cert_file
+#cert_file = <None>
+
+# Private key file to use when starting the server securely. (string
+# value)
+# Deprecated group/name - [DEFAULT]/ssl_key_file
+#key_file = <None>
+
+# SSL version to use (valid only if SSL enabled). Valid values are
+# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be
+# available on some distributions. (string value)
+#version = <None>
+
+# Sets the list of available ciphers. value should be a string in the
+# OpenSSL cipher list format. (string value)
+#ciphers = <None>
diff --git a/meta-stx/recipes-support/openstack-barbican-api/files/gunicorn-config.py b/meta-stx/recipes-support/openstack-barbican-api/files/gunicorn-config.py
new file mode 100644 (file)
index 0000000..7e2b738
--- /dev/null
@@ -0,0 +1,26 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+import multiprocessing
+bind = '0.0.0.0:9311'
+user = 'barbican'
+group = 'barbican'
+timeout = 30
+backlog = 2048
+keepalive = 2
+workers = multiprocessing.cpu_count() * 2
+loglevel = 'info'
+errorlog = '-'
+accesslog = '-'
diff --git a/meta-stx/recipes-support/openstack-barbican-api/files/openstack-barbican-api.service b/meta-stx/recipes-support/openstack-barbican-api/files/openstack-barbican-api.service
new file mode 100644 (file)
index 0000000..197a281
--- /dev/null
@@ -0,0 +1,19 @@
+[Unit]
+Description=Openstack Barbican API server
+After=syslog.target network.target
+Before=httpd.service
+
+[Service]
+PIDFile=/run/barbican/pid
+User=barbican
+Group=barbican
+RuntimeDirectory=barbican
+RuntimeDirectoryMode=770
+ExecStart=/usr/bin/gunicorn --pid /run/barbican/pid -c /etc/barbican/gunicorn-config.py --paste /etc/barbican/barbican-api-paste.ini
+ExecReload=/usr/bin/kill -s HUP $MAINPID
+ExecStop=/usr/bin/kill -s TERM $MAINPID
+StandardError=syslog
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-support/openstack-barbican-api/openstack-barbican-api.bb b/meta-stx/recipes-support/openstack-barbican-api/openstack-barbican-api.bb
new file mode 100644 (file)
index 0000000..9b49e05
--- /dev/null
@@ -0,0 +1,80 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://${WORKDIR}/LICENSE;md5=89aea4e17d99a7cacdbeed46a0096b10"
+
+SRC_URI = " \
+       file://LICENSE \
+       file://barbican.conf \
+       file://gunicorn-config.py \
+       file://barbican-api-paste.ini \
+       file://openstack-barbican-api.service \
+       "
+
+do_configure() {
+       :
+}
+
+
+do_compile() {
+       :
+}
+
+
+do_install() {
+
+       install -m 0755 -d ${D}/${datadir}/starlingx/barbican/
+       install -m 0755 -d ${D}/${datadir}/starlingx/barbican/backup/
+       install -m 0755 -d ${D}/${systemd_system_unitdir}/
+       install -m 0644 ${WORKDIR}/barbican.conf ${D}/${datadir}/starlingx/barbican
+       install -m 0644 ${WORKDIR}/barbican-api-paste.ini ${D}/${datadir}/starlingx/barbican
+       install -m 0644 ${WORKDIR}/gunicorn-config.py ${D}/${datadir}/starlingx/barbican
+       install -m 0644 ${WORKDIR}/openstack-barbican-api.service ${D}/${systemd_system_unitdir}/openstack-barbican-api.service
+}
+
+pkg_postinst_ontarget_${PN}() {
+
+       tar -C / -czpf /usr/share/starlingx/barbican/backup/barbican.$(date +%s).tar.gz ./etc/barbican
+
+       if [ ! -f /usr/share/starlingx/barbican/backup/barbican.default.tar.gz ]; then
+               tar -C / -czpf /usr/share/starlingx/barbican/backup/barbican.default.tar.gz ./etc/barbican
+       fi;
+
+       rm -rf /etc/barbican/
+
+       # Restore to default settings
+       tar -C / -xzpf /usr/share/starlingx/barbican/backup/barbican.default.tar.gz
+
+       cp /usr/share/starlingx/barbican/barbican-api-paste.ini /etc/barbican/
+       cp /usr/share/starlingx/barbican/barbican.conf /etc/barbican/
+       cp /usr/share/starlingx/barbican/gunicorn-config.py /etc/barbican/
+       systemctl daemon-reload
+}
+
+pkg_prerm_ontarget_${PN}() {
+       tar -C / -czpf /usr/share/starlingx/barbican/backup/barbican.$(date +%s).tar.gz ./etc/barbican
+       rm -rf /etc/barbican/
+
+       # Restore to default settings
+       tar -C / -xzpf /usr/share/starlingx/barbican/backup/barbican.default.tar.gz
+}
+
+FILES_${PN} = " \
+       ${datadir}/starlingx/barbican/ \
+       ${systemd_system_unitdir}/openstack-barbican-api.service \
+       "
diff --git a/meta-stx/recipes-support/os-service-types/python2-os-service-types_1.3.0.bb b/meta-stx/recipes-support/os-service-types/python2-os-service-types_1.3.0.bb
new file mode 100644 (file)
index 0000000..60042f5
--- /dev/null
@@ -0,0 +1,41 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A library to handle official service types for OpenStack and their historical aliases."
+HOMEPAGE = "https://opendev.org/openstack/os-service-types"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1dece7821bf3fd70fe1309eaa37d52a2"
+
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "2e5b38088a43539621de82aa2d0c1b366c2638a9"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://opendev.org/openstack/os-service-types.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH}"
+
+inherit setuptools
+
+DEPENDS += " \
+        python-pip \
+        python-pbr-native \
+        "
+
+RDEPENDS_${PN} += " \
+        python-lxml \
+        python-requests \
+        python-cython \
+        "
diff --git a/meta-stx/recipes-support/puppet/files/network/Don-t-write-absent-to-redhat-route-files-and-test-fo.patch b/meta-stx/recipes-support/puppet/files/network/Don-t-write-absent-to-redhat-route-files-and-test-fo.patch
new file mode 100644 (file)
index 0000000..efc5446
--- /dev/null
@@ -0,0 +1,71 @@
+From 49e103bbeb4d6efe1ca75f581d41ee6a8ed7caf5 Mon Sep 17 00:00:00 2001
+From: Romanos Skiadas <rom.skiad@gmail.com>
+Date: Wed, 2 Nov 2016 14:51:47 -0400
+Subject: [PATCH] Don't write absent to redhat route files and test for this
+
+Signed-off-by: Allain Legacy <allain.legacy@windriver.com>
+---
+ .../network/lib/puppet/provider/network_route/redhat.rb |  9 +++++++--
+ .../spec/unit/provider/network_route/redhat_spec.rb     | 17 ++++++++++++++++-
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index f45eab5..9841c8e 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -84,10 +84,15 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+         raise Puppet::Error, "#{provider.name} does not have a #{prop}." if provider.send(prop).nil?
+       end
+       if provider.network == "default"
+-        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface} #{provider.options}\n"
++        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}\n"
+       else
+-        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface} #{provider.options}\n"
++        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}\n"
+       end
++      contents << if provider.options == :absent
++                    "\n"
++                  else
++                    " #{provider.options}\n"
++                  end
+     end
+     contents.join
+   end
+diff --git a/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb b/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb
+index dfc9d6b..1ad2128 100644
+--- a/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb
++++ b/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb
+@@ -91,7 +91,18 @@ describe Puppet::Type.type(:network_route).provider(:redhat) do
+       )
+     end
+-    let(:content) { described_class.format_file('', [route1_provider, route2_provider, defaultroute_provider]) }
++    let(:nooptions_provider) do
++      stub('nooptions_provider',
++           name: 'default',
++           network: 'default',
++           netmask: '',
++           gateway: '10.0.0.1',
++           interface: 'eth2',
++           options: :absent
++      )
++    end
++
++    let(:content) { described_class.format_file('', [route1_provider, route2_provider, defaultroute_provider, nooptions_provider]) }
+     describe "writing the route line" do
+       describe "For standard (non-default) routes" do
+@@ -122,6 +133,10 @@ describe Puppet::Type.type(:network_route).provider(:redhat) do
+       it "should have the correct fields appended" do
+         content.scan(/^default .*$/).first.should be_include("default via 10.0.0.1 dev eth1")
+       end
++
++      it 'should not contain the word absent when no options are defined' do
++        expect(content).to_not match(/absent/)
++      end
+     end
+   end
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/network/fix-absent-options.patch b/meta-stx/recipes-support/puppet/files/network/fix-absent-options.patch
new file mode 100644 (file)
index 0000000..23c738f
--- /dev/null
@@ -0,0 +1,113 @@
+From f22d4c9d24939afb8f29323adffe3eb570f14804 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=3D=3FUTF-8=3Fq=3FIgor=3D20Gali=3DC4=3D87=3F=3D?=
+ <i.galic@brainsware.org>
+Date: Wed, 2 Nov 2016 14:54:28 -0400
+Subject: [PATCH] fix "absent" options
+
+analogous to redhat, we check if options are absent, before appending
+them to the file. This fixes #160
+
+Signed-off-by: Allain Legacy <allain.legacy@windriver.com>
+---
+ .../lib/puppet/provider/network_route/redhat.rb    | 10 ++---
+ .../lib/puppet/provider/network_route/routes.rb    |  3 +-
+ .../unit/provider/network_route/routes_spec.rb     | 48 ++++++++++++++++++++++
+ 3 files changed, 53 insertions(+), 8 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index 9841c8e..7123d44 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -84,15 +84,11 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+         raise Puppet::Error, "#{provider.name} does not have a #{prop}." if provider.send(prop).nil?
+       end
+       if provider.network == "default"
+-        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}\n"
++        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}"
+       else
+-        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}\n"
++        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}"
+       end
+-      contents << if provider.options == :absent
+-                    "\n"
+-                  else
+-                    " #{provider.options}\n"
+-                  end
++      contents << (provider.options == :absent ? "\n" : " #{provider.options}\n")
+     end
+     contents.join
+   end
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb
+index 2dd579f..ca7066d 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb
+@@ -93,7 +93,8 @@ Puppet::Type.type(:network_route).provide(:routes) do
+       raise Puppet::Error, "#{provider.name} is missing the required parameter 'gateway'." if provider.gateway.nil?
+       raise Puppet::Error, "#{provider.name} is missing the required parameter 'interface'." if provider.interface.nil?
+-      contents << "#{provider.network} #{provider.netmask} #{provider.gateway} #{provider.interface} #{provider.options}\n"
++      contents << "#{provider.network} #{provider.netmask} #{provider.gateway} #{provider.interface}"
++      contents << (provider.options == :absent ? "\n" : " #{provider.options}\n")
+     end
+     contents.join
+diff --git a/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb b/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb
+index 2e55eba..9376739 100644
+--- a/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb
++++ b/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb
+@@ -93,4 +93,52 @@ describe Puppet::Type.type(:network_route).provider(:routes) do
+       end
+     end
+   end
++  describe 'when formatting simple files' do
++    let(:route1_provider) do
++      stub('route1_provider',
++           name: '172.17.67.0',
++           network: '172.17.67.0',
++           netmask: '255.255.255.0',
++           gateway: '172.18.6.2',
++           interface: 'vlan200',
++           options: :absent,
++      )
++    end
++
++    let(:route2_provider) do
++      stub('lo_provider',
++           name: '172.28.45.0',
++           network: '172.28.45.0',
++           netmask: '255.255.255.0',
++           gateway: '172.18.6.2',
++           interface: 'eth0',
++           options: :absent,
++      )
++    end
++
++    let(:content) { described_class.format_file('', [route1_provider, route2_provider]) }
++
++    describe 'writing the route line' do
++      it 'should write only fields' do
++        expect(content.scan(/^172.17.67.0 .*$/).length).to eq(1)
++        expect(content.scan(/^172.17.67.0 .*$/).first.split(/\s/, 5).length).to eq(4)
++      end
++
++      it 'should have the correct fields appended' do
++        expect(content.scan(/^172.17.67.0 .*$/).first).to include('172.17.67.0 255.255.255.0 172.18.6.2 vlan200')
++      end
++
++      it 'should fail if the netmask property is not defined' do
++        route2_provider.unstub(:netmask)
++        route2_provider.stubs(:netmask).returns nil
++        expect { content }.to raise_exception
++      end
++
++      it 'should fail if the gateway property is not defined' do
++        route2_provider.unstub(:gateway)
++        route2_provider.stubs(:gateway).returns nil
++        expect { content }.to raise_exception
++      end
++    end
++  end
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/network/ipv6-static-route-support.patch b/meta-stx/recipes-support/puppet/files/network/ipv6-static-route-support.patch
new file mode 100644 (file)
index 0000000..10456b1
--- /dev/null
@@ -0,0 +1,100 @@
+From 49820add1d1e5f63343615ead9b551b8679f466d Mon Sep 17 00:00:00 2001
+From: Kevin Smith <kevin.smith@windriver.com>
+Date: Mon, 16 Oct 2017 15:06:37 -0500
+Subject: [PATCH 1/1] ipv6 static route support
+
+---
+ .../lib/puppet/provider/network_route/redhat.rb    |  3 ++-
+ .../network/lib/puppet/type/network_route.rb       | 26 ++++++++++++++--------
+ .../network/spec/unit/type/network_route_spec.rb   |  5 +++++
+ 3 files changed, 24 insertions(+), 10 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index 5073519..c289f5f 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -93,7 +93,8 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+       if provider.network == "default"
+         contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}"
+       else
+-        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}"
++        # provider.name carries the route target in CIDR notation
++        contents << "#{provider.name} via #{provider.gateway} dev #{provider.interface}"
+       end
+       contents << (provider.options == :absent ? "\n" : " #{provider.options}\n")
+     end
+diff --git a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+index 7ab67dd..fd52c58 100644
+--- a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
++++ b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+@@ -5,8 +5,6 @@ Puppet::Type.newtype(:network_route) do
+   ensurable
+-  IPV4_ADDRESS_REGEX = /^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$/
+-
+   newparam(:name) do
+     isnamevar
+     desc "The name of the network route"
+@@ -18,7 +16,7 @@ Puppet::Type.newtype(:network_route) do
+     validate do |value|
+       begin
+         t = IPAddr.new(value) unless value == "default"
+-      rescue ArgumentError
++      rescue
+         fail("Invalid value for network: #{value}")
+       end
+     end
+@@ -29,17 +27,27 @@ Puppet::Type.newtype(:network_route) do
+     desc "The subnet mask to apply to the route"
+     validate do |value|
+-      unless (value.length <= 2 or value =~ IPV4_ADDRESS_REGEX)
++      unless value.length <= 3 || (IPAddr.new(value) rescue false)
+         fail("Invalid value for argument netmask: #{value}")
+       end
+     end
+     munge do |value|
+-      case value
+-      when IPV4_ADDRESS_REGEX
+-        value
+-      when /^\d+$/
+-        IPAddr.new('255.255.255.255').mask(value.strip.to_i).to_s
++      # '255.255.255.255'.to_i  will return 255, so we try to convert it back:
++      if value.to_i.to_s == value
++        if value.to_i <= 32
++          IPAddr.new('255.255.255.255').mask(value.strip.to_i).to_s
++        else
++          IPAddr.new('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff').mask(value.strip.to_i).to_s
++        end
++      else
++        if (IPAddr.new(value).ipv6? rescue false)
++          IPAddr.new('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff').mask(value).to_s
++        elsif (IPAddr.new(value).ipv4? rescue false)
++          IPAddr.new('255.255.255.255').mask(value).to_s
++        else
++          raise("Invalid value for argument netmask: #{value}")
++        end
+       end
+     end
+   end
+diff --git a/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb b/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb
+index 24e9da3..6e6f3e4 100644
+--- a/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb
++++ b/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb
+@@ -55,6 +55,11 @@ describe Puppet::Type.type(:network_route) do
+         r[:netmask].should == '255.255.255.0'
+       end
++      it 'should convert IPv6 netmasks of the CIDR form' do
++        r = Puppet::Type.type(:network_route).new(name: 'lxd bridge', network: 'fd58:281b:6eef:eb3d::', netmask: '64', gateway: 'fd58:281b:6eef:eb3d::1', interface: 'lxdbr0')
++        expect(r[:netmask]).to eq('ffff:ffff:ffff:ffff::')
++      end
++
+       it "should convert netmasks of the expanded netmask form" do
+         r = described_class.new(:name => '192.168.1.0/24', :network => '192.168.1.0', :netmask => '255.255.128.0', :gateway => '23.23.23.42', :interface => 'eth0')
+         r[:netmask].should == '255.255.128.0'
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/network/permit-inservice-update-of-static-routes.patch b/meta-stx/recipes-support/puppet/files/network/permit-inservice-update-of-static-routes.patch
new file mode 100644 (file)
index 0000000..66e7623
--- /dev/null
@@ -0,0 +1,55 @@
+From 46ec08e58419bb73bf49b44cf32fa3d304236615 Mon Sep 17 00:00:00 2001
+From: Kevin Smith <kevin.smith@windriver.com>
+Date: Thu, 5 Oct 2017 13:33:12 -0500
+Subject: [PATCH 1/1] permit inservice update of static routes
+
+---
+ .../network/lib/puppet/provider/network_route/redhat.rb  | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index 7123d44..5073519 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -18,12 +18,18 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+   has_feature :provider_options
++  # WRS: Generate temporary copies.  It will get compared to files under
++  # /etc/sysconfig/network-scripts afterward.  Only config that have changed
++  # will get replaced.  Don't let puppet directly manage them, else it will
++  # trigger un-wanted networking actions (like up/down).
++  RSCRIPT_DIRECTORY = "/var/run/network-scripts.puppet"
++
+   def select_file
+-    "/etc/sysconfig/network-scripts/route-#{@resource[:interface]}"
++    "#{RSCRIPT_DIRECTORY}/route-#{@resource[:interface]}"
+   end
+   def self.target_files
+-    Dir["/etc/sysconfig/network-scripts/route-*"]
++    Dir["#{RSCRIPT_DIRECTORY}/route-*"]
+   end
+   def self.parse_file(filename, contents)
+@@ -76,6 +82,7 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+   # Generate an array of sections
+   def self.format_file(filename, providers)
++    Dir.mkdir(RSCRIPT_DIRECTORY) unless File.exists?(RSCRIPT_DIRECTORY)
+     contents = []
+     contents << header
+     # Build routes
+@@ -103,4 +110,9 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+ HEADER
+     str
+   end
++
++  def self.post_flush_hook(filename)
++    File.chmod(0644, filename)
++  end
++
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/network/puppet-network-Kilo-quilt-changes.patch b/meta-stx/recipes-support/puppet/files/network/puppet-network-Kilo-quilt-changes.patch
new file mode 100644 (file)
index 0000000..841198f
--- /dev/null
@@ -0,0 +1,658 @@
+From 8e14e2e258a8f2f7189ed37c6337c41fbff0362a Mon Sep 17 00:00:00 2001
+From: Al Bailey <al.bailey@windriver.com>
+Date: Mon, 6 Jun 2016 17:13:09 -0400
+Subject: [PATCH] puppet-network Kilo quilt changes
+
+---
+ .../lib/puppet/provider/network_config/redhat.rb   |  39 ++-
+ .../lib/puppet/provider/network_config/wrlinux.rb  | 296 +++++++++++++++++++++
+ .../lib/puppet/provider/network_route/wrlinux.rb   | 109 ++++++++
+ .../network/lib/puppet/type/network_config.rb      |   4 +
+ packstack/puppet/modules/network/manifests/bond.pp |  22 ++
+ .../puppet/modules/network/manifests/bond/setup.pp |   2 +
+ .../modules/network/manifests/bond/wrlinux.pp      |  56 ++++
+ 7 files changed, 521 insertions(+), 7 deletions(-)
+ create mode 100644 packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb
+ create mode 100644 packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb
+ create mode 100644 packstack/puppet/modules/network/manifests/bond/wrlinux.pp
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+index 4b6de7e..758f387 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+@@ -19,7 +19,12 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+   has_feature :provider_options
+   # @return [String] The path to network-script directory on redhat systems
+-  SCRIPT_DIRECTORY = "/etc/sysconfig/network-scripts"
++  # SCRIPT_DIRECTORY = "/etc/sysconfig/network-scripts"
++  # WRS: Generate temporary copies.  It will get compared to files under
++  # /etc/sysconfig/network-scripts afterward.  Only config that have changed
++  # will get replaced.  Don't let puppet directly manage them, else it will
++  # trigger un-wanted networking actions (like up/down).
++  SCRIPT_DIRECTORY = "/var/run/network-scripts.puppet"
+   # The valid vlan ID range is 0-4095; 4096 is out of range
+   VLAN_RANGE_REGEX = %r[\d{1,3}|40[0-9][0-5]]
+@@ -35,6 +40,7 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+     :name       => 'DEVICE',
+     :hotplug    => 'HOTPLUG',
+     :mtu        => 'MTU',
++    :gateway => 'GATEWAY',
+   }
+   # Map provider instances to files based on their name
+@@ -60,8 +66,14 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+   #   RedhatProvider.target_files
+   #   # => ['/etc/sysconfig/network-scripts/ifcfg-eth0', '/etc/sysconfig/network-scripts/ifcfg-eth1']
+   def self.target_files(script_dir = SCRIPT_DIRECTORY)
+-    entries = Dir.entries(script_dir).select {|entry| entry.match SCRIPT_REGEX}
+-    entries.map {|entry| File.join(SCRIPT_DIRECTORY, entry)}
++    entries = []
++    if Dir.exists?(SCRIPT_DIRECTORY)
++      Dir.foreach(SCRIPT_DIRECTORY) do |item|
++        next if not item.match SCRIPT_REGEX
++        entries << item
++      end
++    end
++    entries
+   end
+   # Convert a redhat network script into a hash
+@@ -184,6 +196,8 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+   end
+   def self.format_file(filename, providers)
++    Dir.mkdir(SCRIPT_DIRECTORY) unless File.exists?(SCRIPT_DIRECTORY)
++
+     if providers.length == 0
+       return ""
+     elsif providers.length > 1
+@@ -193,11 +207,11 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+     provider = providers[0]
+     props    = {}
+-    # Map everything to a flat hash
+-    props = (provider.options || {})
++    props = provider.options if provider.options && provider.options != :absent
++    # Map everything to a flat hash
+     NAME_MAPPINGS.keys.each do |type_name|
+-      if (val = provider.send(type_name))
++      if (val = provider.send(type_name)) && val != :absent
+         props[type_name] = val
+       end
+     end
+@@ -214,11 +228,11 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+       str << %{#{key}=#{val}\n}
+     end
++    content.prepend(header)
+     content
+   end
+   def self.unmunge(props)
+-
+     pairs = {}
+     [:onboot, :hotplug].each do |bool_property|
+@@ -245,6 +259,17 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+     pairs
+   end
++  def self.header
++    str = <<-HEADER
++# HEADER: This file is is being managed by puppet. Changes to
++# HEADER: interfaces that are not being managed by puppet will persist;
++# HEADER: however changes to interfaces that are being managed by puppet will
++# HEADER: be overwritten. In addition, file order is NOT guaranteed.
++# HEADER: Last generated at: #{Time.now}
++HEADER
++    str
++  end
++
+   def self.post_flush_hook(filename)
+     File.chmod(0644, filename)
+   end
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb
+new file mode 100644
+index 0000000..44c645a
+--- /dev/null
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb
+@@ -0,0 +1,296 @@
++require 'puppetx/filemapper'
++
++Puppet::Type.type(:network_config).provide(:wrlinux) do
++  # Wind River Linux network_config interfaces provider.
++  #
++  # This provider uses the filemapper mixin to map the interfaces file to a
++  # collection of network_config providers, and back.
++  #
++  include PuppetX::FileMapper
++
++  desc "Wind River interfaces style provider"
++
++  confine    :osfamily => :wrlinux
++  defaultfor :osfamily => :wrlinux
++
++  has_feature :provider_options
++  has_feature :hotpluggable
++
++  def select_file
++    '/var/run/interfaces.puppet'
++  end
++
++  def self.target_files
++    ['/var/run/interfaces.puppet']
++  end
++
++  class MalformedInterfacesError < Puppet::Error
++    def initialize(msg = nil)
++      msg = 'Malformed wrlinux interfaces file; cannot instantiate network_config resources' if msg.nil?
++      super
++    end
++  end
++
++  def self.raise_malformed
++    @failed = true
++    raise MalformedInterfacesError
++  end
++
++  class Instance
++
++    attr_reader :name
++
++    # Booleans
++    attr_accessor :onboot, :hotplug
++
++
++    # These fields are going to get rearranged to resolve issue 16
++    # https://github.com/adrienthebo/puppet-network/issues/16
++    attr_accessor :ipaddress, :netmask, :family, :method, :mtu
++
++    # Options hash
++    attr_reader :options
++
++    def initialize(name)
++      @name = name
++
++      @options = Hash.new {|hash, key| hash[key] = []}
++    end
++
++    def to_hash
++      h = {
++        :name      => @name,
++        :onboot    => @onboot,
++        :hotplug   => @hotplug,
++        :ipaddress => @ipaddress,
++        :netmask   => @netmask,
++        :family    => @family,
++        :method    => @method,
++        :mtu       => @mtu,
++        :options   => squeeze_options
++      }
++
++      h.inject({}) do |hash, (key, val)|
++        hash[key] = val unless val.nil?
++        hash
++      end
++    end
++
++    def squeeze_options
++      @options.inject({}) do |hash, (key, value)|
++        if value.size <= 1
++          hash[key] = value.pop
++        else
++          hash[key] = value
++        end
++
++      hash
++      end
++    end
++
++    class << self
++
++      def reset!
++        @interfaces = {}
++      end
++
++      # @return [Array<Instance>] All class instances
++      def all_instances
++        @interfaces ||= {}
++        @interfaces
++      end
++
++      def [](name)
++        if all_instances[name]
++          obj = all_instances[name]
++        else
++          obj = self.new(name)
++          all_instances[name] = obj
++        end
++
++        obj
++      end
++    end
++  end
++
++  def self.parse_file(filename, contents)
++    # Debian has a very irregular format for the interfaces file. The
++    # parse_file method is somewhat derived from the ifup executable
++    # supplied in the debian ifupdown package. The source can be found at
++    # http://packages.debian.org/squeeze/ifupdown
++
++
++    # The debian interfaces implementation requires global state while parsing
++    # the file; namely, the stanza being parsed as well as the interface being
++    # parsed.
++    status = :none
++    current_interface = nil
++
++    lines = contents.split("\n")
++    # TODO Join lines that end with a backslash
++
++    # Iterate over all lines and determine what attributes they create
++    lines.each do |line|
++
++      # Strip off any trailing comments
++      line.sub!(/#.*$/, '')
++
++      case line
++      when /^\s*#|^\s*$/
++        # Ignore comments and blank lines
++        next
++
++      when /^auto|^allow-auto/
++        # Parse out any auto sections
++        interfaces = line.split(' ')
++        interfaces.delete_at(0)
++
++        interfaces.each do |name|
++          Instance[name].onboot = true
++        end
++
++        # Reset the current parse state
++        current_interface = nil
++
++      when /^allow-hotplug/
++        # parse out allow-hotplug lines
++
++        interfaces = line.split(' ')
++        interfaces.delete_at(0)
++
++        interfaces.each do |name|
++          Instance[name].hotplug = true
++        end
++
++        # Deliberately do not reset the current parse state here
++      when /^iface/
++
++        # Format of the iface line:
++        #
++        # iface <iface> <family> <method>
++        # zero or more options for <iface>
++
++        if match = line.match(/^iface\s+(\S+)\s+(\S+)\s+(\S+)/)
++          name   = match[1]
++          family = match[2]
++          method = match[3]
++
++          # If an iface block for this interface has been seen, the file is
++          # malformed.
++          raise_malformed if Instance[name] and Instance[name].family
++
++          status = :iface
++          current_interface = name
++
++          # This is done automatically
++          #Instance[name].name   = name
++          Instance[name].family = family
++          Instance[name].method = method
++
++        else
++          # A line starting with "iface" that does not match the expected
++          # format means the interfaces file is malformed.
++          raise_malformed
++        end
++
++      when /^mapping/
++
++        # TODO: mapping stanza parsing is not implemented for this provider
++        raise Puppet::DevError, "Debian interfaces mapping parsing not implemented."
++        status = :mapping
++
++      else
++        # We're currently examining a line that is within a mapping or iface
++        # stanza, so we need to validate the line and add the options it
++        # specifies to the known state of the interface.
++
++        case status
++        when :iface
++          if match = line.match(/(\S+)\s+(\S.*)/)
++            # If we're parsing an iface stanza, then we should receive a set of
++            # lines that contain two or more space delimited strings. Append
++            # them as options to the iface in an array.
++
++            key = match[1]
++            val = match[2]
++
++            name = current_interface
++
++            case key
++            when 'address'; Instance[name].ipaddress    = val
++            when 'netmask'; Instance[name].netmask      = val
++            when 'mtu';     Instance[name].mtu          = val
++            else            Instance[name].options[key] << val
++            end
++          else
++            raise_malformed
++          end
++        when :mapping
++          raise Puppet::DevError, "Debian interfaces mapping parsing not implemented."
++        when :none
++          raise_malformed
++        end
++      end
++    end
++
++    Instance.all_instances.map {|name, instance| instance.to_hash }
++  end
++
++  # Generate an array of sections
++  def self.format_file(filename, providers)
++    contents = []
++    contents << header
++
++    # Add onboot interfaces
++    if (auto_interfaces = providers.select {|provider| provider.onboot == true })
++      stanza = []
++      stanza << "auto " + auto_interfaces.map(&:name).sort.join(" ")
++      contents << stanza.join("\n")
++    end
++
++    # Build iface stanzas
++    providers.sort_by(&:name).each do |provider|
++      # TODO add validation method
++      raise Puppet::Error, "#{provider.name} does not have a method." if provider.method.nil?
++      raise Puppet::Error, "#{provider.name} does not have a family." if provider.family.nil?
++
++      stanza = []
++      stanza << %{iface #{provider.name} #{provider.family} #{provider.method}}
++
++      [
++        [:ipaddress, 'address'],
++        [:netmask,   'netmask'],
++        [:mtu,       'mtu'],
++      ].each do |(property, section)|
++        stanza << "    #{section} #{provider.send property}" if provider.send(property) and provider.send(property) != :absent
++      end
++
++      if provider.options and provider.options != :absent
++        provider.options.each_pair do |key, val|
++          if val.is_a? String
++            stanza << "    #{key} #{val}"
++          elsif val.is_a? Array
++            val.each { |entry| stanza << "    #{key} #{entry}" }
++          else
++            raise Puppet::Error, "#{self} options key #{key} expects a String or Array, got #{val.class}"
++          end
++        end
++      end
++
++      contents << stanza.join("\n")
++    end
++
++    contents.map {|line| line + "\n\n"}.join
++  end
++
++  def self.header
++    str = <<-HEADER
++# HEADER: This file is is being managed by puppet. Changes to
++# HEADER: interfaces that are not being managed by puppet will persist;
++# HEADER: however changes to interfaces that are being managed by puppet will
++# HEADER: be overwritten. In addition, file order is NOT guaranteed.
++# HEADER: Last generated at: #{Time.now}
++HEADER
++    str
++  end
++end
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb
+new file mode 100644
+index 0000000..d3fa7b5
+--- /dev/null
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb
+@@ -0,0 +1,109 @@
++require 'ipaddr'
++require 'puppetx/filemapper'
++
++Puppet::Type.type(:network_route).provide(:wrlinux) do
++  # Wind River Linux network_route routes provider.
++  #
++  # This provider uses the filemapper mixin to map the routes file to a
++  # collection of network_route providers, and back.
++  #
++  include PuppetX::FileMapper
++
++  desc "Wind River routes style provider"
++
++  confine    :osfamily => :wrlinux
++
++  # $ dpkg -S /etc/network/if-up.d/20static-routes
++  # ifupdown-extra: /etc/network/if-up.d/20static-routes
++  confine    :exists   => '/etc/network/if-up.d/20static-routes'
++
++  defaultfor :osfamily => :wrlinux
++
++  has_feature :provider_options
++
++  def select_file
++    '/etc/network/routes'
++  end
++
++  def self.target_files
++    ['/etc/network/routes']
++  end
++
++  class MalformedRoutesError < Puppet::Error
++    def initialize(msg = nil)
++      msg = 'Malformed wrlinux routes file; cannot instantiate network_route resources' if msg.nil?
++      super
++    end
++  end
++
++  def self.raise_malformed
++    @failed = true
++    raise MalformedRoutesError
++  end
++
++  def self.parse_file(filename, contents)
++    # Build out an empty hash for new routes for storing their configs.
++    route_hash = Hash.new do |hash, key|
++      hash[key] = {}
++      hash[key][:name] = key
++      hash[key]
++    end
++
++    lines = contents.split("\n")
++    lines.each do |line|
++      # Strip off any trailing comments
++      line.sub!(/#.*$/, '')
++
++      if line =~ /^\s*#|^\s*$/
++        # Ignore comments and blank lines
++        next
++      end
++
++      route = line.split(' ', 5)
++
++      if route.length < 4
++        raise_malformed
++      end
++
++      # use the CIDR version of the target as :name
++      cidr_target = "#{route[0]}/#{IPAddr.new(route[1]).to_i.to_s(2).count('1')}"
++
++      route_hash[cidr_target][:network] = route[0]
++      route_hash[cidr_target][:netmask] = route[1]
++      route_hash[cidr_target][:gateway] = route[2]
++      route_hash[cidr_target][:interface] = route[3]
++      route_hash[cidr_target][:options] = route[4] if route[4]
++    end
++
++    route_hash.values
++  end
++
++  # Generate an array of sections
++  def self.format_file(filename, providers)
++    contents = []
++    contents << header
++
++    # Build routes
++    providers.sort_by(&:name).each do |provider|
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'network'." if provider.network.nil?
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'netmask'." if provider.netmask.nil?
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'gateway'." if provider.gateway.nil?
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'interface'." if provider.interface.nil?
++
++      contents << "#{provider.network} #{provider.netmask} #{provider.gateway} #{provider.interface} #{provider.options}\n"
++    end
++
++    contents.join
++  end
++
++  def self.header
++    str = <<-HEADER
++# HEADER: This file is is being managed by puppet. Changes to
++# HEADER: routes that are not being managed by puppet will persist;
++# HEADER: however changes to routes that are being managed by puppet will
++# HEADER: be overwritten. In addition, file order is NOT guaranteed.
++# HEADER: Last generated at: #{Time.now}
++HEADER
++    str
++  end
++end
+diff --git a/packstack/puppet/modules/network/lib/puppet/type/network_config.rb b/packstack/puppet/modules/network/lib/puppet/type/network_config.rb
+index a50a0df..1297ad7 100644
+--- a/packstack/puppet/modules/network/lib/puppet/type/network_config.rb
++++ b/packstack/puppet/modules/network/lib/puppet/type/network_config.rb
+@@ -95,6 +95,10 @@ Puppet::Type.newtype(:network_config) do
+     defaultto :raw
+   end
++  newproperty(:gateway) do
++    desc 'The IP address of the network router or gateway device (if any)'
++  end
++
+   # `:options` provides an arbitrary passthrough for provider properties, so
+   # that provider specific behavior doesn't clutter up the main type but still
+   # allows for more powerful actions to be taken.
+diff --git a/packstack/puppet/modules/network/manifests/bond.pp b/packstack/puppet/modules/network/manifests/bond.pp
+index d6d98ce..26ca104 100644
+--- a/packstack/puppet/modules/network/manifests/bond.pp
++++ b/packstack/puppet/modules/network/manifests/bond.pp
+@@ -188,6 +188,28 @@ define network::bond(
+         require          => Kmod::Alias[$name],
+       }
+     }
++    WRLinux: {
++      network::bond::wrlinux { $name:
++        slaves    => $slaves,
++        ensure    => $ensure,
++        ipaddress => $ipaddress,
++        netmask   => $netmask,
++        method    => $method,
++        family    => $family,
++        onboot    => $onboot,
++
++        mode             => $mode,
++        miimon           => $miimon,
++        downdelay        => $downdelay,
++        updelay          => $updelay,
++        lacp_rate        => $lacp_rate,
++        primary          => $primary,
++        primary_reselect => $primary_reselect,
++        xmit_hash_policy => $xmit_hash_policy,
++
++        require   => Kmod::Alias[$name],
++      }
++    }
+     RedHat: {
+       network::bond::redhat { $name:
+         ensure           => $ensure,
+diff --git a/packstack/puppet/modules/network/manifests/bond/setup.pp b/packstack/puppet/modules/network/manifests/bond/setup.pp
+index abe1252..0a30767 100644
+--- a/packstack/puppet/modules/network/manifests/bond/setup.pp
++++ b/packstack/puppet/modules/network/manifests/bond/setup.pp
+@@ -10,5 +10,7 @@ class network::bond::setup {
+         ensure => present,
+       }
+     }
++    WRLinux: {
++    }
+   }
+ }
+diff --git a/packstack/puppet/modules/network/manifests/bond/wrlinux.pp b/packstack/puppet/modules/network/manifests/bond/wrlinux.pp
+new file mode 100644
+index 0000000..e240341
+--- /dev/null
++++ b/packstack/puppet/modules/network/manifests/bond/wrlinux.pp
+@@ -0,0 +1,56 @@
++# = Define: network::bond::wrlinux
++#
++# Instantiate bonded interfaces on Wind River Linux based systems.
++#
++# == See also
++#
++# * Linux Ethernet Bonding driver documentation (Documentation/networking/bonding.txt)
++define network::bond::wrlinux(
++  $slaves,
++  $ensure    = present,
++  $ipaddress = undef,
++  $netmask   = undef,
++  $method    = undef,
++  $family    = undef,
++  $onboot    = undef,
++
++  $mode             = undef,
++  $miimon           = undef,
++  $downdelay        = undef,
++  $updelay          = undef,
++  $lacp_rate        = undef,
++  $primary          = undef,
++  $primary_reselect = undef,
++  $xmit_hash_policy = undef,
++) {
++
++  $raw = {
++    'bond-slaves'    => join($slaves, ' '),
++    'bond-mode'      => $mode,
++    'bond-miimon'    => $miimon,
++    'bond-downdelay' => $downdelay,
++    'bond-updelay'   => $updelay,
++    'bond-lacp-rate' => $lacp_rate,
++    'bond-primary'   => $primary,
++    'bond-primary-reselect' => $primary_reselect,
++    'bond-xmit-hash-policy' => $xmit_hash_policy,
++  }
++
++  $opts = compact_hash($raw)
++
++  network_config { $name:
++    ensure    => $ensure,
++    ipaddress => $ipaddress,
++    netmask   => $netmask,
++    family    => $family,
++    method    => $method,
++    onboot    => $onboot,
++    options   => $opts,
++  }
++
++  network_config { $slaves:
++    ensure      => absent,
++    reconfigure => true,
++    before      => Network_config[$name],
++  }
++}
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/network/puppet-network-support-ipv6.patch b/meta-stx/recipes-support/puppet/files/network/puppet-network-support-ipv6.patch
new file mode 100644 (file)
index 0000000..b6d2f3c
--- /dev/null
@@ -0,0 +1,46 @@
+Index: packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+@@ -224,6 +224,11 @@
+     pairs = self.unmunge props
++    ip_version = provider.send(:family)
++    if (ip_version.to_s == "inet6")
++        pairs = self.ipv6_fixup pairs
++    end
++
+     content = pairs.inject('') do |str, (key, val)|
+       str << %{#{key}=#{val}\n}
+     end
+@@ -259,6 +264,30 @@
+     pairs
+   end
++  def self.ipv6_fixup(pairs)
++    pairs['IPV6INIT'] = 'yes'
++
++    if (pairs.include? 'NETMASK' and pairs.include? 'IPADDR')
++        pairs['IPV6ADDR'] =  pairs['IPADDR'].to_s + "/" + pairs['NETMASK'].to_s
++        pairs.delete('NETMASK')
++        pairs.delete('IPADDR')
++    elsif (pairs.include? 'IPADDR')
++        pairs['IPV6ADDR'] = pairs['IPADDR'].to_s
++        pairs.delete('IPADDR')
++    end
++
++    if (pairs.include? 'GATEWAY')
++        pairs['IPV6_DEFAULTGW'] = pairs['GATEWAY']
++        pairs.delete('GATEWAY')
++    end
++
++    if (pairs['BOOTPROTO'].to_s == 'dhcp')
++        pairs['DHCPV6C'] = 'yes'
++        pairs['DHCLIENTARGS'] = '-1'
++    end
++    pairs
++  end
++
+   def self.header
+     str = <<-HEADER
+ # HEADER: This file is is being managed by puppet. Changes to
diff --git a/meta-stx/recipes-support/puppet/files/network/route-options-support.patch b/meta-stx/recipes-support/puppet/files/network/route-options-support.patch
new file mode 100644 (file)
index 0000000..37bf138
--- /dev/null
@@ -0,0 +1,28 @@
+From c26a70ab9d5839f90148c578edc5d15133355194 Mon Sep 17 00:00:00 2001
+From: Kevin Smith <kevin.smith@windriver.com>
+Date: Wed, 25 Oct 2017 07:37:52 -0500
+Subject: [PATCH 1/1] route options support
+
+---
+ packstack/puppet/modules/network/lib/puppet/type/network_route.rb | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+index fd52c58..13ca06a 100644
+--- a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
++++ b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+@@ -3,6 +3,11 @@ require 'ipaddr'
+ Puppet::Type.newtype(:network_route) do
+   @doc = "Manage non-volatile route configuration information"
++  feature :provider_options, <<-EOD
++    The provider can accept an arbitrary options string. The semantics of
++    these options will depend on the provider.
++  EOD
++
+   ensurable
+   newparam(:name) do
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-barbican/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-barbican/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..01a3079
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-barbican.gemspec  2019-10-28 13:33:43.108639914 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-barbican'
++  s.version     = '11.3.0'
++  s.date        = '2017-08-21'
++  s.summary     = "Puppet module for OpenStack Barbican"
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://launchpad.net/puppet-barbican'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-barbican/puppet-barbican-do-not-fail-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-barbican/puppet-barbican-do-not-fail-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..d6cd7ba
--- /dev/null
@@ -0,0 +1,29 @@
+From fd7e40080bc5681376d91aff3956004c6ad2bfc7 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Sat, 14 Mar 2020 20:12:45 +0800
+Subject: [PATCH] puppet-barbican: do not fail for poky-stx
+
+poky-stx is set as Debian osfamily, but we still use barbican-api
+as $service_name, so do not fail for it.
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ manifests/api.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/api.pp b/manifests/api.pp
+index a31088b..6716588 100644
+--- a/manifests/api.pp
++++ b/manifests/api.pp
+@@ -486,7 +486,7 @@ the future release. Please use barbican::api::package_ensure instead.")
+   }
+   if $service_name == 'barbican-api' {
+-    if $::osfamily == 'Debian' {
++    if $::osfamily == 'Debian' and $::operatingsystem != 'poky-stx' {
+       fail('On Debian family the service_name must be set to httpd as there is no eventlet init script.')
+     }
+     service { 'barbican-api':
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-barbican/puppet-barbican-fix-the-pkg-and-service-names-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-barbican/puppet-barbican-fix-the-pkg-and-service-names-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..98826e1
--- /dev/null
@@ -0,0 +1,37 @@
+From 647dd40c145c6b52746a21656a3809bf4d016ab3 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Sat, 14 Mar 2020 19:53:03 +0800
+Subject: [PATCH] puppet-barbican: fix the pkg and service names for poky-stx
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ manifests/params.pp | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 7dc418e..3a678c2 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -19,9 +19,16 @@ class barbican::params {
+       $httpd_config_file            = '/etc/httpd/conf.d/barbican-api.conf'
+     }
+     'Debian': {
+-      $api_package_name             = 'barbican-api'
+-      $worker_package_name          = 'barbican-worker'
+-      $worker_service_name          = 'barbican-worker'
++      if ($::operatingsystem == 'poky-stx') {
++        $api_package_name             = 'barbican'
++        $api_service_name             = 'openstack-barbican-api'
++        $worker_package_name          = 'barbican'
++        $worker_service_name          = 'openstack-barbican-worker'
++      } else {
++        $api_package_name             = 'barbican-api'
++        $worker_package_name          = 'barbican-worker'
++        $worker_service_name          = 'barbican-worker'
++      }
+       $barbican_wsgi_script_path    = '/usr/lib/cgi-bin/barbican'
+       $barbican_wsgi_script_source  = '/usr/lib/python2.7/dist-packages/barbican/api/app.wsgi'
+       $httpd_config_file            = '/etc/apache2/conf-available/barbican-api.conf'
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-boolean/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-boolean/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..a15079a
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppet-boolean.gemspec   2019-10-30 08:37:38.629069755 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-boolean'
++  s.version     = '1.0.1'
++  s.date        = '2013-08-14'
++  s.summary     = "Boolean normalizing property for Puppet types."
++  s.description = s.summary
++  s.authors     = ["Vox Pupuli"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.markdown Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/voxpupuli/puppet-boolean'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0001-Roll-up-TIS-patches.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0001-Roll-up-TIS-patches.patch
new file mode 100644 (file)
index 0000000..263cc3c
--- /dev/null
@@ -0,0 +1,148 @@
+From ff98c42f0e6ce22969e986933d0a60d73a281a1d Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Tue, 10 Jan 2017 13:31:17 -0500
+Subject: [PATCH 1/5] Roll up TIS patches
+
+---
+ manifests/mon.pp | 14 +++++++++++---
+ manifests/osd.pp | 38 +++++++++++++++++++-------------------
+ manifests/rgw.pp |  7 +++++++
+ 3 files changed, 37 insertions(+), 22 deletions(-)
+
+diff --git a/manifests/mon.pp b/manifests/mon.pp
+index bc0298c..fa99df5 100644
+--- a/manifests/mon.pp
++++ b/manifests/mon.pp
+@@ -65,6 +65,8 @@ define ceph::mon (
+   $authentication_type = 'cephx',
+   $key = undef,
+   $keyring  = undef,
++  $fsid = undef,
++  $service_ensure = 'running',
+   $exec_timeout = $::ceph::params::exec_timeout,
+   ) {
+@@ -154,6 +156,10 @@ test -e \$mon_data/done
+         }
+       }
++      if $fsid {
++        $fsid_option = "--fsid ${fsid}"
++      }
++
+       Ceph_config<||>
+       # prevent automatic creation of the client.admin key by ceph-create-keys
+       -> exec { "ceph-mon-${cluster_name}.client.admin.keyring-${id}":
+@@ -176,7 +182,8 @@ if [ ! -d \$mon_data ] ; then
+               --setuser ceph --setgroup ceph \
+               --mkfs \
+               --id ${id} \
+-              --keyring ${keyring_path} ; then
++              --keyring ${keyring_path} \
++              ${fsid_option} ; then
+             touch \$mon_data/done \$mon_data/${init} \$mon_data/keyring
+             chown -h ceph:ceph \$mon_data/done \$mon_data/${init} \$mon_data/keyring
+         else
+@@ -186,7 +193,8 @@ if [ ! -d \$mon_data ] ; then
+         if ceph-mon ${cluster_option} \
+               --mkfs \
+               --id ${id} \
+-              --keyring ${keyring_path} ; then
++              --keyring ${keyring_path} \
++              ${fsid_option} ; then
+             touch \$mon_data/done \$mon_data/${init} \$mon_data/keyring
+         else
+             rm -fr \$mon_data
+@@ -203,7 +211,7 @@ test -d  \$mon_data
+         timeout   => $exec_timeout,
+       }
+       -> service { $mon_service:
+-        ensure => running,
++        ensure => $service_ensure,
+       }
+       # if the service is running before we setup the configs, notify service
+diff --git a/manifests/osd.pp b/manifests/osd.pp
+index d24b95e..9b8cd99 100644
+--- a/manifests/osd.pp
++++ b/manifests/osd.pp
+@@ -52,6 +52,8 @@ define ceph::osd (
+   $ensure = present,
+   $journal = "''",
+   $cluster = undef,
++  $cluster_uuid = undef,
++  $uuid = undef,
+   $exec_timeout = $::ceph::params::exec_timeout,
+   $selinux_file_context = 'ceph_var_lib_t',
+   $fsid = $::ceph::profile::params::fsid,
+@@ -68,6 +70,14 @@ define ceph::osd (
+     }
+     $cluster_option = "--cluster ${cluster_name}"
++    if $cluster_uuid {
++      $cluster_uuid_option = "--cluster-uuid ${cluster_uuid}"
++    }
++
++    if $uuid {
++      $uuid_option = "--osd-uuid ${uuid}"
++    }
++
+     if $ensure == present {
+       $ceph_check_udev = "ceph-osd-check-udev-${name}"
+@@ -120,25 +130,15 @@ test -z $(ceph-disk list $(readlink -f ${data}) | egrep -o '[0-9a-f]{8}-([0-9a-f
+       Exec[$ceph_check_udev] -> Exec[$ceph_prepare]
+       # ceph-disk: prepare should be idempotent http://tracker.ceph.com/issues/7475
+       exec { $ceph_prepare:
+-        command   => "/bin/true # comment to satisfy puppet syntax requirements
+-set -ex
+-disk=$(readlink -f ${data})
+-if ! test -b \$disk ; then
+-    echo \$disk | egrep -e '^/dev' -q -v
+-    mkdir -p \$disk
+-    if getent passwd ceph >/dev/null 2>&1; then
+-        chown -h ceph:ceph \$disk
+-    fi
+-fi
+-ceph-disk prepare ${cluster_option} ${fsid_option} $(readlink -f ${data}) $(readlink -f ${journal})
+-udevadm settle
+-",
+-        unless    => "/bin/true # comment to satisfy puppet syntax requirements
+-set -ex
+-disk=$(readlink -f ${data})
+-ceph-disk list | egrep \" *(\${disk}1?|\${disk}p1?) .*ceph data, (prepared|active)\" ||
+-{ test -f \$disk/fsid && test -f \$disk/ceph_fsid && test -f \$disk/magic ;}
+-",
++
++        command   => "/usr/sbin/ceph-disk prepare ${cluster_option} ${cluster_uuid_option} ${uuid_option} --fs-type xfs --zap-disk ${data} ${journal}",
++        # We don't want to erase the disk if:
++        # 1. There is already ceph data on the disk for our cluster AND
++        # 2. The uuid for the OSD we are configuring matches the uuid for the
++        #    OSD on the disk. We don't want to attempt to re-use an OSD that
++        #    had previously been deleted.
++        unless    => "/usr/sbin/ceph-disk list | grep -v 'unknown cluster' | grep ' *${data}.*ceph data' | grep 'osd uuid ${uuid}'",
++
+         logoutput => true,
+         timeout   => $exec_timeout,
+         tag       => 'prepare',
+diff --git a/manifests/rgw.pp b/manifests/rgw.pp
+index 2612785..ebc83ce 100644
+--- a/manifests/rgw.pp
++++ b/manifests/rgw.pp
+@@ -185,6 +185,13 @@ define ceph::rgw (
+       provider => $::ceph::params::service_provider,
+     }
+   # Everything else that is supported by puppet-ceph should run systemd.
++  } elsif $::service_provider == 'systemd' {
++    Service {
++      name     => "radosgw-${name}",
++      start    => "systemctl start ceph-radosgw",
++      stop     => "systemctl stop ceph-radosgw",
++      status   => "systemctl status ceph-radosgw",
++    }
+   } else {
+     Service {
+       name   => "ceph-radosgw@${name}",
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0002-Newton-rebase-fixes.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0002-Newton-rebase-fixes.patch
new file mode 100644 (file)
index 0000000..bf626ea
--- /dev/null
@@ -0,0 +1,47 @@
+From 570520c5197dd36c3e4a7956d5916426fb75856a Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Tue, 7 Feb 2017 15:49:02 -0500
+Subject: [PATCH] Newton rebase fixes
+
+---
+ manifests/mon.pp | 9 ++++++---
+ manifests/osd.pp | 2 +-
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/manifests/mon.pp b/manifests/mon.pp
+index fa99df5..b3458d6 100644
+--- a/manifests/mon.pp
++++ b/manifests/mon.pp
+@@ -99,10 +99,13 @@ define ceph::mon (
+       }
+     # Everything else that is supported by puppet-ceph should run systemd.
+     } else {
+-      $init = 'systemd'
++      $init = 'sysvinit'
+       Service {
+-        name   => "ceph-mon@${id}",
+-        enable => $mon_enable,
++        name     => "ceph-mon-${id}",
++        provider => $::ceph::params::service_provider,
++        start    => "service ceph start mon.${id}",
++        stop     => "service ceph stop mon.${id}",
++        status   => "service ceph status mon.${id}",
+       }
+     }
+diff --git a/manifests/osd.pp b/manifests/osd.pp
+index 9b8cd99..2187361 100644
+--- a/manifests/osd.pp
++++ b/manifests/osd.pp
+@@ -56,7 +56,7 @@ define ceph::osd (
+   $uuid = undef,
+   $exec_timeout = $::ceph::params::exec_timeout,
+   $selinux_file_context = 'ceph_var_lib_t',
+-  $fsid = $::ceph::profile::params::fsid,
++  $fsid = undef,
+   ) {
+     include ::ceph::params
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0003-Ceph-Jewel-rebase.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0003-Ceph-Jewel-rebase.patch
new file mode 100644 (file)
index 0000000..d1385ae
--- /dev/null
@@ -0,0 +1,110 @@
+From c9a5520620d313c08e7f751f3469ec5f4c220486 Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Thu, 23 Mar 2017 08:04:31 +0000
+Subject: [PATCH] ceph jewel rebase
+
+---
+ manifests/mon.pp          |  1 +
+ manifests/rgw.pp          | 33 +++++++++++++++++++++++++--------
+ manifests/rgw/keystone.pp |  6 +++---
+ 3 files changed, 29 insertions(+), 11 deletions(-)
+
+diff --git a/manifests/mon.pp b/manifests/mon.pp
+index b3458d6..17cb925 100644
+--- a/manifests/mon.pp
++++ b/manifests/mon.pp
+@@ -106,6 +106,7 @@ define ceph::mon (
+         start    => "service ceph start mon.${id}",
+         stop     => "service ceph stop mon.${id}",
+         status   => "service ceph status mon.${id}",
++        enable   => $mon_enable,
+       }
+     }
+diff --git a/manifests/rgw.pp b/manifests/rgw.pp
+index ebc83ce..56fb4a8 100644
+--- a/manifests/rgw.pp
++++ b/manifests/rgw.pp
+@@ -193,23 +193,40 @@ define ceph::rgw (
+       status   => "systemctl status ceph-radosgw",
+     }
+   } else {
++    if $rgw_enable {
++      file { "${rgw_data}/sysvinit":
++        ensure => present,
++        before => Service["radosgw-${name}"],
++      }
++    }
++
+     Service {
+-      name   => "ceph-radosgw@${name}",
+-      enable => $rgw_enable,
++      name     => "radosgw-${name}",
++      start    => 'service radosgw start',
++      stop     => 'service radosgw stop',
++      status   => 'service radosgw status',
++      provider => $::ceph::params::service_provider,
+     }
+   }
+-  service { $rgw_service:
++  #for RHEL/CentOS7, systemctl needs to reload to pickup the ceph-radosgw init file
++  if (($::operatingsystem == 'RedHat' or $::operatingsystem == 'CentOS') and (versioncmp($::operatingsystemmajrelease, '7') >= 0))
++  {
++    exec { 'systemctl-reload-from-rgw': #needed for the new init file
++      command => '/usr/bin/systemctl daemon-reload',
++    }
++  }
++  service { "radosgw-${name}":
+     ensure => $rgw_ensure,
+-    tag    => ['ceph-radosgw']
++    tag    => ['radosgw']
+   }
+-  Ceph_config<||> ~> Service<| tag == 'ceph-radosgw' |>
++  Ceph_config<||> -> Service["radosgw-${name}"]
+   Package<| tag == 'ceph' |> -> File['/var/lib/ceph/radosgw']
+   Package<| tag == 'ceph' |> -> File[$log_file]
+   File['/var/lib/ceph/radosgw']
+   -> File[$rgw_data]
+-  -> Service<| tag == 'ceph-radosgw' |>
+-  File[$log_file] -> Service<| tag == 'ceph-radosgw' |>
+-  Ceph::Pool<||> -> Service<| tag == 'ceph-radosgw' |>
++  -> Service["radosgw-${name}"]
++  File[$log_file] -> Service["radosgw-${name}"]
++  Ceph::Pool<||> -> Service["radosgw-${name}"]
+ }
+diff --git a/manifests/rgw/keystone.pp b/manifests/rgw/keystone.pp
+index 8351177..c371fd0 100644
+--- a/manifests/rgw/keystone.pp
++++ b/manifests/rgw/keystone.pp
+@@ -148,7 +148,7 @@ define ceph::rgw::keystone (
+     exec { "${name}-nssdb-ca":
+       command => "/bin/true  # comment to satisfy puppet syntax requirements
+ set -ex
+-wget --no-check-certificate ${rgw_keystone_url}/v2.0/certificates/ca -O - |
++wget --no-check-certificate ${rgw_keystone_url}/${rgw_keystone_version}/certificates/ca -O - |
+   openssl x509 -pubkey | certutil -A -d ${nss_db_path} -n ca -t \"TCu,Cu,Tuw\"
+ ",
+       unless  => "/bin/true  # comment to satisfy puppet syntax requirements
+@@ -161,7 +161,7 @@ certutil -d ${nss_db_path} -L | grep ^ca
+     exec { "${name}-nssdb-signing":
+       command => "/bin/true  # comment to satisfy puppet syntax requirements
+ set -ex
+-wget --no-check-certificate ${rgw_keystone_url}/v2.0/certificates/signing -O - |
++wget --no-check-certificate ${rgw_keystone_url}/${rgw_keystone_version}/certificates/signing -O - |
+   openssl x509 -pubkey | certutil -A -d ${nss_db_path} -n signing_cert -t \"P,P,P\"
+ ",
+       unless  => "/bin/true  # comment to satisfy puppet syntax requirements
+@@ -176,7 +176,7 @@ certutil -d ${nss_db_path} -L | grep ^signing_cert
+     -> File[$nss_db_path]
+     -> Exec["${name}-nssdb-ca"]
+     -> Exec["${name}-nssdb-signing"]
+-    ~> Service<| tag == 'ceph-radosgw' |>
++    ~> Service<| tag == 'radosgw' |>
+   } else {
+     ceph_config {
+       "client.${name}/nss_db_path":                      ensure => absent;
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0004-US92424-Add-OSD-support-for-persistent-naming.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0004-US92424-Add-OSD-support-for-persistent-naming.patch
new file mode 100644 (file)
index 0000000..1947922
--- /dev/null
@@ -0,0 +1,29 @@
+From 7a4c325194885dc43fc87f7094873e0067801652 Mon Sep 17 00:00:00 2001
+From: Robert Church <robert.church@windriver.com>
+Date: Thu, 13 Apr 2017 20:31:21 -0500
+Subject: [PATCH] US92424: Add OSD support for persistent naming
+
+This allows the manifest to provide udev generated /dev/disk/by-* links
+to configure the OSDs without requiring any additional changes. The
+'readlink -f' will produce the currently enumerated device node
+associated with udev link.
+---
+ manifests/osd.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/osd.pp b/manifests/osd.pp
+index 2187361..d9cf5b1 100644
+--- a/manifests/osd.pp
++++ b/manifests/osd.pp
+@@ -61,7 +61,7 @@ define ceph::osd (
+     include ::ceph::params
+-    $data = $name
++    $data = generate('/bin/bash','-c',"/bin/readlink -f ${name}")
+     if $cluster {
+       $cluster_name = $cluster
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0005-Remove-puppetlabs-apt-as-ceph-requirement.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0005-Remove-puppetlabs-apt-as-ceph-requirement.patch
new file mode 100644 (file)
index 0000000..8d43b4e
--- /dev/null
@@ -0,0 +1,29 @@
+From 8875f0dfb30856ba9d2d629dc3c55d304537ad72 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Mon, 28 Oct 2019 14:38:12 -0700
+Subject: [PATCH] Remove puppetlabs-apt as ceph requirement
+
+STX patch ported from 8ab55c717d5088d8c75b465f5b9196036e0968ce
+We will never install apt or puppet-apt, so this requirement cannot be fulfilled
+---
+ metadata.json | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/metadata.json b/metadata.json
+index a760f1c..5997e16 100644
+--- a/metadata.json
++++ b/metadata.json
+@@ -47,10 +47,6 @@
+     ],
+     "description": "Installs and configures Ceph.",
+     "dependencies": [
+-        {
+-            "name": "puppetlabs/apt",
+-            "version_requirement": ">=2.0.0 <3.0.0"
+-        },
+         {
+             "name": "puppetlabs/apache",
+             "version_requirement": ">=1.4.1 <2.0.0"
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0006-ceph-disk-prepare-invalid-data-disk-value.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0006-ceph-disk-prepare-invalid-data-disk-value.patch
new file mode 100644 (file)
index 0000000..401172e
--- /dev/null
@@ -0,0 +1,68 @@
+From 5d8f3dd5d18d611151b4658c5c876e8a3ad8fe51 Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Wed, 31 Oct 2018 16:28:45 +0000
+Subject: [PATCH] ceph-disk prepare invalid data disk value
+
+ceph-disk prepare data OSD parameter contains a new line causing
+puppet manifest to fail:
+
+1. $data = generate('/bin/bash','-c',"/bin/readlink -f ${name}")
+
+   is expanded together with a new line in:
+
+   exec { $ceph_prepare:
+     command   => "/usr/sbin/ceph-disk prepare ${cluster_option}
+                    ${cluster_uuid_option} ${uuid_option}
+                    --fs-type xfs --zap-disk ${data} ${journal}"
+
+   just before ${journal} is expanded. Puppet reports:
+
+     sh: line 1: : command not found
+
+   when trying to run '' (default journal value).
+
+2. 'readlink' should be called when running ceph-disk prepare
+   command, not when the puppet resource is defined. Let
+   exec's shell call readlink instead of using puppet's
+   generate() . See also:
+
+     https://github.com/openstack/puppet-ceph/commit/ff2b2e689846dd3d980c7c706c591e8cfb8f33a9
+
+Added --verbose and --log-stdout options to log commands executed
+by 'ceph-disk prepare' and identify where it fails.
+---
+ manifests/osd.pp | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/manifests/osd.pp b/manifests/osd.pp
+index d9cf5b1..889d28a 100644
+--- a/manifests/osd.pp
++++ b/manifests/osd.pp
+@@ -61,7 +61,7 @@ define ceph::osd (
+     include ::ceph::params
+-    $data = generate('/bin/bash','-c',"/bin/readlink -f ${name}")
++    $data = $name
+     if $cluster {
+       $cluster_name = $cluster
+@@ -131,13 +131,13 @@ test -z $(ceph-disk list $(readlink -f ${data}) | egrep -o '[0-9a-f]{8}-([0-9a-f
+       # ceph-disk: prepare should be idempotent http://tracker.ceph.com/issues/7475
+       exec { $ceph_prepare:
+-        command   => "/usr/sbin/ceph-disk prepare ${cluster_option} ${cluster_uuid_option} ${uuid_option} --fs-type xfs --zap-disk ${data} ${journal}",
++        command   => "/usr/sbin/ceph-disk --verbose --log-stdout prepare ${cluster_option} ${cluster_uuid_option} ${uuid_option} --fs-type xfs --zap-disk $(readlink -f ${data}) $(readlink -f ${journal})",
+         # We don't want to erase the disk if:
+         # 1. There is already ceph data on the disk for our cluster AND
+         # 2. The uuid for the OSD we are configuring matches the uuid for the
+         #    OSD on the disk. We don't want to attempt to re-use an OSD that
+         #    had previously been deleted.
+-        unless    => "/usr/sbin/ceph-disk list | grep -v 'unknown cluster' | grep ' *${data}.*ceph data' | grep 'osd uuid ${uuid}'",
++        unless    => "/usr/sbin/ceph-disk list | grep -v 'unknown cluster' | grep \" *$(readlink -f ${data}).*ceph data\" | grep 'osd uuid ${uuid}'",
+         logoutput => true,
+         timeout   => $exec_timeout,
+-- 
+2.16.5
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0007-Add-StarlingX-specific-restart-command-for-Ceph-moni.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0007-Add-StarlingX-specific-restart-command-for-Ceph-moni.patch
new file mode 100644 (file)
index 0000000..1c3926f
--- /dev/null
@@ -0,0 +1,35 @@
+From a364f37cacab78cdaad5ebd23ab24cf400a3fa40 Mon Sep 17 00:00:00 2001
+From: Ovidiu Poncea <ovidiu.poncea@windriver.com>
+Date: Thu, 20 Dec 2018 07:18:55 -0500
+Subject: [PATCH] Add StarlingX specific restart command for Ceph monitors
+
+Since we don't use systemd to manage Ceph and we have pmon monitoring we
+have to make sure that:
+1. Restarting is properly handled as "systemctl restart" will return error
+   and manifest will fail;
+2. Pmon does not check ceph-mon status during restart. Otherwise we risk
+   getting into a race condition between the puppet restart and pmon
+   detecting that ceph is down and trying a restart.
+
+Both are resolved when using /etc/init.d/ceph-init-wrapper restart
+
+Signed-off-by: Ovidiu Poncea <Ovidiu.Poncea@windriver.com>
+---
+ manifests/mon.pp | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/manifests/mon.pp b/manifests/mon.pp
+index 17cb925..62d5059 100644
+--- a/manifests/mon.pp
++++ b/manifests/mon.pp
+@@ -106,6 +106,7 @@ define ceph::mon (
+         start    => "service ceph start mon.${id}",
+         stop     => "service ceph stop mon.${id}",
+         status   => "service ceph status mon.${id}",
++        restart  => "/etc/init.d/ceph-init-wrapper restart mon.${id}",
+         enable   => $mon_enable,
+       }
+     }
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0008-ceph-mimic-prepare-activate-osd.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0008-ceph-mimic-prepare-activate-osd.patch
new file mode 100644 (file)
index 0000000..6ca302f
--- /dev/null
@@ -0,0 +1,64 @@
+From 4c2e2a196cb5a6890e35098c8499688fc1c26f5c Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Thu, 4 Apr 2019 16:52:12 +0000
+Subject: [PATCH] ceph-mimic-prepare-activate-osd
+
+Prepare and activate disk using filestore
+and given OSD id.
+---
+ manifests/osd.pp | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+diff --git a/manifests/osd.pp b/manifests/osd.pp
+index 889d28a..c51a445 100644
+--- a/manifests/osd.pp
++++ b/manifests/osd.pp
+@@ -54,6 +54,7 @@ define ceph::osd (
+   $cluster = undef,
+   $cluster_uuid = undef,
+   $uuid = undef,
++  $osdid = undef,
+   $exec_timeout = $::ceph::params::exec_timeout,
+   $selinux_file_context = 'ceph_var_lib_t',
+   $fsid = undef,
+@@ -78,6 +79,10 @@ define ceph::osd (
+       $uuid_option = "--osd-uuid ${uuid}"
+     }
++    if $osdid {
++      $osdid_option = "--osd-id ${osdid}"
++    }
++
+     if $ensure == present {
+       $ceph_check_udev = "ceph-osd-check-udev-${name}"
+@@ -131,7 +136,16 @@ test -z $(ceph-disk list $(readlink -f ${data}) | egrep -o '[0-9a-f]{8}-([0-9a-f
+       # ceph-disk: prepare should be idempotent http://tracker.ceph.com/issues/7475
+       exec { $ceph_prepare:
+-        command   => "/usr/sbin/ceph-disk --verbose --log-stdout prepare ${cluster_option} ${cluster_uuid_option} ${uuid_option} --fs-type xfs --zap-disk $(readlink -f ${data}) $(readlink -f ${journal})",
++        command   => "/bin/true # comment to satisfy puppet syntax requirements
++set -ex
++ceph-disk --verbose --log-stdout prepare --filestore  ${cluster_uuid_option} ${uuid_option} ${osdid_option} --fs-type xfs --zap-disk $(readlink -f ${data}) $(readlink -f ${journal})
++mkdir -p /var/lib/ceph/osd/ceph-${osdid}
++ceph auth del osd.${osdid} || true
++mount $(readlink -f ${data})1 /var/lib/ceph/osd/ceph-${osdid}
++ceph-osd --id ${osdid} --mkfs --mkkey --mkjournal
++ceph auth add osd.${osdid} osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-${osdid}/keyring
++umount /var/lib/ceph/osd/ceph-${osdid}
++",
+         # We don't want to erase the disk if:
+         # 1. There is already ceph data on the disk for our cluster AND
+         # 2. The uuid for the OSD we are configuring matches the uuid for the
+@@ -171,7 +185,7 @@ if ! test -b \$disk ; then
+ fi
+ # activate happens via udev when using the entire device
+ if ! test -b \$disk || ! test -b \${disk}1 || ! test -b \${disk}p1 ; then
+-  ceph-disk activate \$disk || true
++  ceph-disk activate \${disk}1 || true
+ fi
+ if test -f ${udev_rules_file}.disabled && ( test -b \${disk}1 || test -b \${disk}p1 ); then
+   ceph-disk activate \${disk}1 || true
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0009-fix-ceph-osd-disk-partition-for-nvme-disks.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0009-fix-ceph-osd-disk-partition-for-nvme-disks.patch
new file mode 100644 (file)
index 0000000..6dfed20
--- /dev/null
@@ -0,0 +1,89 @@
+From b0dd34d2d580c817f9ef6eb62927ba63bebe73c3 Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Thu, 25 Apr 2019 15:37:53 +0000
+Subject: [PATCH] fix ceph osd disk partition for nvme disks
+
+---
+ manifests/osd.pp | 38 +++++++++++++++++++++++++++++++-------
+ 1 file changed, 31 insertions(+), 7 deletions(-)
+
+diff --git a/manifests/osd.pp b/manifests/osd.pp
+index c51a445..5bd30c5 100644
+--- a/manifests/osd.pp
++++ b/manifests/osd.pp
+@@ -138,10 +138,17 @@ test -z $(ceph-disk list $(readlink -f ${data}) | egrep -o '[0-9a-f]{8}-([0-9a-f
+         command   => "/bin/true # comment to satisfy puppet syntax requirements
+ set -ex
+-ceph-disk --verbose --log-stdout prepare --filestore  ${cluster_uuid_option} ${uuid_option} ${osdid_option} --fs-type xfs --zap-disk $(readlink -f ${data}) $(readlink -f ${journal})
++disk=$(readlink -f ${data})
++ceph-disk --verbose --log-stdout prepare --filestore  ${cluster_uuid_option} ${uuid_option} ${osdid_option} --fs-type xfs --zap-disk \${disk} $(readlink -f ${journal})
+ mkdir -p /var/lib/ceph/osd/ceph-${osdid}
+ ceph auth del osd.${osdid} || true
+-mount $(readlink -f ${data})1 /var/lib/ceph/osd/ceph-${osdid}
++part=\${disk}
++if [[ \$part == *nvme* ]]; then
++   part=\${part}p1
++else 
++   part=\${part}1
++fi
++mount $(readlink -f \${part}) /var/lib/ceph/osd/ceph-${osdid}
+ ceph-osd --id ${osdid} --mkfs --mkkey --mkjournal
+ ceph auth add osd.${osdid} osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-${osdid}/keyring
+ umount /var/lib/ceph/osd/ceph-${osdid}
+@@ -183,12 +190,17 @@ if ! test -b \$disk ; then
+         chown -h ceph:ceph \$disk
+     fi
+ fi
+-# activate happens via udev when using the entire device
++part=\${disk}
++if [[ \${part} == *nvme* ]]; then
++   part=\${part}p1
++else 
++   part=\${part}1
++fi
+ if ! test -b \$disk || ! test -b \${disk}1 || ! test -b \${disk}p1 ; then
+-  ceph-disk activate \${disk}1 || true
++  ceph-disk activate \${part} || true
+ fi
+ if test -f ${udev_rules_file}.disabled && ( test -b \${disk}1 || test -b \${disk}p1 ); then
+-  ceph-disk activate \${disk}1 || true
++  ceph-disk activate \${part} || true
+ fi
+ ",
+         unless    => "/bin/true # comment to satisfy puppet syntax requirements
+@@ -206,8 +218,14 @@ ls -ld /var/lib/ceph/osd/${cluster_name}-* | grep \" $(readlink -f ${data})\$\"
+         command   => "/bin/true # comment to satisfy puppet syntax requirements
+ set -ex
+ disk=$(readlink -f ${data})
++part=\${disk}
++if [[ \${part} == *nvme* ]]; then
++   part=\${part}p1
++else 
++   part=\${part}1
++fi
+ if [ -z \"\$id\" ] ; then
+-  id=$(ceph-disk list | sed -nEe \"s:^ *\${disk}1? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p\")
++  id=$(ceph-disk list | sed -nEe \"s:^ *\${part}? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p\")
+ fi
+ if [ -z \"\$id\" ] ; then
+   id=$(ls -ld /var/lib/ceph/osd/${cluster_name}-* | sed -nEe \"s:.*/${cluster_name}-([0-9]+) *-> *\${disk}\$:\\1:p\" || true)
+@@ -227,8 +245,14 @@ fi
+         unless    => "/bin/true # comment to satisfy puppet syntax requirements
+ set -ex
+ disk=$(readlink -f ${data})
+part=\${disk}
++if [[ \$part == *nvme* ]]; then
++   part=\${part}p1
++else 
++   part=\${part}1
++fi
+ if [ -z \"\$id\" ] ; then
+-  id=$(ceph-disk list | sed -nEe \"s:^ *\${disk}1? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p\")
++  id=$(ceph-disk list | sed -nEe \"s:^ *\${part}? .*(ceph data|mounted on).*osd\\.([0-9]+).*:\\2:p\")
+ fi
+ if [ -z \"\$id\" ] ; then
+   id=$(ls -ld /var/lib/ceph/osd/${cluster_name}-* | sed -nEe \"s:.*/${cluster_name}-([0-9]+) *-> *\${disk}\$:\\1:p\" || true)
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0010-wipe-unprepared-disks.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0010-wipe-unprepared-disks.patch
new file mode 100644 (file)
index 0000000..fcebed8
--- /dev/null
@@ -0,0 +1,25 @@
+From 828af5dec53192207637d15397887e058d6ea0fb Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Fri, 26 Apr 2019 00:22:12 +0000
+Subject: [PATCH] wipe unprepared disks
+
+---
+ manifests/osd.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/osd.pp b/manifests/osd.pp
+index 5bd30c5..ab65924 100644
+--- a/manifests/osd.pp
++++ b/manifests/osd.pp
+@@ -158,7 +158,7 @@ umount /var/lib/ceph/osd/ceph-${osdid}
+         # 2. The uuid for the OSD we are configuring matches the uuid for the
+         #    OSD on the disk. We don't want to attempt to re-use an OSD that
+         #    had previously been deleted.
+-        unless    => "/usr/sbin/ceph-disk list | grep -v 'unknown cluster' | grep \" *$(readlink -f ${data}).*ceph data\" | grep 'osd uuid ${uuid}'",
++        unless    => "/usr/sbin/ceph-disk list | grep -v 'unknown cluster' | grep \" *$(readlink -f ${data}).*ceph data\" | grep -v unprepared | grep 'osd uuid ${uuid}'",
+         logoutput => true,
+         timeout   => $exec_timeout,
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/0011-puppet-ceph-changes-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/0011-puppet-ceph-changes-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..6b7e8a1
--- /dev/null
@@ -0,0 +1,23 @@
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 2d4d722..4cfb1b4 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -51,11 +51,16 @@ class ceph::params (
+   case $::osfamily {
+     'Debian': {
+-      $pkg_radosgw         = 'radosgw'
++      if ($::operatingsystem == 'poky-stx') {
++        $pkg_radosgw         = 'ceph'
++        $service_provider    = 'systemd'
++      } else {
++        $pkg_radosgw         = 'radosgw'
++        $service_provider    = 'debian'
++      }
+       $user_radosgw        = 'www-data'
+       $pkg_fastcgi         = 'libapache2-mod-fastcgi'
+       $pkg_nsstools        = ['libnss3-tools', 'wget']
+-      $service_provider    = 'debian'
+       $pkg_policycoreutils = 'policycoreutils'
+     }
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ceph/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-ceph/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..460c87e
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-ceph.gemspec      2019-10-28 14:39:41.525008548 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-ceph'
++  s.version     = '2.4.1'
++  s.date        = '2017-09-07'
++  s.summary     = "Community Developed Ceph Module"
++  s.description = s.summary
++  s.authors     = ["rcritten"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://launchpad.net/puppet-ceph'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-certmonger/0001-puppet-certmonger-adjust-path-to-poky-rootfs.patch b/meta-stx/recipes-support/puppet/files/puppet-certmonger/0001-puppet-certmonger-adjust-path-to-poky-rootfs.patch
new file mode 100644 (file)
index 0000000..e80b3c7
--- /dev/null
@@ -0,0 +1,25 @@
+From c513b8d9591740bac5cdbb240853700971eb7c62 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Sat, 7 Mar 2020 00:38:52 -0800
+Subject: [PATCH] puppet-certmonger: adjust path to poky rootfs
+
+---
+ manifests/scripts.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/scripts.pp b/manifests/scripts.pp
+index d53eacb..54945b6 100644
+--- a/manifests/scripts.pp
++++ b/manifests/scripts.pp
+@@ -15,7 +15,7 @@ class certmonger::scripts (
+     mode   => '0755',
+     source => 'puppet:///modules/certmonger/verify_certmonger_request.sh',
+   }
+-  file { '/usr/local/bin/change-perms-restart':
++  file { '/usr/bin/change-perms-restart':
+     ensure => 'present',
+     owner  => 'root',
+     group  => 'root',
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-certmonger/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-certmonger/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..b8fdce4
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-certmonger.gemspec        2019-10-28 07:51:11.303843437 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-certmonger'
++  s.version     = '1.1.1'
++  s.date        = '2016-10-10'
++  s.summary     = "Puppet certmonger module"
++  s.description = s.summary
++  s.authors     = ["Puppet saz "]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/earsdown/puppet-certmonger'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-collectd/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-collectd/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..f381798
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-collectd.gemspec  2019-10-28 08:10:42.292236594 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-collectd'
++  s.version     = '5.1.0'
++  s.date        = '2016-08-18'
++  s.summary     = "Puppet collectd module"
++  s.description = s.summary
++  s.authors     = ["Puppet VoxPupuli"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/voxpupuli/puppet-collectd'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0001-puppet-dnsmasq-Kilo-quilt-patches.patch b/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0001-puppet-dnsmasq-Kilo-quilt-patches.patch
new file mode 100644 (file)
index 0000000..ff631ec
--- /dev/null
@@ -0,0 +1,116 @@
+From 7430149d3a7f1ab9f93ec863e55cdf6d96cd4f06 Mon Sep 17 00:00:00 2001
+From: Al Bailey <al.bailey@windriver.com>
+Date: Tue, 7 Jun 2016 10:22:23 -0400
+Subject: [PATCH] puppet-dnsmasq Kilo quilt patches
+
+---
+ packstack/puppet/modules/dnsmasq/manifests/init.pp          | 8 ++++++++
+ packstack/puppet/modules/dnsmasq/manifests/params.pp        | 7 +++++--
+ packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb | 9 ++++++---
+ 3 files changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/init.pp b/packstack/puppet/modules/dnsmasq/manifests/init.pp
+index 176bec7..c61fd94 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/init.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/init.pp
+@@ -258,6 +258,13 @@
+ #   If you don't want dnsmasq to read /etc/hosts, set this to true.
+ #   Default: false
+ #
++# [*dhcp_hostsfile*]
++#   Read DHCP host information from the specified file. The file contains
++#   information about one host per line. The format of a line is the same
++#   as text to the right of '=' in --dhcp-host. The advantage of storing
++#   DHCP host information in this file is that it can be changed without
++#   re-starting dnsmasq: the file will be re-read when dnsmasq receives SIGHUP.
++#
+ # [*addn_hosts*]
+ #   If you want dnsmasq to read another file/s, as well as /etc/hosts, use this.
+ #   It can be an array of files to read. See next option to manage these files with
+@@ -457,6 +464,7 @@ class dnsmasq (
+   $no_poll             = params_lookup( 'no_poll' ),
+   $bind_interfaces     = params_lookup( 'bind_interfaces' ),
+   $no_hosts            = params_lookup( 'no_hosts' ),
++  $dhcp_hostsfile      = params_lookup( 'dhcp_hostsfile' ),
+   $addn_hosts          = params_lookup( 'addn_hosts' ),
+   $addn_hosts_dir      = params_lookup( 'addn_hosts_dir' ),
+   $expand_hosts        = params_lookup( 'expand_hosts' ),
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/params.pp b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+index 5b8f02d..6dd5b96 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/params.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+@@ -38,6 +38,7 @@ class dnsmasq::params {
+   $process_user = $::operatingsystem ? {
+     /(?i:Debian|Ubuntu|Mint)/ => 'dnsmasq',
++    /(?i:wrlinux)/            => 'root',
+     default                   => 'nobody',
+   }
+@@ -62,7 +63,7 @@ class dnsmasq::params {
+   }
+   $config_file_init = $::operatingsystem ? {
+-    /(?i:Debian|Ubuntu|Mint)/ => '/etc/default/dnsmasq',
++    /(?i:Debian|Ubuntu|Mint|wrlinux)/ => '/etc/default/dnsmasq',
+     default                   => '/etc/sysconfig/dnsmasq',
+   }
+@@ -90,6 +91,7 @@ class dnsmasq::params {
+   $no_poll = false
+   $bind_interfaces = false
+   $no_hosts = false
++  $dhcp_hostsfile = ''
+   $addn_hosts = ''
+   $addn_hosts_dir = ''
+   $expand_hosts = false
+@@ -115,6 +117,7 @@ class dnsmasq::params {
+   }
+   $mx_target = ''
+   $localmx = false
++  $selfmx = false
+   $server = ''
+   $local = ''
+   $address = ''
+@@ -151,7 +154,7 @@ class dnsmasq::params {
+   $version = 'present'
+   $absent = false
+   $disable = false
+-  $disableboot = false
++  $disableboot = true
+   ### General module variables that can have a site or per module default
+   $monitor = false
+diff --git a/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb b/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
+index 7bc4a03..ea5aa01 100644
+--- a/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
++++ b/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
+@@ -3,12 +3,12 @@
+ <% if scope.lookupvar('dnsmasq::port') != '' -%>
+ port=<%= scope.lookupvar('dnsmasq::port') %>
+ <% end -%>
+-<% if scope.lookupvar('dnsmasq::bool_domain_need') -%> 
+-domain-needed
+-<% end -%>
+ <% if scope.lookupvar('dnsmasq::bool_bogus_priv') -%>
+ bogus-priv
+ <% end -%>
++<% if scope.lookupvar('dnsmasq::bool_domain_needed') -%>
++domain-needed
++<% end -%>
+ <% if scope.lookupvar('dnsmasq::bool_filterwin2k') -%>
+ filterwin2k
+ <% end -%>
+@@ -33,6 +33,9 @@ bind-interfaces
+ <% if scope.lookupvar('dnsmasq::bool_no_hosts') -%>
+ no-hosts
+ <% end -%>
++<% if scope.lookupvar('dnsmasq::dhcp_hostsfile') != '' -%>
++dhcp-hostsfile=<%= scope.lookupvar('dnsmasq::dhcp_hostsfile') %>
++<% end -%>
+ <% if scope.lookupvar('dnsmasq::bool_expand_hosts') -%>
+ expand-hosts
+ <% end -%>
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0002-Fixing-mismatched-permission-on-dnsmasq-conf.patch b/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0002-Fixing-mismatched-permission-on-dnsmasq-conf.patch
new file mode 100644 (file)
index 0000000..40d422e
--- /dev/null
@@ -0,0 +1,27 @@
+From b8308a495f853d066c5c0e5d2257a070b033f626 Mon Sep 17 00:00:00 2001
+From: Kam Nasim <kam.nasim@windriver.com>
+Date: Tue, 5 Jul 2016 16:46:28 -0400
+Subject: [PATCH] CGTS-4280: Fixing mismatched permission on dnsmasq.conf which
+ was set to 0640 when created from config_controller (controller-0) but was at
+ 0644 on controller-1 through application of this manifest.
+
+---
+ packstack/puppet/modules/dnsmasq/manifests/params.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/params.pp b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+index 6dd5b96..6129b57 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/params.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+@@ -51,7 +51,7 @@ class dnsmasq::params {
+   }
+   $config_file_mode = $::operatingsystem ? {
+-    default => '0644',
++    default => '0640',
+   }
+   $config_file_owner = $::operatingsystem ? {
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0003-Support-management-of-tftp_max-option.patch b/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0003-Support-management-of-tftp_max-option.patch
new file mode 100644 (file)
index 0000000..08341e8
--- /dev/null
@@ -0,0 +1,62 @@
+From 017e2ed0c664fb8689f6a9c4352db740c2c39725 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Thu, 15 Sep 2016 16:49:48 -0400
+Subject: [PATCH] Support management of tftp_max option
+
+---
+ packstack/puppet/modules/dnsmasq/manifests/init.pp          | 4 ++++
+ packstack/puppet/modules/dnsmasq/manifests/params.pp        | 1 +
+ packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb | 3 +++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/init.pp b/packstack/puppet/modules/dnsmasq/manifests/init.pp
+index c61fd94..b66ac17 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/init.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/init.pp
+@@ -328,6 +328,9 @@
+ #   Enable dnsmasq's built-in TFTP server
+ #   Default: false
+ #
++# [*tftp_max*]
++#   Max tftp connections
++#
+ # [*tftp_secure*]
+ #   Make the TFTP server more secure: with this set, only files owned by
+ #   the user dnsmasq is running as will be send over the net.
+@@ -476,6 +479,7 @@ class dnsmasq (
+   $pxe_prompt_timeout  = params_lookup( 'pxe_prompt_timeout' ),
+   $pxe_service         = params_lookup( 'pxe_service' ),
+   $enable_tftp         = params_lookup( 'enable_tftp' ),
++  $tftp_max            = params_lookup( 'tftp_max' ),
+   $tftp_secure         = params_lookup( 'tftp_secure' ),
+   $tftp_root           = params_lookup( 'tftp_root' ),
+   $dhcp_lease_max      = params_lookup( 'dhcp_lease_max' ),
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/params.pp b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+index 6129b57..845e91e 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/params.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+@@ -103,6 +103,7 @@ class dnsmasq::params {
+   $pxe_prompt_timeout = '60'
+   $pxe_service = ''
+   $enable_tftp = false
++  $tftp_max = ''
+   $tftp_secure = false
+   $tftp_root = ''
+   $dhcp_lease_max = ''
+diff --git a/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb b/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
+index ea5aa01..6a6cbdf 100644
+--- a/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
++++ b/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
+@@ -60,6 +60,9 @@ pxe-service=<%= scope.lookupvar('dnsmasq::pxe_service') %>
+ <% if scope.lookupvar('dnsmasq::bool_enable_tftp') -%>
+ enable-tftp
+ <% end -%>
++<% if scope.lookupvar('dnsmasq::tftp_max') != '' -%>
++tftp-max=<%= scope.lookupvar('dnsmasq::tftp_max') %>
++<% end -%>
+ <% if scope.lookupvar('dnsmasq::bool_tftp_secure') -%>
+ tftp-secure
+ <% end -%>
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0004-Enable-clear-DNS-cache-on-reload.patch b/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0004-Enable-clear-DNS-cache-on-reload.patch
new file mode 100644 (file)
index 0000000..65b6771
--- /dev/null
@@ -0,0 +1,72 @@
+From 35fa3c673307db2ebed20c952817608fadd26fa6 Mon Sep 17 00:00:00 2001
+From: Tao Liu <tao.liu@windriver.com>
+Date: Thu, 22 Jun 2017 16:33:29 -0400
+Subject: [PATCH 1/1] Enable clear the DNS cache on reload
+
+---
+ packstack/puppet/modules/dnsmasq/manifests/init.pp          | 7 +++++++
+ packstack/puppet/modules/dnsmasq/manifests/params.pp        | 1 +
+ packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb | 3 +++
+ 3 files changed, 11 insertions(+)
+
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/init.pp b/packstack/puppet/modules/dnsmasq/manifests/init.pp
+index b66ac17..93276bb 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/init.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/init.pp
+@@ -211,6 +211,11 @@
+ #   bringing up the link unnecessarily.
+ #   Default: true
+ #
++# [*clear_on_reload*]
++#   Whenever /etc/resolv.conf is re-read or the upstream servers are set via
++#   DBus, clear the DNS cache.
++#   Default: true
++#
+ # [*filterwin2k*]
+ #   Uncomment this to filter useless windows-originated DNS requests
+ #   which can trigger dial-on-demand links needlessly.
+@@ -460,6 +465,7 @@ class dnsmasq (
+   $protocol            = params_lookup( 'protocol' ),
+   $domain_needed       = params_lookup( 'domain_needed' ),
+   $bogus_priv          = params_lookup( 'bogus_priv' ),
++  $clear_on_reload     = params_lookup( 'clear_on_reload' ),
+   $filterwin2k         = params_lookup( 'filterwin2k' ),
+   $resolv_file         = params_lookup( 'resolv_file' ),
+   $strict_order        = params_lookup( 'strict_order' ),
+@@ -531,6 +537,7 @@ class dnsmasq (
+   $bool_domain_needed=any2bool($domain_needed)
+   $bool_bogus_priv=any2bool($bogus_priv)
++  $bool_clear_on_reload=any2bool($clear_on_reload)
+   $bool_filterwin2k=any2bool($filterwin2k)
+   $bool_strict_order=any2bool($strict_order)
+   $bool_no_resolv=any2bool($no_resolv)
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/params.pp b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+index 845e91e..4d8e70a 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/params.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+@@ -84,6 +84,7 @@ class dnsmasq::params {
+   $domain_needed = true
+   $bogus_priv = true
++  $clear_on_reload = true
+   $filterwin2k = false
+   $resolv_file = ''
+   $strict_order = false
+diff --git a/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb b/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
+index bb8d941..109b768 100644
+--- a/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
++++ b/packstack/puppet/modules/dnsmasq/templates/dnsmasq.conf.erb
+@@ -9,6 +9,9 @@ bogus-priv
+ <% if scope.lookupvar('dnsmasq::bool_domain_needed') -%>
+ domain-needed
+ <% end -%>
++<% if scope.lookupvar('dnsmasq::bool_clear_on_reload') -%>
++clear-on-reload
++<% end -%>
+ <% if scope.lookupvar('dnsmasq::bool_filterwin2k') -%>
+ filterwin2k
+ <% end -%>
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0005-puppet-dnsmasq-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/0005-puppet-dnsmasq-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..7383ce2
--- /dev/null
@@ -0,0 +1,22 @@
+diff --git a/packstack/puppet/modules/dnsmasq/manifests/params.pp b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+index 4d8e70a..b978224 100644
+--- a/packstack/puppet/modules/dnsmasq/manifests/params.pp
++++ b/packstack/puppet/modules/dnsmasq/manifests/params.pp
+@@ -38,7 +38,7 @@ class dnsmasq::params {
+   $process_user = $::operatingsystem ? {
+     /(?i:Debian|Ubuntu|Mint)/ => 'dnsmasq',
+-    /(?i:wrlinux)/            => 'root',
++    /(?i:wrlinux|poky-stx)/       => 'root',
+     default                   => 'nobody',
+   }
+@@ -63,7 +63,7 @@ class dnsmasq::params {
+   }
+   $config_file_init = $::operatingsystem ? {
+-    /(?i:Debian|Ubuntu|Mint|wrlinux)/ => '/etc/default/dnsmasq',
++    /(?i:Debian|Ubuntu|Mint|wrlinux|poky-stx)/ => '/etc/default/dnsmasq',
+     default                   => '/etc/sysconfig/dnsmasq',
+   }
diff --git a/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..0dda1c5
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppet-dnsmasq.gemspec   2019-10-31 12:10:38.277914711 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-dnsmasq'
++  s.version     = '1.1.0'
++  s.date        = '2014-03-15'
++  s.summary     = "Puppet module for dnsmasq"
++  s.description = s.summary
++  s.authors     = ["Javier Bertoli"]
++  s.email       = ''
++  s.files       = %w(Modulefile README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'http://www.netmanagers.com.ar'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/metadata.json.patch b/meta-stx/recipes-support/puppet/files/puppet-dnsmasq/metadata.json.patch
new file mode 100644 (file)
index 0000000..361145e
--- /dev/null
@@ -0,0 +1,17 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/metadata.json    2019-10-31 12:11:57.326213149 -0700
+@@ -0,0 +1,14 @@
++{
++  "name": "puppet-dnsmasq",
++  "version": "1.1.0",
++  "author": "Javier Bertoli",
++  "summary": "Puppet module for dnsmasq.",
++  "license": "Apache-2.0",
++  "source": "https://github.com/netmanagers/puppet-dnsmasq",
++  "project_page": "http://www.netmanagers.com.ar",
++  "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
++  "dependencies": [
++      {"name":"puppetlabs/stdlib","version_requirement":">= 4.1.0 < 5.0.0"},
++      {"name":"puppetlabs/concat","version_requirement":">= 1.1.0 < 5.0.0"}
++  ]
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0001-TIS-Patches.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0001-TIS-Patches.patch
new file mode 100644 (file)
index 0000000..a27d0f3
--- /dev/null
@@ -0,0 +1,377 @@
+From 95c0ec5cb26efbe2c5dbf45df21518d8d1776be0 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Wed, 4 Jan 2017 12:15:53 -0500
+Subject: [PATCH] TIS Patches
+
+This patch rolls up the previous TIS patches, which includes:
+1. CGTS-4787 Set DRBD service ensure parameter
+
+2. Updates to fix DRBD resync-rate and engineered parameters:
+
+There are several DRBD performance related parameters that must be set to
+get reasonable resync performance, otherwise default resync throughput
+is limited to 40MB/s.  Note that user community has noted this limit
+when they use default settings, or up-rev DRBD from 8.3, etc.  Eg. they
+realize they hit this limit despite having 10G link or better and faster
+disks.
+
+The following parameters were added to puppet-drbd module for resource
+file generation, in addition to: c-plan-ahead, c-fill-target, c-min-rate,
+c-max-rate, currently engineered for dynamic resync-rates.
+
+disk section:
+- 'resync-rate' (aka 'rate') was missed in the CentOS port from Kilo
+- 'al-extents' set to 3389, set to a prime number. Increasing this improves
+  random write throughput. Could set a bit higher, but would need a study.
+
+net section:
+- 'max-buffers' engineered to scale with supported MBps, setting too low
+  (eg., default setting) is a bottleneck on 10G link.  Set this to
+  maximum settable value of 20000.  Note this parm may be settable to
+  larger values in more current DRBD rev. If we need to support faster
+  disks, likely need to increase this proportionately.
+- 'max-epoch-size' also set to 20000. DRBD tuning recommendation page
+  sets this the same as max-buffers.
+- 'unplug-watermark' set to 16 based on DRBD tuning recommendations page
+- 'sndbuf-size' set to 0 to auto-tune; historically default was too small
+- 'rcvbuf-size' set to 0 to auto-tune
+---
+ manifests/init.pp                            | 11 ++--
+ manifests/resource.pp                        | 93 +++++++++++++++++++++++++---
+ manifests/resource/up.pp                     |  2 +-
+ manifests/service.pp                         |  2 +-
+ templates/header.res.erb                     | 53 ++++++++++++++--
+ templates/primary-resource.res.erb           |  2 +-
+ templates/primary-stacked-resource.res.erb   |  2 +-
+ templates/resource.res.erb                   |  2 +-
+ templates/secondary-resource.res.erb         |  2 +-
+ templates/secondary-stacked-resource.res.erb |  2 +-
+ 10 files changed, 148 insertions(+), 23 deletions(-)
+
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 09f7d48..76ce9c9 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -6,7 +6,8 @@
+ #
+ class drbd(
+   $service_enable = true,
+-  $package_name = 'drbd8-utils',
++  $service_ensure = 'running',
++  $package_name = 'drbd-utils',
+ ) {
+   include ::drbd::service
+@@ -22,7 +23,7 @@ class drbd(
+   }
+   File {
+-    mode    => '0644',
++    mode    => '0640',
+     owner   => 'root',
+     group   => 'root',
+     require => Package['drbd'],
+@@ -45,8 +46,10 @@ class drbd(
+   # only allow files managed by puppet in this directory.
+   file { '/etc/drbd.d':
+     ensure  => directory,
+-    mode    => '0644',
+-    purge   => true,
++    mode    => '0640',
++    # Set purge to false so that it does not clear the dir
++    # when the 2nd drbd resource is added.
++    purge   => false,
+     recurse => true,
+     force   => true,
+     require => Package['drbd'],
+diff --git a/manifests/resource.pp b/manifests/resource.pp
+index af2ff77..10edc1a 100644
+--- a/manifests/resource.pp
++++ b/manifests/resource.pp
+@@ -22,6 +22,10 @@
+ #  [ha_primary] If the resource is being applied on the primary host.
+ #  [initial_setup] If this run is associated with the initial setup. Allows a user
+ #    to only perform dangerous setup on the initial run.
++#  [link_util] replication link network utilization percent
++#  [link_speed] replication link network speed mbps
++#  [num_parallel] number of parallel drbd filesystems to sync
++#  [rtt_ms] round-trip-time milliseconds (i.e., ping between replication nodes)
+ define drbd::resource (
+   $host1          = undef,
+   $host2          = undef,
+@@ -39,7 +43,10 @@ define drbd::resource (
+   $group          = 'root',
+   $protocol       = 'C',
+   $verify_alg     = 'crc32c',
+-  $rate           = false,
++  $link_util      = false,
++  $link_speed     = false,
++  $num_parallel   = false,
++  $rtt_ms         = false,
+   $net_parameters = false,
+   $manage         = true,
+   $ha_primary     = false,
+@@ -47,6 +54,7 @@ define drbd::resource (
+   $fs_type        = 'ext4',
+   $mkfs_opts      = '',
+   $disk           = undef,
++  $handlers       = false,
+ ) {
+   include ::drbd
+@@ -67,6 +75,75 @@ define drbd::resource (
+     group  => $group,
+   }
++  if $link_util and $link_speed and $num_parallel and $rtt_ms {
++    # Engineer drbd variable sync rate parameters based on the following:
++    #  https://blogs.linbit.com/p/128/drbd-sync-rate-controller/
++    #  https://blogs.linbit.com/p/443/drbd-sync-rate-controller-2/
++    # Methodology adapted to account for replication link speed and parallelism.
++
++    # Since there is no aggregate bandwidth control, prorate the drbd
++    # replication bandwidth based on parallelism.
++    # Based on experimentation, it seems generally better to set num_parallel
++    # to 1 and let DRBD auto-regulate its throughput.  The end result is that
++    # multiple competing filesystems (i.e., on same disk device) already have
++    # their sync throughput reduced.
++    $mbps = $link_speed / $num_parallel
++
++    # bandwidth delay product
++    $bdp_k = $mbps * $rtt_ms
++
++    # engineer initial sync rate as percent of link bandwidth
++    $rate_M = floor($link_util * $mbps / 8 / 100)
++    $rate = "${rate_M}M"
++
++    # engineer c_plan_ahead to default value (tenths)
++    # Documentation indicates this value OK even for 200 ms RTT.
++    $c_plan_ahead = 20
++
++    # engineer c_fill_target as 1*BDP (tune within 1x to 3x BDP;
++    # choose minimum value that saturates bandwidth)
++    $fill_target_k = floor(1 * $bdp_k)
++    $c_fill_target = "${fill_target_k}k"
++
++    # engineer c_min_rate -- experimentally determined so DRBD is not
++    # throttled to a crawl even when there is minimal application IO.
++    # DRBD default is way too small.
++    $min_rate_M = 15 + floor($link_util * $mbps / 8 / 100 / 25)
++    $c_min_rate = "${min_rate_M}M"
++
++    # engineer c_max_rate as percent of link bandwidth
++    $max_rate_M = floor($link_util * $mbps / 8 / 100)
++    $c_max_rate = "${max_rate_M}M"
++
++    # various tuning settings to enable larger link bandwidth (eg, 10G)
++    # max_buffers should scale with MBps; set to maximum settable
++    $max_buffers = 20000
++    $max_epoch_size = 20000
++    $unplug_watermark = 16
++    # sndbuf_size and rcvbuf_size should scale with mbps; set 0 to auto-tune
++    $sndbuf_size = 0
++    $rcvbuf_size = 0
++    # increase al_extents to improve random write throughput; set to prime number
++    $al_extents = 3389
++  } else {
++    # disable variable sync rate
++    $c_plan_ahead  = 0
++    $c_fill_target = false
++    $c_min_rate    = false
++    $c_max_rate    = false
++
++    # engineer fixed sync rate at 40 percent of 1G
++    $rate_M = floor(40 * 1000 / 8 / 100)
++    $rate = "${rate_M}M"
++
++    $max_buffers = false
++    $max_epoch_size = false
++    $unplug_watermark = false
++    $sndbuf_size = false
++    $rcvbuf_size = false
++    $al_extents = false
++  }
++
+   concat { "/etc/drbd.d/${name}.res":
+     mode    => '0600',
+     require => [
+@@ -94,13 +171,13 @@ define drbd::resource (
+   }
+   # Export our fragment for the clustered node
+   if $ha_primary and $cluster {
+-    @@concat::fragment { "${name} ${cluster} primary resource":
++    concat::fragment { "${name} ${cluster} primary resource":
+       target  => "/etc/drbd.d/${name}.res",
+       content => template('drbd/resource.res.erb'),
+       order   => '10',
+     }
+   } elsif $cluster {
+-    @@concat::fragment { "${name} ${cluster} secondary resource":
++    concat::fragment { "${name} ${cluster} secondary resource":
+       target  => "/etc/drbd.d/${name}.res",
+       content => template('drbd/resource.res.erb'),
+       order   => '20',
+@@ -137,11 +214,11 @@ define drbd::resource (
+     order   => '99',
+   }
+-  if $cluster {
+-    # Import cluster nodes
+-    Concat::Fragment <<| title == "${name} ${cluster} primary resource" |>>
+-    Concat::Fragment <<| title == "${name} ${cluster} secondary resource" |>>
+-  }
++#  if $cluster {
++#    # Import cluster nodes
++#    Concat::Fragment <<| title == "${name} ${cluster} primary resource" |>>
++#    Concat::Fragment <<| title == "${name} ${cluster} secondary resource" |>>
++#  }
+   # Due to a bug in puppet, defined() conditionals must be in a defined
+   # resource to be evaluated *after* the collector instead of before.
+diff --git a/manifests/resource/up.pp b/manifests/resource/up.pp
+index 7668792..b626f55 100644
+--- a/manifests/resource/up.pp
++++ b/manifests/resource/up.pp
+@@ -70,7 +70,7 @@ define drbd::resource::up (
+       # ensure that the device is mounted
+       mount { $mountpoint:
+         ensure  => mounted,
+-        atboot  => false,
++        atboot  => yes,
+         device  => $device,
+         fstype  => 'auto',
+         options => 'defaults,noauto',
+diff --git a/manifests/service.pp b/manifests/service.pp
+index de56b34..f9b217a 100644
+--- a/manifests/service.pp
++++ b/manifests/service.pp
+@@ -1,6 +1,6 @@
+ class drbd::service {
+   @service { 'drbd':
+-    ensure  => running,
++    ensure  => $drbd::service_ensure,
+     enable  => $drbd::service_enable,
+     require => Package['drbd'],
+     restart => 'service drbd reload',
+diff --git a/templates/header.res.erb b/templates/header.res.erb
+index 2d785c4..a3256a3 100644
+--- a/templates/header.res.erb
++++ b/templates/header.res.erb
+@@ -5,7 +5,32 @@ resource <%= @name %> {
+   disk      <%= @disk %>;
+   meta-disk internal;
++  disk {
++<% if @rate -%>
++    resync-rate <%= @rate %>;
++<% end -%>
++<% if @c_plan_ahead -%>
++    c-plan-ahead <%= @c_plan_ahead %>;
++<% end -%>
++<% if @c_fill_target -%>
++    c-fill-target <%= @c_fill_target %>;
++<% end -%>
++<% if @c_min_rate -%>
++    c-min-rate <%= @c_min_rate %>;
++<% end -%>
++<% if @c_max_rate -%>
++    c-max-rate <%= @c_max_rate %>;
++<% end -%>
++<% if @al_extents -%>
++    al-extents <%= @al_extents %>;
++<% end -%>
++  }
++
+   net {
++    after-sb-0pri discard-zero-changes;
++    after-sb-1pri discard-secondary;
++    after-sb-2pri disconnect;
++
+     cram-hmac-alg sha1;
+ <% if @secret -%>
+     shared-secret "<%= @secret %>";
+@@ -16,12 +41,32 @@ resource <%= @name %> {
+     <%= k %> <%= v %>;
+ <% end -%>
+ <% end -%>
+-  }
+-  syncer {
++<% if @max_buffers -%>
++    max-buffers <%= @max_buffers %>;
++<% end -%>
++<% if @max_epoch_size -%>
++    max-epoch-size <%= @max_epoch_size %>;
++<% end -%>
++<% if @unplug_watermark -%>
++    unplug-watermark <%= @unplug_watermark %>;
++<% end -%>
++<% if @sndbuf_size -%>
++    sndbuf-size <%= @sndbuf_size %>;
++<% end -%>
++<% if @rcvbuf_size -%>
++    rcvbuf-size <%= @rcvbuf_size %>;
++<% end -%>
++<% if @verify_alg -%>
+     verify-alg <%= @verify_alg %>;
+-<% if @rate -%>
+-    rate <%= @rate %>;
+ <% end -%>
+   }
++<% if @handlers -%>
++  handlers {
++<% @handlers.sort_by {|k, v| k}.each do |k, v| -%>
++    <%= k %> "<%= v %>";
++<% end -%>
++  }
++<% end -%>
++
+diff --git a/templates/primary-resource.res.erb b/templates/primary-resource.res.erb
+index f8af77e..6032fd2 100644
+--- a/templates/primary-resource.res.erb
++++ b/templates/primary-resource.res.erb
+@@ -1,3 +1,3 @@
+   on <%= @host1 %> {
+-    address <%= @ip1 %>:<%= @port %>;
++    address <%= IPAddr.new(@ip1).ipv6?() ? "ipv6 ["+@ip1+"]:"+@port : "ipv4 "+@ip1+":"+@port %>;
+   }
+diff --git a/templates/primary-stacked-resource.res.erb b/templates/primary-stacked-resource.res.erb
+index 7eb4dad..a22d8b3 100644
+--- a/templates/primary-stacked-resource.res.erb
++++ b/templates/primary-stacked-resource.res.erb
+@@ -1,3 +1,3 @@
+   stacked-on-top-of <%= @res1 %> {
+-    address <%= @ip1 %>:<%= @port %>;
++    address <%= IPAddr.new(ip1).ipv6?() ? "ipv6 ["+ip1+"]:"+port : "ipv4 "+ip1+":"+port %>;
+   }
+diff --git a/templates/resource.res.erb b/templates/resource.res.erb
+index 047877e..9dd4c4d 100644
+--- a/templates/resource.res.erb
++++ b/templates/resource.res.erb
+@@ -1,3 +1,3 @@
+   on <%= @hostname %> {
+-    address <%= @ipaddress %>:<%= @port %>;
++    address <%= IPAddr.new(ipaddress).ipv6?() ? "ipv6 ["+ipaddress+"]:"+@port : "ipv4 "+ipaddress+":"+port %>;
+   }
+diff --git a/templates/secondary-resource.res.erb b/templates/secondary-resource.res.erb
+index 678640a..cf2fd96 100644
+--- a/templates/secondary-resource.res.erb
++++ b/templates/secondary-resource.res.erb
+@@ -1,3 +1,3 @@
+   on <%= @host2 %> {
+-    address <%= @ip2 %>:<%= @port %>;
++    address <%= IPAddr.new(@ip2).ipv6?() ? "ipv6 ["+@ip2+"]:"+@port : "ipv4 "+@ip2+":"+@port %>;
+   }
+diff --git a/templates/secondary-stacked-resource.res.erb b/templates/secondary-stacked-resource.res.erb
+index 409a705..87d28f5 100644
+--- a/templates/secondary-stacked-resource.res.erb
++++ b/templates/secondary-stacked-resource.res.erb
+@@ -1,3 +1,3 @@
+   stacked-on-top-of <%= @res2 %> {
+-    address <%= @ip2 %>:<%= @port %>;
++    address <%= IPAddr.new(ip2).ipv6?() ? "ipv6 ["+ip2+"]:"+port : "ipv4 "+ip2+":"+port %>;
+   }
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0002-Disable-timeout-for-mkfs-command.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0002-Disable-timeout-for-mkfs-command.patch
new file mode 100644 (file)
index 0000000..e578dbe
--- /dev/null
@@ -0,0 +1,24 @@
+From 0c36ecaef39328e85f41ebe8164dc7da5949542a Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Tue, 11 Apr 2017 11:14:25 -0400
+Subject: [PATCH] Disable timeout for mkfs command
+
+---
+ manifests/resource/up.pp | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/manifests/resource/up.pp b/manifests/resource/up.pp
+index b626f55..f9de8ab 100644
+--- a/manifests/resource/up.pp
++++ b/manifests/resource/up.pp
+@@ -54,6 +54,7 @@ define drbd::resource::up (
+       }
+       exec { "drbd_format_volume_${name}":
+         command     => "mkfs.${fs_type} ${mkfs_opts} ${device}",
++        timeout     => 0,
+         refreshonly => true,
+         require     => Exec["drbd_make_primary_${name}"],
+         before      => $before,
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0003-drbd-parallel-to-serial-synchronization.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0003-drbd-parallel-to-serial-synchronization.patch
new file mode 100644 (file)
index 0000000..49ad45a
--- /dev/null
@@ -0,0 +1,39 @@
+From a1186e3f68a338c575acdcf5cf41728a1b9ba2c1 Mon Sep 17 00:00:00 2001
+From: Angie Wang <angie.Wang@windriver.com>
+Date: Mon, 29 May 2017 10:20:13 -0400
+Subject: [PATCH 1/1] drbd-parallel-to-serial-synchronization
+
+---
+ manifests/resource.pp    | 1 +
+ templates/header.res.erb | 3 +++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/manifests/resource.pp b/manifests/resource.pp
+index 10edc1a..d19ad8b 100644
+--- a/manifests/resource.pp
++++ b/manifests/resource.pp
+@@ -47,6 +47,7 @@ define drbd::resource (
+   $link_speed     = false,
+   $num_parallel   = false,
+   $rtt_ms         = false,
++  $resync_after   = undef,
+   $net_parameters = false,
+   $manage         = true,
+   $ha_primary     = false,
+diff --git a/templates/header.res.erb b/templates/header.res.erb
+index a3256a3..be53761 100644
+--- a/templates/header.res.erb
++++ b/templates/header.res.erb
+@@ -9,6 +9,9 @@ resource <%= @name %> {
+ <% if @rate -%>
+     resync-rate <%= @rate %>;
+ <% end -%>
++<% if @resync_after -%>
++    resync-after <%= @resync_after %>;
++<% end -%>
+ <% if @c_plan_ahead -%>
+     c-plan-ahead <%= @c_plan_ahead %>;
+ <% end -%>
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0004-US-96914-reuse-existing-drbd-cinder-resource.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0004-US-96914-reuse-existing-drbd-cinder-resource.patch
new file mode 100644 (file)
index 0000000..017387d
--- /dev/null
@@ -0,0 +1,53 @@
+From 132fc324c633ee95ca9ac8d00fb27fe5c4df6a3a Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Tue, 30 May 2017 21:52:52 +0000
+Subject: [PATCH] US-96914 reuse existing drbd-cinder resource
+
+Trying to initialize and enable DRBD resource fails in "drbdadm
+create-md" when the disk already contains meta data. In this case
+"drbdadm adjust" should be called.
+---
+ manifests/resource/up.pp | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/manifests/resource/up.pp b/manifests/resource/up.pp
+index f9de8ab..160c8c2 100644
+--- a/manifests/resource/up.pp
++++ b/manifests/resource/up.pp
+@@ -14,7 +14,7 @@ define drbd::resource::up (
+   exec { "initialize DRBD metadata for ${name}":
+     command => "yes yes | drbdadm create-md ${name}",
+     onlyif  => "test -e ${disk}",
+-    unless  => "drbdadm dump-md ${name} || (drbdadm cstate ${name} | egrep -q '^(Sync|Connected|WFConnection|StandAlone|Verify)')",
++    unless  => "drbdadm dump-md ${name} || (drbdadm cstate ${name} | egrep -q '^(Sync|Connected|WFConnection|StandAlone|Verify)') || (drbdadm show-gi ${name} | grep 'meta-data: need apply-al')",
+     before  => Service['drbd'],
+     require => [
+       Exec['modprobe drbd'],
+@@ -26,6 +26,7 @@ define drbd::resource::up (
+   exec { "enable DRBD resource ${name}":
+     command => "drbdadm up ${name}",
+     onlyif  => "drbdadm dstate ${name} | egrep -q '^(Diskless/|Unconfigured|Consistent)'",
++    unless  => "drbdadm show-gi ${name} | grep 'meta-data: need apply-al'",
+     before  => Service['drbd'],
+     require => [
+       Exec["initialize DRBD metadata for ${name}"],
+@@ -34,6 +35,16 @@ define drbd::resource::up (
+     notify  => Service['drbd'],
+   }
++  exec { "reuse existing DRBD resoure ${name}":
++    command => "drbdadm adjust ${name}",
++    onlyif  => "test -e ${disk} && (drbdadm show-gi ${name} | grep 'meta-data: need apply-al')",
++    before  => Service['drbd'],
++    require => [
++      Exec['modprobe drbd'],
++      Concat["/etc/drbd.d/${name}.res"],
++    ],
++    notify  => Service['drbd'],
++  }
+   # these resources should only be applied if we are configuring the
+   # primary node in our HA setup
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0005-Add-PausedSync-states-to-acceptable-cstate.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0005-Add-PausedSync-states-to-acceptable-cstate.patch
new file mode 100644 (file)
index 0000000..453d46a
--- /dev/null
@@ -0,0 +1,26 @@
+From b575f4c50e8726c5f9b3227b37a4517c0bbde85c Mon Sep 17 00:00:00 2001
+From: Robert Church <robert.church@windriver.com>
+Date: Fri, 2 Jun 2017 02:15:19 +0000
+Subject: [PATCH] Add PausedSync states to acceptable cstate to avoid metdata
+ creation
+
+---
+ manifests/resource/up.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/resource/up.pp b/manifests/resource/up.pp
+index 160c8c2..3e2fdac 100644
+--- a/manifests/resource/up.pp
++++ b/manifests/resource/up.pp
+@@ -14,7 +14,7 @@ define drbd::resource::up (
+   exec { "initialize DRBD metadata for ${name}":
+     command => "yes yes | drbdadm create-md ${name}",
+     onlyif  => "test -e ${disk}",
+-    unless  => "drbdadm dump-md ${name} || (drbdadm cstate ${name} | egrep -q '^(Sync|Connected|WFConnection|StandAlone|Verify)') || (drbdadm show-gi ${name} | grep 'meta-data: need apply-al')",
++    unless  => "drbdadm dump-md ${name} || (drbdadm cstate ${name} | egrep -q '^(PausedSync|Sync|Connected|WFConnection|StandAlone|Verify)') || (drbdadm show-gi ${name} | grep 'meta-data: need apply-al')",
+     before  => Service['drbd'],
+     require => [
+       Exec['modprobe drbd'],
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0006-CGTS-7164-Add-resource-options-cpu-mask-to-affine-drbd-kernel-threads.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0006-CGTS-7164-Add-resource-options-cpu-mask-to-affine-drbd-kernel-threads.patch
new file mode 100644 (file)
index 0000000..5c6aec1
--- /dev/null
@@ -0,0 +1,68 @@
+From 0e264e7ac2b311aa9b42b183660a07b7e4e36b11 Mon Sep 17 00:00:00 2001
+From: Jim Gauld <james.gauld@windriver.com>
+Date: Fri, 9 Jun 2017 14:58:23 -0400
+Subject: [PATCH 1/1] CGTS-7164: Add resource options cpu-mask to affine drbd
+ kernel threads
+
+This adds "options { cpu-mask <cpumask>; }" section to DRBD resource
+configuration if 'cpumask' hexstring is defined. This governs kernel
+threads: drbd_w_<x>, drbd_r_<x>, drbd_a_<x>.
+
+Related notes:
+- if cpumask is not specified, the kernel threads drbd_w_<x>, drbd_r_<x>,
+  drbd_a_<x>, and drbd_as_<x> are affined to individual cores, each <x>
+  on a different core.
+
+- the remainder of the kernel threads are governed by kernel boot
+  argument kthread_cpus=<cpulist>.  i.e., drbd-reissue, drbd<x>_submit,
+  jbd2/drbd<x>-8, drbd_as_<x>.
+
+- the drbd_a_<x> and drbd_as_<x> show up when DRBD is duplex.
+
+- the drbd_a_<x> threads have SCHED_RR scheduling policy.
+---
+ manifests/resource.pp    | 3 +++
+ templates/header.res.erb | 6 ++++++
+ 2 files changed, 9 insertions(+)
+
+diff --git a/manifests/resource.pp b/manifests/resource.pp
+index d19ad8b..17e6142 100644
+--- a/manifests/resource.pp
++++ b/manifests/resource.pp
+@@ -26,6 +26,8 @@
+ #  [link_speed] replication link network speed mbps
+ #  [num_parallel] number of parallel drbd filesystems to sync
+ #  [rtt_ms] round-trip-time milliseconds (i.e., ping between replication nodes)
++#  [cpumask] cpu-affinity-mask for DRBD kernel threads (hexidecimal notation).
++#    0 means spread over all CPUs of the machine.
+ define drbd::resource (
+   $host1          = undef,
+   $host2          = undef,
+@@ -48,6 +50,7 @@ define drbd::resource (
+   $num_parallel   = false,
+   $rtt_ms         = false,
+   $resync_after   = undef,
++  $cpumask        = false,
+   $net_parameters = false,
+   $manage         = true,
+   $ha_primary     = false,
+diff --git a/templates/header.res.erb b/templates/header.res.erb
+index be53761..df52544 100644
+--- a/templates/header.res.erb
++++ b/templates/header.res.erb
+@@ -29,6 +29,12 @@ resource <%= @name %> {
+ <% end -%>
+   }
++<% if @cpumask -%>
++  options {
++    cpu-mask <%= @cpumask %>;
++  }
++<% end -%>
++
+   net {
+     after-sb-0pri discard-zero-changes;
+     after-sb-1pri discard-secondary;
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0007-Add-disk-by-path-test.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0007-Add-disk-by-path-test.patch
new file mode 100644 (file)
index 0000000..1eb12f5
--- /dev/null
@@ -0,0 +1,51 @@
+From 30ae8c86d9471980a0058823d6593e7548e19506 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Thu, 15 Jun 2017 17:34:30 -0400
+Subject: [PATCH] Add disk by-path test
+
+---
+ manifests/resource/up.pp | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/manifests/resource/up.pp b/manifests/resource/up.pp
+index 3e2fdac..ea379a8 100644
+--- a/manifests/resource/up.pp
++++ b/manifests/resource/up.pp
+@@ -8,6 +8,17 @@ define drbd::resource::up (
+   $mountpoint,
+   $automount,
+ ) {
++
++  # Ensure disk by-path link exists
++  exec { "test disk by-path for ${name}":
++    command => "udevadm settle",
++    unless => "test -e ${disk}",
++    before => Service['drbd'],
++    require => [
++        Exec['modprobe drbd']
++      ],
++  }
++
+   # create metadata on device, except if resource seems already initalized.
+   # drbd is very tenacious about asking for aproval if there is data on the
+   # volume already.
+@@ -18,6 +29,7 @@ define drbd::resource::up (
+     before  => Service['drbd'],
+     require => [
+       Exec['modprobe drbd'],
++      Exec["test disk by-path for ${name}"],
+       Concat["/etc/drbd.d/${name}.res"],
+       ],
+     notify  => Service['drbd'],
+@@ -35,7 +47,7 @@ define drbd::resource::up (
+     notify  => Service['drbd'],
+   }
+-  exec { "reuse existing DRBD resoure ${name}":
++  exec { "reuse existing DRBD resource ${name}":
+     command => "drbdadm adjust ${name}",
+     onlyif  => "test -e ${disk} && (drbdadm show-gi ${name} | grep 'meta-data: need apply-al')",
+     before  => Service['drbd'],
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0008-CGTS-7953-support-for-new-drbd-resources.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0008-CGTS-7953-support-for-new-drbd-resources.patch
new file mode 100644 (file)
index 0000000..8306729
--- /dev/null
@@ -0,0 +1,40 @@
+From a29598365183c10e4650088675a6e3181b340187 Mon Sep 17 00:00:00 2001
+From: Kristine Bujold <kristine.bujold@windriver.com>
+Date: Wed, 17 Jan 2018 18:18:15 -0500
+Subject: [PATCH 1/1] foo bar
+
+---
+ manifests/init.pp                | 2 ++
+ templates/global_common.conf.erb | 5 +++++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 76ce9c9..5e6bdc0 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -8,6 +8,8 @@ class drbd(
+   $service_enable = true,
+   $service_ensure = 'running',
+   $package_name = 'drbd-utils',
++  $wfc_timeout = 0,
++  $degr_wfc_timeout = 0,
+ ) {
+   include ::drbd::service
+diff --git a/templates/global_common.conf.erb b/templates/global_common.conf.erb
+index 921a637..0253ef3 100644
+--- a/templates/global_common.conf.erb
++++ b/templates/global_common.conf.erb
+@@ -3,4 +3,9 @@ global {
+ }
+ common {
+   protocol C;
++
++  startup {
++    wfc-timeout <%= @wfc_timeout %>;
++    degr-wfc-timeout <%= @degr_wfc_timeout %>;
++  }
+ }
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/0009-drbd-slow-before-swact.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/0009-drbd-slow-before-swact.patch
new file mode 100644 (file)
index 0000000..f037d29
--- /dev/null
@@ -0,0 +1,25 @@
+From 2628193e8aef471caab27ada848fa8d7de6d93ec Mon Sep 17 00:00:00 2001
+From: Daniel Badea <daniel.badea@windriver.com>
+Date: Wed, 13 Jun 2018 14:16:53 +0000
+Subject: [PATCH] drbd slow before swact
+
+---
+ manifests/resource/up.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/resource/up.pp b/manifests/resource/up.pp
+index c3557e1..11cf7ee 100644
+--- a/manifests/resource/up.pp
++++ b/manifests/resource/up.pp
+@@ -23,7 +23,7 @@ define drbd::resource::up (
+   # drbd is very tenacious about asking for aproval if there is data on the
+   # volume already.
+   exec { "initialize DRBD metadata for ${name}":
+-    command => "yes yes | drbdadm create-md ${name}",
++    command => "yes yes | drbdadm create-md ${name} -W--peer-max-bio-size=128k",
+     onlyif  => "test -e ${disk}",
+     unless  => "drbdadm dump-md ${name} || (drbdadm cstate ${name} | egrep -q '^(PausedSync|Sync|Connected|WFConnection|StandAlone|Verify)') || (drbdadm show-gi ${name} | grep 'meta-data: need apply-al')",
+     before  => Service['drbd'],
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-drbd/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-drbd/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..19d973b
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppet-drbd.gemspec      2019-10-30 21:51:28.271880621 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-drbd'
++  s.version     = '0.3.1'
++  s.date        = '2016-12-30'
++  s.summary     = "DRBD Module"
++  s.description = s.summary
++  s.authors     = ["Vox Pupuli"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/voxpupuli/puppet-drbd'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-etcd/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-etcd/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..91b4982
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-etcd.gemspec      2019-10-28 10:02:42.678631063 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-etcd'
++  s.version     = '1.11.0'
++  s.date        = '2016-08-21'
++  s.summary     = "Installs and configures etcd"
++  s.description = s.summary
++  s.authors     = ["Cristian Falcas"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/cristifalcas/puppet-etcd'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-etcd/puppet-etcd-changes-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-etcd/puppet-etcd-changes-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..0c3d2fe
--- /dev/null
@@ -0,0 +1,27 @@
+diff -ru a/manifests/config.pp b/manifests/config.pp
+--- a/manifests/config.pp      2020-03-05 15:24:05.754122863 +0800
++++ b/manifests/config.pp      2020-03-05 16:45:19.090255672 +0800
+@@ -14,6 +14,7 @@
+     content => template("${module_name}/etc/etcd/etcd.yml.erb"),
+   }
++  # only available for Redhat??
+   if $::etcd::manage_package and $::etcd::journald_forward_enable and $::operatingsystemmajrelease == '7' {
+     file { '/etc/systemd/system/etcd.service.d':
+       ensure => 'directory',
+diff -ru a/manifests/params.pp b/manifests/params.pp
+--- a/manifests/params.pp      2020-03-05 15:24:05.754122863 +0800
++++ b/manifests/params.pp      2020-03-05 16:15:42.434207254 +0800
+@@ -17,7 +17,11 @@
+       }
+     }
+     'Debian' : {
+-      $config_file_path = '/etc/default/etcd.conf'
++      if ($::operatingsystem == 'poky-stx') {
++        $config_file_path = '/etc/etcd/etcd.conf'
++      } else {
++        $config_file_path = '/etc/default/etcd.conf'
++      }
+     }
+     default  : {
+       fail('Unsupported OS.')
diff --git a/meta-stx/recipes-support/puppet/files/puppet-filemapper/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-filemapper/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..1829722
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/adrien-filemapper.gemspec        2019-10-31 11:12:27.233178439 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'adrien-filemapper'
++  s.version     = '1.1.3'
++  s.date        = '2014-09-13'
++  s.summary     = "Puppet provider file manipulation extension"
++  s.description = s.summary
++  s.authors     = ["Adrien Thebo <adrien@somethingsinistral.net>"]
++  s.email       = ''
++  s.files       = %w(Modulefile README.markdown ) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/adrienthebo/puppet-filemapper'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-filemapper/metadata.json.patch b/meta-stx/recipes-support/puppet/files/puppet-filemapper/metadata.json.patch
new file mode 100644 (file)
index 0000000..97dc8aa
--- /dev/null
@@ -0,0 +1,14 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/metadata.json    2019-10-31 13:24:24.780569778 -0700
+@@ -0,0 +1,11 @@
++{
++  "name": "adrien-filemapper",
++  "version": "1.1.3",
++  "author": "Adrien Tehbo <adrien@somethingsinistral.net>",
++  "summary": "Puppet provider file manipulation extension",
++  "license": "Apache-2.0",
++  "source": "https://github.com/adrienthebo/puppet-filemapper",
++  "project_page": "https://github.com/adrienthebo/puppet-filemapper",
++  "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
++  "dependencies": [ ]
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppet-horizon/0001-Update-memcached-dependency.patch b/meta-stx/recipes-support/puppet/files/puppet-horizon/0001-Update-memcached-dependency.patch
new file mode 100644 (file)
index 0000000..846449d
--- /dev/null
@@ -0,0 +1,32 @@
+From f5a7c8b73de90a50c66d60824dea8b85d1acf15c Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Mon, 28 Oct 2019 14:47:58 -0700
+Subject: [PATCH] Update memcached dependency
+
+---
+ metadata.json | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/metadata.json b/metadata.json
+index 27843d7..d48c0c2 100644
+--- a/metadata.json
++++ b/metadata.json
+@@ -11,7 +11,7 @@
+         },
+         {
+             "name": "saz/memcached",
+-            "version_requirement": ">=2.0.2 <3.0.0"
++            "version_requirement": ">=2.0.2 <=3.0.2"
+         }
+     ],
+     "description": "Installs and configures OpenStack Horizon (Dashboard).",
+@@ -58,4 +58,4 @@
+     "source": "git://github.com/openstack/puppet-horizon.git",
+     "summary": "Puppet module for OpenStack Horizon",
+     "version": "11.5.0"
+-}
+\ No newline at end of file
++}
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-horizon/0002-puppet-horizon-changes-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-horizon/0002-puppet-horizon-changes-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..e202c3a
--- /dev/null
@@ -0,0 +1,34 @@
+diff -ru a/manifests/params.pp b/manifests/params.pp
+--- a/manifests/params.pp      2020-04-13 18:34:56.702263928 +0800
++++ b/manifests/params.pp      2020-04-13 18:36:23.158266284 +0800
+@@ -32,17 +32,27 @@
+       $static_path                 = '/var/lib'
+       $apache_user                 = 'www-data'
+       $apache_group                = 'www-data'
+-      $wsgi_user                   = 'horizon'
+-      $wsgi_group                  = 'horizon'
+-      $memcache_package            = 'python-memcache'
+       case $::os_package_type {
+         'debian': {
+             $package_name          = 'openstack-dashboard-apache'
+             $httpd_config_file     = '/etc/apache2/sites-available/openstack-dashboard-alias-only.conf'
++            $memcache_package      = 'python-memcache'
++            $wsgi_user             = 'horizon'
++            $wsgi_group            = 'horizon'
++        }
++        'poky': {
++            $package_name          = 'python-django-horizon'
++            $httpd_config_file     = '/etc/httpd/conf.d/openstack-dashboard.conf'
++            $memcache_package      = 'python-memcached'
++            $wsgi_user             = 'apache'
++            $wsgi_group            = 'apache'
+         }
+         default: {
+             $package_name          = 'openstack-dashboard'
+             $httpd_config_file     = '/etc/apache2/conf-available/openstack-dashboard.conf'
++            $memcache_package      = 'python-memcache'
++            $wsgi_user             = 'horizon'
++            $wsgi_group            = 'horizon'
+         }
+       }
+     }
diff --git a/meta-stx/recipes-support/puppet/files/puppet-horizon/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-horizon/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..7b1c7d6
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-horizon.gemspec   2019-10-26 22:29:30.802030276 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-horizon'
++  s.version     = '11.5.0'
++  s.date        = '2019-09-24'
++  s.summary     = "Puppet horizon module"
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/openstack/puppet-horizon'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0001-pike-rebase-squash-titanium-patches.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0001-pike-rebase-squash-titanium-patches.patch
new file mode 100644 (file)
index 0000000..69858ac
--- /dev/null
@@ -0,0 +1,440 @@
+From 2c51d6f4ccf4a473089c08857262e769f7a8fc3a Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Thu, 7 Mar 2019 13:43:40 -0600
+Subject: [PATCH 1/4] WRS: Patch1:
+ 0001-pike-rebase-squash-titanium-patches.patch
+
+---
+ lib/puppet/provider/keystone.rb                   | 79 ++++++++++++++++++++++-
+ manifests/db/sync.pp                              |  3 +
+ manifests/init.pp                                 | 76 +++++++++++++++++-----
+ manifests/ldap.pp                                 |  7 ++
+ manifests/logging.pp                              |  2 +-
+ manifests/resource/service_identity.pp            |  7 ++
+ manifests/security_compliance.pp                  | 45 +++++++++++++
+ spec/classes/keystone_security_compliance_spec.rb | 19 ++++++
+ 8 files changed, 220 insertions(+), 18 deletions(-)
+ create mode 100644 manifests/security_compliance.pp
+ create mode 100644 spec/classes/keystone_security_compliance_spec.rb
+
+diff --git a/lib/puppet/provider/keystone.rb b/lib/puppet/provider/keystone.rb
+index 3841418..0857ac1 100644
+--- a/lib/puppet/provider/keystone.rb
++++ b/lib/puppet/provider/keystone.rb
+@@ -3,6 +3,7 @@ require 'puppet/provider/openstack'
+ require 'puppet/provider/openstack/auth'
+ require 'puppet/provider/openstack/credentials'
+ require File.join(File.dirname(__FILE__), '..','..', 'puppet/provider/keystone/util')
++require 'hiera_puppet'
+ class Puppet::Provider::Keystone < Puppet::Provider::Openstack
+@@ -230,12 +231,88 @@ class Puppet::Provider::Keystone < Puppet::Provider::Openstack
+     end
+   end
++  ### WRS Modifications (Start) ###
++
++  def self.hiera_lookup(key)
++    HieraPuppet.lookup(key, :undef, self, nil, :priority)
++  end
++
++  def self.initial_config_primary?
++    return true if ENV['INITIAL_CONFIG_PRIMARY'] == "true"
++  end
++
++
++  def self.upgrading?
++    return true if hiera_lookup('platform::params::controller_upgrade') == true 
++  end
++
+   def self.request(service, action, properties=nil, options={})
+     super
+   rescue Puppet::Error::OpenstackAuthInputError, Puppet::Error::OpenstackUnauthorizedError => error
+-    request_by_service_token(service, action, error, properties, options=options)
++    if initial_config_primary?
++      # admin user account might not have been created
++      request_by_service_token(service, action, error, properties)
++    else
++      if upgrading?
++        # when running the Keystone manifest during an upgrade
++        # (on controller-1), we need to use an AUTH token and
++        # a bypass URL since using the default AUTL URL will
++        # send the Request to the service catalog URL (internalURL),
++        # running on the non-upgraded controller-0 which cannot
++        # service this request
++        request_by_upgrading_token(service, action, error, properties)
++      else
++        request_by_admin_credential(service, action, error, properties)
++      end
++    end
+   end
++  def self.request_by_admin_credential(service, action, error, properties=nil)
++    properties ||= []
++    @credentials.username = hiera_lookup('platform::client::params::admin_username')
++    @credentials.password = hiera_lookup('keystone::admin_password')
++    @credentials.project_name = 'admin'
++    @credentials.auth_url = service_url
++    @credentials.identity_api_version = @credentials.version
++    if @credentials.version == '3'
++      @credentials.user_domain_name = hiera_lookup('platform::client::params::admin_user_domain')
++      @credentials.project_domain_name = hiera_lookup('platform::client::params::admin_project_domain')
++    end
++    raise error unless @credentials.set?
++    Puppet::Provider::Openstack.request(service, action, properties, @credentials)
++  end
++
++  def self.get_upgrade_token
++    upgrade_token_file = hiera_lookup('openstack::keystone::upgrade::upgrade_token_file')
++    # the upgrade token file may get refreshed by the same Puppet event
++    # that triggered this call, and therefore may not be available 
++    # immediately. Try for timeout before quitting with error
++    timeout = 10 # 10 seconds
++    1.upto(timeout) do |iter|
++      if File.exists?(upgrade_token_file)
++        upgrade_token = File.read(upgrade_token_file).strip
++        notice("Found #{upgrade_token_file} token file and upgrade token #{upgrade_token}.")
++        return upgrade_token
++      else
++        Puppet.debug("#{upgrade_token_file} not found. Retrying for #{iter} more seconds.")
++        sleep(1)
++      end
++    end
++    raise(Puppet::ExecutionFailure, "Can't retrieve #{upgrade_token_file} in #{timeout}s retry attempts.")
++  end
++
++
++  def self.request_by_upgrading_token(service, action, error, properties=nil, options={})
++    properties ||= []
++    @credentials.token = get_upgrade_token
++    @credentials.url   = hiera_lookup('openstack::keystone::upgrade::url')
++    raise error unless @credentials.service_token_set?
++    Puppet::Provider::Openstack.request(service, action, properties, @credentials, options)
++  end
++
++  ### WRS Additions (End) ###
++
++
+   def self.request_by_service_token(service, action, error, properties=nil, options={})
+     properties ||= []
+     @credentials.token = admin_token
+diff --git a/manifests/db/sync.pp b/manifests/db/sync.pp
+index cee869b..cea217c 100644
+--- a/manifests/db/sync.pp
++++ b/manifests/db/sync.pp
+@@ -36,5 +36,8 @@ class keystone::db::sync(
+     ],
+     notify      => Anchor['keystone::dbsync::end'],
+     tag         => 'keystone-exec',
++    # Only do the db sync if both controllers are running the same software
++    # version. Avoids impacting mate controller during an upgrade.
++    onlyif      => "test $::controller_sw_versions_match = true",
+   }
+ }
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 2adc685..4d79d30 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -28,6 +28,15 @@
+ #   The admin_token has been deprecated by the Keystone service and this
+ #   will be deprecated in a future changeset. Required.
+ #
++# [*upgrade_token_cmd*]
++#   (optional) WRS - if we are in an upgrade scenario, an upgrade token
++#   will be required to bypass authentication.
++#   Defaults to undef
++# 
++# [*upgrade_token_file*]
++#   (optional) WRS - the file where the upgrade token will be stowed
++#   Defaults to undef
++#
+ # [*admin_password*]
+ #   Keystone password for the admin user. This is not the admin_token.
+ #   This is the password that the admin user signs into keystone with.
+@@ -663,6 +672,8 @@
+ #
+ class keystone(
+   $admin_token,
++  $upgrade_token_cmd                    = undef,
++  $upgrade_token_file                   = undef,
+   $admin_password                       = undef,
+   $package_ensure                       = 'present',
+   $client_package_ensure                = 'present',
+@@ -857,10 +868,13 @@ admin_token will be removed in a later release")
+   keystone_config {
+     'DEFAULT/admin_token':      value => $admin_token, secret => true;
++    # WRS: the following options are deprecated for removal
++    # however public_bind_host and admin_bind_host are still required as long as
++    # keystone is running under eventlet
+     'DEFAULT/public_bind_host': value => $public_bind_host;
+     'DEFAULT/admin_bind_host':  value => $admin_bind_host;
+-    'DEFAULT/public_port':      value => $public_port;
+-    'DEFAULT/admin_port':       value => $admin_port;
++    #'DEFAULT/public_port':      value => $public_port;
++    #'DEFAULT/admin_port':       value => $admin_port;
+     'DEFAULT/member_role_id':   value => $member_role_id;
+     'DEFAULT/member_role_name': value => $member_role_name;
+     'paste_deploy/config_file': value => $paste_config;
+@@ -897,18 +911,21 @@ admin_token will be removed in a later release")
+   # ssl config
+   if ($enable_ssl) {
+     keystone_config {
+-      'ssl/enable':              value  => true;
++      # WRS ssl/enable is deprecated for removal
++      #'ssl/enable':              value  => true;
+       'ssl/certfile':            value  => $ssl_certfile;
+       'ssl/keyfile':             value  => $ssl_keyfile;
+       'ssl/ca_certs':            value  => $ssl_ca_certs;
+       'ssl/ca_key':              value  => $ssl_ca_key;
+       'ssl/cert_subject':        value  => $ssl_cert_subject;
+     }
+-  } else {
+-    keystone_config {
+-      'ssl/enable':              value  => false;
+-    }
+   }
++  # WRS ssl/enable is deprecated for removal
++  # else {
++  #  keystone_config {
++  #    'ssl/enable':              value  => false;
++  #  }
++  #}
+   if !is_service_default($memcache_servers) or !is_service_default($cache_memcache_servers) {
+     Service<| title == 'memcached' |> -> Anchor['keystone::service::begin']
+@@ -1016,14 +1033,15 @@ Fernet or UUID tokens are recommended.")
+ Fernet or UUID tokens are recommended.")
+   }
+-  keystone_config {
+-    'signing/certfile':     value => $signing_certfile;
+-    'signing/keyfile':      value => $signing_keyfile;
+-    'signing/ca_certs':     value => $signing_ca_certs;
+-    'signing/ca_key':       value => $signing_ca_key;
+-    'signing/cert_subject': value => $signing_cert_subject;
+-    'signing/key_size':     value => $signing_key_size;
+-  }
++  # WRS: the following signing options are deprecated for removal
++  #keystone_config {
++  #  'signing/certfile':     value => $signing_certfile;
++  #  'signing/keyfile':      value => $signing_keyfile;
++  #  'signing/ca_certs':     value => $signing_ca_certs;
++  #  'signing/ca_key':       value => $signing_ca_key;
++  #  'signing/cert_subject': value => $signing_cert_subject;
++  #  'signing/key_size':     value => $signing_key_size;
++  #}
+   # Only do pki_setup if we were asked to do so.  This is needed
+   # regardless of the token provider since token revocation lists
+@@ -1089,6 +1107,9 @@ Fernet or UUID tokens are recommended.")
+     heartbeat_rate              => $rabbit_heartbeat_rate,
+   }
++  # WRS: The following options are deprecated for removal
++  # however they are still required as long as keystone
++  # is running under eventlet
+   keystone_config {
+     'eventlet_server/admin_workers':  value => $admin_workers;
+     'eventlet_server/public_workers': value => $public_workers;
+@@ -1135,7 +1156,8 @@ Fernet or UUID tokens are recommended.")
+         validate     => false,
+       }
+     }
+-    warning("Keystone under Eventlet has been deprecated during the Kilo cycle. \
++    # Drop this to info.
++    info("Keystone under Eventlet has been deprecated during the Kilo cycle. \
+ Support for deploying under eventlet will be dropped as of the M-release of OpenStack.")
+   } elsif $service_name == 'httpd' {
+     include ::apache::params
+@@ -1280,6 +1302,27 @@ running as a standalone service, or httpd for being run by a httpd server")
+     }
+   }
++  # WRS: Now that the keystone service has started,
++  # check if we are in an Upgrade scenario, and generate
++  # an upgrade token which will be used to bypass Keystone
++  # authentication (specifically the service catalog) for
++  # all operations during upgrades.
++  # This operation is similar to the keystone bootstrap
++  # operation (above) which would generate an admin
++  # token, and therefore also requires the database to
++  # be up and running and configured and is only run once,
++  # so we don't need to notify the service
++  if $upgrade_token_cmd and $upgrade_token_file {
++    exec { 'upgrade token issue':
++      command     => "${upgrade_token_cmd} > ${upgrade_token_file}",
++      path        => '/usr/bin',
++      creates     => $upgrade_token_file,
++      subscribe   => Service[$service_name],
++      notify      => Anchor['keystone::service::end'],
++      tag         => 'keystone-exec',
++    }
++  }
++
+   if $using_domain_config {
+     validate_absolute_path($domain_config_directory)
+     # Better than ensure resource.  We don't want to conflict with any
+@@ -1311,4 +1354,5 @@ running as a standalone service, or httpd for being run by a httpd server")
+       {'value' => $domain_config_directory}
+     )
+   }
++
+ }
+diff --git a/manifests/ldap.pp b/manifests/ldap.pp
+index 11620bf..728ca40 100644
+--- a/manifests/ldap.pp
++++ b/manifests/ldap.pp
+@@ -4,6 +4,11 @@
+ #
+ # === parameters:
+ #
++# [*debug_level*]
++#   LDAP debugging level for LDAP calls; a value of zero("0") disables
++#   debugging. (integer value)
++#  Defaults to 'undef'
++#
+ # [*url*]
+ #   URL for connecting to the LDAP server. (string value)
+ #   Defaults to 'undef'
+@@ -384,6 +389,7 @@
+ # Copyright 2012 Puppetlabs Inc, unless otherwise noted.
+ #
+ class keystone::ldap(
++  $debug_level                          = undef,
+   $url                                  = undef,
+   $user                                 = undef,
+   $password                             = undef,
+@@ -494,6 +500,7 @@ class keystone::ldap(
+   }
+   keystone_config {
++    'ldap/debug_level':                          value => $debug_level; 
+     'ldap/url':                                  value => $url;
+     'ldap/user':                                 value => $user;
+     'ldap/password':                             value => $password, secret => true;
+diff --git a/manifests/logging.pp b/manifests/logging.pp
+index e737c4f..3d8df63 100644
+--- a/manifests/logging.pp
++++ b/manifests/logging.pp
+@@ -110,7 +110,7 @@ class keystone::logging(
+   $log_file                      = $::os_service_default,
+   $debug                         = $::os_service_default,
+   $logging_context_format_string = $::os_service_default,
+-  $logging_default_format_string = $::os_service_default,
++  $logging_default_format_string = 'keystone:log %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s',
+   $logging_debug_format_suffix   = $::os_service_default,
+   $logging_exception_prefix      = $::os_service_default,
+   $logging_user_identity_format  = $::os_service_default,
+diff --git a/manifests/resource/service_identity.pp b/manifests/resource/service_identity.pp
+index 09e7d94..243c9ec 100644
+--- a/manifests/resource/service_identity.pp
++++ b/manifests/resource/service_identity.pp
+@@ -187,6 +187,8 @@ define keystone::resource::service_identity(
+     if $service_type {
+       ensure_resource('keystone_service', "${service_name_real}::${service_type}", {
+         'ensure'      => $ensure,
++        'name'        => $service_name_real,
++        'type'        => $service_type,
+         'description' => $service_description,
+       })
+     } else {
+@@ -199,6 +201,9 @@ define keystone::resource::service_identity(
+       if $public_url and $admin_url and $internal_url {
+         ensure_resource('keystone_endpoint', "${region}/${service_name_real}::${service_type}", {
+           'ensure'       => $ensure,
++          'name'         => $service_name_real,
++          'type'         => $service_type,
++          'region'       => $region,
+           'public_url'   => $public_url,
+           'admin_url'    => $admin_url,
+           'internal_url' => $internal_url,
+@@ -210,6 +215,8 @@ define keystone::resource::service_identity(
+       if $public_url and $admin_url and $internal_url {
+         ensure_resource('keystone_endpoint', "${region}/${service_name_real}", {
+           'ensure'       => $ensure,
++          'name'         => $service_name_real,
++          'region'       => $region,
+           'public_url'   => $public_url,
+           'admin_url'    => $admin_url,
+           'internal_url' => $internal_url,
+diff --git a/manifests/security_compliance.pp b/manifests/security_compliance.pp
+new file mode 100644
+index 0000000..64830ec
+--- /dev/null
++++ b/manifests/security_compliance.pp
+@@ -0,0 +1,45 @@
++# == class: keystone::security_compliance
++#
++# Implements security compliance configuration for keystone.
++#
++# === parameters:
++#
++# [*unique_last_password_count*]
++#   This controls the number of previous user password iterations
++#   to keep in history, in order to enforce that newly created passwords
++#   are unique. Setting the value to 1 (the default) disables this feature.
++#   (integer value)
++#   Defaults to 'undef'
++#
++# [*password_regex*]
++#   The regular expression used to validate password strength 
++#   requirements. By default, the regular expression will match
++#   any password. (string value)
++#   Defaults to 'undef'
++#
++# [*password_regex_description*]
++#   If a password fails to match the regular expression (*password_regex*),
++#   the contents of this configuration will be returned to users to explain
++#   why their requested password was insufficient. (string value)
++#   Defaults to 'undef'
++#
++# === DEPRECATED group/name
++#
++# == Copyright
++#
++# Copyright 2017 Wind River Systems, unless otherwise noted.
++#
++class keystone::security_compliance(
++  $unique_last_password_count          = undef,
++  $password_regex                      = undef,
++  $password_regex_description          = undef,
++) {
++  
++  include ::keystone::deps
++
++  keystone_config {
++    'security_compliance/unique_last_password_count':  value => $unique_last_password_count;
++    'security_compliance/password_regex':              value => $password_regex;
++    'security_compliance/password_regex_description':  value => $password_regex_description;
++  } 
++}
+diff --git a/spec/classes/keystone_security_compliance_spec.rb b/spec/classes/keystone_security_compliance_spec.rb
+new file mode 100644
+index 0000000..d0d4724
+--- /dev/null
++++ b/spec/classes/keystone_security_compliance_spec.rb
+@@ -0,0 +1,19 @@
++require 'spec_helper'
++
++describe 'keystone::security_compliance' do
++  describe 'with basic params' do
++    let :params do
++      {
++        :unique_last_password_count => 2,
++        :password_regex => '^(?=.*\d)(?=.*[a-zA-Z]).{7,}$',
++        :password_regex_description => 'password must be at least 7 characters long and contain 1 digit',
++      }
++    end
++    it 'should have basic params' do
++      # basic params
++      is_expected.to contain_keystone_config('security_compliance/unique_last_password_count').with_value('2')
++      is_expected.to contain_keystone_config('security_compliance/password_regex').with_value('^(?=.*\d)(?=.*[a-zA-Z]).{7,}$')
++      is_expected.to contain_keystone_config('security_compliance/password_regex_description').with_value('password must be at least 7 characters long and contain 1 digit')
++    end
++  end
++end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0002-remove-the-Keystone-admin-app.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0002-remove-the-Keystone-admin-app.patch
new file mode 100644 (file)
index 0000000..7499a8c
--- /dev/null
@@ -0,0 +1,37 @@
+From 0fb9013aa056db642457e93a20499fd9b46ba436 Mon Sep 17 00:00:00 2001
+From: Kam Nasim <kam.nasim@windriver.com>
+Date: Mon, 22 Jan 2018 11:18:08 -0500
+Subject: [PATCH] CGTS-8701: Remove the Keystone-admin app
+
+Following the Pike rebase, no services are using Identity V2 and
+therefore we can shut off the Keystone admin port / app, as in Identity
+V3 the public endpoint and admin endpoints both offer identical services
+---
+ lib/puppet/provider/keystone.rb | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/lib/puppet/provider/keystone.rb b/lib/puppet/provider/keystone.rb
+index 8eb171d..3c310dc 100644
+--- a/lib/puppet/provider/keystone.rb
++++ b/lib/puppet/provider/keystone.rb
+@@ -171,12 +171,16 @@ class Puppet::Provider::Keystone < Puppet::Provider::Openstack
+   end
+   def self.get_admin_endpoint
++    # NOTE (knasim-wrs): As of the Pike rebase, the public port(5000)
++    # provides the same functionality as the admin port(35357). We
++    # shall therefore not deploy the keystone-admin app and return
++    # the public port
+     endpoint = nil
+     if keystone_file
+       if url = get_section('DEFAULT', 'admin_endpoint')
+         endpoint = url.chomp('/')
+       else
+-        admin_port = get_section('DEFAULT', 'admin_port') || '35357'
++        admin_port = get_section('DEFAULT', 'public_port') || '5000'
+         host = clean_host(get_section('DEFAULT', 'admin_bind_host'))
+         protocol = ssl? ? 'https' : 'http'
+         endpoint = "#{protocol}://#{host}:#{admin_port}"
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0003-remove-eventlet_bindhost-from-Keystoneconf.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0003-remove-eventlet_bindhost-from-Keystoneconf.patch
new file mode 100644 (file)
index 0000000..905118c
--- /dev/null
@@ -0,0 +1,39 @@
+commit bb91ab26622a5ec695c6564af5a9e5e54fdc903c
+Author: Shoaib Nasir <shoaib.nasir@windriver.com>
+Date:   Thu Feb 15 15:04:55 2018 -0500
+
+    WRS: Patch3: 0003-remove-eventlet_bindhost-from-Keystoneconf.patch
+
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 4d79d30..d64638c 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -871,8 +871,9 @@ admin_token will be removed in a later release")
+     # WRS: the following options are deprecated for removal
+     # however public_bind_host and admin_bind_host are still required as long as
+     # keystone is running under eventlet
+-    'DEFAULT/public_bind_host': value => $public_bind_host;
+-    'DEFAULT/admin_bind_host':  value => $admin_bind_host;
++    # WRS: bind_host options removed from keystone.conf [DEFAULT]
++    #'DEFAULT/public_bind_host': value => $public_bind_host;
++    #'DEFAULT/admin_bind_host':  value => $admin_bind_host;
+     #'DEFAULT/public_port':      value => $public_port;
+     #'DEFAULT/admin_port':       value => $admin_port;
+     'DEFAULT/member_role_id':   value => $member_role_id;
+@@ -1110,10 +1111,12 @@ Fernet or UUID tokens are recommended.")
+   # WRS: The following options are deprecated for removal
+   # however they are still required as long as keystone
+   # is running under eventlet
+-  keystone_config {
+-    'eventlet_server/admin_workers':  value => $admin_workers;
+-    'eventlet_server/public_workers': value => $public_workers;
+-  }
++  # WRS(snasir): Removing these options from keystone.conf 
++  # since they are now populated in keystone-api.conf
++  #keystone_config {
++  #  'eventlet_server/admin_workers':  value => $admin_workers;
++  #  'eventlet_server/public_workers': value => $public_workers;
++  #}
+   if $manage_service {
+     if $enabled {
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0004-escape-special-characters-in-bootstrap.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0004-escape-special-characters-in-bootstrap.patch
new file mode 100644 (file)
index 0000000..edb5243
--- /dev/null
@@ -0,0 +1,39 @@
+From 70d22113cc8d58b6546cb4917c27f9aae51787c5 Mon Sep 17 00:00:00 2001
+From: Kam Nasim <kam.nasim@windriver.com>
+Date: Mon, 2 Apr 2018 16:13:31 -0400
+Subject: [PATCH] CGTS-9320: config_controller fails when admin pw containing $
+
+Escape special characters when executing the keystone-manage bootstrap
+command since the keystone CLI argparse will parse "Madawa$ka1" as
+"Madawa" which will cause the Keystone ADMIN acct to be created with an
+incorrect password. Puppet will detect this and attempt to course
+correct by sending an UPDATE User request to Keystone, which does set
+the right password but causes other failures in config_controller
+---
+ manifests/init.pp | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/manifests/init.pp b/manifests/init.pp
+index d64638c..89af303 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -1292,10 +1292,15 @@ running as a standalone service, or httpd for being run by a httpd server")
+   }
+   if $enable_bootstrap {
++    #(NOTE: knasim-wrs): escape special characters in the password otherwise the
++    # keystone-manage bootstrap CLI may parse the password incorrectly, causing
++    # the admin account to be created with an incorrect password
++    $admin_password_escaped = shell_escape($admin_password_real)
++
+     # this requires the database to be up and running and configured
+     # and is only run once, so we don't need to notify the service
+     exec { 'keystone-manage bootstrap':
+-      command     => "keystone-manage bootstrap --bootstrap-password ${admin_password_real}",
++      command     => "keystone-manage bootstrap --bootstrap-password ${admin_password_escaped}",
+       user        => $keystone_user,
+       path        => '/usr/bin',
+       refreshonly => true,
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0005-Add-support-for-fernet-receipts.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0005-Add-support-for-fernet-receipts.patch
new file mode 100644 (file)
index 0000000..5ba045d
--- /dev/null
@@ -0,0 +1,43 @@
+From 65de0c6615e2e94a4fd234fc1826e3eb403bb575 Mon Sep 17 00:00:00 2001
+From: Tyler Smith <tyler.smith@windriver.com>
+Date: Wed, 10 Apr 2019 15:37:25 -0400
+Subject: [PATCH 1/1] Add support for fernet receipts
+
+---
+ manifests/init.pp | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 89af303..a6d5cc3 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -1247,18 +1247,21 @@ running as a standalone service, or httpd for being run by a httpd server")
+   if $fernet_key_repository {
+     keystone_config {
+-      'fernet_tokens/key_repository': value => $fernet_key_repository;
++      'fernet_tokens/key_repository':   value => $fernet_key_repository;
++      'fernet_receipts/key_repository': value => $fernet_key_repository;
+     }
+   } else {
+     keystone_config {
+-      'fernet_tokens/key_repository': ensure => absent;
++      'fernet_tokens/key_repository':   ensure => absent;
++      'fernet_receipts/key_repository': ensure => absent;
+     }
+   }
+   keystone_config {
+-    'token/revoke_by_id':            value => $revoke_by_id;
+-    'fernet_tokens/max_active_keys': value => $fernet_max_active_keys;
+-    'credential/key_repository':     value => $credential_key_repository;
++    'token/revoke_by_id':              value => $revoke_by_id;
++    'fernet_tokens/max_active_keys':   value => $fernet_max_active_keys;
++    'fernet_receipts/max_active_keys': value => $fernet_max_active_keys;
++    'credential/key_repository':       value => $credential_key_repository;
+   }
+   # Update this code when https://bugs.launchpad.net/keystone/+bug/1472285 is addressed.
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0006-workaround-Adjust-keystone-naming-to-poky.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0006-workaround-Adjust-keystone-naming-to-poky.patch
new file mode 100644 (file)
index 0000000..13d05c6
--- /dev/null
@@ -0,0 +1,28 @@
+From 5b6e47a7b1eb8c286e7c40479ba19131a2f696d7 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Thu, 26 Dec 2019 12:00:31 -0800
+Subject: [PATCH] workaround: Adjust keystone naming to poky
+
+Issue 43:
+In the puppet apply, openstack-keystone is used as the keystone package
+name, but in yocto, the package name is 'keystone'.
+---
+ manifests/params.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 0d61acfb..b2bc44b1 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -18,7 +18,7 @@ class keystone::params {
+       $openidc_package_name         = 'libapache2-mod-auth-openidc'
+     }
+     'RedHat': {
+-      $package_name                 = 'openstack-keystone'
++      $package_name                 = 'keystone'
+       $service_name                 = 'openstack-keystone'
+       $keystone_wsgi_script_path    = '/var/www/cgi-bin/keystone'
+       $python_memcache_package_name = 'python-memcached'
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0007-puppet-keystone-specify-full-path-to-openrc.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0007-puppet-keystone-specify-full-path-to-openrc.patch
new file mode 100644 (file)
index 0000000..d61c158
--- /dev/null
@@ -0,0 +1,43 @@
+From 3e976dc2357aa7a6f9a0352ff2a82e7203f28fba Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Wed, 8 Jan 2020 15:54:44 -0800
+Subject: [PATCH] puppet-keystone: specify full path to openrc
+
+issue 48: keystone endpoints cannot be populated properly
+---
+ manifests/deps.pp | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/manifests/deps.pp b/manifests/deps.pp
+index 30c29528..d98154dd 100644
+--- a/manifests/deps.pp
++++ b/manifests/deps.pp
+@@ -63,18 +63,25 @@ class keystone::deps {
+   # The following resources need to be provisioned after the service is up.
+   Anchor['keystone::service::end']
+   -> Keystone_domain<||>
++  -> File['/etc/platform/openrc']
+   Anchor['keystone::service::end']
+   -> Keystone_endpoint<||>
++  -> File['/etc/platform/openrc']
+   Anchor['keystone::service::end']
+   -> Keystone_role<||>
++  -> File['/etc/platform/openrc']
+   Anchor['keystone::service::end']
+   -> Keystone_service<||>
++  -> File['/etc/platform/openrc']
+   Anchor['keystone::service::end']
+   -> Keystone_tenant<||>
++  -> File['/etc/platform/openrc']
+   Anchor['keystone::service::end']
+   -> Keystone_user<||>
++  -> File['/etc/platform/openrc']
+   Anchor['keystone::service::end']
+   -> Keystone_user_role<||>
++  -> File['/etc/platform/openrc']
+   # Installation or config changes will always restart services.
+   Anchor['keystone::install::end'] ~> Anchor['keystone::service::begin']
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/0008-params.pp-fix-the-service-name-of-openstack-keystone.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/0008-params.pp-fix-the-service-name-of-openstack-keystone.patch
new file mode 100644 (file)
index 0000000..da652f2
--- /dev/null
@@ -0,0 +1,26 @@
+From dc2512633a9552272acfcc180ddc86c621b51313 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Wed, 11 Mar 2020 11:12:34 +0800
+Subject: [PATCH] params.pp: fix the service name of openstack-keystone
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ manifests/params.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 0d61acf..590e02b 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -11,7 +11,7 @@ class keystone::params {
+   case $::osfamily {
+     'Debian': {
+       $package_name                 = 'keystone'
+-      $service_name                 = 'keystone'
++      $service_name                 = 'openstack-keystone'
+       $keystone_wsgi_script_path    = '/usr/lib/cgi-bin/keystone'
+       $python_memcache_package_name = 'python-memcache'
+       $mellon_package_name          = 'libapache2-mod-auth-mellon'
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-keystone/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-keystone/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..5df931b
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-keystone.gemspec  2019-10-28 15:09:10.705761083 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-keystone'
++  s.version     = '11.3.0'
++  s.date        = '2017-08-21'
++  s.summary     = "Puppet module for OpenStack Keystone."
++  s.description = s.summary
++  s.authors     = ["Puppet Labs and OpenStack Contributors"]
++  s.email       = ''
++  s.files       = %w(LICENSE) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://launchpad.net/puppet-keystone'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-kmod/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-kmod/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..8e8d83e
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/camptocamp-kmod.gemspec  2019-10-31 13:36:53.819223247 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'camptocamp-kmod'
++  s.version     = '2.1.0'
++  s.date        = '2016-06-24'
++  s.summary     = "Manage Linux kernel modules with Puppet"
++  s.description = s.summary
++  s.authors     = ["camptocamp"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/camptocamp/puppet-kmod'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ldap/0001-puppet-ldap-add-os-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-ldap/0001-puppet-ldap-add-os-poky-stx.patch
new file mode 100644 (file)
index 0000000..dd4dd7b
--- /dev/null
@@ -0,0 +1,9 @@
+diff --git a/manifests/os/poky-stx.pp b/manifests/os/poky-stx.pp
+new file mode 100644
+index 0000000..03b1aa1
+--- /dev/null
++++ b/manifests/os/poky-stx.pp
+@@ -0,0 +1,3 @@
++
++class ldap::os::poky-stx {
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ldap/0002-puppet-ldap-poky-stx-fix-pkg-name.patch b/meta-stx/recipes-support/puppet/files/puppet-ldap/0002-puppet-ldap-poky-stx-fix-pkg-name.patch
new file mode 100644 (file)
index 0000000..14ff8d3
--- /dev/null
@@ -0,0 +1,16 @@
+diff -ru a/manifests/params.pp b/manifests/params.pp
+--- a/manifests/params.pp      2020-03-05 13:55:59.165978792 +0800
++++ b/manifests/params.pp      2020-03-05 14:03:14.677990660 +0800
+@@ -2,7 +2,11 @@
+ class ldap::params {
+   case $::osfamily {
+     'Debian' : {
+-      $package = ['ldap-utils']
++      if $::operatingsystem == 'poky-stx' {
++          $package = ['libldap-2.4-slapd', 'libldap-2.4-bin']
++      } else {
++          $package = ['ldap-utils']
++      }
+       $prefix = '/etc/ldap'
+       $owner = 'root'
diff --git a/meta-stx/recipes-support/puppet/files/puppet-ldap/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-ldap/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..b8aa120
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppet-ldap.gemspec      2019-10-30 21:04:14.309536996 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-ldap'
++  s.version     = '0.2.4'
++  s.date        = '2014-09-13'
++  s.summary     = "OpenLDAP module for Puppet."
++  s.description = s.summary
++  s.authors     = ["Emiliano Castagnari ecastag@gmail.com (aka Torian)"]
++  s.email       = ''
++  s.files       = %w(Modulefile README.md) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/torian/puppet-ldap'
++  s.license     = 'GPL-2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-memcached/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-memcached/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..9de0795
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-memcached.gemspec 2019-10-27 06:27:38.193407895 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-memcached'
++  s.version     = '3.0.2'
++  s.date        = '2017-05-10'
++  s.summary     = "Puppet memcached module"
++  s.description = s.summary
++  s.authors     = ["Puppet saz "]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/saz/puppet-memcached'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/0001-Stx-uses-puppet-boolean-instead-of-adrien-boolean.patch b/meta-stx/recipes-support/puppet/files/puppet-network/0001-Stx-uses-puppet-boolean-instead-of-adrien-boolean.patch
new file mode 100644 (file)
index 0000000..0df7113
--- /dev/null
@@ -0,0 +1,26 @@
+From 0d6914c558620f8e82b2c84ce7bb4d35fdb882fd Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Thu, 31 Oct 2019 13:15:05 -0700
+Subject: [PATCH] Stx uses puppet-boolean instead of adrien-boolean
+
+Use puppet-boolean instead of adrien-boolean
+---
+ metadata.json | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/metadata.json b/metadata.json
+index 66dc6d7..2ca38b9 100644
+--- a/metadata.json
++++ b/metadata.json
+@@ -10,7 +10,7 @@
+   "dependencies": [
+       {"name":"puppetlabs/stdlib","version_requirement":">= 2.3.0 < 5.0.0"},
+       {"name":"adrien/filemapper","version_requirement":">= 1.1.0 < 5.0.0"},
+-      {"name":"adrien/boolean","version_requirement":">= 1.0.0"},
++      {"name":"puppet/boolean","version_requirement":">= 1.0.0"},
+       {"name":"camptocamp/kmod","version_requirement":">= 0.0.x"}
+   ]
+ }
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-network/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..264ea4e
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppet-network.gemspec   2019-10-30 21:12:14.606035966 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-network'
++  s.version     = '0.5.0'
++  s.date        = '2015-06-03'
++  s.summary     = "Manage non-volatile network configuration"
++  s.description = s.summary
++  s.authors     = ["Adrien Thebo <adrien@somethingsinistral.net>"]
++  s.email       = ''
++  s.files       = %w(Modulefile README.markdown Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/adrienthebo/puppet-network'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/Don-t-write-absent-to-redhat-route-files-and-test-fo.patch b/meta-stx/recipes-support/puppet/files/puppet-network/Don-t-write-absent-to-redhat-route-files-and-test-fo.patch
new file mode 100644 (file)
index 0000000..efc5446
--- /dev/null
@@ -0,0 +1,71 @@
+From 49e103bbeb4d6efe1ca75f581d41ee6a8ed7caf5 Mon Sep 17 00:00:00 2001
+From: Romanos Skiadas <rom.skiad@gmail.com>
+Date: Wed, 2 Nov 2016 14:51:47 -0400
+Subject: [PATCH] Don't write absent to redhat route files and test for this
+
+Signed-off-by: Allain Legacy <allain.legacy@windriver.com>
+---
+ .../network/lib/puppet/provider/network_route/redhat.rb |  9 +++++++--
+ .../spec/unit/provider/network_route/redhat_spec.rb     | 17 ++++++++++++++++-
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index f45eab5..9841c8e 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -84,10 +84,15 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+         raise Puppet::Error, "#{provider.name} does not have a #{prop}." if provider.send(prop).nil?
+       end
+       if provider.network == "default"
+-        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface} #{provider.options}\n"
++        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}\n"
+       else
+-        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface} #{provider.options}\n"
++        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}\n"
+       end
++      contents << if provider.options == :absent
++                    "\n"
++                  else
++                    " #{provider.options}\n"
++                  end
+     end
+     contents.join
+   end
+diff --git a/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb b/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb
+index dfc9d6b..1ad2128 100644
+--- a/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb
++++ b/packstack/puppet/modules/network/spec/unit/provider/network_route/redhat_spec.rb
+@@ -91,7 +91,18 @@ describe Puppet::Type.type(:network_route).provider(:redhat) do
+       )
+     end
+-    let(:content) { described_class.format_file('', [route1_provider, route2_provider, defaultroute_provider]) }
++    let(:nooptions_provider) do
++      stub('nooptions_provider',
++           name: 'default',
++           network: 'default',
++           netmask: '',
++           gateway: '10.0.0.1',
++           interface: 'eth2',
++           options: :absent
++      )
++    end
++
++    let(:content) { described_class.format_file('', [route1_provider, route2_provider, defaultroute_provider, nooptions_provider]) }
+     describe "writing the route line" do
+       describe "For standard (non-default) routes" do
+@@ -122,6 +133,10 @@ describe Puppet::Type.type(:network_route).provider(:redhat) do
+       it "should have the correct fields appended" do
+         content.scan(/^default .*$/).first.should be_include("default via 10.0.0.1 dev eth1")
+       end
++
++      it 'should not contain the word absent when no options are defined' do
++        expect(content).to_not match(/absent/)
++      end
+     end
+   end
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/fix-absent-options.patch b/meta-stx/recipes-support/puppet/files/puppet-network/fix-absent-options.patch
new file mode 100644 (file)
index 0000000..23c738f
--- /dev/null
@@ -0,0 +1,113 @@
+From f22d4c9d24939afb8f29323adffe3eb570f14804 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=3D=3FUTF-8=3Fq=3FIgor=3D20Gali=3DC4=3D87=3F=3D?=
+ <i.galic@brainsware.org>
+Date: Wed, 2 Nov 2016 14:54:28 -0400
+Subject: [PATCH] fix "absent" options
+
+analogous to redhat, we check if options are absent, before appending
+them to the file. This fixes #160
+
+Signed-off-by: Allain Legacy <allain.legacy@windriver.com>
+---
+ .../lib/puppet/provider/network_route/redhat.rb    | 10 ++---
+ .../lib/puppet/provider/network_route/routes.rb    |  3 +-
+ .../unit/provider/network_route/routes_spec.rb     | 48 ++++++++++++++++++++++
+ 3 files changed, 53 insertions(+), 8 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index 9841c8e..7123d44 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -84,15 +84,11 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+         raise Puppet::Error, "#{provider.name} does not have a #{prop}." if provider.send(prop).nil?
+       end
+       if provider.network == "default"
+-        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}\n"
++        contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}"
+       else
+-        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}\n"
++        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}"
+       end
+-      contents << if provider.options == :absent
+-                    "\n"
+-                  else
+-                    " #{provider.options}\n"
+-                  end
++      contents << (provider.options == :absent ? "\n" : " #{provider.options}\n")
+     end
+     contents.join
+   end
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb
+index 2dd579f..ca7066d 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/routes.rb
+@@ -93,7 +93,8 @@ Puppet::Type.type(:network_route).provide(:routes) do
+       raise Puppet::Error, "#{provider.name} is missing the required parameter 'gateway'." if provider.gateway.nil?
+       raise Puppet::Error, "#{provider.name} is missing the required parameter 'interface'." if provider.interface.nil?
+-      contents << "#{provider.network} #{provider.netmask} #{provider.gateway} #{provider.interface} #{provider.options}\n"
++      contents << "#{provider.network} #{provider.netmask} #{provider.gateway} #{provider.interface}"
++      contents << (provider.options == :absent ? "\n" : " #{provider.options}\n")
+     end
+     contents.join
+diff --git a/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb b/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb
+index 2e55eba..9376739 100644
+--- a/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb
++++ b/packstack/puppet/modules/network/spec/unit/provider/network_route/routes_spec.rb
+@@ -93,4 +93,52 @@ describe Puppet::Type.type(:network_route).provider(:routes) do
+       end
+     end
+   end
++  describe 'when formatting simple files' do
++    let(:route1_provider) do
++      stub('route1_provider',
++           name: '172.17.67.0',
++           network: '172.17.67.0',
++           netmask: '255.255.255.0',
++           gateway: '172.18.6.2',
++           interface: 'vlan200',
++           options: :absent,
++      )
++    end
++
++    let(:route2_provider) do
++      stub('lo_provider',
++           name: '172.28.45.0',
++           network: '172.28.45.0',
++           netmask: '255.255.255.0',
++           gateway: '172.18.6.2',
++           interface: 'eth0',
++           options: :absent,
++      )
++    end
++
++    let(:content) { described_class.format_file('', [route1_provider, route2_provider]) }
++
++    describe 'writing the route line' do
++      it 'should write only fields' do
++        expect(content.scan(/^172.17.67.0 .*$/).length).to eq(1)
++        expect(content.scan(/^172.17.67.0 .*$/).first.split(/\s/, 5).length).to eq(4)
++      end
++
++      it 'should have the correct fields appended' do
++        expect(content.scan(/^172.17.67.0 .*$/).first).to include('172.17.67.0 255.255.255.0 172.18.6.2 vlan200')
++      end
++
++      it 'should fail if the netmask property is not defined' do
++        route2_provider.unstub(:netmask)
++        route2_provider.stubs(:netmask).returns nil
++        expect { content }.to raise_exception
++      end
++
++      it 'should fail if the gateway property is not defined' do
++        route2_provider.unstub(:gateway)
++        route2_provider.stubs(:gateway).returns nil
++        expect { content }.to raise_exception
++      end
++    end
++  end
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/ipv6-static-route-support.patch b/meta-stx/recipes-support/puppet/files/puppet-network/ipv6-static-route-support.patch
new file mode 100644 (file)
index 0000000..10456b1
--- /dev/null
@@ -0,0 +1,100 @@
+From 49820add1d1e5f63343615ead9b551b8679f466d Mon Sep 17 00:00:00 2001
+From: Kevin Smith <kevin.smith@windriver.com>
+Date: Mon, 16 Oct 2017 15:06:37 -0500
+Subject: [PATCH 1/1] ipv6 static route support
+
+---
+ .../lib/puppet/provider/network_route/redhat.rb    |  3 ++-
+ .../network/lib/puppet/type/network_route.rb       | 26 ++++++++++++++--------
+ .../network/spec/unit/type/network_route_spec.rb   |  5 +++++
+ 3 files changed, 24 insertions(+), 10 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index 5073519..c289f5f 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -93,7 +93,8 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+       if provider.network == "default"
+         contents << "#{provider.network} via #{provider.gateway} dev #{provider.interface}"
+       else
+-        contents << "#{provider.network}/#{provider.netmask} via #{provider.gateway} dev #{provider.interface}"
++        # provider.name will have cidr notation 
++        contents << "#{provider.name} via #{provider.gateway} dev #{provider.interface}"
+       end
+       contents << (provider.options == :absent ? "\n" : " #{provider.options}\n")
+     end
+diff --git a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+index 7ab67dd..fd52c58 100644
+--- a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
++++ b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+@@ -5,8 +5,6 @@ Puppet::Type.newtype(:network_route) do
+   ensurable
+-  IPV4_ADDRESS_REGEX = /^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$/
+-
+   newparam(:name) do
+     isnamevar
+     desc "The name of the network route"
+@@ -18,7 +16,7 @@ Puppet::Type.newtype(:network_route) do
+     validate do |value|
+       begin
+         t = IPAddr.new(value) unless value == "default"
+-      rescue ArgumentError
++      rescue
+         fail("Invalid value for network: #{value}")
+       end
+     end
+@@ -29,17 +27,27 @@ Puppet::Type.newtype(:network_route) do
+     desc "The subnet mask to apply to the route"
+     validate do |value|
+-      unless (value.length <= 2 or value =~ IPV4_ADDRESS_REGEX)
++      unless value.length <= 3 || (IPAddr.new(value) rescue false)
+         fail("Invalid value for argument netmask: #{value}")
+       end
+     end
+     munge do |value|
+-      case value
+-      when IPV4_ADDRESS_REGEX
+-        value
+-      when /^\d+$/
+-        IPAddr.new('255.255.255.255').mask(value.strip.to_i).to_s
++      # '255.255.255.255'.to_i  will return 255, so we try to convert it back:
++      if value.to_i.to_s == value
++        if value.to_i <= 32
++          IPAddr.new('255.255.255.255').mask(value.strip.to_i).to_s
++        else
++          IPAddr.new('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff').mask(value.strip.to_i).to_s
++        end
++      else
++        if (IPAddr.new(value).ipv6? rescue false)
++          IPAddr.new('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff').mask(value).to_s
++        elsif (IPAddr.new(value).ipv4? rescue false)
++          IPAddr.new('255.255.255.255').mask(value).to_s
++        else
++          raise("Invalid value for argument netmask: #{value}")
++        end
+       end
+     end
+   end
+diff --git a/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb b/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb
+index 24e9da3..6e6f3e4 100644
+--- a/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb
++++ b/packstack/puppet/modules/network/spec/unit/type/network_route_spec.rb
+@@ -55,6 +55,11 @@ describe Puppet::Type.type(:network_route) do
+         r[:netmask].should == '255.255.255.0'
+       end
++      it 'should convert IPv6 netmasks of the CIDR form' do
++        r = Puppet::Type.type(:network_route).new(name: 'lxd bridge', network: 'fd58:281b:6eef:eb3d::', netmask: '64', gateway: 'fd58:281b:6eef:eb3d::1', interface: 'lxdbr0')
++        expect(r[:netmask]).to eq('ffff:ffff:ffff:ffff::')
++      end
++
+       it "should convert netmasks of the expanded netmask form" do
+         r = described_class.new(:name => '192.168.1.0/24', :network => '192.168.1.0', :netmask => '255.255.128.0', :gateway => '23.23.23.42', :interface => 'eth0')
+         r[:netmask].should == '255.255.128.0'
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/metadata.json.patch b/meta-stx/recipes-support/puppet/files/puppet-network/metadata.json.patch
new file mode 100644 (file)
index 0000000..bdb5696
--- /dev/null
@@ -0,0 +1,19 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/metadata.json    2019-10-31 13:07:35.519435748 -0700
+@@ -0,0 +1,16 @@
++{
++  "name": "adrien-network",
++  "version": "0.5.0",
++  "author": "Adrien Tehbo <adrien@somethingsinistral.net>",
++  "summary": "Manage non-volatile network configuration",
++  "license": "Apache-2.0",
++  "source": "https://github.com/adrienthebo/puppet-network",
++  "project_page": "https://github.com/adrienthebo/puppet-network",
++  "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
++  "dependencies": [
++      {"name":"puppetlabs/stdlib","version_requirement":">= 2.3.0 < 5.0.0"},
++      {"name":"adrien/filemapper","version_requirement":">= 1.1.0 < 5.0.0"},
++      {"name":"adrien/boolean","version_requirement":">= 1.0.0"},
++      {"name":"camptocamp/kmod","version_requirement":">= 0.0.x"}
++  ]
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/permit-inservice-update-of-static-routes.patch b/meta-stx/recipes-support/puppet/files/puppet-network/permit-inservice-update-of-static-routes.patch
new file mode 100644 (file)
index 0000000..66e7623
--- /dev/null
@@ -0,0 +1,55 @@
+From 46ec08e58419bb73bf49b44cf32fa3d304236615 Mon Sep 17 00:00:00 2001
+From: Kevin Smith <kevin.smith@windriver.com>
+Date: Thu, 5 Oct 2017 13:33:12 -0500
+Subject: [PATCH 1/1] permit inservice update of static routes
+
+---
+ .../network/lib/puppet/provider/network_route/redhat.rb  | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+index 7123d44..5073519 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/redhat.rb
+@@ -18,12 +18,18 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+   has_feature :provider_options
++  # WRS: Generate temporary copies.  It will get compared to files under
++  # /etc/sysconfig/network-scripts afterward.  Only config that have changed
++  # will get replaced.  Don't let puppet directly manage them, else it will
++  # trigger un-wanted networking actions (like up/down).
++  RSCRIPT_DIRECTORY = "/var/run/network-scripts.puppet"
++
+   def select_file
+-    "/etc/sysconfig/network-scripts/route-#{@resource[:interface]}"
++    "#{RSCRIPT_DIRECTORY}/route-#{@resource[:interface]}"
+   end
+   def self.target_files
+-    Dir["/etc/sysconfig/network-scripts/route-*"]
++    Dir["#{RSCRIPT_DIRECTORY}/route-*"]
+   end
+   def self.parse_file(filename, contents)
+@@ -76,6 +82,7 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+   # Generate an array of sections
+   def self.format_file(filename, providers)
++    Dir.mkdir(RSCRIPT_DIRECTORY) unless File.exists?(RSCRIPT_DIRECTORY)
+     contents = []
+     contents << header
+     # Build routes
+@@ -103,4 +110,9 @@ Puppet::Type.type(:network_route).provide(:redhat) do
+ HEADER
+     str
+   end
++
++  def self.post_flush_hook(filename)
++    File.chmod(0644, filename)
++  end
++
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-Kilo-quilt-changes.patch b/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-Kilo-quilt-changes.patch
new file mode 100644 (file)
index 0000000..841198f
--- /dev/null
@@ -0,0 +1,658 @@
+From 8e14e2e258a8f2f7189ed37c6337c41fbff0362a Mon Sep 17 00:00:00 2001
+From: Al Bailey <al.bailey@windriver.com>
+Date: Mon, 6 Jun 2016 17:13:09 -0400
+Subject: [PATCH] puppet-network Kilo quilt changes
+
+---
+ .../lib/puppet/provider/network_config/redhat.rb   |  39 ++-
+ .../lib/puppet/provider/network_config/wrlinux.rb  | 296 +++++++++++++++++++++
+ .../lib/puppet/provider/network_route/wrlinux.rb   | 109 ++++++++
+ .../network/lib/puppet/type/network_config.rb      |   4 +
+ packstack/puppet/modules/network/manifests/bond.pp |  22 ++
+ .../puppet/modules/network/manifests/bond/setup.pp |   2 +
+ .../modules/network/manifests/bond/wrlinux.pp      |  56 ++++
+ 7 files changed, 521 insertions(+), 7 deletions(-)
+ create mode 100644 packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb
+ create mode 100644 packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb
+ create mode 100644 packstack/puppet/modules/network/manifests/bond/wrlinux.pp
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+index 4b6de7e..758f387 100644
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+@@ -19,7 +19,12 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+   has_feature :provider_options
+   # @return [String] The path to network-script directory on redhat systems
+-  SCRIPT_DIRECTORY = "/etc/sysconfig/network-scripts"
++  # SCRIPT_DIRECTORY = "/etc/sysconfig/network-scripts"
++  # WRS: Generate temporary copies.  It will get compared to files under
++  # /etc/sysconfig/network-scripts afterward.  Only config that have changed
++  # will get replaced.  Don't let puppet directly manage them, else it will
++  # trigger un-wanted networking actions (like up/down).
++  SCRIPT_DIRECTORY = "/var/run/network-scripts.puppet"
+   # The valid vlan ID range is 0-4095; 4096 is out of range
+   VLAN_RANGE_REGEX = %r[\d{1,3}|40[0-9][0-5]]
+@@ -35,6 +40,7 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+     :name       => 'DEVICE',
+     :hotplug    => 'HOTPLUG',
+     :mtu        => 'MTU',
++    :gateway => 'GATEWAY',
+   }
+   # Map provider instances to files based on their name
+@@ -60,8 +66,14 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+   #   RedhatProvider.target_files
+   #   # => ['/etc/sysconfig/network-scripts/ifcfg-eth0', '/etc/sysconfig/network-scripts/ifcfg-eth1']
+   def self.target_files(script_dir = SCRIPT_DIRECTORY)
+-    entries = Dir.entries(script_dir).select {|entry| entry.match SCRIPT_REGEX}
+-    entries.map {|entry| File.join(SCRIPT_DIRECTORY, entry)}
++    entries = []
++    if Dir.exists?(SCRIPT_DIRECTORY)
++      Dir.foreach(SCRIPT_DIRECTORY) do |item|
++        next if not item.match SCRIPT_REGEX
++        entries << item
++      end
++    end
++    entries
+   end
+   # Convert a redhat network script into a hash
+@@ -184,6 +196,8 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+   end
+   def self.format_file(filename, providers)
++    Dir.mkdir(SCRIPT_DIRECTORY) unless File.exists?(SCRIPT_DIRECTORY)
++
+     if providers.length == 0
+       return ""
+     elsif providers.length > 1
+@@ -193,11 +207,11 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+     provider = providers[0]
+     props    = {}
+-    # Map everything to a flat hash
+-    props = (provider.options || {})
++    props = provider.options if provider.options && provider.options != :absent
++    # Map everything to a flat hash
+     NAME_MAPPINGS.keys.each do |type_name|
+-      if (val = provider.send(type_name))
++      if (val = provider.send(type_name)) && val != :absent
+         props[type_name] = val
+       end
+     end
+@@ -214,11 +228,11 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+       str << %{#{key}=#{val}\n}
+     end
++    content.prepend(header)
+     content
+   end
+   def self.unmunge(props)
+-
+     pairs = {}
+     [:onboot, :hotplug].each do |bool_property|
+@@ -245,6 +259,17 @@ Puppet::Type.type(:network_config).provide(:redhat) do
+     pairs
+   end
++  def self.header
++    str = <<-HEADER
++# HEADER: This file is is being managed by puppet. Changes to
++# HEADER: interfaces that are not being managed by puppet will persist;
++# HEADER: however changes to interfaces that are being managed by puppet will
++# HEADER: be overwritten. In addition, file order is NOT guaranteed.
++# HEADER: Last generated at: #{Time.now}
++HEADER
++    str
++  end
++
+   def self.post_flush_hook(filename)
+     File.chmod(0644, filename)
+   end
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb
+new file mode 100644
+index 0000000..44c645a
+--- /dev/null
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_config/wrlinux.rb
+@@ -0,0 +1,296 @@
++require 'puppetx/filemapper'
++
++Puppet::Type.type(:network_config).provide(:wrlinux) do
++  # Wind River Linux network_config interfaces provider.
++  #
++  # This provider uses the filemapper mixin to map the interfaces file to a
++  # collection of network_config providers, and back.
++  #
++  include PuppetX::FileMapper
++
++  desc "Wind River interfaces style provider"
++
++  confine    :osfamily => :wrlinux
++  defaultfor :osfamily => :wrlinux
++
++  has_feature :provider_options
++  has_feature :hotpluggable
++
++  def select_file
++    '/var/run/interfaces.puppet'
++  end
++
++  def self.target_files
++    ['/var/run/interfaces.puppet']
++  end
++
++  class MalformedInterfacesError < Puppet::Error
++    def initialize(msg = nil)
++      msg = 'Malformed wrlinux interfaces file; cannot instantiate network_config resources' if msg.nil?
++      super
++    end
++  end
++
++  def self.raise_malformed
++    @failed = true
++    raise MalformedInterfacesError
++  end
++
++  class Instance
++
++    attr_reader :name
++
++    # Booleans
++    attr_accessor :onboot, :hotplug
++
++
++    # These fields are going to get rearranged to resolve issue 16
++    # https://github.com/adrienthebo/puppet-network/issues/16
++    attr_accessor :ipaddress, :netmask, :family, :method, :mtu
++
++    # Options hash
++    attr_reader :options
++
++    def initialize(name)
++      @name = name
++
++      @options = Hash.new {|hash, key| hash[key] = []}
++    end
++
++    def to_hash
++      h = {
++        :name      => @name,
++        :onboot    => @onboot,
++        :hotplug   => @hotplug,
++        :ipaddress => @ipaddress,
++        :netmask   => @netmask,
++        :family    => @family,
++        :method    => @method,
++        :mtu       => @mtu,
++        :options   => squeeze_options
++      }
++
++      h.inject({}) do |hash, (key, val)|
++        hash[key] = val unless val.nil?
++        hash
++      end
++    end
++
++    def squeeze_options
++      @options.inject({}) do |hash, (key, value)|
++        if value.size <= 1
++          hash[key] = value.pop
++        else
++          hash[key] = value
++        end
++
++      hash
++      end
++    end
++
++    class << self
++
++      def reset!
++        @interfaces = {}
++      end
++
++      # @return [Array<Instance>] All class instances
++      def all_instances
++        @interfaces ||= {}
++        @interfaces
++      end
++
++      def [](name)
++        if all_instances[name]
++          obj = all_instances[name]
++        else
++          obj = self.new(name)
++          all_instances[name] = obj
++        end
++
++        obj
++      end
++    end
++  end
++
++  def self.parse_file(filename, contents)
++    # Debian has a very irregular format for the interfaces file. The
++    # parse_file method is somewhat derived from the ifup executable
++    # supplied in the debian ifupdown package. The source can be found at
++    # http://packages.debian.org/squeeze/ifupdown
++
++
++    # The debian interfaces implementation requires global state while parsing
++    # the file; namely, the stanza being parsed as well as the interface being
++    # parsed.
++    status = :none
++    current_interface = nil
++
++    lines = contents.split("\n")
++    # TODO Join lines that end with a backslash
++
++    # Iterate over all lines and determine what attributes they create
++    lines.each do |line|
++
++      # Strip off any trailing comments
++      line.sub!(/#.*$/, '')
++
++      case line
++      when /^\s*#|^\s*$/
++        # Ignore comments and blank lines
++        next
++
++      when /^auto|^allow-auto/
++        # Parse out any auto sections
++        interfaces = line.split(' ')
++        interfaces.delete_at(0)
++
++        interfaces.each do |name|
++          Instance[name].onboot = true
++        end
++
++        # Reset the current parse state
++        current_interface = nil
++
++      when /^allow-hotplug/
++        # parse out allow-hotplug lines
++
++        interfaces = line.split(' ')
++        interfaces.delete_at(0)
++
++        interfaces.each do |name|
++          Instance[name].hotplug = true
++        end
++
++        # Don't reset Reset the current parse state
++      when /^iface/
++
++        # Format of the iface line:
++        #
++        # iface <iface> <family> <method>
++        # zero or more options for <iface>
++
++        if match = line.match(/^iface\s+(\S+)\s+(\S+)\s+(\S+)/)
++          name   = match[1]
++          family = match[2]
++          method = match[3]
++
++          # If an iface block for this interface has been seen, the file is
++          # malformed.
++          raise_malformed if Instance[name] and Instance[name].family
++
++          status = :iface
++          current_interface = name
++
++          # This is done automatically
++          #Instance[name].name   = name
++          Instance[name].family = family
++          Instance[name].method = method
++
++        else
++          # If we match on a string with a leading iface, but it isn't in the
++          # expected format, malformed blar blar
++          raise_malformed
++        end
++
++      when /^mapping/
++
++        # XXX dox
++        raise Puppet::DevError, "Debian interfaces mapping parsing not implemented."
++        status = :mapping
++
++      else
++        # We're currently examining a line that is within a mapping or iface
++        # stanza, so we need to validate the line and add the options it
++        # specifies to the known state of the interface.
++
++        case status
++        when :iface
++          if match = line.match(/(\S+)\s+(\S.*)/)
++            # If we're parsing an iface stanza, then we should receive a set of
++            # lines that contain two or more space delimited strings. Append
++            # them as options to the iface in an array.
++
++            key = match[1]
++            val = match[2]
++
++            name = current_interface
++
++            case key
++            when 'address'; Instance[name].ipaddress    = val
++            when 'netmask'; Instance[name].netmask      = val
++            when 'mtu';     Instance[name].mtu          = val
++            else            Instance[name].options[key] << val
++            end
++          else
++            raise_malformed
++          end
++        when :mapping
++          raise Puppet::DevError, "Debian interfaces mapping parsing not implemented."
++        when :none
++          raise_malformed
++        end
++      end
++    end
++
++    Instance.all_instances.map {|name, instance| instance.to_hash }
++  end
++
++  # Generate an array of sections
++  def self.format_file(filename, providers)
++    contents = []
++    contents << header
++
++    # Add onboot interfaces
++    if (auto_interfaces = providers.select {|provider| provider.onboot == true })
++      stanza = []
++      stanza << "auto " + auto_interfaces.map(&:name).sort.join(" ")
++      contents << stanza.join("\n")
++    end
++
++    # Build iface stanzas
++    providers.sort_by(&:name).each do |provider|
++      # TODO add validation method
++      raise Puppet::Error, "#{provider.name} does not have a method." if provider.method.nil?
++      raise Puppet::Error, "#{provider.name} does not have a family." if provider.family.nil?
++
++      stanza = []
++      stanza << %{iface #{provider.name} #{provider.family} #{provider.method}}
++
++      [
++        [:ipaddress, 'address'],
++        [:netmask,   'netmask'],
++        [:mtu,       'mtu'],
++      ].each do |(property, section)|
++        stanza << "    #{section} #{provider.send property}" if provider.send(property) and provider.send(property) != :absent
++      end
++
++      if provider.options and provider.options != :absent
++        provider.options.each_pair do |key, val|
++          if val.is_a? String
++            stanza << "    #{key} #{val}"
++          elsif val.is_a? Array
++            val.each { |entry| stanza << "    #{key} #{entry}" }
++          else
++            raise Puppet::Error, "#{self} options key #{key} expects a String or Array, got #{val.class}"
++          end
++        end
++      end
++
++      contents << stanza.join("\n")
++    end
++
++    contents.map {|line| line + "\n\n"}.join
++  end
++
++  def self.header
++    str = <<-HEADER
++# HEADER: This file is is being managed by puppet. Changes to
++# HEADER: interfaces that are not being managed by puppet will persist;
++# HEADER: however changes to interfaces that are being managed by puppet will
++# HEADER: be overwritten. In addition, file order is NOT guaranteed.
++# HEADER: Last generated at: #{Time.now}
++HEADER
++    str
++  end
++end
+diff --git a/packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb b/packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb
+new file mode 100644
+index 0000000..d3fa7b5
+--- /dev/null
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_route/wrlinux.rb
+@@ -0,0 +1,109 @@
++require 'ipaddr'
++require 'puppetx/filemapper'
++
++Puppet::Type.type(:network_route).provide(:wrlinux) do
++  # Wind River Linux network_route routes provider.
++  #
++  # This provider uses the filemapper mixin to map the routes file to a
++  # collection of network_route providers, and back.
++  #
++  include PuppetX::FileMapper
++
++  desc "Wind River routes style provider"
++
++  confine    :osfamily => :wrlinux
++
++  # $ dpkg -S /etc/network/if-up.d/20static-routes
++  # ifupdown-extra: /etc/network/if-up.d/20static-routes
++  confine    :exists   => '/etc/network/if-up.d/20static-routes'
++
++  defaultfor :osfamily => :wrlinux
++
++  has_feature :provider_options
++
++  def select_file
++    '/etc/network/routes'
++  end
++
++  def self.target_files
++    ['/etc/network/routes']
++  end
++
++  class MalformedRoutesError < Puppet::Error
++    def initialize(msg = nil)
++      msg = 'Malformed wrlinux routes file; cannot instantiate network_route resources' if msg.nil?
++      super
++    end
++  end
++
++  def self.raise_malformed
++    @failed = true
++    raise MalformedRoutesError
++  end
++
++  def self.parse_file(filename, contents)
++    # Build out an empty hash for new routes for storing their configs.
++    route_hash = Hash.new do |hash, key|
++      hash[key] = {}
++      hash[key][:name] = key
++      hash[key]
++    end
++
++    lines = contents.split("\n")
++    lines.each do |line|
++      # Strip off any trailing comments
++      line.sub!(/#.*$/, '')
++
++      if line =~ /^\s*#|^\s*$/
++        # Ignore comments and blank lines
++        next
++      end
++
++      route = line.split(' ', 5)
++
++      if route.length < 4
++        raise_malformed
++      end
++
++      # use the CIDR version of the target as :name
++      cidr_target = "#{route[0]}/#{IPAddr.new(route[1]).to_i.to_s(2).count('1')}"
++
++      route_hash[cidr_target][:network] = route[0]
++      route_hash[cidr_target][:netmask] = route[1]
++      route_hash[cidr_target][:gateway] = route[2]
++      route_hash[cidr_target][:interface] = route[3]
++      route_hash[cidr_target][:options] = route[4] if route[4]
++    end
++
++    route_hash.values
++  end
++
++  # Generate an array of sections
++  def self.format_file(filename, providers)
++    contents = []
++    contents << header
++
++    # Build routes
++    providers.sort_by(&:name).each do |provider|
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'network'." if provider.network.nil?
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'netmask'." if provider.netmask.nil?
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'gateway'." if provider.gateway.nil?
++      raise Puppet::Error, "#{provider.name} is missing the required parameter 'interface'." if provider.interface.nil?
++
++      contents << "#{provider.network} #{provider.netmask} #{provider.gateway} #{provider.interface} #{provider.options}\n"
++    end
++
++    contents.join
++  end
++
++  def self.header
++    str = <<-HEADER
++# HEADER: This file is is being managed by puppet. Changes to
++# HEADER: routes that are not being managed by puppet will persist;
++# HEADER: however changes to routes that are being managed by puppet will
++# HEADER: be overwritten. In addition, file order is NOT guaranteed.
++# HEADER: Last generated at: #{Time.now}
++HEADER
++    str
++  end
++end
+diff --git a/packstack/puppet/modules/network/lib/puppet/type/network_config.rb b/packstack/puppet/modules/network/lib/puppet/type/network_config.rb
+index a50a0df..1297ad7 100644
+--- a/packstack/puppet/modules/network/lib/puppet/type/network_config.rb
++++ b/packstack/puppet/modules/network/lib/puppet/type/network_config.rb
+@@ -95,6 +95,10 @@ Puppet::Type.newtype(:network_config) do
+     defaultto :raw
+   end
++  newproperty(:gateway) do
++    desc 'The IP address of the network router or gateway device (if any)'
++  end
++
+   # `:options` provides an arbitrary passthrough for provider properties, so
+   # that provider specific behavior doesn't clutter up the main type but still
+   # allows for more powerful actions to be taken.
+diff --git a/packstack/puppet/modules/network/manifests/bond.pp b/packstack/puppet/modules/network/manifests/bond.pp
+index d6d98ce..26ca104 100644
+--- a/packstack/puppet/modules/network/manifests/bond.pp
++++ b/packstack/puppet/modules/network/manifests/bond.pp
+@@ -188,6 +188,28 @@ define network::bond(
+         require          => Kmod::Alias[$name],
+       }
+     }
++    WRLinux: {
++      network::bond::wrlinux { $name:
++        slaves    => $slaves,
++        ensure    => $ensure,
++        ipaddress => $ipaddress,
++        netmask   => $netmask,
++        method    => $method,
++        family    => $family,
++        onboot    => $onboot,
++
++        mode             => $mode,
++        miimon           => $miimon,
++        downdelay        => $downdelay,
++        updelay          => $updelay,
++        lacp_rate        => $lacp_rate,
++        primary          => $primary,
++        primary_reselect => $primary_reselect,
++        xmit_hash_policy => $xmit_hash_policy,
++
++        require   => Kmod::Alias[$name],
++      }
++    }
+     RedHat: {
+       network::bond::redhat { $name:
+         ensure           => $ensure,
+diff --git a/packstack/puppet/modules/network/manifests/bond/setup.pp b/packstack/puppet/modules/network/manifests/bond/setup.pp
+index abe1252..0a30767 100644
+--- a/packstack/puppet/modules/network/manifests/bond/setup.pp
++++ b/packstack/puppet/modules/network/manifests/bond/setup.pp
+@@ -10,5 +10,7 @@ class network::bond::setup {
+         ensure => present,
+       }
+     }
++    WRLinux: {
++    }
+   }
+ }
+diff --git a/packstack/puppet/modules/network/manifests/bond/wrlinux.pp b/packstack/puppet/modules/network/manifests/bond/wrlinux.pp
+new file mode 100644
+index 0000000..e240341
+--- /dev/null
++++ b/packstack/puppet/modules/network/manifests/bond/wrlinux.pp
+@@ -0,0 +1,56 @@
++# = Define: network::bond::wrlinux
++#
++# Instantiate bonded interfaces on Debian based systems.
++#
++# == See also
++#
++# * Debian Network Bonding http://wiki.wrlinux.org/Bonding
++define network::bond::wrlinux(
++  $slaves,
++  $ensure    = present,
++  $ipaddress = undef,
++  $netmask   = undef,
++  $method    = undef,
++  $family    = undef,
++  $onboot    = undef,
++
++  $mode             = undef,
++  $miimon           = undef,
++  $downdelay        = undef,
++  $updelay          = undef,
++  $lacp_rate        = undef,
++  $primary          = undef,
++  $primary_reselect = undef,
++  $xmit_hash_policy = undef,
++) {
++
++  $raw = {
++    'bond-slaves'    => join($slaves, ' '),
++    'bond-mode'      => $mode,
++    'bond-miimon'    => $miimon,
++    'bond-downdelay' => $downdelay,
++    'bond-updelay'   => $updelay,
++    'bond-lacp-rate' => $lacp_rate,
++    'bond-primary'   => $primary,
++    'bond-primary-reselect' => $primary_reselect,
++    'bond-xmit-hash-policy' => $xmit_hash_policy,
++  }
++
++  $opts = compact_hash($raw)
++
++  network_config { $name:
++    ensure    => $ensure,
++    ipaddress => $ipaddress,
++    netmask   => $netmask,
++    family    => $family,
++    method    => $method,
++    onboot    => $onboot,
++    options   => $opts,
++  }
++
++  network_config { $slaves:
++    ensure      => absent,
++    reconfigure => true,
++    before      => Network_config[$name],
++  }
++}
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-config-poky-provider.patch b/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-config-poky-provider.patch
new file mode 100644 (file)
index 0000000..f5387b8
--- /dev/null
@@ -0,0 +1,326 @@
+diff -ruN a/lib/puppet/provider/network_config/interfaces.rb b/lib/puppet/provider/network_config/interfaces.rb
+--- a/lib/puppet/provider/network_config/interfaces.rb 2020-04-14 15:30:26.488316830 +0800
++++ b/lib/puppet/provider/network_config/interfaces.rb 2020-04-14 15:30:11.388316418 +0800
+@@ -14,7 +14,7 @@
+   desc "Debian interfaces style provider"
+   confine    :osfamily => :debian
+-  defaultfor :osfamily => :debian
++  defaultfor :operatingsystem => [:debian, :ubuntu]
+   has_feature :provider_options
+   has_feature :hotpluggable
+diff -ruN a/lib/puppet/provider/network_config/poky-stx.rb b/lib/puppet/provider/network_config/poky-stx.rb
+--- a/lib/puppet/provider/network_config/poky-stx.rb   1970-01-01 08:00:00.000000000 +0800
++++ b/lib/puppet/provider/network_config/poky-stx.rb   2020-04-15 15:40:31.266687901 +0800
+@@ -0,0 +1,310 @@
++require 'puppetx/filemapper'
++
++Puppet::Type.type(:network_config).provide(:pokystx) do
++  # Wind River Linux network_config interfaces provider.
++  #
++  # This provider uses the filemapper mixin to map the interfaces file to a
++  # collection of network_config providers, and back.
++  #
++  include PuppetX::FileMapper
++
++  desc "Poky starlingX interfaces style provider"
++
++  defaultfor :operatingsystem => :'poky-stx'
++
++  has_feature :provider_options
++  has_feature :hotpluggable
++
++  def select_file
++    '/var/run/interfaces.puppet'
++  end
++
++  def self.target_files
++    ['/var/run/interfaces.puppet']
++  end
++
++  class MalformedInterfacesError < Puppet::Error
++    def initialize(msg = nil)
++      msg = 'Malformed poky-stx interfaces file; cannot instantiate network_config resources' if msg.nil?
++      super
++    end
++  end
++
++  def self.raise_malformed
++    @failed = true
++    raise MalformedInterfacesError
++  end
++
++  class Instance
++
++    attr_reader :name
++
++    # Booleans
++    attr_accessor :onboot, :hotplug
++
++
++    # These fields are going to get rearranged to resolve issue 16
++    # https://github.com/adrienthebo/puppet-network/issues/16
++    attr_accessor :ipaddress, :netmask, :family, :method, :mtu
++
++    # Options hash
++    attr_reader :options
++
++    def initialize(name)
++      @name = name
++
++      @options = Hash.new {|hash, key| hash[key] = []}
++    end
++
++    def to_hash
++      h = {
++        :name      => @name,
++        :onboot    => @onboot,
++        :hotplug   => @hotplug,
++        :ipaddress => @ipaddress,
++        :netmask   => @netmask,
++        :family    => @family,
++        :method    => @method,
++        :mtu       => @mtu,
++        :options   => squeeze_options
++      }
++
++      h.inject({}) do |hash, (key, val)|
++        hash[key] = val unless val.nil?
++        hash
++      end
++    end
++
++    def squeeze_options
++      @options.inject({}) do |hash, (key, value)|
++        if value.size <= 1
++          hash[key] = value.pop
++        else
++          hash[key] = value
++        end
++
++      hash
++      end
++    end
++
++    class << self
++
++      def reset!
++        @interfaces = {}
++      end
++
++      # @return [Array<Instance>] All class instances
++      def all_instances
++        @interfaces ||= {}
++        @interfaces
++      end
++
++      def [](name)
++        if all_instances[name]
++          obj = all_instances[name]
++        else
++          obj = self.new(name)
++          all_instances[name] = obj
++        end
++
++        obj
++      end
++    end
++  end
++
++  def self.parse_file(filename, contents)
++    # Debian has a very irregular format for the interfaces file. The
++    # parse_file method is somewhat derived from the ifup executable
++    # supplied in the debian ifupdown package. The source can be found at
++    # http://packages.debian.org/squeeze/ifupdown
++
++
++    # The debian interfaces implementation requires global state while parsing
++    # the file; namely, the stanza being parsed as well as the interface being
++    # parsed.
++    status = :none
++    current_interface = nil
++
++    lines = contents.split("\n")
++    # TODO Join lines that end with a backslash
++
++    # Iterate over all lines and determine what attributes they create
++    lines.each do |line|
++
++      # Strip off any trailing comments
++      line.sub!(/#.*$/, '')
++
++      case line
++      when /^\s*#|^\s*$/
++        # Ignore comments and blank lines
++        next
++
++      when /^auto|^allow-auto/
++        # Parse out any auto sections
++        interfaces = line.split(' ')
++        interfaces.delete_at(0)
++
++        interfaces.each do |name|
++          Instance[name].onboot = true
++        end
++
++        # Reset the current parse state
++        current_interface = nil
++
++      when /^allow-hotplug/
++        # parse out allow-hotplug lines
++
++        interfaces = line.split(' ')
++        interfaces.delete_at(0)
++
++        interfaces.each do |name|
++          Instance[name].hotplug = true
++        end
++
++        # Don't reset Reset the current parse state
++      when /^iface/
++
++        # Format of the iface line:
++        #
++        # iface <iface> <family> <method>
++        # zero or more options for <iface>
++
++        if match = line.match(/^iface\s+(\S+)\s+(\S+)\s+(\S+)/)
++          name   = match[1]
++          family = match[2]
++          method = match[3]
++
++          # If an iface block for this interface has been seen, the file is
++          # malformed.
++          raise_malformed if Instance[name] and Instance[name].family
++
++          status = :iface
++          current_interface = name
++
++          # This is done automatically
++          #Instance[name].name   = name
++          Instance[name].family = family
++          Instance[name].method = method
++
++        else
++          # If we match on a string with a leading iface, but it isn't in the
++          # expected format, malformed blar blar
++          raise_malformed
++        end
++
++      when /^mapping/
++
++        # XXX dox
++        raise Puppet::DevError, "Debian interfaces mapping parsing not implemented."
++        status = :mapping
++
++      else
++        # We're currently examining a line that is within a mapping or iface
++        # stanza, so we need to validate the line and add the options it
++        # specifies to the known state of the interface.
++
++        case status
++        when :iface
++          if match = line.match(/(\S+)\s+(\S.*)/)
++            # If we're parsing an iface stanza, then we should receive a set of
++            # lines that contain two or more space delimited strings. Append
++            # them as options to the iface in an array.
++
++            key = match[1]
++            val = match[2]
++
++            name = current_interface
++
++            case key
++            when 'address'; Instance[name].ipaddress    = val
++            when 'netmask'; Instance[name].netmask      = val
++            when 'mtu';     Instance[name].mtu          = val
++            else            Instance[name].options[key] << val
++            end
++          else
++            raise_malformed
++          end
++        when :mapping
++          raise Puppet::DevError, "Debian interfaces mapping parsing not implemented."
++        when :none
++          raise_malformed
++        end
++      end
++    end
++
++    Instance.all_instances.map {|name, instance| instance.to_hash }
++  end
++
++  # Generate an array of sections
++  def self.format_file(filename, providers)
++    contents = []
++    contents << header
++
++    # Add onboot interfaces
++    if (auto_interfaces = providers.select {|provider| provider.onboot == true })
++      stanza = []
++      stanza << "auto " + auto_interfaces.map(&:name).sort.join(" ")
++      contents << stanza.join("\n")
++    end
++
++    # Build iface stanzas
++    providers.sort_by(&:name).each do |provider|
++      # TODO add validation method
++      raise Puppet::Error, "#{provider.name} does not have a method." if provider.method.nil?
++      raise Puppet::Error, "#{provider.name} does not have a family." if provider.family.nil?
++
++      stanza = []
++      if provider.method == :static and (not provider.ipaddress or provider.ipaddress == :absent)
++        stanza << %{iface #{provider.name} #{provider.family} manual}
++      else
++        stanza << %{iface #{provider.name} #{provider.family} #{provider.method}}
++      end
++
++      [
++        [:ipaddress, 'address'],
++        [:netmask,   'netmask'],
++        [:gateway,   'gateway'],
++        [:mtu,       'mtu'],
++      ].each do |(property, section)|
++        stanza << "    #{section} #{provider.send property}" if provider.send(property) and provider.send(property) != :absent
++      end
++
++      if provider.options and provider.options != :absent
++        provider.options.each_pair do |key_f, val|
++          key = key_f.gsub('_', '-')
++          if ['pre-up', 'up', 'post-up', 'down', 'pre-down', 'post-down'].include? key
++            if val.is_a? String
++              stanza << "    #{key} #{val}"
++            elsif val.is_a? Array
++              val.each { |entry| stanza << "    #{key} #{entry}" }
++            else
++              raise Puppet::Error, "#{self} options key #{key} expects a String or Array, got #{val.class}"
++            end
++          end
++          if key == 'SCOPE'
++            if val.is_a? String
++              stanza << "    #{val}"
++            else
++              raise Puppet::Error, "#{self} options key #{key} expects a String, got #{val.class}"
++            end
++          end
++        end
++      end
++
++      contents << stanza.join("\n")
++    end
++
++    contents.map {|line| line + "\n\n"}.join
++  end
++
++  def self.header
++    str = <<-HEADER
++# HEADER: This file is is being managed by puppet. Changes to
++# HEADER: interfaces that are not being managed by puppet will persist;
++# HEADER: however changes to interfaces that are being managed by puppet will
++# HEADER: be overwritten. In addition, file order is NOT guaranteed.
++# HEADER: Last generated at: #{Time.now}
++HEADER
++    str
++  end
++end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-support-ipv6.patch b/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-support-ipv6.patch
new file mode 100644 (file)
index 0000000..b6d2f3c
--- /dev/null
@@ -0,0 +1,46 @@
+Index: packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+--- a/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
++++ b/packstack/puppet/modules/network/lib/puppet/provider/network_config/redhat.rb
+@@ -224,6 +224,11 @@
+     pairs = self.unmunge props
++    ip_version = provider.send(:family)
++    if (ip_version.to_s == "inet6")
++        pairs = self.ipv6_fixup pairs
++    end
++
+     content = pairs.inject('') do |str, (key, val)|
+       str << %{#{key}=#{val}\n}
+     end
+@@ -259,6 +264,30 @@
+     pairs
+   end
++  def self.ipv6_fixup(pairs)
++    pairs['IPV6INIT'] = 'yes'
++
++    if (pairs.include? 'NETMASK' and pairs.include? 'IPADDR')
++        pairs['IPV6ADDR'] =  pairs['IPADDR'].to_s + "/" + pairs['NETMASK'].to_s
++        pairs.delete('NETMASK')
++        pairs.delete('IPADDR')
++    elsif (pairs.include? 'IPADDR')
++        pairs['IPV6ADDR'] = pairs['IPADDR'].to_s
++        pairs.delete('IPADDR')
++    end
++
++    if (pairs.include? 'GATEWAY')
++        pairs['IPV6_DEFAULTGW'] = pairs['GATEWAY']
++        pairs.delete('GATEWAY')
++    end
++
++    if (pairs['BOOTPROTO'].to_s == 'dhcp')
++        pairs['DHCPV6C'] = 'yes'
++        pairs['DHCLIENTARGS'] = '-1'
++    end
++    pairs
++  end
++
+   def self.header
+     str = <<-HEADER
+ # HEADER: This file is is being managed by puppet. Changes to
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-network/puppet-network-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..06920a1
--- /dev/null
@@ -0,0 +1,155 @@
+diff --git a/manifests/bond/poky-stx.pp b/manifests/bond/poky-stx.pp
+new file mode 100644
+index 0000000..c6af9c9
+--- /dev/null
++++ b/manifests/bond/poky-stx.pp
+@@ -0,0 +1,56 @@
++# = Define: network::bond::poky-stx
++#
++# Instantiate bonded interfaces on Debian based systems.
++#
++# == See also
++#
++# * Debian Network Bonding http://wiki.wrlinux.org/Bonding
++define network::bond::poky-stx(
++  $slaves,
++  $ensure    = present,
++  $ipaddress = undef,
++  $netmask   = undef,
++  $method    = undef,
++  $family    = undef,
++  $onboot    = undef,
++
++  $mode             = undef,
++  $miimon           = undef,
++  $downdelay        = undef,
++  $updelay          = undef,
++  $lacp_rate        = undef,
++  $primary          = undef,
++  $primary_reselect = undef,
++  $xmit_hash_policy = undef,
++) {
++
++  $raw = {
++    'bond-slaves'    => join($slaves, ' '),
++    'bond-mode'      => $mode,
++    'bond-miimon'    => $miimon,
++    'bond-downdelay' => $downdelay,
++    'bond-updelay'   => $updelay,
++    'bond-lacp-rate' => $lacp_rate,
++    'bond-primary'   => $primary,
++    'bond-primary-reselect' => $primary_reselect,
++    'bond-xmit-hash-policy' => $xmit_hash_policy,
++  }
++
++  $opts = compact_hash($raw)
++
++  network_config { $name:
++    ensure    => $ensure,
++    ipaddress => $ipaddress,
++    netmask   => $netmask,
++    family    => $family,
++    method    => $method,
++    onboot    => $onboot,
++    options   => $opts,
++  }
++
++  network_config { $slaves:
++    ensure      => absent,
++    reconfigure => true,
++    before      => Network_config[$name],
++  }
++}
+diff --git a/manifests/bond/setup.pp b/manifests/bond/setup.pp
+index 0a30767..780722c 100644
+--- a/manifests/bond/setup.pp
++++ b/manifests/bond/setup.pp
+@@ -6,8 +6,10 @@ class network::bond::setup {
+       # is available by default
+     }
+     Debian: {
+-      package { 'ifenslave-2.6':
+-        ensure => present,
++      if $::operatingsystem != 'poky-stx' {
++        package { 'ifenslave-2.6':
++          ensure => present,
++        }
+       }
+     }
+     WRLinux: {
+diff --git a/manifests/bond.pp b/manifests/bond.pp
+index 26ca104..5f8e254 100644
+--- a/manifests/bond.pp
++++ b/manifests/bond.pp
+@@ -164,28 +164,51 @@ define network::bond(
+   case $::osfamily {
+     Debian: {
+-      network::bond::debian { $name:
+-        ensure           => $ensure,
+-        slaves           => $slaves,
+-        ipaddress        => $ipaddress,
+-        netmask          => $netmask,
+-        method           => $method,
+-        family           => $family,
+-        onboot           => $onboot,
+-        hotplug          => $hotplug,
+-        options          => $options,
+-        slave_options    => $slave_options,
++      if $::operatingsystem == 'poky-stx' {
++        network::bond::poky-stx { $name:
++          slaves    => $slaves,
++          ensure    => $ensure,
++          ipaddress => $ipaddress,
++          netmask   => $netmask,
++          method    => $method,
++          family    => $family,
++          onboot    => $onboot,
+-        mode             => $mode,
+-        miimon           => $miimon,
+-        downdelay        => $downdelay,
+-        updelay          => $updelay,
+-        lacp_rate        => $lacp_rate,
+-        primary          => $primary,
+-        primary_reselect => $primary_reselect,
+-        xmit_hash_policy => $xmit_hash_policy,
++          mode             => $mode,
++          miimon           => $miimon,
++          downdelay        => $downdelay,
++          updelay          => $updelay,
++          lacp_rate        => $lacp_rate,
++          primary          => $primary,
++          primary_reselect => $primary_reselect,
++          xmit_hash_policy => $xmit_hash_policy,
+-        require          => Kmod::Alias[$name],
++          require   => Kmod::Alias[$name],
++        }
++      } else {
++        network::bond::debian { $name:
++          ensure           => $ensure,
++          slaves           => $slaves,
++          ipaddress        => $ipaddress,
++          netmask          => $netmask,
++          method           => $method,
++          family           => $family,
++          onboot           => $onboot,
++          hotplug          => $hotplug,
++          options          => $options,
++          slave_options    => $slave_options,
++
++          mode             => $mode,
++          miimon           => $miimon,
++          downdelay        => $downdelay,
++          updelay          => $updelay,
++          lacp_rate        => $lacp_rate,
++          primary          => $primary,
++          primary_reselect => $primary_reselect,
++          xmit_hash_policy => $xmit_hash_policy,
++
++          require          => Kmod::Alias[$name],
++        }
+       }
+     }
+     WRLinux: {
diff --git a/meta-stx/recipes-support/puppet/files/puppet-network/route-options-support.patch b/meta-stx/recipes-support/puppet/files/puppet-network/route-options-support.patch
new file mode 100644 (file)
index 0000000..37bf138
--- /dev/null
@@ -0,0 +1,28 @@
+From c26a70ab9d5839f90148c578edc5d15133355194 Mon Sep 17 00:00:00 2001
+From: Kevin Smith <kevin.smith@windriver.com>
+Date: Wed, 25 Oct 2017 07:37:52 -0500
+Subject: [PATCH 1/1] route options support
+
+---
+ packstack/puppet/modules/network/lib/puppet/type/network_route.rb | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+index fd52c58..13ca06a 100644
+--- a/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
++++ b/packstack/puppet/modules/network/lib/puppet/type/network_route.rb
+@@ -3,6 +3,11 @@ require 'ipaddr'
+ Puppet::Type.newtype(:network_route) do
+   @doc = "Manage non-volatile route configuration information"
++  feature :provider_options, <<-EOD
++    The provider can accept an arbitrary options string. The semantics of
++    these options will depend on the provider.
++  EOD
++
+   ensurable
+   newparam(:name) do
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-nslcd/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-nslcd/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..1d92568
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppet-nslcd.gemspec     2019-10-30 21:26:00.013491029 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-nslcd'
++  s.version     = '0.0.1'
++  s.date        = '2015-01-26'
++  s.summary     = "Manages the nslcd daemon"
++  s.description = s.summary
++  s.authors     = ["Johan Lyheden"]
++  s.email       = ''
++  s.files       = %w(Modulefile README.markdown ) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/adrienthebo/puppet-filemapper'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-nslcd/metadata.json.patch b/meta-stx/recipes-support/puppet/files/puppet-nslcd/metadata.json.patch
new file mode 100644 (file)
index 0000000..2813ba0
--- /dev/null
@@ -0,0 +1,16 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/metadata.json    2019-10-31 14:52:09.693312689 -0700
+@@ -0,0 +1,13 @@
++{
++  "name": "jlyheden-nslcd",
++  "version": "0.0.1",
++  "author": "John Lyheden",
++  "summary": "Manages the nslcd daemon.",
++  "license": "Apache-2.0",
++  "source": "git://github.com/jlyheden/puppet-nslcd",
++  "project_page": "https://github.com/jlyheden/puppet-nslcd",
++  "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
++  "dependencies": [
++      {"name":"puppetlabs/stdlib","version_requirement":">= 2.6.0 < 5.0.0"}
++  ]
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppet-nslcd/puppet-nslcd-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-nslcd/puppet-nslcd-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..61d3fdc
--- /dev/null
@@ -0,0 +1,13 @@
+diff --git a/manifests/params.pp b/manifests/params.pp
+index d050723..0f167e4 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -29,7 +29,7 @@ class nslcd::params {
+   # To add support for other distributions simply add
+   # it here
+   case $::operatingsystem {
+-    'Ubuntu','Debian': {
++    'Ubuntu','Debian', 'poky-stx': {
+       $user = 'nslcd'
+       $group = 'nslcd'
+       $package = 'nslcd'
diff --git a/meta-stx/recipes-support/puppet/files/puppet-nssdb/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-nssdb/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..e909d98
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-nssdb.gemspec     2019-10-28 13:06:00.202131209 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-nssdb'
++  s.version     = '1.0.1'
++  s.date        = '2017-05-17'
++  s.summary     = "NSS database Puppet Module"
++  s.description = s.summary
++  s.authors     = ["rcritten"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/rcritten/puppet-nssdb'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-nssdb/metadata.json.patch b/meta-stx/recipes-support/puppet/files/puppet-nssdb/metadata.json.patch
new file mode 100644 (file)
index 0000000..4e6e27f
--- /dev/null
@@ -0,0 +1,16 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/metadata.json    2019-10-31 15:37:09.270028865 -0700
+@@ -0,0 +1,13 @@
++{
++  "name": "rcritten/nssdb",
++  "version": "1.0.1",
++  "author": "Rob Crittenden",
++  "summary": "NSS database Puppet Module.",
++  "license": "Apache-2.0",
++  "source": "https://github.com/netmanagers/puppet-dnsmasq",
++  "project_page": "https://github.com/rcritten/puppet-nssdb",
++  "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
++  "dependencies": [
++      {"name":"puppetlabs/stdlib","version_requirement":">= 0.0.1"}
++  ]
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppet-openstacklib/0001-Roll-up-TIS-patches.patch b/meta-stx/recipes-support/puppet/files/puppet-openstacklib/0001-Roll-up-TIS-patches.patch
new file mode 100644 (file)
index 0000000..9f0328c
--- /dev/null
@@ -0,0 +1,85 @@
+From be4aad7589bc63f90b98b7d5692701a8368f8b04 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Mon, 11 Mar 2019 07:55:54 -0500
+Subject: [PATCH 1/3] Roll up TIS patches
+
+---
+ lib/puppet/provider/openstack.rb             |  1 +
+ lib/puppet/provider/openstack/auth.rb        | 16 ++++++++++++++--
+ lib/puppet/provider/openstack/credentials.rb |  2 --
+ 3 files changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/lib/puppet/provider/openstack.rb b/lib/puppet/provider/openstack.rb
+index 0240ce4..8b8e564 100644
+--- a/lib/puppet/provider/openstack.rb
++++ b/lib/puppet/provider/openstack.rb
+@@ -45,6 +45,7 @@ class Puppet::Provider::Openstack < Puppet::Provider
+     begin
+       action = args[1]
+       Timeout.timeout(command_timeout(action)) do
++        args.unshift('--os-interface', 'internal')
+         openstack_command *args
+       end
+     rescue Timeout::Error
+diff --git a/lib/puppet/provider/openstack/auth.rb b/lib/puppet/provider/openstack/auth.rb
+index 743071d..4026aec 100644
+--- a/lib/puppet/provider/openstack/auth.rb
++++ b/lib/puppet/provider/openstack/auth.rb
+@@ -1,9 +1,19 @@
+ #require 'puppet/provider/openstack/credentials'
+ require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/openstack/credentials')
++require 'hiera_puppet'
+ module Puppet::Provider::Openstack::Auth
+-  RCFILENAME = "#{ENV['HOME']}/openrc"
++  RCFILENAME = "/etc/platform/openrc"
++
++  def lookup_hiera(key)
++    HieraPuppet.lookup(key, :undef, self, nil, :priority)
++  end
++
++  def get_admin_password
++   value=lookup_hiera('keystone::admin_password')
++   return value
++  end
+   def get_os_vars_from_env
+     env = {}
+@@ -17,7 +27,7 @@ module Puppet::Provider::Openstack::Auth
+     unless rcfile.nil?
+       File.open(rcfile).readlines.delete_if{|l| l=~ /^#|^$/ }.each do |line|
+         # we only care about the OS_ vars from the file LP#1699950
+-        if line =~ /OS_/
++        if line =~ /OS_/ and line.include?('=')
+           key, value = line.split('=')
+           key = key.split(' ').last
+           value = value.chomp.gsub(/'/, '')
+@@ -38,6 +48,8 @@ module Puppet::Provider::Openstack::Auth
+     unless @credentials.set?
+       @credentials.unset
+       set_credentials(@credentials, get_os_vars_from_rcfile(rc_filename))
++      # retrieves the password from hiera data since keyring is not yet available
++      @credentials.password = get_admin_password
+     end
+     unless @credentials.set?
+       raise(Puppet::Error::OpenstackAuthInputError, 'Insufficient credentials to authenticate')
+diff --git a/lib/puppet/provider/openstack/credentials.rb b/lib/puppet/provider/openstack/credentials.rb
+index 2765b2b..9c831e3 100644
+--- a/lib/puppet/provider/openstack/credentials.rb
++++ b/lib/puppet/provider/openstack/credentials.rb
+@@ -70,11 +70,9 @@ class Puppet::Provider::Openstack::CredentialsV3 < Puppet::Provider::Openstack::
+     :domain_id,
+     :domain_name,
+     :key,
+-    :project_domain_id,
+     :project_domain_name,
+     :project_id,
+     :trust_id,
+-    :user_domain_id,
+     :user_domain_name,
+     :user_id
+   ]
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-openstacklib/0002-puppet-openstacklib-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-openstacklib/0002-puppet-openstacklib-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..3854aa1
--- /dev/null
@@ -0,0 +1,13 @@
+diff --git a/lib/facter/os_package_type.rb b/lib/facter/os_package_type.rb
+index 134509e..adc39d9 100644
+--- a/lib/facter/os_package_type.rb
++++ b/lib/facter/os_package_type.rb
+@@ -24,6 +24,8 @@ Facter.add('os_package_type') do
+     when 'Debian'
+       if Facter.value(:operatingsystem) == 'Debian' then
+         'debian'
++      elsif Facter.value(:operatingsystem) == 'poky-stx' then
++        'poky'
+       else
+         'ubuntu'
+       end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-openstacklib/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-openstacklib/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..97ba806
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-openstacklib.gemspec      2019-10-28 19:41:18.343027332 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-openstacklib'
++  s.version     = '11.3.0'
++  s.date        = '2017-08-21'
++  s.summary     = "Puppet OpenStack Libraries"
++  s.description = s.summary
++  s.authors     = ["Puppet Labs and OpenStack Contributors"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://launchpad.net/puppet-openstacklib'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-oslo/0001-Remove-log_dir-from-conf-files.patch b/meta-stx/recipes-support/puppet/files/puppet-oslo/0001-Remove-log_dir-from-conf-files.patch
new file mode 100644 (file)
index 0000000..3443b2e
--- /dev/null
@@ -0,0 +1,36 @@
+From b8dee2da527c3d3010e2b5b4e49f87f430afa826 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Fri, 3 Nov 2017 17:48:38 -0500
+Subject: [PATCH] Remove log_dir from conf files
+
+---
+ manifests/log.pp | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/manifests/log.pp b/manifests/log.pp
+index 8778d46..771a674 100644
+--- a/manifests/log.pp
++++ b/manifests/log.pp
+@@ -27,9 +27,7 @@
+ #   Defaults to $::os_service_default
+ #
+ # [*log_dir*]
+-#   (Optional) Directory where logs should be stored.
+-#   If set to $::os_service_default, it will not log to any directory.
+-#   Defaults to $::os_service_default
++#   WRS: Remove log_dir to ensure services log via syslog
+ #
+ # [*watch_log_file*]
+ #   (Optional) Uses logging handler designed to watch file system (boolean value).
+@@ -136,7 +134,7 @@ define oslo::log(
+     'DEFAULT/log_config_append'             => { value => $log_config_append },
+     'DEFAULT/log_date_format'               => { value => $log_date_format },
+     'DEFAULT/log_file'                      => { value => $log_file },
+-    'DEFAULT/log_dir'                       => { value => $log_dir },
++    'DEFAULT/log_dir'                       => { ensure => absent },
+     'DEFAULT/watch_log_file'                => { value => $watch_log_file },
+     'DEFAULT/use_syslog'                    => { value => $use_syslog },
+     'DEFAULT/syslog_log_facility'           => { value => $syslog_log_facility },
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-oslo/0002-add-psycopg2-drivername-to-postgresql-settings.patch b/meta-stx/recipes-support/puppet/files/puppet-oslo/0002-add-psycopg2-drivername-to-postgresql-settings.patch
new file mode 100644 (file)
index 0000000..7340a06
--- /dev/null
@@ -0,0 +1,51 @@
+From 1823423c329675a72ea5b3497c31f8c407dcdf27 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Thu, 14 Dec 2017 11:08:30 -0600
+Subject: [PATCH] add psycopg2 drivername to postgresql settings
+
+---
+ manifests/db.pp | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/manifests/db.pp b/manifests/db.pp
+index 172c8ae..4b918bc 100644
+--- a/manifests/db.pp
++++ b/manifests/db.pp
+@@ -132,10 +132,13 @@ define oslo::db(
+   if !is_service_default($connection) {
+-    validate_re($connection,
+-      '^(sqlite|mysql(\+pymysql)?|postgresql|mongodb):\/\/(\S+:\S+@\S+\/\S+)?')
++    if $connection !~ '^(sqlite|mysql(\+pymysql)?|postgresql(\+psycopg2)?|mongodb):\/\/(\S+:\S+@\S+\/\S+)?' {
++      err{'invalid database connection parameter: $connection':}
++    }
++    # add psycopg2 drivername to postgresql if using driverless postgres setting
++    $real_connection = regsubst($connection,'^postgresql:','postgresql+psycopg2:')
+-    case $connection {
++    case $real_connection {
+       /^mysql(\+pymysql)?:\/\//: {
+         require '::mysql::bindings'
+         require '::mysql::bindings::python'
+@@ -145,7 +148,7 @@ define oslo::db(
+           $backend_package = false
+         }
+       }
+-      /^postgresql:\/\//: {
++      /^postgresql(\+psycopg2)?:\/\//: {
+         $backend_package = false
+         require '::postgresql::lib::python'
+       }
+@@ -178,7 +181,7 @@ to connect to the database.")
+   $database_options = {
+     'database/sqlite_synchronous'    => { value => $sqlite_synchronous },
+     'database/backend'               => { value => $backend },
+-    'database/connection'            => { value => $connection, secret => true },
++    'database/connection'            => { value => $real_connection, secret => true },
+     'database/slave_connection'      => { value => $slave_connection, secret => true },
+     'database/mysql_sql_mode'        => { value => $mysql_sql_mode },
+     'database/idle_timeout'          => { value => $idle_timeout },
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppet-oslo/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-oslo/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..6af8a15
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-oslo.gemspec      2019-10-28 08:59:29.609410185 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-oslo'
++  s.version     = '11.3.0'
++  s.date        = '2016-08-21'
++  s.summary     = "Puppet oslo module"
++  s.description = s.summary
++  s.authors     = ["OpenStack Contributors"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/openstack/puppet-oslo.git'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-puppi/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-puppi/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..30f88e5
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppet-puppi.gemspec     2019-10-30 21:37:32.685810531 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-puppi'
++  s.version     = '2.2.3'
++  s.date        = '2017-03-10'
++  s.summary     = "Installs and configures Puppi"
++  s.description = s.summary
++  s.authors     = ["Alessandro Franceschi"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/example42/puppi'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-puppi/puppet-puppi-adjust-path.patch b/meta-stx/recipes-support/puppet/files/puppet-puppi/puppet-puppi-adjust-path.patch
new file mode 100644 (file)
index 0000000..7a69c06
--- /dev/null
@@ -0,0 +1,26 @@
+diff --git a/manifests/init.pp b/manifests/init.pp
+index cebceec..cec4139 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -47,7 +47,7 @@ class puppi (
+   # Manage Version
+   $puppi_ensure = $puppi::version ? {
+     '1' => '/usr/sbin/puppi.one',
+-    '2' => '/usr/local/bin/puppi',
++    '2' => '/usr/bin/puppi',
+   }
+   file { 'puppi.link':
+diff --git a/manifests/mcollective/client.pp b/manifests/mcollective/client.pp
+index 24f2f63..5260139 100644
+--- a/manifests/mcollective/client.pp
++++ b/manifests/mcollective/client.pp
+@@ -23,7 +23,7 @@ class puppi::mcollective::client {
+   require puppi::mcollective::server
+ # OLD STYLE mc-puppi command
+-  file { '/usr/local/bin/mc-puppi':
++  file { '/usr/bin/mc-puppi':
+     ensure  => 'present',
+     mode    => '0755',
+     owner   => 'root',
diff --git a/meta-stx/recipes-support/puppet/files/puppet-puppi/puppet-puppi-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet-puppi/puppet-puppi-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..571f95b
--- /dev/null
@@ -0,0 +1,30 @@
+diff --git a/manifests/extras.pp b/manifests/extras.pp
+index 14f88a7..3d20115 100644
+--- a/manifests/extras.pp
++++ b/manifests/extras.pp
+@@ -107,6 +107,7 @@ class puppi::extras {
+   $packages_run = $::operatingsystem ? {
+     /(?i:RedHat|CentOS|Scientific|Amazon|Linux)/ => [ 'yum repolist' , 'rpm -qa' ] ,
+     /(?i:Debian|Ubuntu|Mint)/                    => [ 'apt-config dump' , 'apt-cache stats' , 'apt-key list' , 'dpkg -l' ],
++    /(?i:poky-stx)/                              => [ 'rpm -qa' ] ,
+     /(Solaris)/                                  => [ 'pkginfo' ],
+     /(Archlinux)/                                => [ 'pacman -Qet' ],
+     default                                      => [ 'echo' ],
+diff --git a/manifests/params.pp b/manifests/params.pp
+index e236fb4..9fc34a1 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -74,11 +74,13 @@ class puppi::params  {
+   $info_package_query = $::operatingsystem ? {
+     /(?i:RedHat|CentOS|Scientific|Amazon|Linux)/ => 'rpm -qi',
+     /(?i:Ubuntu|Debian|Mint)/          => 'dpkg -s',
++    /(?i:poky-stx)/                              => 'rpm -qi' ,
+     default                    => 'echo',
+   }
+   $info_package_list = $::operatingsystem ? {
+     /(?i:RedHat|CentOS|Scientific|Amazon|Linux)/ => 'rpm -ql',
+     /(?i:Ubuntu|Debian|Mint)/                    => 'dpkg -L',
++    /(?i:poky-stx)/                              => 'rpm -ql',
+     default                                      => 'echo',
+   }
+   $info_service_check = $::operatingsystem ? {
diff --git a/meta-stx/recipes-support/puppet/files/puppet-staging/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-staging/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..2af4ce1
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/nanliu-staging.gemspec   2019-10-31 09:06:27.338571776 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'nanliu-staging'
++  s.version     = '1.0.4'
++  s.date        = '2015-01-29'
++  s.summary     = "Compressed file staging and deployment"
++  s.description = s.summary
++  s.authors     = ["Nan Liu"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/nanliu/puppet-staging'
++  s.license     = 'Apache-2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-sysctl/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-sysctl/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..0016a17
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/duritong-sysctl.gemspec  2019-10-31 10:26:28.088576478 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'duritong-sysctl'
++  s.version     = '0.0.11'
++  s.date        = '2015-06-05'
++  s.summary     = "This modules allows you to configure sysctl."
++  s.description = s.summary
++  s.authors     = ["duritong"]
++  s.email       = ''
++  s.files       = %w(README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/duritong/puppet-sysctl'
++  s.license     = 'GPL-2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-vlan/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet-vlan/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..6868d3a
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppet-vlan.gemspec      2019-10-28 13:57:07.799844748 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppet-vlan'
++  s.version     = '0.1.0'
++  s.date        = '2012-12-04'
++  s.summary     = "very simple puppet module to install a vlan"
++  s.description = s.summary
++  s.authors     = ["derekhiggins"]
++  s.email       = ''
++  s.files       = %w(LICENSE) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/derekhiggins/puppet-vlan.git'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppet-vlan/metadata.json.patch b/meta-stx/recipes-support/puppet/files/puppet-vlan/metadata.json.patch
new file mode 100644 (file)
index 0000000..15fbcec
--- /dev/null
@@ -0,0 +1,14 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/metadata.json    2019-10-31 17:19:57.284059054 -0700
+@@ -0,0 +1,11 @@
++{
++  "name": "puppet-vlan",
++  "version": "0.1.0",
++  "author": "derekhiggins",
++  "summary": "very simple puppet module to install a vlan",
++  "license": "Apache-2.0",
++  "source": "https://github.com/derekhiggins/puppet-vlan.git",
++  "project_page": "https://github.com/derekhiggins/puppet-vlan.git",
++  "issues_url": "https://github.com/derekhiggins/puppet-vlan.git",
++  "dependencies": [ ]
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppet/4.8.2/add_puppet_gemspec.patch b/meta-stx/recipes-support/puppet/files/puppet/4.8.2/add_puppet_gemspec.patch
new file mode 100644 (file)
index 0000000..50c215c
--- /dev/null
@@ -0,0 +1,23 @@
+diff -urN puppet-3.7.3_ori/puppet.gemspec puppet-3.7.3/puppet.gemspec
+--- puppet-3.7.3_ori/puppet.gemspec    1970-01-01 08:00:00.000000000 +0800
++++ puppet-3.7.3/puppet.gemspec        2014-11-12 14:30:07.000000000 +0800
+@@ -0,0 +1,19 @@
++$:.unshift(File.dirname(__FILE__) + '/lib')
++require 'puppet/version'
++
++Gem::Specification.new do |s|
++  s.name = 'puppet'
++  s.version = Puppet::PUPPETVERSION
++  s.platform = Gem::Platform::RUBY
++  s.summary = 'Open source Puppet is a configuration management system'
++  s.description = s.summary
++  s.author = 'Yang Haibo'
++  s.email = 'b40869@freescale.com'
++  s.homepage = 'https://puppetlabs.com/puppet/puppet-open-source'
++  s.license = 'Apache 2.0'
++
++  s.bindir       = 'bin'
++  s.executables  = ['puppet']
++  s.require_path = 'lib'
++  s.files = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++end
diff --git a/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet-poky-dnf.patch b/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet-poky-dnf.patch
new file mode 100644 (file)
index 0000000..93fb391
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/lib/puppet/provider/package/dnf.rb b/lib/puppet/provider/package/dnf.rb
+index e144a1a..8497bec 100644
+--- a/lib/puppet/provider/package/dnf.rb
++++ b/lib/puppet/provider/package/dnf.rb
+@@ -29,6 +29,7 @@ Puppet::Type.type(:package).provide :dnf, :parent => :yum do
+   end
+   defaultfor :operatingsystem => :fedora, :operatingsystemmajrelease => ['22', '23', '24']
++  defaultfor :operatingsystem => :"poky-stx"
+   def self.update_command
+     # In DNF, update is deprecated for upgrade
diff --git a/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..a524d78
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/lib/puppet/provider/service/systemd.rb b/lib/puppet/provider/service/systemd.rb
+index a673550..faf7b7b 100644
+--- a/lib/puppet/provider/service/systemd.rb
++++ b/lib/puppet/provider/service/systemd.rb
+@@ -23,6 +23,7 @@ Puppet::Type.type(:service).provide :systemd, :parent => :base do
+   defaultfor :osfamily => :redhat, :operatingsystem => :fedora
+   defaultfor :osfamily => :suse
+   defaultfor :operatingsystem => :debian, :operatingsystemmajrelease => "8"
++  defaultfor :operatingsystem => :"poky-stx",   :operatingsystemmajrelease => ["2"]
+   defaultfor :operatingsystem => :ubuntu, :operatingsystemmajrelease => ["15.04","15.10","16.04","16.10"]
+   defaultfor :operatingsystem => :cumuluslinux, :operatingsystemmajrelease => ["3"]
diff --git a/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.conf b/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.conf
new file mode 100644 (file)
index 0000000..63d5e5a
--- /dev/null
@@ -0,0 +1,24 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+[main]
+logdir=/var/log/puppet
+vardir=/var/lib/puppet
+ssldir=/var/lib/puppet/ssl
+rundir=/var/run/puppet
+factpath=$vardir/lib/facter
+
+[agent]
+server=puppet-server
diff --git a/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.init b/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.init
new file mode 100644 (file)
index 0000000..64ab32e
--- /dev/null
@@ -0,0 +1,72 @@
+#!/bin/bash
+#
+# chkconfig: 35 20 80
+# description: The puppet agent connects to a puppet master, requests a
+#              catalog of resources, and configures the local system.
+#
+
+# Get function from functions library
+. /etc/init.d/functions
+
+PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+DAEMON=/usr/bin/puppet
+DAEMON_OPTS="agent --server master --no-daemonize"
+NAME="agent"
+DESC="puppet agent"
+PIDFILE="/var/run/${NAME}.pid"
+PID=`test -f $PIDFILE && cat $PIDFILE`
+RETVAL=0
+
+test -x $DAEMON || exit 0
+
+[ -r /etc/default/puppet ] && . /etc/default/puppet
+
+reload_puppet_agent() {
+    start-stop-daemon --stop --quiet --signal HUP --pidfile $PIDFILE
+}
+
+start_puppet_agent() {
+    start-stop-daemon --start --quiet --pidfile $PIDFILE \
+        --startas $DAEMON -- $NAME $DAEMON_OPTS
+}
+
+stop_puppet_agent() {
+    start-stop-daemon --stop --retry TERM/10/KILL/5 --quiet --oknodo --pidfile $PIDFILE
+}
+
+status_puppet_agent() {
+    status_of_proc -p "${PIDFILE}" "${DAEMON}" "${NAME}"
+}
+
+case "$1" in
+    start)
+        echo -n "Starting $DESC"
+        start_puppet_agent
+        log_end_msg $?
+        ;;
+    stop)
+        echo -n "Stopping $DESC"
+        stop_puppet_agent
+        log_end_msg $?
+        ;;
+    reload)
+        echo -n "Reloading $DESC"
+        reload_puppet_agent
+        log_end_msg $?
+        ;;
+    status)
+        status_puppet_agent
+        ;;
+    restart|force-reload)
+        echo -n "Restarting $DESC"
+        stop_puppet_agent
+        start_puppet_agent
+        log_end_msg $?
+        ;;
+*)
+        echo "Usage: $0 {start|stop|status|restart|force-reload|reload}" >&2
+        exit 1
+        ;;
+esac
+
+exit 0
diff --git a/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.service b/meta-stx/recipes-support/puppet/files/puppet/4.8.2/puppet.service
new file mode 100644 (file)
index 0000000..c49dacf
--- /dev/null
@@ -0,0 +1,10 @@
+[Unit]
+Description=Puppet agent
+After=network.target
+
+[Service]
+ExecStart=/usr/bin/puppet agent --server master --no-daemonize
+ExecReload=/bin/kill -HUP $MAINPID
+
+[Install]
+WantedBy=multi-user.target
diff --git a/meta-stx/recipes-support/puppet/files/puppet/puppet-poky-yum.patch b/meta-stx/recipes-support/puppet/files/puppet/puppet-poky-yum.patch
new file mode 100644 (file)
index 0000000..1198f36
--- /dev/null
@@ -0,0 +1,11 @@
+diff -ru a/lib/puppet/provider/package/yum.rb b/lib/puppet/provider/package/yum.rb
+--- a/lib/puppet/provider/package/yum.rb       2020-03-05 20:30:49.582624408 +0800
++++ b/lib/puppet/provider/package/yum.rb       2020-03-05 21:13:39.870694454 +0800
+@@ -24,6 +24,7 @@
+   end
+   defaultfor :osfamily => :redhat
++  defaultfor :operatingsystem => :"poky-stx"
+   def self.prefetch(packages)
+     raise Puppet::Error, _("The yum provider can only be used as root") if Process.euid != 0
diff --git a/meta-stx/recipes-support/puppet/files/puppet/puppet-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppet/puppet-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..c49116e
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/lib/puppet/provider/service/systemd.rb b/lib/puppet/provider/service/systemd.rb
+index 1c86404..a6363cb 100644
+--- a/lib/puppet/provider/service/systemd.rb
++++ b/lib/puppet/provider/service/systemd.rb
+@@ -25,6 +25,7 @@ Puppet::Type.type(:service).provide :systemd, :parent => :base do
+   defaultfor :osfamily => :coreos
+   defaultfor :operatingsystem => :amazon, :operatingsystemmajrelease => ["2"]
+   defaultfor :operatingsystem => :debian, :operatingsystemmajrelease => ["8", "stretch/sid", "9", "buster/sid"]
++  defaultfor :operatingsystem => :"poky-stx",   :operatingsystemmajrelease => ["2"]
+   defaultfor :operatingsystem => :ubuntu, :operatingsystemmajrelease => ["15.04","15.10","16.04","16.10"]
+   defaultfor :operatingsystem => :cumuluslinux, :operatingsystemmajrelease => ["3"]
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-apache/0001-maint-Fix-conditional-in-vhost-ssl-template.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-apache/0001-maint-Fix-conditional-in-vhost-ssl-template.patch
new file mode 100644 (file)
index 0000000..15ed203
--- /dev/null
@@ -0,0 +1,25 @@
+From aa11baed37639d37adfbb18f5431f597a6cedee8 Mon Sep 17 00:00:00 2001
+From: Bryan Jen <bryan.jen@gmail.com>
+Date: Thu, 15 Dec 2016 23:22:53 +0000
+Subject: [PATCH 1/2] (maint) Fix conditional in vhost ssl template
+
+---
+ templates/vhost/_ssl.erb | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/templates/vhost/_ssl.erb b/templates/vhost/_ssl.erb
+index 1ca7cbfb..c92f70c8 100644
+--- a/templates/vhost/_ssl.erb
++++ b/templates/vhost/_ssl.erb
+@@ -49,7 +49,7 @@
+   <%- if @ssl_stapling_timeout && scope.function_versioncmp([@apache_version, '2.4']) >= 0 -%>
+   SSLStaplingResponderTimeout <%= @ssl_stapling_timeout %>
+   <%- end -%>
+-  <%- if not @ssl_stapling_return_errors.nil? && scope.function_versioncmp([@apache_version, '2.4']) >= 0 -%>
++  <%- if (not @ssl_stapling_return_errors.nil? && scope.function_versioncmp([@apache_version, '2.4']) >= 0) -%>
+   SSLStaplingReturnResponderErrors <%= scope.function_bool2httpd([@ssl_stapling_return_errors]) %>
+   <%- end -%>
+ <% end -%>
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-apache/0002-maint-Fix-the-vhost-ssl-template-correctly-this-time.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-apache/0002-maint-Fix-the-vhost-ssl-template-correctly-this-time.patch
new file mode 100644 (file)
index 0000000..38bab57
--- /dev/null
@@ -0,0 +1,25 @@
+From 90e50eedd7c8ba0d1e1c17ba678525630bb7e023 Mon Sep 17 00:00:00 2001
+From: Bryan Jen <bryan.jen@gmail.com>
+Date: Fri, 16 Dec 2016 15:57:46 +0000
+Subject: [PATCH 2/2] (maint) Fix the vhost ssl template correctly this time
+
+---
+ templates/vhost/_ssl.erb | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/templates/vhost/_ssl.erb b/templates/vhost/_ssl.erb
+index c92f70c8..e70efebd 100644
+--- a/templates/vhost/_ssl.erb
++++ b/templates/vhost/_ssl.erb
+@@ -49,7 +49,7 @@
+   <%- if @ssl_stapling_timeout && scope.function_versioncmp([@apache_version, '2.4']) >= 0 -%>
+   SSLStaplingResponderTimeout <%= @ssl_stapling_timeout %>
+   <%- end -%>
+-  <%- if (not @ssl_stapling_return_errors.nil? && scope.function_versioncmp([@apache_version, '2.4']) >= 0) -%>
++  <%- if (not @ssl_stapling_return_errors.nil?) && (scope.function_versioncmp([@apache_version, '2.4']) >= 0) -%>
+   SSLStaplingReturnResponderErrors <%= scope.function_bool2httpd([@ssl_stapling_return_errors]) %>
+   <%- end -%>
+ <% end -%>
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-apache/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-apache/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..1a5c2fc
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppetlabs-apache.gemspec        2019-10-28 12:43:41.741170991 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-apache'
++  s.version     = '1.10.0'
++  s.date        = '2017-05-17'
++  s.summary     = "Installs, configures, and manages Apache virtual hosts, web services, and modules."
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-apache'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-apache/puppetlabs-apache-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-apache/puppetlabs-apache-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..45dcddb
--- /dev/null
@@ -0,0 +1,26 @@
+diff --git a/lib/puppet/provider/a2mod/a2mod.rb b/lib/puppet/provider/a2mod/a2mod.rb
+index e257a579..96d6151d 100644
+--- a/lib/puppet/provider/a2mod/a2mod.rb
++++ b/lib/puppet/provider/a2mod/a2mod.rb
+@@ -8,7 +8,7 @@ Puppet::Type.type(:a2mod).provide(:a2mod, :parent => Puppet::Provider::A2mod) do
+     commands :apache2ctl => "apache2ctl"
+     confine :osfamily => :debian
+-    defaultfor :operatingsystem => [:debian, :ubuntu]
++    defaultfor :operatingsystem => [:debian, :ubuntu, :'poky-stx']
+     def self.instances
+       modules = apache2ctl("-M").lines.collect { |line|
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 55682f3b..2f849752 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -220,7 +220,7 @@ class apache::params inherits ::apache::version {
+     $suphp_addhandler    = 'x-httpd-php'
+     $suphp_engine        = 'off'
+     $suphp_configpath    = '/etc/php5/apache2'
+-    if ($::operatingsystem == 'Ubuntu' and versioncmp($::operatingsystemrelease, '16.04') < 0) or ($::operatingsystem == 'Debian' and versioncmp($::operatingsystemrelease, '9') < 0) {
++    if ($::operatingsystem == 'Ubuntu' and versioncmp($::operatingsystemrelease, '16.04') < 0) or ($::operatingsystem == 'Debian' and versioncmp($::operatingsystemrelease, '9') < 0) or ($::operatingsystem == 'poky-stx') {
+       # Only the major version is used here
+       $php_version = '5'
+     } else {
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-concat/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-concat/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..c8ca025
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-concat.gemspec        2019-10-29 12:44:01.905771063 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-concat'
++  s.version     = '2.2.0'
++  s.date        = '2016-06-27'
++  s.summary     = "Construct files from multiple fragments."
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-concat'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-create-resources/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-create-resources/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..f0bb465
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-create-resources.gemspec      2019-10-30 09:22:32.165637534 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-create-resources'
++  s.version     = '0.0.1'
++  s.date        = '2013-09-30'
++  s.summary     = "Function to dynamically create resources from hashes."
++  s.description = s.summary
++  s.authors     = ["puppetlabs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-create_resources'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-create-resources/metadata.json.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-create-resources/metadata.json.patch
new file mode 100644 (file)
index 0000000..d44c9a0
--- /dev/null
@@ -0,0 +1,14 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/metadata.json    2019-10-31 12:18:48.281729608 -0700
+@@ -0,0 +1,11 @@
++{
++  "name": "puppetlabs-create_resources",
++  "version": "0.0.1",
++  "author": "Puppet Labs",
++  "summary": "Function to dynamically create resources from hashes.",
++  "license": "Apache-2.0",
++  "source": "git://github.com/puppetlabs/puppetlabs-create_resources.git",
++  "project_page": "https://github.com/puppetlabs/puppetlabs-create_resources",
++  "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
++  "dependencies": [ ]
++}
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..e07355b
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppetlabs-firewall.gemspec      2019-10-28 13:27:42.249066706 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-firewall'
++  s.version     = '1.8.2'
++  s.date        = '2017-01-09'
++  s.summary     = "Manages Firewalls such as iptables"
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.markdown Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-firewall'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/poky-firewall-updates.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/poky-firewall-updates.patch
new file mode 100644 (file)
index 0000000..f9073a4
--- /dev/null
@@ -0,0 +1,13 @@
+diff --git a/packstack/puppet/modules/firewall/manifests/linux.pp b/packstack/puppet/modules/firewall/manifests/linux.pp
+index 0fd758a..d9280f5 100644
+--- a/packstack/puppet/modules/firewall/manifests/linux.pp
++++ b/packstack/puppet/modules/firewall/manifests/linux.pp
+@@ -40,7 +40,7 @@ class firewall::linux (
+         require         => Package['iptables'],
+       }
+     }
+-    'Debian', 'Ubuntu': {
++    'Debian', 'Ubuntu', 'poky': {
+       class { "${title}::debian":
+         ensure       => $ensure,
+         enable       => $enable,
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-poky.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-poky.patch
new file mode 100644 (file)
index 0000000..2c28ebd
--- /dev/null
@@ -0,0 +1,35 @@
+diff -ru a/lib/puppet/util/firewall.rb b/lib/puppet/util/firewall.rb
+--- a/lib/puppet/util/firewall.rb      2020-03-05 19:13:11.474497464 +0800
++++ b/lib/puppet/util/firewall.rb      2020-03-05 19:18:52.030506745 +0800
+@@ -154,6 +154,8 @@
+       'RedHat'
+     when 'Debian', 'Ubuntu'
+       'Debian'
++    when 'poky'
++      'Debian_poky'
+     else
+       Facter.value(:operatingsystem)
+     end
+@@ -208,7 +210,7 @@
+       when :IPv4
+         ["/bin/sh", "-c", "/sbin/iptables-save > /etc/iptables/rules"]
+       end
+-    when :Archlinux
++    when :Archlinux, :Debian_poky
+       case proto.to_sym
+       when :IPv4
+         ["/bin/sh", "-c", "/usr/sbin/iptables-save > /etc/iptables/iptables.rules"]
+diff -ru a/manifests/params.pp b/manifests/params.pp
+--- a/manifests/params.pp      2020-03-05 19:13:11.478497464 +0800
++++ b/manifests/params.pp      2020-03-05 19:13:20.794497718 +0800
+@@ -47,6 +47,10 @@
+           }
+         }
++        'poky': {
++            $service_name = 'firewalld'
++            $package_name = 'firewalld'
++        }
+         default: {
+           $service_name = 'iptables-persistent'
+           $package_name = 'iptables-persistent'
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-random-fully-support.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-random-fully-support.patch
new file mode 100644 (file)
index 0000000..136bdd9
--- /dev/null
@@ -0,0 +1,71 @@
+diff -ru x/lib/puppet/provider/firewall/iptables.rb y/lib/puppet/provider/firewall/iptables.rb
+--- x/lib/puppet/provider/firewall/iptables.rb 2020-04-13 14:18:35.001844743 +0800
++++ y/lib/puppet/provider/firewall/iptables.rb 2020-04-13 14:44:03.565886399 +0800
+@@ -54,6 +54,12 @@
+     mark_flag = '--set-xmark'
+   end
++  kernelversion = Facter.value('kernelversion')
++  if (kernelversion && Puppet::Util::Package.versioncmp(kernelversion, '3.13') >= 0) &&
++     (iptables_version && Puppet::Util::Package.versioncmp(iptables_version, '1.6.2') >= 0)
++    has_feature :random_fully
++  end
++
+   @protocol = "IPv4"
+   @resource_map = {
+@@ -94,6 +100,7 @@
+     :proto                 => "-p",
+     :queue_num             => "--queue-num",
+     :queue_bypass          => "--queue-bypass",
++    :random_fully          => "--random-fully",
+     :random                => "--random",
+     :rdest                 => "--rdest",
+     :reap                  => "--reap",
+@@ -271,7 +278,7 @@
+     :rhitcount, :rttl, :rname, :mask, :rsource, :rdest, :ipset, :string, :string_algo,
+     :string_from, :string_to, :jump, :goto, :clusterip_new, :clusterip_hashmode,
+     :clusterip_clustermac, :clusterip_total_nodes, :clusterip_local_node, :clusterip_hash_init, :queue_num, :queue_bypass,
+-    :clamp_mss_to_pmtu, :gateway, :set_mss, :set_dscp, :set_dscp_class, :todest, :tosource, :toports, :to, :checksum_fill, :random, :log_prefix,
++    :clamp_mss_to_pmtu, :gateway, :set_mss, :set_dscp, :set_dscp_class, :todest, :tosource, :toports, :to, :checksum_fill, :random_fully, :random, :log_prefix,
+     :log_level, :log_uid, :reject, :set_mark, :match_mark, :mss, :connlimit_above, :connlimit_mask, :connmark, :time_start, :time_stop,
+     :month_days, :week_days, :date_start, :date_stop, :time_contiguous, :kernel_timezone
+   ]
+@@ -399,6 +406,8 @@
+         # only replace those -f that are not followed by an l to
+         # distinguish between -f and the '-f' inside of --tcp-flags.
+         values = values.sub(/\s-f(?!l)(?=.*--comment)/, ' -f true')
++      elsif bool == :random
++        values = values.sub(%r{#{resource_map[bool]}(\s|$)(?!"!")}, "#{resource_map[bool]} true")
+       else
+         values = values.sub(/#{resource_map[bool]}/, "#{resource_map[bool]} true")
+       end
+diff -ru x/lib/puppet/type/firewall.rb y/lib/puppet/type/firewall.rb
+--- x/lib/puppet/type/firewall.rb      2020-04-13 14:18:35.001844743 +0800
++++ y/lib/puppet/type/firewall.rb      2020-04-13 14:44:03.565886399 +0800
+@@ -63,6 +63,7 @@
+   feature :string_matching, "String matching features"
+   feature :queue_num, "Which NFQUEUE to send packets to"
+   feature :queue_bypass, "If nothing is listening on queue_num, allow packets to bypass the queue"
++  feature :random_fully, 'The ability to use --random-fully flag'
+   # provider specific features
+   feature :iptables, "The provider provides iptables features."
+@@ -564,6 +565,17 @@
+     EOS
+   end
++  newproperty(:random_fully, required_features: :random_fully) do
++    desc <<-EOS
++      When using a jump value of "MASQUERADE", "DNAT", "REDIRECT", or "SNAT"
++      this boolean will enable fully randomized port mapping.
++
++      **NOTE** Requires Kernel >= 3.13 and iptables >= 1.6.2
++    EOS
++
++    newvalues(:true, :false)
++  end
++
+   newproperty(:random, :required_features => :dnat) do
+     desc <<-EOS
+       When using a jump value of "MASQUERADE", "DNAT", "REDIRECT", or "SNAT"
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-firewall/puppet-firewall-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..22f9bbb
--- /dev/null
@@ -0,0 +1,48 @@
+diff -ru a/lib/puppet/util/firewall.rb b/lib/puppet/util/firewall.rb
+--- a/lib/puppet/util/firewall.rb      2020-03-05 19:13:11.474497464 +0800
++++ b/lib/puppet/util/firewall.rb      2020-03-05 19:18:52.030506745 +0800
+@@ -154,6 +154,8 @@
+       'RedHat'
+     when 'Debian', 'Ubuntu'
+       'Debian'
++    when 'poky-stx'
++      'Debian_poky-stx'
+     else
+       Facter.value(:operatingsystem)
+     end
+@@ -208,7 +210,7 @@
+       when :IPv4
+         ["/bin/sh", "-c", "/sbin/iptables-save > /etc/iptables/rules"]
+       end
+-    when :Archlinux
++    when :Archlinux, :Debian_poky-stx
+       case proto.to_sym
+       when :IPv4
+         ["/bin/sh", "-c", "/usr/sbin/iptables-save > /etc/iptables/iptables.rules"]
+diff -ru a/manifests/params.pp b/manifests/params.pp
+--- a/manifests/params.pp      2020-03-05 19:13:11.478497464 +0800
++++ b/manifests/params.pp      2020-03-05 19:13:20.794497718 +0800
+@@ -47,6 +47,10 @@
+           }
+         }
++        'poky-stx': {
++            $service_name = 'firewalld'
++            $package_name = 'firewalld'
++        }
+         default: {
+           $service_name = 'iptables-persistent'
+           $package_name = 'iptables-persistent'
+diff --git a/manifests/linux.pp b/manifests/linux.pp
+index 0fd758a..d9280f5 100644
+--- a/manifests/linux.pp
++++ b/manifests/linux.pp
+@@ -40,7 +40,7 @@ class firewall::linux (
+         require         => Package['iptables'],
+       }
+     }
+-    'Debian', 'Ubuntu': {
++    'Debian', 'Ubuntu', 'poky-stx': {
+       class { "${title}::debian":
+         ensure       => $ensure,
+         enable       => $enable,
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0001-Roll-up-TIS-patches.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0001-Roll-up-TIS-patches.patch
new file mode 100644 (file)
index 0000000..15b3575
--- /dev/null
@@ -0,0 +1,43 @@
+From 4485b6cbf5a8bf1d3830b0406685aba3ece4e413 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Wed, 11 Jan 2017 13:05:12 -0500
+Subject: [PATCH] Roll up TIS patches
+
+---
+ manifests/config.pp | 4 ++--
+ manifests/init.pp   | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/manifests/config.pp b/manifests/config.pp
+index 51c2741..4007bb8 100644
+--- a/manifests/config.pp
++++ b/manifests/config.pp
+@@ -75,8 +75,8 @@ define haproxy::config (
+   if $_global_options['chroot'] {
+     file { $_global_options['chroot']:
+       ensure => directory,
+-      owner  => $_global_options['user'],
+-      group  => $_global_options['group'],
++      owner  => 'root',
++      group  => 'root',
+     }
+   }
+ }
+diff --git a/manifests/init.pp b/manifests/init.pp
+index f1109d0..54a1640 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -110,8 +110,8 @@
+ class haproxy (
+   $package_ensure      = 'present',
+   $package_name        = $haproxy::params::package_name,
+-  $service_ensure      = 'running',
+-  $service_manage      = true,
++  $service_ensure    = false,
++  $service_manage    = false,
+   $service_options     = $haproxy::params::service_options,
+   $sysconfig_options   = $haproxy::params::sysconfig_options,
+   $global_options      = $haproxy::params::global_options,
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0002-disable-config-validation-prechecks.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0002-disable-config-validation-prechecks.patch
new file mode 100644 (file)
index 0000000..43c3067
--- /dev/null
@@ -0,0 +1,123 @@
+From 50ef964cc4f918982d2889610c5f6e7506741518 Mon Sep 17 00:00:00 2001
+From: Kam Nasim <kam.nasim@windriver.com>
+Date: Fri, 26 May 2017 17:04:32 -0400
+Subject: [PATCH] disable configuration validation during haproxy manifest
+ apply since some files/options referenced in the configuration (such as for
+ TPM) may still be in flight while the haproxy manifest applies. This
+ validation option is a bit of an overkill anyways since it doesn't cause
+ Packstack to fail the manifest application, and is a soft error log but with
+ the added disadvantage of not applying any haproxy configuration (even the
+ sane bits) on a validation failure
+
+---
+ manifests/config.pp   | 8 --------
+ manifests/init.pp     | 7 -------
+ manifests/instance.pp | 7 -------
+ manifests/params.pp   | 1 -
+ 4 files changed, 23 deletions(-)
+
+diff --git a/manifests/config.pp b/manifests/config.pp
+index 4007bb8..b8d4ef4 100644
+--- a/manifests/config.pp
++++ b/manifests/config.pp
+@@ -8,7 +8,6 @@ define haproxy::config (
+   $config_dir = undef,  # A default is required for Puppet 2.7 compatibility. When 2.7 is no longer supported, this parameter default should be removed.
+   $custom_fragment = undef,  # A default is required for Puppet 2.7 compatibility. When 2.7 is no longer supported, this parameter default should be removed.
+   $merge_options = $haproxy::merge_options,
+-  $config_validate_cmd = $haproxy::config_validate_cmd
+ ) {
+   if $caller_module_name != $module_name {
+@@ -50,13 +49,6 @@ define haproxy::config (
+       mode  => '0640',
+     }
+-    # validate_cmd introduced in Puppet 3.5
+-    if ((!defined('$::puppetversion') or (versioncmp($::puppetversion, '3.5') >= 0)) and (!defined('$::serverversion') or versioncmp($::serverversion, '3.5') >= 0)) {
+-      Concat[$_config_file] {
+-        validate_cmd => $config_validate_cmd,
+-      }
+-    }
+-
+     # Simple Header
+     concat::fragment { "${instance_name}-00-header":
+       target  => $_config_file,
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 54a1640..d84755e 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -72,11 +72,6 @@
+ #   Optional. Path to the haproxy config file.
+ #   Default depends on platform.
+ #
+-# [*config_validate_cmd*]
+-#   Optional. Command used by concat validate_cmd to validate new
+-#   config file concat is a valid haproxy config.
+-#   Default /usr/sbin/haproxy -f % -c
+-#
+ # === Examples
+ #
+ #  class { 'haproxy':
+@@ -122,7 +117,6 @@ class haproxy (
+   $config_dir          = $haproxy::params::config_dir,
+   $config_file         = $haproxy::params::config_file,
+   $manage_config_dir   = $haproxy::params::manage_config_dir,
+-  $config_validate_cmd = $haproxy::params::config_validate_cmd,
+   # Deprecated
+   $manage_service   = undef,
+@@ -183,7 +177,6 @@ class haproxy (
+     merge_options       => $merge_options,
+     service_options     => $service_options,
+     sysconfig_options   => $sysconfig_options,
+-    config_validate_cmd => $config_validate_cmd,
+   }
+ }
+diff --git a/manifests/instance.pp b/manifests/instance.pp
+index 3dffdae..7f37751 100644
+--- a/manifests/instance.pp
++++ b/manifests/instance.pp
+@@ -63,11 +63,6 @@
+ #    The parent directory will be created automatically.
+ #  Defaults to undef.
+ #
+-# [*config_validate_cmd*]
+-#   Command used by concat validate_cmd to validate new
+-#   config file concat is a valid haproxy config.
+-#   Default /usr/sbin/haproxy -f % -c
+-#
+ # === Examples
+ #
+ # A single instance of haproxy with all defaults
+@@ -153,7 +148,6 @@ define haproxy::instance (
+   $merge_options     = $haproxy::params::merge_options,
+   $service_options   = $haproxy::params::service_options,
+   $sysconfig_options = $haproxy::params::sysconfig_options,
+-  $config_validate_cmd = $haproxy::params::config_validate_cmd,
+ ) {
+   if $service_ensure != true and $service_ensure != false {
+@@ -208,7 +202,6 @@ define haproxy::instance (
+     custom_fragment     => $custom_fragment,
+     merge_options       => $merge_options,
+     package_ensure      => $package_ensure,
+-    config_validate_cmd => $config_validate_cmd,
+   }
+   haproxy::install { $title:
+     package_name   => $package_name,
+diff --git a/manifests/params.pp b/manifests/params.pp
+index d7b9fa9..21e6bb0 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -10,7 +10,6 @@ class haproxy::params {
+   $service_options  = "ENABLED=1\n"  # Only used by Debian.
+   $sysconfig_options = 'OPTIONS=""' #Only used by Redhat/CentOS etc
+-  $config_validate_cmd = '/usr/sbin/haproxy -f % -c'
+   case $::osfamily {
+     'Archlinux', 'Debian', 'Redhat', 'Gentoo', 'Suse' : {
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0003-Fix-global_options-log-default-value.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0003-Fix-global_options-log-default-value.patch
new file mode 100644 (file)
index 0000000..79d1e82
--- /dev/null
@@ -0,0 +1,26 @@
+From 16163f14c8f9b1b81b6e394c31c72030938435c7 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Tue, 4 Sep 2018 20:01:53 +0800
+Subject: [PATCH] Fix global_options log default value
+
+Signed-off-by: zhipengl <zhipengs.liu@intel.com>
+---
+ manifests/params.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 21e6bb0..b29e427 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -15,7 +15,7 @@ class haproxy::params {
+     'Archlinux', 'Debian', 'Redhat', 'Gentoo', 'Suse' : {
+       $package_name      = 'haproxy'
+       $global_options    = {
+-        'log'     => "${::ipaddress} local0",
++        'log'     => 'global',
+         'chroot'  => '/var/lib/haproxy',
+         'pidfile' => '/var/run/haproxy.pid',
+         'maxconn' => '4000',
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0004-Stop-invalid-warning-message b/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/0004-Stop-invalid-warning-message
new file mode 100644 (file)
index 0000000..44c9b52
--- /dev/null
@@ -0,0 +1,31 @@
+From 7e1cff1503d9980e6fa346ec0e9cb93dc69774bf Mon Sep 17 00:00:00 2001
+From: zhipengl <zhipengs.liu@intel.com>
+Date: Fri, 7 Sep 2018 19:24:01 +0800
+Subject: [PATCH] Stop-invalid-warning-message.patch
+
+It can fix config_controller failure at step 6.
+
+The upstream fix is at below link
+https://github.com/puppetlabs/puppetlabs-haproxy/commit/20db4474c5938c21ac441f0092254c78b5978cc4
+
+Signed-off-by: zhipengl <zhipengs.liu@intel.com>
+---
+ manifests/frontend.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/frontend.pp b/manifests/frontend.pp
+index 1623115..709a08c 100644
+--- a/manifests/frontend.pp
++++ b/manifests/frontend.pp
+@@ -102,7 +102,7 @@ define haproxy::frontend (
+   $defaults_use_backend    = true,
+   $config_file             = undef,
+   # Deprecated
+-  $bind_options            = undef,
++  $bind_options            = '',
+ ) {
+   if $ports and $bind {
+     fail('The use of $ports and $bind is mutually exclusive, please choose either one')
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-haproxy/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..f47fc48
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-haproxy.gemspec       2019-10-28 21:37:14.320204197 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-haproxy'
++  s.version     = '1.5.0'
++  s.date        = '2017-01-09'
++  s.summary     = "Configures HAProxy servers and manages the configuration of backend member servers."
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-haproxy'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-inifile/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-inifile/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..2aefb28
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-23 20:19:32.602763995 -0700
++++ b/puppetlabs-inifile.gemspec       2019-10-28 09:20:41.514908926 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-inifile'
++  s.version     = '1.6.0'
++  s.date        = '2016-08-30'
++  s.summary     = "Resource types for managing settings in INI files"
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-inifile'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0001-puppet-lvm-kilo-quilt-changes.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0001-puppet-lvm-kilo-quilt-changes.patch
new file mode 100644 (file)
index 0000000..eaaa9a1
--- /dev/null
@@ -0,0 +1,694 @@
+From b80e106ace391d88de683c3da5e03878ce1ffa1d Mon Sep 17 00:00:00 2001
+From: Al Bailey <al.bailey@windriver.com>
+Date: Tue, 7 Jun 2016 10:36:17 -0400
+Subject: [PATCH] puppet-lvm kilo quilt changes
+
+---
+ .../lvm/lib/puppet/provider/logical_volume/lvm.rb  | 194 ++++++++++++++-------
+ .../lvm/lib/puppet/provider/physical_volume/lvm.rb |   2 +-
+ .../lvm/lib/puppet/provider/volume_group/lvm.rb    |  60 ++++++-
+ .../modules/lvm/lib/puppet/type/logical_volume.rb  |  32 +++-
+ .../puppet/modules/lvm/manifests/logical_volume.pp |   6 +
+ packstack/puppet/modules/lvm/manifests/volume.pp   |  11 +-
+ .../puppet/provider/logical_volume/lvm_spec.rb     |  55 +++---
+ 7 files changed, 267 insertions(+), 93 deletions(-)
+
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb b/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
+index e813193..2f41695 100755
+--- a/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
+@@ -3,24 +3,56 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+     commands :lvcreate   => 'lvcreate',
+              :lvremove   => 'lvremove',
++             :lvresize   => 'lvresize',
+              :lvextend   => 'lvextend',
+              :lvs        => 'lvs',
+-             :resize2fs  => 'resize2fs',
++             :vgs        => 'vgs',
+              :umount     => 'umount',
+              :blkid      => 'blkid',
+              :dmsetup    => 'dmsetup',
++             :dd         => 'dd',
+              :lvconvert  => 'lvconvert',
+-             :lvdisplay  => 'lvdisplay'
+-
+-    optional_commands :xfs_growfs => 'xfs_growfs',
+-                      :resize4fs  => 'resize4fs'
++             :lvdisplay  => 'lvdisplay',
++             :fsadm      => 'fsadm',
++             :dd         => 'dd'
++
++    def round_to_extent(size)
++      lvm_size_units = {
++        "K" => 1**0, "M" => 1024**1, "G" => 1024**2, "T" => 1024**3, "P" => 1024**4, "E" => 1024**5,
++      }
++
++      if @resource[:size] =~ /^([0-9]+(\.[0-9]+)?)([KMGTPE])/i
++        size_value = $1.to_f
++        size_unit  = $3.upcase
++        size_kibi = (size_value * lvm_size_units[size_unit]).to_i
++        if vgs('--noheading', '-o', 'vg_extent_size', '--units', 'k', "#{@resource[:volume_group]}") =~ /\s+(\d+)\.\d+k/i
++          vg_extent_size_kibi = $1.to_i
++        end
++        new_size_kibi = ((size_kibi + vg_extent_size_kibi - 1) / vg_extent_size_kibi) * vg_extent_size_kibi
++        "#{new_size_kibi}k"
++      else
++        size
++      end
++    end
+     def create
+         args = ['-n', @resource[:name]]
+         if @resource[:size]
+-            args.push('--size', @resource[:size])
++            size = @resource[:size]
++            if size == 'max'
++                size = vgs('--noheading', '-o', 'vg_size', '--units', 'k', "#{@resource[:volume_group]}").strip
++            elsif @resource[:round_to_extent] then
++                size = round_to_extent(size)
++            end
++            args.push('--size', size)
+         elsif @resource[:initial_size]
+-            args.push('--size', @resource[:initial_size])
++            args.push(
++                '--size',
++                if @resource[:round_to_extent] then
++                    round_to_extent(@resource[:initial_size])
++                else
++                    @resource[:initial_size]
++                end)
+         end
+         if @resource[:extents]
+             args.push('--extents', @resource[:extents])
+@@ -63,6 +95,7 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+         args << @resource[:volume_group]
+         lvcreate(*args)
++        lvzero
+     end
+     def destroy
+@@ -75,9 +108,16 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+         lvs(@resource[:volume_group]) =~ lvs_pattern
+     end
++    def exec_cmd(*cmd)
++      output = Puppet::Util::Execution.execute(cmd, :failonfail => false, :combine => true)
++      {:out => output, :exit => $CHILD_STATUS.exitstatus}
++    end
++
+     def size
+         if @resource[:size] =~ /^\d+\.?\d{0,2}([KMGTPE])/i
+             unit = $1.downcase
++        else
++            unit = 'k'
+         end
+         raw = lvs('--noheading', '--unit', unit, path)
+@@ -92,64 +132,87 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+     end
+     def size=(new_size)
+-        lvm_size_units = { "K" => 1, "M" => 1024, "G" => 1048576, "T" => 1073741824, "P" => 1099511627776, "E" => 1125899906842624 }
+-        lvm_size_units_match = lvm_size_units.keys().join('|')
++      lvm_size_units = {
++        "K" => 1**0, "M" => 1024**1, "G" => 1024**2, "T" => 1024**3, "P" => 1024**4, "E" => 1024**5,
++      }
+-        resizeable = false
+-        current_size = size()
++      current_size = size()
+-        if current_size =~ /(\d+\.{0,1}\d{0,2})(#{lvm_size_units_match})/i
+-            current_size_bytes = $1.to_i
+-            current_size_unit  = $2.upcase
+-        end
++      if current_size =~ /^([0-9]+(\.[0-9]+)?)([KMGTPE])/i
++        current_size_value = $1.to_f
++        current_size_unit  = $3.upcase
++        current_size = (current_size_value * lvm_size_units[current_size_unit]).to_i
++      end
+-        if new_size =~ /(\d+)(#{lvm_size_units_match})/i
+-            new_size_bytes = $1.to_i
+-            new_size_unit  = $2.upcase
+-        end
++      info( "Current: value=#{current_size_value}, unit=#{current_size_unit}, kibi=#{current_size}" )
+-        ## Get the extend size
+-        if lvs('--noheading', '-o', 'vg_extent_size', '--units', 'k', path) =~ /\s+(\d+)\.\d+k/i
+-            vg_extent_size = $1.to_i
+-        end
++      if new_size == 'max'
++        new_size = vgs('--noheading', '-o', 'vg_size', '--units', 'k', "#{@resource[:volume_group]}").strip
++      end
+-        ## Verify that it's a extension: Reduce is potentially dangerous and should be done manually
+-        if lvm_size_units[current_size_unit] < lvm_size_units[new_size_unit]
+-            resizeable = true
+-        elsif lvm_size_units[current_size_unit] > lvm_size_units[new_size_unit]
+-            if (current_size_bytes * lvm_size_units[current_size_unit]) < (new_size_bytes * lvm_size_units[new_size_unit])
+-                resizeable = true
+-            end
+-        elsif lvm_size_units[current_size_unit] == lvm_size_units[new_size_unit]
+-            if new_size_bytes > current_size_bytes
+-                resizeable = true
+-            end
+-        end
++      if new_size =~ /^([0-9]+(\.[0-9]+)?)([KMGTPE])/i
++        new_size_value = $1.to_f
++        new_size_unit  = $3.upcase
++        new_size = (new_size_value * lvm_size_units[new_size_unit]).to_i
++      end
+-        if not resizeable
+-            if @resource[:size_is_minsize] == :true or @resource[:size_is_minsize] == true or @resource[:size_is_minsize] == 'true'
+-                info( "Logical volume already has minimum size of #{new_size} (currently #{current_size})" )
+-            else
+-                fail( "Decreasing the size requires manual intervention (#{new_size} < #{current_size})" )
+-            end
+-        else
+-            ## Check if new size fits the extend blocks
+-            if new_size_bytes * lvm_size_units[new_size_unit] % vg_extent_size != 0
+-                fail( "Cannot extend to size #{new_size} because VG extent size is #{vg_extent_size} KB" )
+-            end
++      info( "New: value=#{new_size_value}, unit=#{new_size_unit}, kibi=#{new_size}" )
+-            lvextend( '-L', new_size, path) || fail( "Cannot extend to size #{new_size} because lvextend failed." )
++      ## Get the extend size
++      if lvs('--noheading', '-o', 'vg_extent_size', '--units', 'k', path) =~ /\s+(\d+)\.\d+k/i
++        vg_extent_size = $1.to_i
++      end
+-            blkid_type = blkid(path)
+-            if command(:resize4fs) and blkid_type =~ /\bTYPE=\"(ext4)\"/
+-              resize4fs( path) || fail( "Cannot resize file system to size #{new_size} because resize2fs failed." )
+-            elsif blkid_type =~ /\bTYPE=\"(ext[34])\"/
+-              resize2fs( path) || fail( "Cannot resize file system to size #{new_size} because resize2fs failed." )
+-            elsif blkid_type =~ /\bTYPE=\"(xfs)\"/
+-              xfs_growfs( path) || fail( "Cannot resize filesystem to size #{new_size} because xfs_growfs failed." )
++      if new_size < current_size
++        if @resource[:size_is_minsize] == :true or @resource[:size_is_minsize] == true or @resource[:size_is_minsize] == 'true'
++          info( "Logical volume already has minimum size of #{new_size} (currently #{current_size})" )
++        else
++          if not @resource[:allow_reduce]
++            fail( "Decreasing the size requires manual intervention (#{new_size} < #{current_size})" )
++          end
++          if new_size % vg_extent_size != 0
++            if @resource[:round_to_extent]
++              new_size = ((new_size + vg_extent_size - 1) / vg_extent_size) * vg_extent_size
++              if new_size >= current_size
++                info( "Logical volume already has a size of #{current_size}" )
++                return
++              end
++            else
++              fail( "Cannot reduce to size #{new_size} because VG extent size is #{vg_extent_size} KB" )
+             end
+-
++          end
++          exec_cmd('umount', path)
++          exec_cmd('fsadm', '-y', 'check', path )
++          r = exec_cmd('fsadm', '-y', 'resize', path, "#{new_size}k")
++          if r[:exit] != 0 and @resource[:nuke_fs_on_resize_failure]
++            exec_cmd('dd', 'if=/dev/zero', "of=#{path}", "bs=512", "count=16", "conv=notrunc")
++            blkid('-g')
++          end
++          lvresize( '-f', '-L', "#{new_size}k", path) || fail( "Cannot reduce to size #{new_size} because lvresize failed." )
++        end
++      elsif new_size > current_size
++        if new_size % vg_extent_size != 0
++          if @resource[:round_to_extent]
++            new_size = ((new_size + vg_extent_size - 1) / vg_extent_size) * vg_extent_size
++            if new_size <= current_size
++              info( "Logical volume already has a size of #{current_size}" )
++              return
++            end
++          else
++            fail( "Cannot extend to size #{new_size} because VG extent size is #{vg_extent_size} KB" )
++          end
++        end
++        lvextend( '-L', "#{new_size}k", path) || fail( "Cannot extend to size #{new_size} because lvextend failed." )
++        exec_cmd('umount', path)
++        exec_cmd('fsadm', '-y', 'check', path )
++        r = exec_cmd('fsadm', '-y', 'resize', path, "#{new_size}k")
++        if r[:exit] != 0 and @resource[:nuke_fs_on_resize_failure]
++          exec_cmd('dd', 'if=/dev/zero', "of=#{path}", "bs=512", "count=16", "conv=notrunc")
++          blkid('-g')
+         end
++      else
++        info( "Logical volume already has a size of #{current_size}" )
++      end
+     end
+@@ -161,7 +224,7 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+             # Minus one because it says "2" when there is only one spare. And so on.
+             n = ($1.to_i)-1
+             #puts " current mirrors: #{n}"
+-            return n.to_s 
++            return n.to_s
+         end
+         return 0.to_s
+     end
+@@ -176,7 +239,7 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+             end
+             # Region size cannot be changed on an existing mirror (not even when changing to zero mirrors).
+-            
++
+             if @resource[:alloc]
+                 args.push( '--alloc', @resource[:alloc] )
+             end
+@@ -222,9 +285,6 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+         end
+     end
+-
+-
+-
+     private
+     def lvs_pattern
+@@ -240,4 +300,18 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+         "/dev/#{@resource[:volume_group]}"
+     end
++    def lvzero
++        if lvs('--noheading', '-o', 'lv_size', '--units', 'm', path) =~ /\s+(\d+)\.\d+m/i
++            lv_size = $1.to_i
++            lv_size = lv_size - 2
++            begin
++                dd('if=/dev/zero', 'of=' + path, 'bs=1M', "seek=#{lv_size}")
++            rescue
++            end
++            begin
++                dd('if=/dev/zero', 'of=' + path, 'bs=1M', 'count=100')
++            rescue
++            end
++        end
++    end
+ end
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb b/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb
+index eaefc92..6ac6e0a 100644
+--- a/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb
+@@ -4,7 +4,7 @@ Puppet::Type.type(:physical_volume).provide(:lvm) do
+     commands :pvcreate  => 'pvcreate', :pvremove => 'pvremove', :pvs => 'pvs', :vgs => 'vgs'
+     def create
+-        pvcreate(@resource[:name])
++        pvcreate('-y', @resource[:name])
+     end
+     def destroy
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb b/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb
+index c8de071..3d54dba 100644
+--- a/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb
+@@ -1,12 +1,18 @@
++require 'csv'
++
+ Puppet::Type.type(:volume_group).provide :lvm do
+     desc "Manages LVM volume groups"
+     commands :vgcreate => 'vgcreate',
+              :vgremove => 'vgremove',
++             :pvremove => 'pvremove',
+              :vgs      => 'vgs',
+              :vgextend => 'vgextend',
+              :vgreduce => 'vgreduce',
+-             :pvs      => 'pvs'
++             :vgscan   => 'vgscan',
++             :pvs      => 'pvs',
++             :lvremove => 'lvremove',
++             :umount   => 'umount'
+     def create
+         vgcreate(@resource[:name], *@resource.should(:physical_volumes))
+@@ -22,17 +28,55 @@ Puppet::Type.type(:volume_group).provide :lvm do
+         false
+     end
++    def exec_cmd(*cmd)
++      output = Puppet::Util::Execution.execute(cmd, :failonfail => false, :combine => true)
++      {:out => output, :exit => $CHILD_STATUS.exitstatus}
++    end
++
+     def physical_volumes=(new_volumes = [])
+-        # Only take action if createonly is false just to be safe
+-        #  this is really only here to enforce the createonly setting
+-        #  if something goes wrong in physical_volumes
+-        if @resource[:createonly].to_s == "false"
+-          existing_volumes = physical_volumes
+-          extraneous = existing_volumes - new_volumes
+-          extraneous.each { |volume| reduce_with(volume) }
++      # Only take action if createonly is false just to be safe
++      #  this is really only here to enforce the createonly setting
++      #  if something goes wrong in physical_volumes
++      if @resource[:createonly].to_s == "false"
++        vgreduce('--removemissing', '--force', @resource[:name])
++        existing_volumes = physical_volumes
++        extraneous = existing_volumes - new_volumes
++        pv_to_lv={}
++        pv_to_dev={}
++        csv = CSV.new(pvs('-o', 'pv_name,vg_name,lv_name', '--separator', ','),
++                     :headers => true, :header_converters => :symbol)
++        csv.to_a.map {|row| row.to_hash}.each do |m|
++          unless m[:lv].nil?
++            pv_to_lv[m[:_pv].strip] = "#{m[:vg]}/#{m[:lv]}"
++            pv_to_dev[m[:_pv].strip] = "#{m[:vg].gsub('-','--')}-#{m[:lv].gsub('-','--')}"
++          end
++        end
++
++        if extraneous == existing_volumes
++          extraneous.each do |volume|
++            if pv_to_lv.has_key?(volume)
++              exec_cmd('/bin/umount', "/dev/mapper/#{pv_to_dev[volume]}")
++              lvremove('-f', pv_to_lv[volume])
++            end
++          end
++          vgremove(@resource[:name], '--force')
++          extraneous.each do |volume|
++              pvremove(volume)
++          end
++          vgcreate(@resource[:name], *new_volumes)
++        else
++          extraneous.each do |volume|
++            if pv_to_lv.has_key?(volume)
++              exec_cmd('/bin/umount', "/dev/mapper/#{pv_to_dev[volume]}")
++              lvremove('-f', pv_to_lv[volume])
++            end
++            reduce_with(volume)
++            pvremove(volume)
++          end
+           missing = new_volumes - existing_volumes
+           missing.each { |volume| extend_with(volume) }
+         end
++      end
+     end
+     def physical_volumes
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/type/logical_volume.rb b/packstack/puppet/modules/lvm/lib/puppet/type/logical_volume.rb
+index f907e08..3081650 100644
+--- a/packstack/puppet/modules/lvm/lib/puppet/type/logical_volume.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/type/logical_volume.rb
+@@ -31,7 +31,7 @@ Puppet::Type.newtype(:logical_volume) do
+   newproperty(:size) do
+     desc "The size of the logical volume. Set to undef to use all available space"
+     validate do |value|
+-      unless value =~ /^[0-9]+(\.[0-9]+)?[KMGTPE]/i
++      unless value =~ /(^[0-9]+(\.[0-9]+)?[KMGTPE]|max)/i
+         raise ArgumentError , "#{value} is not a valid logical volume size"
+       end
+     end
+@@ -50,6 +50,36 @@ Puppet::Type.newtype(:logical_volume) do
+     desc "Configures the logical volume type. AIX only"
+   end
++  newparam(:allow_reduce) do
++    desc "Allow reducing logical volume size."
++    validate do |value|
++      unless [:true, true, "true", :false, false, "false"].include?(value)
++        raise ArgumentError , "allow_reduce must either be true or false"
++      end
++    end
++    defaultto :false
++  end
++
++  newparam(:round_to_extent) do
++    desc "Allow rounding of logical volume size to extent size."
++    validate do |value|
++      unless [:true, true, "true", :false, false, "false"].include?(value)
++        raise ArgumentError , "round_to_extent must either be true or false"
++      end
++    end
++    defaultto :false
++  end
++
++  newparam(:nuke_fs_on_resize_failure) do
++    desc "Remove filesystem on logical volume resize failure."
++    defaultto :false
++    validate do |value|
++      unless [:true, true, "true", :false, false, "false"].include?(value)
++        raise ArgumentError , "nuke_fs_on_resize_failure must either be true or false"
++      end
++    end
++  end
++
+   newparam(:range) do
+     desc "Sets the inter-physical volume allocation policy. AIX only"
+     validate do |value|
+diff --git a/packstack/puppet/modules/lvm/manifests/logical_volume.pp b/packstack/puppet/modules/lvm/manifests/logical_volume.pp
+index e6e5e78..4888b5d 100644
+--- a/packstack/puppet/modules/lvm/manifests/logical_volume.pp
++++ b/packstack/puppet/modules/lvm/manifests/logical_volume.pp
+@@ -3,7 +3,9 @@
+ define lvm::logical_volume (
+   $volume_group,
+   $size              = undef,
++  $size_is_minsize   = false,
+   $initial_size      = undef,
++  $round_to_extent   = false,
+   $ensure            = present,
+   $options           = 'defaults',
+   $pass              = '2',
+@@ -12,6 +14,7 @@ define lvm::logical_volume (
+   $mkfs_options      = undef,
+   $mountpath         = "/${name}",
+   $mountpath_require = false,
++  $remounts          = true,
+   $createfs          = true,
+   $extents           = undef,
+   $stripes           = undef,
+@@ -21,6 +24,7 @@ define lvm::logical_volume (
+ ) {
+   validate_bool($mountpath_require)
++  validate_bool($size_is_minsize)
+   if ($name == undef) {
+     fail("lvm::logical_volume \$name can't be undefined")
+@@ -51,6 +55,7 @@ define lvm::logical_volume (
+     ensure       => $ensure,
+     volume_group => $volume_group,
+     size         => $size,
++    size_is_minsize => $size_is_minsize,
+     initial_size => $initial_size,
+     stripes      => $stripes,
+     stripesize   => $stripesize,
+@@ -81,6 +86,7 @@ define lvm::logical_volume (
+       pass    => $pass,
+       dump    => $dump,
+       atboot  => true,
++      remounts => $remounts,
+     }
+   }
+ }
+diff --git a/packstack/puppet/modules/lvm/manifests/volume.pp b/packstack/puppet/modules/lvm/manifests/volume.pp
+index a8bc3c8..bdfc937 100644
+--- a/packstack/puppet/modules/lvm/manifests/volume.pp
++++ b/packstack/puppet/modules/lvm/manifests/volume.pp
+@@ -59,7 +59,10 @@ define lvm::volume (
+   $fstype  = undef,
+   $size    = undef,
+   $extents = undef,
+-  $initial_size = undef
++  $initial_size = undef,
++  $allow_reduce = false,
++  $round_to_extent = false,
++  $nuke_fs_on_resize_failure = false
+ ) {
+   if ($name == undef) {
+@@ -88,6 +91,9 @@ define lvm::volume (
+           volume_group => $vg,
+           size         => $size,
+           initial_size => $initial_size,
++          allow_reduce => $allow_reduce,
++          round_to_extent => $round_to_extent,
++          nuke_fs_on_resize_failure => $nuke_fs_on_resize_failure,
+           before       => Volume_group[$vg]
+         }
+       }
+@@ -124,6 +130,9 @@ define lvm::volume (
+         volume_group => $vg,
+         size         => $size,
+         extents      => $extents,
++        allow_reduce => $allow_reduce,
++        round_to_extent => $round_to_extent,
++        nuke_fs_on_resize_failure => $nuke_fs_on_resize_failure,
+         require      => Volume_group[$vg]
+       }
+diff --git a/packstack/puppet/modules/lvm/spec/unit/puppet/provider/logical_volume/lvm_spec.rb b/packstack/puppet/modules/lvm/spec/unit/puppet/provider/logical_volume/lvm_spec.rb
+index 56c32a5..a465a7f 100644
+--- a/packstack/puppet/modules/lvm/spec/unit/puppet/provider/logical_volume/lvm_spec.rb
++++ b/packstack/puppet/modules/lvm/spec/unit/puppet/provider/logical_volume/lvm_spec.rb
+@@ -12,23 +12,24 @@ describe provider_class do
+   describe 'when creating' do
+     context 'with size' do
+       it "should execute 'lvcreate' with a '--size' option" do
+-        @resource.expects(:[]).with(:name).returns('mylv')
+-        @resource.expects(:[]).with(:volume_group).returns('myvg')
++        @resource.expects(:[]).with(:name).returns('mylv').at_least_once
++        @resource.expects(:[]).with(:volume_group).returns('myvg').at_least_once
+         @resource.expects(:[]).with(:size).returns('1g').at_least_once
+         @resource.expects(:[]).with(:extents).returns(nil).at_least_once
+         @resource.expects(:[]).with(:stripes).returns(nil).at_least_once
+         @resource.expects(:[]).with(:stripesize).returns(nil).at_least_once
+         @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+         @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++        @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
+         @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', 'myvg')
+         @provider.create
+       end
+     end
+     context 'with initial_size' do
+       it "should execute 'lvcreate' with a '--size' option" do
+-        @resource.expects(:[]).with(:name).returns('mylv')
+-        @resource.expects(:[]).with(:volume_group).returns('myvg')
++        @resource.expects(:[]).with(:name).returns('mylv').at_least_once
++        @resource.expects(:[]).with(:volume_group).returns('myvg').at_least_once
+         @resource.expects(:[]).with(:initial_size).returns('1g').at_least_once
+         @resource.expects(:[]).with(:size).returns(nil).at_least_once
+         @resource.expects(:[]).with(:extents).returns(nil).at_least_once
+@@ -36,15 +37,16 @@ describe provider_class do
+         @resource.expects(:[]).with(:stripesize).returns(nil).at_least_once
+         @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+         @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++        @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
+         @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', 'myvg')
+         @provider.create
+       end
+     end
+      context 'without size and without extents' do
+       it "should execute 'lvcreate' without a '--size' option or a '--extents' option" do
+-        @resource.expects(:[]).with(:name).returns('mylv')
+-        @resource.expects(:[]).with(:volume_group).returns('myvg')
++        @resource.expects(:[]).with(:name).returns('mylv').at_least_once
++        @resource.expects(:[]).with(:volume_group).returns('myvg').at_least_once
+         @resource.expects(:[]).with(:size).returns(nil).at_least_once
+         @resource.expects(:[]).with(:initial_size).returns(nil).at_least_once
+         @resource.expects(:[]).with(:extents).returns(nil).at_least_once
+@@ -52,45 +54,47 @@ describe provider_class do
+         @resource.expects(:[]).with(:stripesize).returns(nil).at_least_once
+         @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+         @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
+         @provider.expects(:lvcreate).with('-n', 'mylv', '--extents', '100%FREE', 'myvg')
+         @provider.create
+       end
+     end
+     context 'with extents' do
+       it "should execute 'lvcreate' with a '--extents' option" do
+-        @resource.expects(:[]).with(:name).returns('mylv')
+-        @resource.expects(:[]).with(:volume_group).returns('myvg')
++        @resource.expects(:[]).with(:name).returns('mylv').at_least_once
++        @resource.expects(:[]).with(:volume_group).returns('myvg').at_least_once
+         @resource.expects(:[]).with(:size).returns('1g').at_least_once
+         @resource.expects(:[]).with(:extents).returns('80%vg').at_least_once
+         @resource.expects(:[]).with(:stripes).returns(nil).at_least_once
+         @resource.expects(:[]).with(:stripesize).returns(nil).at_least_once
+         @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+         @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++        @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
+         @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', '--extents', '80%vg', 'myvg')
+         @provider.create
+       end
+     end
+     context 'without extents' do
+       it "should execute 'lvcreate' without a '--extents' option" do
+-        @resource.expects(:[]).with(:name).returns('mylv')
+-        @resource.expects(:[]).with(:volume_group).returns('myvg')
++        @resource.expects(:[]).with(:name).returns('mylv').at_least_once
++        @resource.expects(:[]).with(:volume_group).returns('myvg').at_least_once
+         @resource.expects(:[]).with(:size).returns('1g').at_least_once
+         @resource.expects(:[]).with(:extents).returns(nil).at_least_once
+         @resource.expects(:[]).with(:stripes).returns(nil).at_least_once
+         @resource.expects(:[]).with(:stripesize).returns(nil).at_least_once
+         @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+         @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++        @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++        @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
+         @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', 'myvg')
+         @provider.create
+       end
+     end
+     context 'with initial_size and mirroring' do
+       it "should execute 'lvcreate' with '--size' and '--mirrors' and '--mirrorlog' options" do
+-        @resource.expects(:[]).with(:name).returns('mylv')
+-        @resource.expects(:[]).with(:volume_group).returns('myvg')
++        @resource.expects(:[]).with(:name).returns('mylv').at_least_once
++        @resource.expects(:[]).with(:volume_group).returns('myvg').at_least_once
+         @resource.expects(:[]).with(:initial_size).returns('1g').at_least_once
+         @resource.expects(:[]).with(:size).returns(nil).at_least_once
+         @resource.expects(:[]).with(:extents).returns(nil).at_least_once
+@@ -102,6 +106,7 @@ describe provider_class do
+         @resource.expects(:[]).with(:region_size).returns(nil).at_least_once
+         @resource.expects(:[]).with(:no_sync).returns(nil).at_least_once
+         @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++        @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
+         @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', '--mirrors', '1', '--mirrorlog', 'core', 'myvg')
+         @provider.create
+       end
+@@ -120,13 +125,15 @@ describe provider_class do
+           @resource.expects(:[]).with(:stripesize).returns(nil).at_least_once
+           @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+           @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++          @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
++          @resource.expects(:[]).with(:nuke_fs_on_resize_failure).returns(false).at_least_once
+           @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', 'myvg')
+           @provider.create
+           @provider.expects(:lvs).with('--noheading', '--unit', 'g', '/dev/myvg/mylv').returns(' 1.00g').at_least_once
+           @provider.expects(:lvs).with('--noheading', '-o', 'vg_extent_size', '--units', 'k', '/dev/myvg/mylv').returns(' 1000.00k')
+           @provider.expects(:lvextend).with('-L', '2000000k', '/dev/myvg/mylv').returns(true)
+-          @provider.expects(:blkid).with('/dev/myvg/mylv')
++          #@provider.expects(:blkid).with('/dev/myvg/mylv')
+           @provider.size = '2000000k'
+         end
+       end
+@@ -140,7 +147,8 @@ describe provider_class do
+           @resource.expects(:[]).with(:stripesize).returns(nil).at_least_once
+           @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+           @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++          @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
+           @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', 'myvg')
+           @provider.create
+           @provider.expects(:lvs).with('--noheading', '--unit', 'g', '/dev/myvg/mylv').returns(' 1.00g').at_least_once
+@@ -161,7 +169,9 @@ describe provider_class do
+           @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+           @resource.expects(:[]).with(:size_is_minsize).returns(:false).at_least_once
+           @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once        
++          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++          @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
++          @resource.expects(:[]).with(:allow_reduce).returns(false).at_least_once
+           @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', 'myvg')
+           @provider.create
+           @provider.expects(:lvs).with('--noheading', '--unit', 'g', '/dev/myvg/mylv').returns(' 1.00g').at_least_once
+@@ -182,7 +192,8 @@ describe provider_class do
+           @resource.expects(:[]).with(:readahead).returns(nil).at_least_once
+           @resource.expects(:[]).with(:size_is_minsize).returns(:true).at_least_once
+           @resource.expects(:[]).with(:mirror).returns(nil).at_least_once
+-          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once       
++          @resource.expects(:[]).with(:alloc).returns(nil).at_least_once
++          @resource.expects(:[]).with(:round_to_extent).returns(false).at_least_once
+           @provider.expects(:lvcreate).with('-n', 'mylv', '--size', '1g', 'myvg')
+           @provider.create
+           @provider.expects(:lvs).with('--noheading', '--unit', 'g', '/dev/myvg/mylv').returns(' 1.00g').at_least_once
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0002-UEFI-pvcreate-fix.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0002-UEFI-pvcreate-fix.patch
new file mode 100644 (file)
index 0000000..3ac7b91
--- /dev/null
@@ -0,0 +1,46 @@
+From ac6a60e4d65e33017f8db0eca499f8dd898acb3c Mon Sep 17 00:00:00 2001
+From: Kristine Bujold <kristine.bujold@windriver.com>
+Date: Fri, 15 Jul 2016 16:55:16 -0400
+Subject: [PATCH] US80802 - PXE Installation changes for UEFI support. Fixing
+ pvcreate issue.
+
+---
+ .../lvm/lib/puppet/provider/physical_volume/lvm.rb      | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb b/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb
+index 6ac6e0a..18183ae 100644
+--- a/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/provider/physical_volume/lvm.rb
+@@ -1,12 +1,27 @@
+ Puppet::Type.type(:physical_volume).provide(:lvm) do
+     desc "Manages LVM physical volumes"
+-    commands :pvcreate  => 'pvcreate', :pvremove => 'pvremove', :pvs => 'pvs', :vgs => 'vgs'
++
++    commands :pvcreate  => 'pvcreate',
++             :pvremove => 'pvremove',
++             :pvs => 'pvs',
++             :vgs => 'vgs',
++             :dd  => 'dd'
+     def create
++        # Delete the first few bytes at the start and end of the partition. This is required with
++        # GPT partitions, they save partition info at the start and the end of the block.
++        exec_cmd('dd', 'if=/dev/zero', "of=#{@resource[:name]}", "bs=512", "count=34")
++        exec_cmd('dd', 'if=/dev/zero', "of=#{@resource[:name]}", "bs=512", "count=34", "seek=$((`blockdev --getsz #{@resource[:name]}` - 34))")
++
+         pvcreate('-y', @resource[:name])
+     end
++    def exec_cmd(*cmd)
++      output = Puppet::Util::Execution.execute(cmd, :failonfail => false, :combine => true)
++      {:out => output, :exit => $CHILD_STATUS.exitstatus}
++    end
++
+     def destroy
+         pvremove(@resource[:name])
+     end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0003-US94222-Persistent-Dev-Naming.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0003-US94222-Persistent-Dev-Naming.patch
new file mode 100644 (file)
index 0000000..a1ddc0d
--- /dev/null
@@ -0,0 +1,25 @@
+From b05de190832bba08ce410c267c4b2f8a74916f7a Mon Sep 17 00:00:00 2001
+From: Robert Church <robert.church@windriver.com>
+Date: Wed, 1 Mar 2017 09:12:34 +0000
+Subject: [PATCH] US94222: Persistent Dev Naming
+
+---
+ packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb b/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb
+index 3d54dba..04dea64 100644
+--- a/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/provider/volume_group/lvm.rb
+@@ -81,7 +81,7 @@ Puppet::Type.type(:volume_group).provide :lvm do
+     def physical_volumes
+         if @resource[:createonly].to_s == "false" || ! vgs(@resource[:name])
+-          lines = pvs('-o', 'pv_name,vg_name', '--separator', ',')
++          lines = `pvs -o pv_name,vg_name --separator ',' | awk -F ',' 'NR>1{cmd="find -L /dev/disk/by-path/ -samefile" $1; cmd | getline $1;print $1 "," $2; next};{print}'`
+           lines.split(/\n/).grep(/,#{@resource[:name]}$/).map { |s|
+             s.split(/,/)[0].strip
+           }
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0004-extendind-nuke_fs_on_resize_failure-functionality.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/0004-extendind-nuke_fs_on_resize_failure-functionality.patch
new file mode 100644 (file)
index 0000000..a0115a7
--- /dev/null
@@ -0,0 +1,34 @@
+From f2676c5ac0e17a18726815b72ef449c804e07135 Mon Sep 17 00:00:00 2001
+From: Stefan Dinescu <stefan.dinescu@windriver.com>
+Date: Wed, 6 Dec 2017 12:50:14 +0000
+Subject: [PATCH 1/1] extendind nuke_fs_on_resize_failure functionality
+
+---
+ .../modules/lvm/lib/puppet/provider/logical_volume/lvm.rb     | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb b/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
+index 2f41695..2abfea3 100755
+--- a/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
+@@ -188,7 +188,16 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+             exec_cmd('dd', 'if=/dev/zero', "of=#{path}", "bs=512", "count=16", "conv=notrunc")
+             blkid('-g')
+           end
+-          lvresize( '-f', '-L', "#{new_size}k", path) || fail( "Cannot reduce to size #{new_size} because lvresize failed." )
++          r = exec_cmd('lvresize', '-r', '-f', '-L', "#{new_size}k", path)
++          if r[:exit] != 0
++            if @resource[:nuke_fs_on_resize_failure]
++              exec_cmd('dd', 'if=/dev/zero', "of=#{path}", "bs=512", "count=16", "conv=notrunc")
++              blkid('-g')
++              lvresize( '-f', '-L', "#{new_size}k", path) || fail( "Cannot reduce to size #{new_size} because lvresize failed." )
++            else
++              fail( "Cannot reduce to size #{new_size} because lvresize failed." )
++            end
++          end
+         end
+       elsif new_size > current_size
+         if new_size % vg_extent_size != 0
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..a0ce4cb
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-lvm.gemspec   2019-10-30 21:16:54.774974158 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-lvm'
++  s.version     = '0.5.0'
++  s.date        = '2014-09-13'
++  s.summary     = "Provides Puppet types and providers to manage Logical Resource Management (LVM) features."
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(metadata.json README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-lvm'
++  s.license     = 'GPL-2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/Fix-the-logical-statement-for-nuke_fs_on_resize.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-lvm/Fix-the-logical-statement-for-nuke_fs_on_resize.patch
new file mode 100644 (file)
index 0000000..e1796ba
--- /dev/null
@@ -0,0 +1,45 @@
+From 21d2c4e714611ad08e5aa999e555e1e7591f2717 Mon Sep 17 00:00:00 2001
+From: Kristine Bujold <kristine.bujold@windriver.com>
+Date: Thu, 19 Jul 2018 09:02:27 -0400
+Subject: [PATCH 1/1] Patch4:
+ Fix-the-logical-statement-for-nuke_fs_on_resize_2.patch
+
+---
+ .../puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb b/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
+index 2abfea3..f9b1c66 100755
+--- a/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
++++ b/packstack/puppet/modules/lvm/lib/puppet/provider/logical_volume/lvm.rb
+@@ -184,13 +184,15 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+           exec_cmd('umount', path)
+           exec_cmd('fsadm', '-y', 'check', path )
+           r = exec_cmd('fsadm', '-y', 'resize', path, "#{new_size}k")
+-          if r[:exit] != 0 and @resource[:nuke_fs_on_resize_failure]
++          if r[:exit] != 0 and [:true, "true", true ].include? @resource[:nuke_fs_on_resize_failure]
++            info( "Failed 'fsadm resize' erase the disk #{r}" )
+             exec_cmd('dd', 'if=/dev/zero', "of=#{path}", "bs=512", "count=16", "conv=notrunc")
+             blkid('-g')
+           end
+           r = exec_cmd('lvresize', '-r', '-f', '-L', "#{new_size}k", path)
+           if r[:exit] != 0
+-            if @resource[:nuke_fs_on_resize_failure]
++            if [:true, "true", true ].include? @resource[:nuke_fs_on_resize_failure]
++              info( "Failed 'fsadm resize' erase the disk #{r}" )
+               exec_cmd('dd', 'if=/dev/zero', "of=#{path}", "bs=512", "count=16", "conv=notrunc")
+               blkid('-g')
+               lvresize( '-f', '-L', "#{new_size}k", path) || fail( "Cannot reduce to size #{new_size} because lvresize failed." )
+@@ -215,7 +217,8 @@ Puppet::Type.type(:logical_volume).provide :lvm do
+         exec_cmd('umount', path)
+         exec_cmd('fsadm', '-y', 'check', path )
+         r = exec_cmd('fsadm', '-y', 'resize', path, "#{new_size}k")
+-        if r[:exit] != 0 and @resource[:nuke_fs_on_resize_failure]
++        if r[:exit] != 0 and [:true, "true", true ].include? @resource[:nuke_fs_on_resize_failure]
++          info( "Failed 'fsadm resize' erase the disk #{r}" )
+           exec_cmd('dd', 'if=/dev/zero', "of=#{path}", "bs=512", "count=16", "conv=notrunc")
+           blkid('-g')
+         end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0001-Fix-ruby-path.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0001-Fix-ruby-path.patch
new file mode 100644 (file)
index 0000000..77a40c2
--- /dev/null
@@ -0,0 +1,33 @@
+From c92a9d11002184a67f8bedf542d84f527eea3107 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Tue, 29 Oct 2019 15:12:18 -0700
+Subject: [PATCH] Fix ruby path
+
+---
+ tasks/export.rb | 2 +-
+ tasks/sql.rb    | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tasks/export.rb b/tasks/export.rb
+index efd9d81..18106ec 100755
+--- a/tasks/export.rb
++++ b/tasks/export.rb
+@@ -1,4 +1,4 @@
+-#!/opt/puppetlabs/puppet/bin/ruby
++#!/usr/bin/ruby
+ require 'json'
+ require 'open3'
+ require 'puppet'
+diff --git a/tasks/sql.rb b/tasks/sql.rb
+index 29b2c6b..7256f2d 100755
+--- a/tasks/sql.rb
++++ b/tasks/sql.rb
+@@ -1,4 +1,4 @@
+-#!/opt/puppetlabs/puppet/bin/ruby
++#!/usr/bin/ruby
+ require 'json'
+ require 'open3'
+ require 'puppet'
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0001-Stx-uses-nanliu-staging-module.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0001-Stx-uses-nanliu-staging-module.patch
new file mode 100644 (file)
index 0000000..ca02843
--- /dev/null
@@ -0,0 +1,26 @@
+From 3b1cb5e63416223336d2bd6a43f66007b9d388e3 Mon Sep 17 00:00:00 2001
+From: babak sarashki <babak.sarashki@windriver.com>
+Date: Thu, 31 Oct 2019 13:54:47 -0700
+Subject: [PATCH] Stx uses nanliu-staging module
+
+Use nanliu-staging instead of puppet-staging
+---
+ metadata.json | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/metadata.json b/metadata.json
+index 94e9d69..682c1e1 100644
+--- a/metadata.json
++++ b/metadata.json
+@@ -9,7 +9,7 @@
+   "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
+   "dependencies": [
+     {"name":"puppetlabs/stdlib","version_requirement":">= 3.2.0 < 5.0.0"},
+-    {"name":"puppet/staging","version_requirement":">= 1.0.1 < 3.0.0"}
++    {"name":"nanliu/staging","version_requirement":">= 1.0.1 < 3.0.0"}
+   ],
+   "operatingsystem_support": [
+     {
+-- 
+2.17.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0002-puppet-mysql-changes-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0002-puppet-mysql-changes-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..33d65d5
--- /dev/null
@@ -0,0 +1,28 @@
+diff -ru a/manifests/params.pp b/manifests/params.pp
+--- a/manifests/params.pp      2020-03-05 19:24:51.246516534 +0800
++++ b/manifests/params.pp      2020-03-05 19:50:08.630557886 +0800
+@@ -167,7 +167,7 @@
+     }
+     'Debian': {
+-      if $::operatingsystem == 'Debian' and versioncmp($::operatingsystemrelease, '9') >= 0 {
++      if ($::operatingsystem == 'Debian' and versioncmp($::operatingsystemrelease, '9') >= 0 or $::operatingsystem == 'poky-stx' {
+         $provider = 'mariadb'
+       } else {
+         $provider = 'mysql'
+@@ -188,8 +188,13 @@
+       }
+       $basedir                 = '/usr'
+-      $config_file             = '/etc/mysql/my.cnf'
+-      $includedir              = '/etc/mysql/conf.d'
++      if $::operatingsystem == 'poky-stx' {
++        $config_file             = '/etc/my.cnf.d/server.cnf'
++        $includedir              = '/etc/my.cnf.d'
++      } else {
++        $config_file             = '/etc/mysql/my.cnf'
++        $includedir              = '/etc/mysql/conf.d'
++      }
+       $datadir                 = '/var/lib/mysql'
+       $log_error               = '/var/log/mysql/error.log'
+       $pidfile                 = '/var/run/mysqld/mysqld.pid'
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0003-puppet-mysqltuner-adjust-path.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/0003-puppet-mysqltuner-adjust-path.patch
new file mode 100644 (file)
index 0000000..2900583
--- /dev/null
@@ -0,0 +1,34 @@
+diff --git a/manifests/server/mysqltuner.pp b/manifests/server/mysqltuner.pp
+index ae91e63..4857c0c 100644
+--- a/manifests/server/mysqltuner.pp
++++ b/manifests/server/mysqltuner.pp
+@@ -40,14 +40,14 @@ class mysql::server::mysqltuner(
+       source      => $_source,
+       environment => $environment,
+     }
+-    file { '/usr/local/bin/mysqltuner':
++    file { '/usr/bin/mysqltuner':
+       ensure  => $ensure,
+       mode    => '0550',
+       source  => "${::staging::path}/mysql/mysqltuner-${_version}",
+       require => Staging::File["mysqltuner-${_version}"],
+     }
+   } else {
+-    file { '/usr/local/bin/mysqltuner':
++    file { '/usr/bin/mysqltuner':
+       ensure => $ensure,
+     }
+   }
+diff --git a/spec/classes/mysql_server_mysqltuner_spec.rb b/spec/classes/mysql_server_mysqltuner_spec.rb
+index 4fef3d5..c9cf5f0 100644
+--- a/spec/classes/mysql_server_mysqltuner_spec.rb
++++ b/spec/classes/mysql_server_mysqltuner_spec.rb
+@@ -21,7 +21,7 @@ describe 'mysql::server::mysqltuner' do
+       context 'ensure => absent' do
+         let(:params) {{ :ensure => 'absent' }}
+         it { is_expected.to compile }
+-        it { is_expected.to contain_file('/usr/local/bin/mysqltuner').with(:ensure => 'absent') }
++        it { is_expected.to contain_file('/usr/bin/mysqltuner').with(:ensure => 'absent') }
+       end
+       context 'custom version' do
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-mysql/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..0df5526
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-mysql.gemspec 2019-10-31 11:20:18.066704740 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-mysql'
++  s.version     = '3.11.0'
++  s.date        = '2019-09-24'
++  s.summary     = "Installs, configures, and manages the MySQL service."
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-mysql'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0001-Roll-up-TIS-patches.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0001-Roll-up-TIS-patches.patch
new file mode 100644 (file)
index 0000000..ed917cd
--- /dev/null
@@ -0,0 +1,87 @@
+From 94cc61ad7f76d94791fee4f596d3c8c3124c0526 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Wed, 11 Jan 2017 14:25:20 -0500
+Subject: [PATCH] Roll up TIS patches
+
+---
+ manifests/params.pp        | 2 +-
+ manifests/server/config.pp | 8 ++++++--
+ manifests/server/initdb.pp | 9 +++++++++
+ 3 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/manifests/params.pp b/manifests/params.pp
+index d40a1eb..45be360 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -74,7 +74,7 @@ class postgresql::params inherits postgresql::globals {
+       }
+       $psql_path           = pick($psql_path, "${bindir}/psql")
+-      $service_status      = $service_status
++      $service_status      = "systemctl is-active postgresql"
+       $service_reload      = "service ${service_name} reload"
+       $perl_package_name   = pick($perl_package_name, 'perl-DBD-Pg')
+       $python_package_name = pick($python_package_name, 'python-psycopg2')
+diff --git a/manifests/server/config.pp b/manifests/server/config.pp
+index 205dd22..2ecad4b 100644
+--- a/manifests/server/config.pp
++++ b/manifests/server/config.pp
+@@ -111,6 +111,12 @@ class postgresql::server::config {
+   postgresql::server::config_entry { 'data_directory':
+     value => $datadir,
+   }
++  postgresql::server::config_entry { 'hba_file':
++    value => $pg_hba_conf_path,
++  }
++  postgresql::server::config_entry { 'ident_file':
++    value => $pg_ident_conf_path,
++  }
+   if $timezone {
+     postgresql::server::config_entry { 'timezone':
+       value => $timezone,
+@@ -154,7 +160,6 @@ class postgresql::server::config {
+     concat { $pg_ident_conf_path:
+       owner  => $user,
+       group  => $group,
+-      force  => true, # do not crash if there is no pg_ident_rules
+       mode   => '0640',
+       warn   => true,
+       notify => Class['postgresql::server::reload'],
+@@ -165,7 +170,6 @@ class postgresql::server::config {
+     concat { $recovery_conf_path:
+       owner  => $user,
+       group  => $group,
+-      force  => true, # do not crash if there is no recovery conf file
+       mode   => '0640',
+       warn   => true,
+       notify => Class['postgresql::server::reload'],
+diff --git a/manifests/server/initdb.pp b/manifests/server/initdb.pp
+index 2252a19..5e263e3 100644
+--- a/manifests/server/initdb.pp
++++ b/manifests/server/initdb.pp
+@@ -3,6 +3,7 @@ class postgresql::server::initdb {
+   $needs_initdb   = $postgresql::server::needs_initdb
+   $initdb_path    = $postgresql::server::initdb_path
+   $datadir        = $postgresql::server::datadir
++  $confdir      = $postgresql::server::confdir
+   $xlogdir        = $postgresql::server::xlogdir
+   $logdir         = $postgresql::server::logdir
+   $encoding       = $postgresql::server::encoding
+@@ -41,6 +42,14 @@ class postgresql::server::initdb {
+     seltype => $seltype,
+   }
++  # Make sure the conf directory exists, and has the correct permissions.
++  file { $confdir:
++    ensure => directory,
++    owner  => $user,
++    group  => $group,
++    mode   => '0700',
++  }
++
+   if($xlogdir) {
+     # Make sure the xlog directory exists, and has the correct permissions.
+     file { $xlogdir:
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0002-remove-puppetlabs-apt-as-a-requirement.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0002-remove-puppetlabs-apt-as-a-requirement.patch
new file mode 100644 (file)
index 0000000..d19b49c
--- /dev/null
@@ -0,0 +1,24 @@
+From dd019f3e222c799afff53cb00447c130839f7d39 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Wed, 3 Jan 2018 14:11:08 -0600
+Subject: [PATCH] remove puppetlabs-apt as a requirement
+
+---
+ metadata.json | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/metadata.json b/metadata.json
+index 2a59dc9..b1de7f0 100644
+--- a/metadata.json
++++ b/metadata.json
+@@ -9,7 +9,6 @@
+   "issues_url": "https://tickets.puppetlabs.com/browse/MODULES",
+   "dependencies": [
+     {"name":"puppetlabs/stdlib","version_requirement":"4.x"},
+-    {"name":"puppetlabs/apt","version_requirement":">=1.8.0 <3.0.0"},
+     {"name":"puppetlabs/concat","version_requirement":">= 1.1.0 <3.0.0"}
+   ],
+   "data_provider": null,
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0003-puppetlabs-postgresql-account-for-naming-diffs.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0003-puppetlabs-postgresql-account-for-naming-diffs.patch
new file mode 100644 (file)
index 0000000..a863ef1
--- /dev/null
@@ -0,0 +1,67 @@
+From 01c2f67ba1a938c18f609a9ff3dabe30a547af6d Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Thu, 26 Dec 2019 11:32:12 -0800
+Subject: [PATCH] puppetlabs postgresql: account for naming diffs
+
+Centos and other distros package postgresql server into postgresql-server,
+whereas OE provides postgresql package. This causes problems during bootstrap
+and consequent failure.
+
+Here we are adding this patch as a bbappend to be removed once puppet support
+is fully functional.
+---
+ manifests/params.pp              | 8 ++++----
+ manifests/server/install.pp      | 2 +-
+ spec/unit/classes/server_spec.rb | 2 +-
+ 3 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 45be360..3254d63 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -34,12 +34,12 @@ class postgresql::params inherits postgresql::globals {
+       $package_version    = "${version_parts[0]}${version_parts[1]}"
+       if $version == $postgresql::globals::default_version and $::operatingsystem != 'Amazon' {
+-        $client_package_name    = pick($client_package_name, 'postgresql')
+-        $server_package_name    = pick($server_package_name, 'postgresql-server')
++        $client_package_name    = pick($client_package_name, 'postgresql-client')
++        $server_package_name    = pick($server_package_name, 'postgresql')
+         $contrib_package_name   = pick($contrib_package_name,'postgresql-contrib')
+-        $devel_package_name     = pick($devel_package_name, 'postgresql-devel')
++        $devel_package_name     = pick($devel_package_name, 'postgresql-dev')
+         $java_package_name      = pick($java_package_name, 'postgresql-jdbc')
+-        $docs_package_name      = pick($docs_package_name, 'postgresql-docs')
++        $docs_package_name      = pick($docs_package_name, 'postgresql-doc')
+         $plperl_package_name    = pick($plperl_package_name, 'postgresql-plperl')
+         $plpython_package_name  = pick($plpython_package_name, 'postgresql-plpython')
+         $service_name           = pick($service_name, 'postgresql')
+diff --git a/manifests/server/install.pp b/manifests/server/install.pp
+index 8724f9f..ebee6af 100644
+--- a/manifests/server/install.pp
++++ b/manifests/server/install.pp
+@@ -11,7 +11,7 @@ class postgresql::server::install {
+     default => $package_ensure,
+   }
+-  package { 'postgresql-server':
++  package { 'postgresql':
+     ensure => $_package_ensure,
+     name   => $package_name,
+diff --git a/spec/unit/classes/server_spec.rb b/spec/unit/classes/server_spec.rb
+index 852b7e4..24e64f5 100644
+--- a/spec/unit/classes/server_spec.rb
++++ b/spec/unit/classes/server_spec.rb
+@@ -119,7 +119,7 @@ describe 'postgresql::server', :type => :class do
+     end
+     it 'should remove the package' do
+-      is_expected.to contain_package('postgresql-server').with({
++      is_expected.to contain_package('postgresql').with({
+         :ensure => 'purged',
+       })
+     end
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0004-poky-postgresql-updates.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0004-poky-postgresql-updates.patch
new file mode 100644 (file)
index 0000000..e1bb401
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 3254d63..20f91ef 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -133,6 +133,7 @@ class postgresql::params inherits postgresql::globals {
+         $needs_initdb = pick($needs_initdb, false)
+         $service_name = $::operatingsystem ? {
+           'Debian' => pick($service_name, 'postgresql'),
++          'poky-stx'   => pick($service_name, 'postgresql'),
+           'Ubuntu' => $::lsbmajdistrelease ? {
+             /^10/ => pick($service_name, "postgresql-${version}"),
+             default => pick($service_name, 'postgresql'),
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0005-puppetlabs-postgresql-poky.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0005-puppetlabs-postgresql-poky.patch
new file mode 100644 (file)
index 0000000..1ca1552
--- /dev/null
@@ -0,0 +1,46 @@
+diff -ru a/manifests/globals.pp b/manifests/globals.pp
+--- a/manifests/globals.pp     2020-03-04 10:59:33.651335723 +0800
++++ b/manifests/globals.pp     2020-03-04 11:00:13.407336807 +0800
+@@ -94,6 +94,10 @@
+         /^(16.04)$/ => '9.5',
+         default => undef,
+       },
++      'poky-stx' => $::operatingsystemrelease ? {
++        /^2\./ => '11.5',
++        default => undef,
++      },
+       default => undef,
+     },
+     'Archlinux' => $::operatingsystem ? {
+@@ -136,6 +140,7 @@
+     '9.4'   => '2.1',
+     '9.5'   => '2.2',
+     '9.6'   => '2.3',
++    '11.5'   => '2.5',
+     default => undef,
+   }
+   $globals_postgis_version = $postgis_version ? {
+diff -ru a/manifests/params.pp b/manifests/params.pp
+--- a/manifests/params.pp      2020-03-04 10:59:33.651335723 +0800
++++ b/manifests/params.pp      2020-03-04 10:59:42.423335963 +0800
+@@ -166,6 +166,8 @@
+       } elsif $::operatingsystem == 'Ubuntu' and versioncmp($::operatingsystemrelease, '15.04') >= 0 {
+         # Ubuntu releases since vivid use systemd
+         $service_status = pick($service_status, "/usr/sbin/service ${service_name} status")
++      } elsif $::operatingsystem == 'poky-stx' {
++        $service_status = "systemctl is-active ${service_name}"
+       } else {
+         $service_status = pick($service_status, "/etc/init.d/${service_name} status | /bin/egrep -q 'Running clusters: .+|online'")
+       }
+diff -ru a/templates/systemd-override.erb b/templates/systemd-override.erb
+--- a/templates/systemd-override.erb   2020-03-04 10:59:33.735335726 +0800
++++ b/templates/systemd-override.erb   2020-03-04 10:59:42.423335963 +0800
+@@ -2,6 +2,8 @@
+ .include /usr/lib64/systemd/system/<%= @service_name %>.service
+ <%- elsif scope.lookupvar('::operatingsystem') == 'Fedora' -%>
+ .include /lib/systemd/system/<%= @service_name %>.service
++<%- elsif scope.lookupvar('::operatingsystem') == 'poky-stx' -%>
++.include /lib/systemd/system/<%= @service_name %>.service
+ <% else -%>
+ .include /usr/lib/systemd/system/<%= @service_name %>.service
+ <% end -%>
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0006-adjust_path-remove-refs-to-local-bin.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/0006-adjust_path-remove-refs-to-local-bin.patch
new file mode 100644 (file)
index 0000000..6e1d7f5
--- /dev/null
@@ -0,0 +1,26 @@
+diff --git a/manifests/params.pp b/manifests/params.pp
+index 20f91ef..a894736 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -298,7 +298,7 @@ class postgresql::params inherits postgresql::globals {
+     }
+   }
+-  $validcon_script_path = pick($validcon_script_path, '/usr/local/bin/validate_postgresql_connection.sh')
++  $validcon_script_path = pick($validcon_script_path, '/usr/bin/validate_postgresql_connection.sh')
+   $initdb_path          = pick($initdb_path, "${bindir}/initdb")
+   $pg_hba_conf_path     = pick($pg_hba_conf_path, "${confdir}/pg_hba.conf")
+   $pg_hba_conf_defaults = pick($pg_hba_conf_defaults, true)
+diff --git a/spec/unit/defines/validate_db_connection_spec.rb b/spec/unit/defines/validate_db_connection_spec.rb
+index c7406dc..02adc04 100644
+--- a/spec/unit/defines/validate_db_connection_spec.rb
++++ b/spec/unit/defines/validate_db_connection_spec.rb
+@@ -34,7 +34,7 @@ describe 'postgresql::validate_db_connection', :type => :define do
+     it 'should have proper path for validate command' do
+       is_expected.to contain_exec('validate postgres connection for test@test:5432/test').with({
+-        :unless => %r'^/usr/local/bin/validate_postgresql_connection.sh\s+\d+'
++        :unless => %r'^/usr/bin/validate_postgresql_connection.sh\s+\d+'
+       })
+     end
+   end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..1b136bb
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-postgresql.gemspec    2019-10-29 18:57:08.102259309 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-postgresql'
++  s.version     = '4.8.0'
++  s.date        = '2016-12-22'
++  s.summary     = "Offers support for basic management of PostgreSQL databases."
++  s.description = s.summary
++  s.authors     = ["Inkling/Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-postgresql'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/postgresql.service b/meta-stx/recipes-support/puppet/files/puppetlabs-postgresql/postgresql.service
new file mode 100644 (file)
index 0000000..3b4c5ca
--- /dev/null
@@ -0,0 +1,22 @@
+[Unit]
+Description=PostgreSQL database server
+After=network.target
+
+[Install]
+WantedBy=multi-user.target
+
+[Service]
+Type=forking
+User=postgres
+Group=postgres
+# Disable OOM kill on the postmaster
+OOMScoreAdjust=-17
+
+PermissionsStartOnly=true
+ExecStartPre=-/etc/postgresql/postgresql-init initdb
+ExecStart=/usr/bin/pg_ctl start -D ${PGDATA} -s -o "-p ${PGPORT}" -w -t 300
+ExecStop=/usr/bin/pg_ctl stop -D ${PGDATA} -s -m fast
+ExecReload=/usr/bin/pg_ctl reload -D ${PGDATA} -s
+
+# Give a reasonable amount of time for the server to start up/shut down
+TimeoutSec=300
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0001-Roll-up-TIS-patches.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0001-Roll-up-TIS-patches.patch
new file mode 100644 (file)
index 0000000..a8fda48
--- /dev/null
@@ -0,0 +1,143 @@
+From 6170b01db0dea2b58fc0f150704205f7aac82ab4 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Thu, 2 Nov 2017 09:22:58 -0500
+Subject: [PATCH 1/2] WRS: Patch1: 0001-Roll-up-TIS-patches.patch
+
+---
+ lib/puppet/provider/rabbitmq_policy/rabbitmqctl.rb | 6 ++++++
+ manifests/config.pp                                | 6 +++---
+ manifests/init.pp                                  | 5 +++--
+ manifests/install.pp                               | 4 +++-
+ manifests/install/rabbitmqadmin.pp                 | 3 ++-
+ manifests/params.pp                                | 1 +
+ 6 files changed, 18 insertions(+), 7 deletions(-)
+
+diff --git a/lib/puppet/provider/rabbitmq_policy/rabbitmqctl.rb b/lib/puppet/provider/rabbitmq_policy/rabbitmqctl.rb
+index 7e73295..438d9cc 100644
+--- a/lib/puppet/provider/rabbitmq_policy/rabbitmqctl.rb
++++ b/lib/puppet/provider/rabbitmq_policy/rabbitmqctl.rb
+@@ -95,6 +95,12 @@ Puppet::Type.type(:rabbitmq_policy).provide(:rabbitmqctl, :parent => Puppet::Pro
+       resource[:definition] ||= definition
+       resource[:pattern]    ||= pattern
+       resource[:priority]   ||= priority
++      # WRS. Values passed in from packstack are in string format. These need
++      # to be converted back to integer for certain parameters (e.g. max-length,
++      # expires)
++      if (resource[:definition].keys & ["max-length", "expires"]).any?
++       resource[:definition].each {|k,v| resource[:definition][k] = v.to_i}
++      end
+       # rabbitmq>=3.2.0
+       if Puppet::Util::Package.versioncmp(self.class.rabbitmq_version, '3.2.0') >= 0
+         rabbitmqctl('set_policy',
+diff --git a/manifests/config.pp b/manifests/config.pp
+index 6e1f7f5..66a8b08 100644
+--- a/manifests/config.pp
++++ b/manifests/config.pp
+@@ -116,7 +116,7 @@ class rabbitmq::config {
+     ensure => directory,
+     owner  => '0',
+     group  => '0',
+-    mode   => '0644',
++    mode   => '0640',
+   }
+   file { '/etc/rabbitmq/ssl':
+@@ -132,7 +132,7 @@ class rabbitmq::config {
+     content => template($config),
+     owner   => '0',
+     group   => '0',
+-    mode    => '0644',
++    mode    => '0640',
+     notify  => Class['rabbitmq::service'],
+   }
+@@ -142,7 +142,7 @@ class rabbitmq::config {
+     content => template($env_config),
+     owner   => '0',
+     group   => '0',
+-    mode    => '0644',
++    mode    => '0640',
+     notify  => Class['rabbitmq::service'],
+   }
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 363c70d..3451599 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -11,6 +11,7 @@ class rabbitmq(
+   Hash $config_shovel_statics                    = $rabbitmq::params::config_shovel_statics,
+   String $default_user                           = $rabbitmq::params::default_user,
+   String $default_pass                           = $rabbitmq::params::default_pass,
++  String $default_host                           = $rabbitmq::params::default_host,
+   Boolean $delete_guest_user                     = $rabbitmq::params::delete_guest_user,
+   String $env_config                             = $rabbitmq::params::env_config,
+   Stdlib::Absolutepath $env_config_path          = $rabbitmq::params::env_config_path,
+@@ -186,7 +187,7 @@ class rabbitmq(
+     rabbitmq_plugin { 'rabbitmq_management':
+       ensure   => present,
+-      require  => Class['rabbitmq::install'],
++      require => [ File['/etc/rabbitmq'], Class['rabbitmq::install'] ],
+       notify   => Class['rabbitmq::service'],
+       provider => 'rabbitmqplugins',
+     }
+@@ -206,7 +207,7 @@ class rabbitmq(
+   if ($ldap_auth) {
+     rabbitmq_plugin { 'rabbitmq_auth_backend_ldap':
+       ensure  => present,
+-      require => Class['rabbitmq::install'],
++      require => [ File['/etc/rabbitmq'], Class['rabbitmq::install'] ],
+       notify  => Class['rabbitmq::service'],
+     }
+   }
+diff --git a/manifests/install.pp b/manifests/install.pp
+index 20ca090..45072c4 100644
+--- a/manifests/install.pp
++++ b/manifests/install.pp
+@@ -11,7 +11,9 @@ class rabbitmq::install {
+   package { 'rabbitmq-server':
+     ensure   => $package_ensure,
+     name     => $package_name,
+-    provider => $package_provider,
++    # DPENNEY: For some reason, package_provider is coming out as yum.
++    # Hardcode as rpm for now.
++    provider => 'rpm',
+     notify   => Class['rabbitmq::service'],
+     require  => $package_require,
+   }
+diff --git a/manifests/install/rabbitmqadmin.pp b/manifests/install/rabbitmqadmin.pp
+index e0ab7c7..9a3a8dd 100644
+--- a/manifests/install/rabbitmqadmin.pp
++++ b/manifests/install/rabbitmqadmin.pp
+@@ -11,6 +11,7 @@ class rabbitmq::install::rabbitmqadmin {
+   $default_user = $rabbitmq::default_user
+   $default_pass = $rabbitmq::default_pass
++  $default_host = $rabbitmq::default_host
+   $node_ip_address = $rabbitmq::node_ip_address
+   if $rabbitmq::node_ip_address == 'UNSET' {
+@@ -27,7 +28,7 @@ class rabbitmq::install::rabbitmqadmin {
+   staging::file { 'rabbitmqadmin':
+     target      => "${rabbitmq::rabbitmq_home}/rabbitmqadmin",
+-    source      => "${protocol}://${default_user}:${default_pass}@${sanitized_ip}:${management_port}/cli/rabbitmqadmin",
++    source      => "${protocol}://${default_user}:${default_pass}@${default_host}:${management_port}/cli/rabbitmqadmin",
+     curl_option => "-k ${curl_prefix} --retry 30 --retry-delay 6",
+     timeout     => '180',
+     wget_option => '--no-proxy',
+diff --git a/manifests/params.pp b/manifests/params.pp
+index ffface9..da0d2b4 100644
+--- a/manifests/params.pp
++++ b/manifests/params.pp
+@@ -87,6 +87,7 @@ class rabbitmq::params {
+   $config_shovel_statics       = {}
+   $default_user                = 'guest'
+   $default_pass                = 'guest'
++  $default_host               = 'localhost'
+   $delete_guest_user           = false
+   $env_config                  = 'rabbitmq/rabbitmq-env.conf.erb'
+   $env_config_path             = '/etc/rabbitmq/rabbitmq-env.conf'
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0002-Changed-cipher-specification-to-openssl-format.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0002-Changed-cipher-specification-to-openssl-format.patch
new file mode 100644 (file)
index 0000000..a58966d
--- /dev/null
@@ -0,0 +1,35 @@
+From c6a94f3bbc69d82c74cc597b6b7b1fe5813b0537 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Thu, 2 Nov 2017 09:22:58 -0500
+Subject: [PATCH 2/2] WRS: Patch2:
+ 0002-Changed-cipher-specification-to-openssl-format.patch
+
+---
+ templates/rabbitmq.config.erb | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/templates/rabbitmq.config.erb b/templates/rabbitmq.config.erb
+index cf2a388..b9612f3 100644
+--- a/templates/rabbitmq.config.erb
++++ b/templates/rabbitmq.config.erb
+@@ -72,7 +72,7 @@
+                    <%- end -%>
+                    <%- if @ssl_ciphers and @ssl_ciphers.size > 0 -%>
+                    ,{ciphers,[
+-                     <%= @ssl_ciphers.sort.map{|k| "{#{k}}"}.join(",\n                     ") %>
++                     <%= @ssl_ciphers.sort.map{|k| "\"#{k}\""}.join(",\n                     ") %>
+                    ]}
+                    <%- end -%>
+                   ]},
+@@ -111,7 +111,7 @@
+                    <%- end -%>
+                   <%- if @ssl_ciphers and @ssl_ciphers.size > 0 -%>
+                   ,{ciphers,[
+-                      <%= @ssl_ciphers.sort.map{|k| "{#{k}}"}.join(",\n                      ") %>
++                      <%= @ssl_ciphers.sort.map{|k| "\"#{k}\""}.join(",\n                      ") %>
+                   ]}
+                   <%- end -%>
+                  ]}
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0004-Partially-revert-upstream-commit-f7c3a4a637d59f3065d.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0004-Partially-revert-upstream-commit-f7c3a4a637d59f3065d.patch
new file mode 100644 (file)
index 0000000..4575722
--- /dev/null
@@ -0,0 +1,32 @@
+From 5c8fa2301ee9fa92267ff351e3fa3e59f2b2df79 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Wed, 24 Jan 2018 16:01:48 -0600
+Subject: [PATCH] Partially revert upstream commit
+ f7c3a4a637d59f3065d8129e9ebacba992dfc469
+
+Upstream converted the code based on rabbitmqctl 3.6.10 changes
+We are using 3.6.5  but hopefully this expression will match both
+
+Status of node rabbit@localhost ...
+or
+Status of node rabbit@localhost
+---
+ lib/facter/rabbitmq_nodename.rb | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/facter/rabbitmq_nodename.rb b/lib/facter/rabbitmq_nodename.rb
+index 301e3c1..d5c6352 100644
+--- a/lib/facter/rabbitmq_nodename.rb
++++ b/lib/facter/rabbitmq_nodename.rb
+@@ -2,7 +2,7 @@ Facter.add(:rabbitmq_nodename) do
+   setcode do
+     if Facter::Core::Execution.which('rabbitmqctl')
+       rabbitmq_nodename = Facter::Core::Execution.execute('rabbitmqctl status 2>&1')
+-      %r{^Status of node '?([\w\.]+@[\w\.\-]+)'?}.match(rabbitmq_nodename)[1]
++      %r{^Status of node '?([\w\.]+@[\w\.\-]+)'?( \.+)?$}.match(rabbitmq_nodename)[1]
+     end
+   end
+ end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0005-Remove-the-rabbitmq_nodename-fact.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0005-Remove-the-rabbitmq_nodename-fact.patch
new file mode 100644 (file)
index 0000000..dbe7ce4
--- /dev/null
@@ -0,0 +1,87 @@
+From 02c56be5340b079797fdb9944e1e048f1c3a18b7 Mon Sep 17 00:00:00 2001
+From: Al Bailey <Al.Bailey@windriver.com>
+Date: Fri, 2 Feb 2018 12:30:22 -0600
+Subject: [PATCH] Remove the rabbitmq_nodename fact
+
+This fact will not work on a standby node. It requires a drbd folder in order
+for rabbitmqctl status to work.
+---
+ lib/facter/rabbitmq_nodename.rb     |  8 ------
+ spec/unit/rabbitmq_nodename_spec.rb | 50 -------------------------------------
+ 2 files changed, 58 deletions(-)
+ delete mode 100644 lib/facter/rabbitmq_nodename.rb
+ delete mode 100644 spec/unit/rabbitmq_nodename_spec.rb
+
+diff --git a/lib/facter/rabbitmq_nodename.rb b/lib/facter/rabbitmq_nodename.rb
+deleted file mode 100644
+index 2ee7926..0000000
+--- a/lib/facter/rabbitmq_nodename.rb
++++ /dev/null
+@@ -1,8 +0,0 @@
+-Facter.add(:rabbitmq_nodename) do
+-  setcode do
+-    if Facter::Core::Execution.which('rabbitmqctl')
+-      rabbitmq_nodename = Facter::Core::Execution.execute('rabbitmqctl status 2>&1')
+-      %r{^Status of node '?([\w\.]+@[\w\.\-]+)'?( \.+)?$}.match(rabbitmq_nodename)[1]
+-    end
+-  end
+-end
+diff --git a/spec/unit/rabbitmq_nodename_spec.rb b/spec/unit/rabbitmq_nodename_spec.rb
+deleted file mode 100644
+index 621d7eb..0000000
+--- a/spec/unit/rabbitmq_nodename_spec.rb
++++ /dev/null
+@@ -1,50 +0,0 @@
+-require "spec_helper"
+-
+-describe Facter::Util::Fact do
+-  before {
+-    Facter.clear
+-  }
+-
+-  describe "rabbitmq_nodename" do
+-    context 'with value' do
+-      before :each do
+-        Facter::Core::Execution.stubs(:which).with('rabbitmqctl').returns(true)
+-        Facter::Core::Execution.stubs(:execute).with('rabbitmqctl status 2>&1').returns('Status of node monty@rabbit1 ...')
+-      end
+-      it {
+-        expect(Facter.fact(:rabbitmq_nodename).value).to eq('monty@rabbit1')
+-      }
+-    end
+-
+-    context 'with dashes in hostname' do
+-      before :each do
+-        Facter::Core::Execution.stubs(:which).with('rabbitmqctl').returns(true)
+-        Facter::Core::Execution.stubs(:execute).with('rabbitmqctl status 2>&1').returns('Status of node monty@rabbit-1 ...')
+-      end
+-      it {
+-        expect(Facter.fact(:rabbitmq_nodename).value).to eq('monty@rabbit-1')
+-      }
+-    end
+-
+-    context 'with quotes around node name' do
+-      before :each do
+-        Facter::Core::Execution.stubs(:which).with('rabbitmqctl').returns(true)
+-        Facter::Core::Execution.stubs(:execute).with('rabbitmqctl status 2>&1').returns('Status of node \'monty@rabbit-1\' ...')
+-      end
+-      it {
+-        expect(Facter.fact(:rabbitmq_nodename).value).to eq('monty@rabbit-1')
+-      }
+-    end
+-
+-    context 'without trailing points' do
+-      before :each do
+-        Facter::Core::Execution.stubs(:which).with('rabbitmqctl').returns(true)
+-        Facter::Core::Execution.stubs(:execute).with('rabbitmqctl status 2>&1').returns('Status of node monty@rabbit-1')
+-      end
+-      it {
+-        expect(Facter.fact(:rabbitmq_nodename).value).to eq('monty@rabbit-1')
+-      }
+-    end
+-
+-  end
+-end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0007-init.pp-do-not-check-the-apt-resource.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0007-init.pp-do-not-check-the-apt-resource.patch
new file mode 100644 (file)
index 0000000..3786ac8
--- /dev/null
@@ -0,0 +1,26 @@
+From dfce3cde414089af920d60b0aa3e922137474cfe Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Sat, 14 Mar 2020 10:50:02 +0800
+Subject: [PATCH] init.pp: do not check the apt resource
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ manifests/init.pp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/manifests/init.pp b/manifests/init.pp
+index 7808464..9f3f55e 100644
+--- a/manifests/init.pp
++++ b/manifests/init.pp
+@@ -127,7 +127,7 @@ class rabbitmq(
+     warning('$manage_repos is now deprecated. Please use $repos_ensure instead')
+   }
+-  if $manage_repos != false {
++  if $repos_ensure != false {
+     case $::osfamily {
+       'RedHat', 'SUSE': {
+           include '::rabbitmq::repo::rhel'
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0008-puppet-rabbitmq-poky.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0008-puppet-rabbitmq-poky.patch
new file mode 100644 (file)
index 0000000..24b5408
--- /dev/null
@@ -0,0 +1,12 @@
+diff -ru a/manifests/config.pp b/manifests/config.pp
+--- a/manifests/config.pp      2020-03-05 15:10:18.442100317 +0800
++++ b/manifests/config.pp      2020-03-05 15:10:31.086100661 +0800
+@@ -170,7 +170,7 @@
+   case $::osfamily {
+     'Debian': {
+-      if versioncmp($::operatingsystemmajrelease, '16.04') >= 0 {
++      if versioncmp($::operatingsystemmajrelease, '16.04') >= 0 or $::operatingsystem == 'poky-stx' {
+         file { '/etc/systemd/system/rabbitmq-server.service.d':
+           ensure                  => directory,
+           owner                   => '0',
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0009-remove-apt-requirement.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/0009-remove-apt-requirement.patch
new file mode 100644 (file)
index 0000000..2568193
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/metadata.json b/metadata.json
+index 5803cf5..b3426f6 100644
+--- a/metadata.json
++++ b/metadata.json
+@@ -48,7 +48,6 @@
+   ],
+   "dependencies": [
+     {"name":"puppetlabs/stdlib","version_requirement":">= 3.13.1 < 5.0.0"},
+-    {"name":"puppetlabs/apt","version_requirement":">= 1.8.0 < 5.0.0"},
+     {"name":"puppet/staging","version_requirement":">= 0.3.1 < 2.0.0"}
+   ]
+ }
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-rabbitmq/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..4ad613a
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-rabbitmq.gemspec      2019-10-28 21:37:51.198972160 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-rabbitmq'
++  s.version     = '5.6.0'
++  s.date        = '2017-01-09'
++  s.summary     = "Installs, configures, and manages RabbitMQ."
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-rabbitmq'
++  s.license     = 'Apache 2.0'
++ end
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-stdlib/0001-Filter-password-in-logs.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-stdlib/0001-Filter-password-in-logs.patch
new file mode 100644 (file)
index 0000000..73f7267
--- /dev/null
@@ -0,0 +1,36 @@
+From d95ec2abaa68a1da308c3c8b01c700fcc544a788 Mon Sep 17 00:00:00 2001
+From: Don Penney <don.penney@windriver.com>
+Date: Mon, 1 May 2017 14:37:22 -0400
+Subject: [PATCH] Filter password in logs
+
+---
+ lib/puppet/parser/functions/ensure_resource.rb | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/lib/puppet/parser/functions/ensure_resource.rb b/lib/puppet/parser/functions/ensure_resource.rb
+index 1ba6a44..b9c3242 100644
+--- a/lib/puppet/parser/functions/ensure_resource.rb
++++ b/lib/puppet/parser/functions/ensure_resource.rb
+@@ -30,15 +30,17 @@ ENDOFDOC
+   raise(ArgumentError, 'Must specify a type') unless type
+   raise(ArgumentError, 'Must specify a title') unless title
+   params ||= {}
++  filtered_params = Marshal.load(Marshal.dump(params)) # deep copy
++  filtered_params.delete("password")
+   items = [title].flatten
+   items.each do |item|
+     Puppet::Parser::Functions.function(:defined_with_params)
+     if function_defined_with_params(["#{type}[#{item}]", params])
+-      Puppet.debug("Resource #{type}[#{item}] with params #{params} not created because it already exists")
++      Puppet.debug("Resource #{type}[#{item}] with params #{filtered_params} not created because it already exists")
+     else
+-      Puppet.debug("Create new resource #{type}[#{item}] with params #{params}")
++      Puppet.debug("Create new resource #{type}[#{item}] with params #{filtered_params}")
+       Puppet::Parser::Functions.function(:create_resources)
+       function_create_resources([type.capitalize, { item => params }])
+     end
+-- 
+1.8.3.1
+
diff --git a/meta-stx/recipes-support/puppet/files/puppetlabs-stdlib/Add-gemspec.patch b/meta-stx/recipes-support/puppet/files/puppetlabs-stdlib/Add-gemspec.patch
new file mode 100644 (file)
index 0000000..5520afe
--- /dev/null
@@ -0,0 +1,15 @@
+--- /dev/null  2019-10-28 19:50:04.372284581 -0700
++++ b/puppetlabs-stdlib.gemspec        2019-10-29 15:09:32.804637910 -0700
+@@ -0,0 +1,12 @@
++Gem::Specification.new do |s|
++  s.name        = 'puppetlabs-stdlib'
++  s.version     = '4.18.0'
++  s.date        = '2017-08-10'
++  s.summary     = "Puppet Labs Standard Library module"
++  s.description = s.summary
++  s.authors     = ["Puppet Labs"]
++  s.email       = ''
++  s.files       = %w(LICENSE README.md Rakefile) + Dir.glob('{lib,spec}/**/*')
++  s.homepage    = 'https://github.com/puppetlabs/puppetlabs-stdlib'
++  s.license     = 'Apache 2.0'
++end
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0001-puppet-manifest-apply-rebase-adjust-path.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0001-puppet-manifest-apply-rebase-adjust-path.patch
new file mode 100644 (file)
index 0000000..d948beb
--- /dev/null
@@ -0,0 +1,25 @@
+From f6d38d1003cdab21626f93c83bd94305881fb4c6 Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Fri, 6 Mar 2020 22:52:19 -0800
+Subject: [PATCH] stx.3.0 rebase: adjust path
+
+---
+ puppet-manifests/src/bin/puppet-manifest-apply.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/puppet-manifests/src/bin/puppet-manifest-apply.sh b/puppet-manifests/src/bin/puppet-manifest-apply.sh
+index 95e9958..4d36059 100755
+--- a/puppet-manifests/src/bin/puppet-manifest-apply.sh
++++ b/puppet-manifests/src/bin/puppet-manifest-apply.sh
+@@ -98,7 +98,7 @@ export STDLIB_LOG_DEPRECATIONS=false
+ echo "Applying puppet ${MANIFEST} manifest..."
+ flock /var/run/puppet.lock \
+-    puppet apply --debug --trace --modulepath ${PUPPET_MODULES_PATH} ${PUPPET_MANIFEST} \
++    puppet apply  --hiera_config=/etc/puppet/hiera.yaml --debug --trace --modulepath ${PUPPET_MODULES_PATH} ${PUPPET_MANIFEST} \
+         < /dev/null 2>&1 | awk ' { system("date -u +%FT%T.%3N | tr \"\n\" \" \""); print $0; fflush(); } ' > ${LOGFILE}
+ if [ $? -ne 0 ]; then
+     echo "[FAILED]"
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0002-puppet-manifests-port-Adjust-path-default-bindir.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0002-puppet-manifests-port-Adjust-path-default-bindir.patch
new file mode 100644 (file)
index 0000000..0e0fc06
--- /dev/null
@@ -0,0 +1,132 @@
+From e321b75d4810c4bd66fe4ec10b974ad77184d74f Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Sat, 7 Mar 2020 00:18:43 -0800
+Subject: [PATCH] puppet-manifests port: Adjust path default bindir
+
+---
+ .../src/modules/platform/files/ldap.cgcs-shell.ldif  |  2 +-
+ .../src/modules/platform/manifests/collectd.pp       |  2 +-
+ .../src/modules/platform/manifests/drbd.pp           |  4 ++--
+ .../src/modules/platform/manifests/ldap.pp           |  4 ++--
+ .../src/modules/platform/manifests/network.pp        |  2 +-
+ .../src/modules/platform/manifests/remotelogging.pp  |  2 +-
+ .../modules/platform/templates/ldapscripts.conf.erb  | 12 ++++++------
+ 7 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/puppet-manifests/src/modules/platform/files/ldap.cgcs-shell.ldif b/puppet-manifests/src/modules/platform/files/ldap.cgcs-shell.ldif
+index 95005fd..6ae4838 100644
+--- a/puppet-manifests/src/modules/platform/files/ldap.cgcs-shell.ldif
++++ b/puppet-manifests/src/modules/platform/files/ldap.cgcs-shell.ldif
+@@ -1,4 +1,4 @@
+ dn: uid=operator,ou=People,dc=cgcs,dc=local
+ changetype: modify
+ replace: loginShell
+-loginShell: /usr/local/bin/cgcs_cli
++loginShell: /usr/bin/cgcs_cli
+diff --git a/puppet-manifests/src/modules/platform/manifests/collectd.pp b/puppet-manifests/src/modules/platform/manifests/collectd.pp
+index 99e1d2d..a35bc6f 100644
+--- a/puppet-manifests/src/modules/platform/manifests/collectd.pp
++++ b/puppet-manifests/src/modules/platform/manifests/collectd.pp
+@@ -58,6 +58,6 @@ class platform::collectd::runtime {
+ class platform::collectd::restart {
+   include ::platform::collectd
+   exec { 'collectd-restart':
+-      command => '/usr/local/sbin/pmon-restart collectd'
++      command => '/usr/sbin/pmon-restart collectd'
+   }
+ }
+diff --git a/puppet-manifests/src/modules/platform/manifests/drbd.pp b/puppet-manifests/src/modules/platform/manifests/drbd.pp
+index f7dc3bc..bb20b82 100644
+--- a/puppet-manifests/src/modules/platform/manifests/drbd.pp
++++ b/puppet-manifests/src/modules/platform/manifests/drbd.pp
+@@ -79,9 +79,9 @@ define platform::drbd::filesystem (
+     mountpoint    => $mountpoint,
+     handlers      => {
+       before-resync-target =>
+-        "/usr/local/sbin/sm-notify -s ${sm_service} -e sync-start",
++        "/usr/sbin/sm-notify -s ${sm_service} -e sync-start",
+       after-resync-target  =>
+-        "/usr/local/sbin/sm-notify -s ${sm_service} -e sync-end",
++        "/usr/sbin/sm-notify -s ${sm_service} -e sync-end",
+     },
+     host1         => $::platform::drbd::params::host1,
+     host2         => $::platform::drbd::params::host2,
+diff --git a/puppet-manifests/src/modules/platform/manifests/ldap.pp b/puppet-manifests/src/modules/platform/manifests/ldap.pp
+index b3d6ee7..8770bff 100644
+--- a/puppet-manifests/src/modules/platform/manifests/ldap.pp
++++ b/puppet-manifests/src/modules/platform/manifests/ldap.pp
+@@ -60,7 +60,7 @@ class platform::ldap::server::local
+   # don't populate the adminpw if binding anonymously
+   if ! $bind_anonymous {
+-    file { '/usr/local/etc/ldapscripts/ldapscripts.passwd':
++    file { '/etc/ldapscripts/ldapscripts.passwd':
+       content => $admin_pw,
+     }
+   }
+@@ -104,7 +104,7 @@ class platform::ldap::client
+   }
+   if $::personality == 'controller' {
+-    file { '/usr/local/etc/ldapscripts/ldapscripts.conf':
++    file { '/etc/ldapscripts/ldapscripts.conf':
+       ensure  => 'present',
+       replace => true,
+       content => template('platform/ldapscripts.conf.erb'),
+diff --git a/puppet-manifests/src/modules/platform/manifests/network.pp b/puppet-manifests/src/modules/platform/manifests/network.pp
+index 5b94521..dbeb6d2 100644
+--- a/puppet-manifests/src/modules/platform/manifests/network.pp
++++ b/puppet-manifests/src/modules/platform/manifests/network.pp
+@@ -225,7 +225,7 @@ class platform::network (
+   $management_interface = $::platform::network::mgmt::params::interface_name
+-  $testcmd = '/usr/local/bin/connectivity_test'
++  $testcmd = '/usr/bin/connectivity_test'
+   if $::personality != 'controller' {
+     if $management_interface {
+diff --git a/puppet-manifests/src/modules/platform/manifests/remotelogging.pp b/puppet-manifests/src/modules/platform/manifests/remotelogging.pp
+index acf1dfd..b30163e 100644
+--- a/puppet-manifests/src/modules/platform/manifests/remotelogging.pp
++++ b/puppet-manifests/src/modules/platform/manifests/remotelogging.pp
+@@ -42,7 +42,7 @@ class platform::remotelogging
+       content => template('platform/remotelogging.conf.erb'),
+     }
+     -> exec { 'remotelogging-update-tc':
+-      command => "/usr/local/bin/remotelogging_tc_setup.sh ${port}"
++      command => "/usr/bin/remotelogging_tc_setup.sh ${port}"
+     }
+     -> Exec['syslog-ng-reload']
+diff --git a/puppet-manifests/src/modules/platform/templates/ldapscripts.conf.erb b/puppet-manifests/src/modules/platform/templates/ldapscripts.conf.erb
+index e3bc6e0..89d6d6c 100644
+--- a/puppet-manifests/src/modules/platform/templates/ldapscripts.conf.erb
++++ b/puppet-manifests/src/modules/platform/templates/ldapscripts.conf.erb
+@@ -46,7 +46,7 @@ SASLAUTH=""
+ <%- if @bind_anonymous != true -%>
+ BINDDN="cn=ldapadmin,dc=cgcs,dc=local"
+-BINDPWDFILE="/usr/local/etc/ldapscripts/ldapscripts.passwd"
++BINDPWDFILE="/etc/ldapscripts/ldapscripts.passwd"
+ <%- end -%>
+ # For older versions of OpenLDAP, it is still possible to use
+@@ -155,9 +155,9 @@ GETENTGRCMD="getent group"
+ #GTEMPLATE="/path/to/ldapaddgroup.template"
+ #UTEMPLATE="/path/to/ldapadduser.template"
+ #MTEMPLATE="/path/to/ldapaddmachine.template"
+-GTEMPLATE="/usr/local/etc/ldapscripts/ldapaddgroup.template.cgcs"
+-UTEMPLATE="/usr/local/etc/ldapscripts/ldapadduser.template.cgcs"
+-UMTEMPLATE="/usr/local/etc/ldapscripts/ldapmoduser.template.cgcs"
+-STEMPLATE="/usr/local/etc/ldapscripts/ldapaddsudo.template.cgcs"
+-SMTEMPLATE="/usr/local/etc/ldapscripts/ldapmodsudo.template.cgcs"
++GTEMPLATE="/etc/ldapscripts/ldapaddgroup.template.cgcs"
++UTEMPLATE="/etc/ldapscripts/ldapadduser.template.cgcs"
++UMTEMPLATE="/etc/ldapscripts/ldapmoduser.template.cgcs"
++STEMPLATE="/etc/ldapscripts/ldapaddsudo.template.cgcs"
++SMTEMPLATE="/etc/ldapscripts/ldapmodsudo.template.cgcs"
+ MTEMPLATE=""
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0003-puppet-dcmanager-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0003-puppet-dcmanager-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..c396c83
--- /dev/null
@@ -0,0 +1,40 @@
+diff --git a/modules/puppet-dcmanager/src/dcmanager/manifests/params.pp b/modules/puppet-dcmanager/src/dcmanager/manifests/params.pp
+index 5cbfb50..0a4d91b 100644
+--- a/modules/puppet-dcmanager/src/dcmanager/manifests/params.pp
++++ b/modules/puppet-dcmanager/src/dcmanager/manifests/params.pp
+@@ -13,13 +13,28 @@ class dcmanager::params {
+   $dcmanager_conf = '/etc/dcmanager/dcmanager.conf'
+   if $::osfamily == 'Debian' {
+-    $package_name       = 'distributedcloud-dcmanager'
+-    $client_package     = 'distributedcloud-client-dcmanagerclient'
+-    $api_package        = 'distributedcloud-dcmanager'
+-    $api_service        = 'dcmanager-api'
+-    $manager_package     = 'distributedcloud-dcmanager'
+-    $manager_service     = 'dcmanager-manager'
+-    $db_sync_command    = 'dcmanager-manage db_sync'
++
++    if $::operatingsystem == 'poky-stx' {
++
++      $package_name       = 'distributedcloud-dcmanager'
++      $client_package     = 'distributedcloud-client-dcmanager'
++      $api_package        = false
++      $api_service        = 'dcmanager-api'
++      $manager_package     = false
++      $manager_service     = 'dcmanager-manager'
++      $db_sync_command    = 'dcmanager-manage db_sync'
++
++    } else {
++
++      $package_name       = 'distributedcloud-dcmanager'
++      $client_package     = 'distributedcloud-client-dcmanagerclient'
++      $api_package        = 'distributedcloud-dcmanager'
++      $api_service        = 'dcmanager-api'
++      $manager_package     = 'distributedcloud-dcmanager'
++      $manager_service     = 'dcmanager-manager'
++      $db_sync_command    = 'dcmanager-manage db_sync'
++
++    }
+   } elsif($::osfamily == 'RedHat') {
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0004-puppet-dcorch-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0004-puppet-dcorch-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..2a87b36
--- /dev/null
@@ -0,0 +1,53 @@
+diff --git a/modules/puppet-dcorch/src/dcorch/manifests/params.pp b/modules/puppet-dcorch/src/dcorch/manifests/params.pp
+index 76d5fa1..334cfeb 100644
+--- a/modules/puppet-dcorch/src/dcorch/manifests/params.pp
++++ b/modules/puppet-dcorch/src/dcorch/manifests/params.pp
+@@ -14,18 +14,37 @@ class dcorch::params {
+   $dcorch_paste_api_ini = '/etc/dcorch/api-paste.ini'
+   if $::osfamily == 'Debian' {
+-    $package_name          = 'distributedcloud-dcorch'
+-    $client_package        = 'distributedcloud-client-dcorchclient'
+-    $api_package           = 'distributedcloud-dcorch'
+-    $api_service           = 'dcorch-api'
+-    $engine_package        = 'distributedcloud-dcorch'
+-    $engine_service        = 'dcorch-engine'
+-    $snmp_package          = 'distributedcloud-dcorch'
+-    $snmp_service          = 'dcorch-snmp'
+-    $api_proxy_package      = 'distributedcloud-dcorch'
+-    $api_proxy_service      = 'dcorch-api-proxy'
+-    $db_sync_command       = 'dcorch-manage db_sync'
++    if $::operatingsystem == 'poky-stx' {
++
++      $package_name          = 'dcorch'
++      $client_package        = 'distributedcloud-client-dcorchclient'
++      $api_package           = false
++      $api_service           = 'dcorch-api'
++      $snmp_package          = false
++      $snmp_service          = 'dcorch-snmp'
++      $engine_package        = false
++      $engine_service        = 'dcorch-engine'
++      $api_proxy_package      = false
++      $api_proxy_service      = 'dcorch-api-proxy'
++      $db_sync_command       = 'dcorch-manage db_sync'
++
++    } else {
++
++      $package_name          = 'distributedcloud-dcorch'
++      $client_package        = 'distributedcloud-client-dcorchclient'
++      $api_package           = 'distributedcloud-dcorch'
++      $api_service           = 'dcorch-api'
++      $engine_package        = 'distributedcloud-dcorch'
++      $engine_service        = 'dcorch-engine'
++      $snmp_package          = 'distributedcloud-dcorch'
++      $snmp_service          = 'dcorch-snmp'
++      $api_proxy_package      = 'distributedcloud-dcorch'
++      $api_proxy_service      = 'dcorch-api-proxy'
++
++      $db_sync_command       = 'dcorch-manage db_sync'
++
++    }
+   } elsif($::osfamily == 'RedHat') {
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0005-puppet-sysinv-updates-for-poky-stx.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0005-puppet-sysinv-updates-for-poky-stx.patch
new file mode 100644 (file)
index 0000000..98edcf9
--- /dev/null
@@ -0,0 +1,44 @@
+diff --git a/modules/puppet-sysinv/src/sysinv/manifests/params.pp b/modules/puppet-sysinv/src/sysinv/manifests/params.pp
+index 438aa37..9fc72e9 100644
+--- a/modules/puppet-sysinv/src/sysinv/manifests/params.pp
++++ b/modules/puppet-sysinv/src/sysinv/manifests/params.pp
+@@ -21,15 +21,30 @@ class sysinv::params {
+   $sysinv_paste_api_ini = '/etc/sysinv/api-paste.ini'
+   if $::osfamily == 'Debian' {
+-    $package_name       = 'sysinv'
+-    $client_package     = 'cgtsclient'
+-    $api_package        = 'sysinv'
+-    $api_service        = 'sysinv-api'
+-    $conductor_package  = 'sysinv'
+-    $conductor_service  = 'sysinv-conductor'
+-    $agent_package      = 'sysinv'
+-    $agent_service      = 'sysinv-agent'
+-    $db_sync_command    = 'sysinv-dbsync'
++    if $::operatingsystem == 'poky-stx' {
++
++      $package_name       = 'sysinv'
++      $client_package     = 'cgts-client'
++      $api_package        = false
++      $api_service        = 'sysinv-api'
++      $conductor_package  = false
++      $conductor_service  = 'sysinv-conductor'
++      $agent_package      = 'sysinv-agent'
++      $agent_service      = 'sysinv-agent'
++      $db_sync_command    = 'sysinv-dbsync'
++
++    } else {
++
++      $package_name       = 'sysinv'
++      $client_package     = 'cgtsclient'
++      $api_package        = 'sysinv'
++      $api_service        = 'sysinv-api'
++      $conductor_package  = 'sysinv'
++      $conductor_service  = 'sysinv-conductor'
++      $agent_package      = 'sysinv'
++      $agent_service      = 'sysinv-agent'
++      $db_sync_command    = 'sysinv-dbsync'
++    }
+   } elsif($::osfamily == 'RedHat') {
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0006-puppet-manifest-apply-do-not-treat-warnings-as-block.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0006-puppet-manifest-apply-do-not-treat-warnings-as-block.patch
new file mode 100644 (file)
index 0000000..9169f66
--- /dev/null
@@ -0,0 +1,31 @@
+From f027236dd57ca3ba20b6f827026a639c6fb373d8 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Fri, 20 Mar 2020 13:16:35 +0800
+Subject: [PATCH] puppet-manifest-apply: do not treat warnings as blocker issue
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ puppet-manifests/src/bin/puppet-manifest-apply.sh | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/puppet-manifests/src/bin/puppet-manifest-apply.sh b/puppet-manifests/src/bin/puppet-manifest-apply.sh
+index 4d36059..18eec2c 100755
+--- a/puppet-manifests/src/bin/puppet-manifest-apply.sh
++++ b/puppet-manifests/src/bin/puppet-manifest-apply.sh
+@@ -105,10 +105,10 @@ if [ $? -ne 0 ]; then
+     echo "See ${LOGFILE} for details"
+     exit 1
+ else
+-    grep -qE '^(.......)?Warning|^....-..-..T..:..:..([.]...)?(.......)?.Warning|^(.......)?Error|^....-..-..T..:..:..([.]...)?(.......)?.Error' ${LOGFILE}
++    grep -qE '^(.......)?Error|^....-..-..T..:..:..([.]...)?(.......)?.Error' ${LOGFILE}
+     if [ $? -eq 0 ]; then
+-        echo "[WARNING]"
+-        echo "Warnings found. See ${LOGFILE} for details"
++        echo "[ERROR]"
++        echo "Errors found. See ${LOGFILE} for details"
+         exit 1
+     fi
+     echo "[DONE]"
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0007-puppet-manifests-etcd-override-typo-and-journalctl.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0007-puppet-manifests-etcd-override-typo-and-journalctl.patch
new file mode 100644 (file)
index 0000000..074e115
--- /dev/null
@@ -0,0 +1,25 @@
+From 804d05b0e188b8e694def2abcd0d9b0979b40b6f Mon Sep 17 00:00:00 2001
+From: "Sar Ashki, Babak" <Babak.SarAshki@windriver.com>
+Date: Tue, 7 Jan 2020 16:08:24 -0800
+Subject: [PATCH] puppet-manifests:etcd-override typo and journalctl
+
+---
+ .../src/modules/platform/files/etcd-override.conf             | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/puppet-manifests/src/modules/platform/files/etcd-override.conf b/puppet-manifests/src/modules/platform/files/etcd-override.conf
+index 09d2ed47..5860aab4 100644
+--- a/puppet-manifests/src/modules/platform/files/etcd-override.conf
++++ b/puppet-manifests/src/modules/platform/files/etcd-override.conf
+@@ -4,6 +4,6 @@ User=root
+ NotifyAccess=all
+ Type=notify
+ ExecStart=
+-ExecStart=-/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" 2>&1 | /usr/bin/forward-journald -tag etcd"
++ExecStart=-/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" 2>&1"
+ ExecStartPost=/bin/bash -c 'echo $MAINPID >/var/run/etcd.pid'
+-ExecStopPost=/bin/bash/rm -f /var/run/etcd.pid
++ExecStopPost=/bin/bash -c 'rm -f /var/run/etcd.pid'
+-- 
+2.23.0
+
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0008-puppet-manifests-keystone-include-platform-client.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0008-puppet-manifests-keystone-include-platform-client.patch
new file mode 100644 (file)
index 0000000..3716d06
--- /dev/null
@@ -0,0 +1,25 @@
+From 27eefdede1dc6e45704a14480c79585e66b7939e Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Thu, 26 Mar 2020 10:17:20 +0800
+Subject: [PATCH] keystone: include ::platform::client
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ puppet-manifests/src/modules/openstack/manifests/keystone.pp | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/puppet-manifests/src/modules/openstack/manifests/keystone.pp b/puppet-manifests/src/modules/openstack/manifests/keystone.pp
+index 1cb65bc..837e551 100644
+--- a/puppet-manifests/src/modules/openstack/manifests/keystone.pp
++++ b/puppet-manifests/src/modules/openstack/manifests/keystone.pp
+@@ -333,6 +333,7 @@ class openstack::keystone::server::runtime {
+ class openstack::keystone::endpoint::runtime {
+   if str2bool($::is_controller_active) {
++    include ::platform::client
+     include ::keystone::endpoint
+     include ::sysinv::keystone::auth
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0009-puppet-manifests-lvm-remove-lvmetad.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0009-puppet-manifests-lvm-remove-lvmetad.patch
new file mode 100644 (file)
index 0000000..83a4892
--- /dev/null
@@ -0,0 +1,42 @@
+From af1da8e54d08caa065243077f601801bbc8eb8e0 Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Fri, 3 Apr 2020 17:03:53 +0800
+Subject: [PATCH] lvm.pp: remove lvmetad
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ puppet-manifests/src/modules/platform/manifests/lvm.pp | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+diff --git a/puppet-manifests/src/modules/platform/manifests/lvm.pp b/puppet-manifests/src/modules/platform/manifests/lvm.pp
+index cf16e54..64533b9 100644
+--- a/puppet-manifests/src/modules/platform/manifests/lvm.pp
++++ b/puppet-manifests/src/modules/platform/manifests/lvm.pp
+@@ -7,23 +7,7 @@ class platform::lvm::params (
+ class platform::lvm
+   inherits platform::lvm::params {
+-  # Mask socket unit as well to make sure
+-  # systemd socket activation does not happen
+-  service { 'lvm2-lvmetad.socket':
+-    ensure => 'stopped',
+-    enable => mask,
+-  }
+-  # Masking service unit ensures that it is not started again
+-  -> service { 'lvm2-lvmetad':
+-    ensure => 'stopped',
+-    enable => mask,
+-  }
+-  # Since masking is changing unit symlinks to point to /dev/null,
+-  # we need to reload systemd configuration
+-  -> exec { 'lvmetad-systemd-daemon-reload':
+-    command => 'systemctl daemon-reload',
+-  }
+-  -> file_line { 'use_lvmetad':
++  file_line { 'use_lvmetad':
+     path  => '/etc/lvm/lvm.conf',
+     match => '^[^#]*use_lvmetad = 1',
+     line  => '        use_lvmetad = 0',
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/0010-puppet-manifest-apply-workaround-to-ignore-known-err.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/0010-puppet-manifest-apply-workaround-to-ignore-known-err.patch
new file mode 100644 (file)
index 0000000..53f75ce
--- /dev/null
@@ -0,0 +1,35 @@
+From 1735d6504b319c2d05ffbd2ae8ff6a4515982aed Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Sun, 19 Apr 2020 21:38:59 +0800
+Subject: [PATCH] puppet-manifest-apply: workaround to ignore known errors
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ puppet-manifests/src/bin/puppet-manifest-apply.sh | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/puppet-manifests/src/bin/puppet-manifest-apply.sh b/puppet-manifests/src/bin/puppet-manifest-apply.sh
+index 18eec2c..3ac6cfc 100755
+--- a/puppet-manifests/src/bin/puppet-manifest-apply.sh
++++ b/puppet-manifests/src/bin/puppet-manifest-apply.sh
+@@ -89,6 +89,8 @@ function finish {
+ }
+ trap finish EXIT
++# Pattern for known errors, will be ignored when checking errors
++KNOWN_ERRORS="Error.*remount /scratch"
+ # Set Keystone endpoint type to internal to prevent SSL cert failures during config
+ export OS_ENDPOINT_TYPE=internalURL
+@@ -105,7 +107,7 @@ if [ $? -ne 0 ]; then
+     echo "See ${LOGFILE} for details"
+     exit 1
+ else
+-    grep -qE '^(.......)?Error|^....-..-..T..:..:..([.]...)?(.......)?.Error' ${LOGFILE}
++    grep -E -v "${KNOWN_ERRORS}" ${LOGFILE} | grep -qE '^(.......)?Error|^....-..-..T..:..:..([.]...)?(.......)?.Error'
+     if [ $? -eq 0 ]; then
+         echo "[ERROR]"
+         echo "Errors found. See ${LOGFILE} for details"
+-- 
+2.7.4
+
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/apply_network_config_poky.sh b/meta-stx/recipes-support/puppet/files/stx-puppet/apply_network_config_poky.sh
new file mode 100755 (executable)
index 0000000..9d182be
--- /dev/null
@@ -0,0 +1,305 @@
+#!/bin/bash
+
+################################################################################
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+################################################################################
+
+#
+#  Purpose of this script is to copy the puppet-built
+#  network config file from the puppet dir to /etc/network/interfaces.
+#  It is only copied when a difference is detected.
+#
+#  Please note:  function is_eq_ifcfg() is used to determine if
+#                cfg files are different
+#
+
+ACQUIRE_LOCK=1
+RELEASE_LOCK=0
+
+if [ ! -f /var/run/interfaces.puppet ] ; then
+    # No puppet file? Nothing to do!
+    exit 1
+fi
+
+function log_it {
+    logger "${0} ${1}"
+}
+
+function do_if_up {
+    local iface=$1
+    log_it "Bringing $iface up"
+    /sbin/ifup $iface
+}
+
+function do_if_down {
+    local iface=$1
+    log_it "Bringing $iface down"
+    /sbin/ifdown $iface
+}
+
+function do_rm {
+    local theFile=$1
+    log_it "Removing $theFile"
+    /bin/rm  $theFile
+}
+
+function do_cp {
+    local srcFile=$1
+    local dstFile=$2
+    log_it "copying network cfg $srcFile to $dstFile"
+    cp  $srcFile $dstFile
+}
+
+function do_mv {
+    local srcFile=$1
+    local dstFile=$2
+    log_it "Moving network cfg $srcFile to $dstFile"
+    mv  $srcFile $dstFile
+}
+
+# Return items in list1 that are not in list2
+array_diff () {
+    list1=${!1}
+    list2=${!2}
+
+    result=()
+    l2=" ${list2[*]} "
+    for item in ${list1[@]}; do
+        if [[ ! $l2 =~ " $item " ]] ; then
+            result+=($item)
+        fi
+    done
+
+    echo  ${result[@]}
+}
+
+function normalized_cfg_attr_value {
+    local cfg=$1
+    local attr_name=$2
+    local attr_value
+    attr_value=$(cat $cfg | grep $attr_name= | awk -F "=" {'print $2'})
+
+
+    #
+    # Special case BONDING_OPTS attribute.
+    #
+    # The BONDING_OPTS attribute contains '=' characters, so is not correctly
+    # parsed by splitting on '=' as done above.  This results in changes to
+    # BONDING_OPTS not causing the interface to be restarted, so the old
+    # BONDING_OPTS will still be used.  Because this is only checking for changes,
+    # rather than actually using the returned value, we can return the whole
+    # line.
+    #
+    if [[ "${attr_name}" == "BONDING_OPTS" ]]; then
+        echo "$(cat $cfg | grep $attr_name=)"
+        return $(true)
+    fi
+
+    if [[ "${attr_name}" != "BOOTPROTO" ]]; then
+        echo "${attr_value}"
+        return $(true)
+    fi
+    #
+    # Special case BOOTPROTO attribute.
+    #
+    # The BOOTPROTO attribute is not populated consistently by various aspects
+    # of the system.  Different values are used to indicate a manually
+    # configured interfaces (i.e., one that does not expect to have an IP
+    # address) and so to avoid reconfiguring an interface that has different
+    # values with the same meaning we normalize them here before making any
+    # decisions.
+    #
+    # From a user perspective the values "manual", "none", and "" all have the
+    # same meaning - an interface without an IP address while "dhcp" and
+    # "static" are distinct values with a separate meaning.  In practice
+    # however, the only value that matters from a ifup/ifdown script point of
+    # view is "dhcp".  All other values are ignored.
+    #
+    # In our system we set BOOTPROTO to "static" to indicate that IP address
+    # attributes exist and to "manual"/"none" to indicate that no IP address
+    # attributes exist.  These are not needed by ifup/ifdown as it looks for
+    # the "IPADDR" attribute whenever BOOTPROTO is set to anything other than
+    # "dhcp".
+    #
+    if [[ "${attr_value}" == "none" ]]; then
+        attr_value="none"
+    fi
+    if [[ "${attr_value}" == "manual" ]]; then
+        attr_value="none"
+    fi
+    if [[ "${attr_value}" == "" ]]; then
+        attr_value="none"
+    fi
+    echo "${attr_value}"
+    return $(true)
+}
+
+#
+# returns $(true) if cfg file ( $1 ) has property propName ( $2 ) with a value of propValue ( $3 )
+#
+function cfg_has_property_with_value {
+    local cfg=$1
+    local propname=$2
+    local propvalue=$3
+    if [ -f $cfg ]; then
+        if [[ "$(normalized_cfg_attr_value $cfg $propname)" == "${propvalue}" ]]; then
+            return $(true)
+        fi
+    fi
+    return $(false)
+}
+
+#
+# returns $(true) if cfg file is configured as a slave
+#
+function is_slave {
+    cfg_has_property_with_value $1 "SLAVE" "yes"
+    return $?
+}
+
+#
+# returns $(true) if cfg file is configured for DHCP
+#
+function is_dhcp {
+    cfg_has_property_with_value $1 "BOOTPROTO" "dhcp"
+}
+
+#
+# returns $(true) if cfg file is configured as a VLAN interface
+#
+function is_vlan {
+    cfg_has_property_with_value $1 "VLAN" "yes"
+    return $?
+}
+
+#
+# returns $(true) if cfg file is configured as an ethernet interface.  For the
+# purposes of this script "ethernet" is considered as any interface that is not
+# a vlan or a slave.  This includes both regular ethernet interfaces and bonded
+# interfaces.
+#
+function is_ethernet {
+    if ! is_vlan $1; then
+        if ! is_slave $1; then
+            return $(true)
+        fi
+    fi
+    return $(false)
+}
+
+#
+# returns $(true) if cfg file represents an interface of the specified type.
+#
+function iftype_filter {
+    local iftype=$1
+
+    return $(is_$iftype $2)
+}
+
+#
+# returns $(true) if ifcfg files have the same number of VFs
+#
+#
+function is_eq_sriov_numvfs {
+    local cfg_1=$1
+    local cfg_2=$2
+    local sriov_numvfs_1
+    sriov_numvfs_1=$(grep -o 'echo *[1-9].*sriov_numvfs' $cfg_1 | awk {'print $2'})
+    local sriov_numvfs_2
+    sriov_numvfs_2=$(grep -o 'echo *[1-9].*sriov_numvfs' $cfg_2 | awk {'print $2'})
+
+    sriov_numvfs_1=${sriov_numvfs_1:-0}
+    sriov_numvfs_2=${sriov_numvfs_2:-0}
+
+    if [[ "${sriov_numvfs_1}" != "${sriov_numvfs_2}" ]]; then
+        log_it "$cfg_1 and $cfg_2 differ on attribute sriov_numvfs [${sriov_numvfs_1}:${sriov_numvfs_2}]"
+        return $(false)
+    fi
+
+    return $(true)
+}
+
+#
+# returns $(true) if ifcfg files are equal
+#
+# Warning:  Only compares against cfg file attributes:
+#            BOOTPROTO DEVICE IPADDR NETMASK GATEWAY MTU BONDING_OPTS SRIOV_NUMVFS
+#
+function is_eq_ifcfg {
+    local cfg_1=$1
+    local cfg_2=$2
+
+    for attr in BOOTPROTO DEVICE IPADDR NETMASK GATEWAY MTU BONDING_OPTS; do
+        local attr_value1
+        attr_value1=$(normalized_cfg_attr_value $cfg_1 $attr)
+        local attr_value2
+        attr_value2=$(normalized_cfg_attr_value $cfg_2 $attr)
+        if [[ "${attr_value1}" != "${attr_value2}"  ]]; then
+            log_it "$cfg_1 and $cfg_2 differ on attribute $attr"
+            return $(false)
+        fi
+    done
+
+    is_eq_sriov_numvfs $1 $2
+    return $?
+}
+
+# Synchronize with sysinv-agent audit (ifup/down to query link speed).
+function sysinv_agent_lock {
+    case $1 in
+    $ACQUIRE_LOCK)
+        local lock_file="/var/run/apply_network_config.lock"
+        # Lock file should be the same as defined in sysinv agent code
+        local lock_timeout=5
+        local max=15
+        local n=1
+        LOCK_FD=0
+        exec {LOCK_FD}>$lock_file
+        while [[ $n -le $max ]]; do
+
+            flock -w $lock_timeout $LOCK_FD && break
+            log_it "Failed to get lock($LOCK_FD) after $lock_timeout seconds ($n/$max), will retry"
+            sleep 1
+            n=$(($n+1))
+        done
+        if [[ $n -gt $max ]]; then
+            log_it "Failed to acquire lock($LOCK_FD) even after $max retries"
+            exit 1
+        fi
+        ;;
+    $RELEASE_LOCK)
+        [[ $LOCK_FD -gt 0 ]] && flock -u $LOCK_FD
+        ;;
+    esac
+}
+
+
+# synchronize with sysinv-agent audit
+sysinv_agent_lock $ACQUIRE_LOCK
+
+# now copy the puppet changed interfaces to /etc/network/interfaces
+do_mv /var/run/interfaces.puppet /etc/network/interfaces
+
+# now restart the networking service
+/etc/init.d/networking restart
+
+sleep 5
+
+# workaround the loopback label addresses cannot be configured as scope of host
+ip addr show lo | egrep "inet.*lo:" > /tmp/loop$$
+
+while read addr_info; do 
+       echo $addr_info
+       log_it "replace $addr_info with scope host"
+       addr=`echo $addr_info | cut -d' ' -f 2`
+       ifname=`echo $addr_info | cut -d' ' -f 5`
+       ip addr del $addr dev lo label $ifname
+       ip addr add $addr dev lo scope host label $ifname
+done < /tmp/loop$$
+
+
+# unlock: synchronize with sysinv-agent audit
+sysinv_agent_lock $RELEASE_LOCK
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/get-boot-device-from-cmdline.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/get-boot-device-from-cmdline.patch
new file mode 100644 (file)
index 0000000..e4a0913
--- /dev/null
@@ -0,0 +1,12 @@
+diff --git a/puppet-manifests/src/modules/platform/lib/facter/is_primary_disk_rotational.rb b/puppet-manifests/src/modules/platform/lib/facter/is_primary_disk_rotational.rb
+index d80896f..599fcd8 100644
+--- a/puppet-manifests/src/modules/platform/lib/facter/is_primary_disk_rotational.rb
++++ b/puppet-manifests/src/modules/platform/lib/facter/is_primary_disk_rotational.rb
+@@ -1,6 +1,6 @@
+ require 'facter'
+ Facter.add(:is_primary_disk_rotational) do
+-  rootfs_partition = Facter::Core::Execution.exec("df --output=source / | tail -1")
++  rootfs_partition = Facter::Core::Execution.exec('sed -e "s/^.*\(root=\)\(.* \).*$/\2/g" /proc/cmdline')
+   rootfs_device = Facter::Core::Execution.exec("basename #{rootfs_partition} | sed 's/[0-9]*$//;s/p[0-9]*$//'")
+   setcode "cat /sys/block/#{rootfs_device}/queue/rotational"
+ end
diff --git a/meta-stx/recipes-support/puppet/files/stx-puppet/poky-specific-apply-network-config-script.patch b/meta-stx/recipes-support/puppet/files/stx-puppet/poky-specific-apply-network-config-script.patch
new file mode 100644 (file)
index 0000000..4cb2344
--- /dev/null
@@ -0,0 +1,20 @@
+diff -ru a/puppet-manifests/src/modules/platform/manifests/network.pp b/puppet-manifests/src/modules/platform/manifests/network.pp
+--- a/puppet-manifests/src/modules/platform/manifests/network.pp       2020-04-08 19:10:04.478548420 +0800
++++ b/puppet-manifests/src/modules/platform/manifests/network.pp       2020-04-08 19:10:22.674548916 +0800
+@@ -208,8 +208,14 @@
+   -> Network_route6 <| |>
+   -> Exec['apply-network-config']
+-  exec {'apply-network-config':
+-    command => 'apply_network_config.sh',
++  if($::operatingsystem == 'poky-stx') {
++    exec {'apply-network-config':
++      command => 'apply_network_config_poky.sh',
++    }
++  } else {
++    exec {'apply-network-config':
++      command => 'apply_network_config.sh',
++    }
+   }
+ }
diff --git a/meta-stx/recipes-support/puppet/puppet-barbican_git.bb b/meta-stx/recipes-support/puppet/puppet-barbican_git.bb
new file mode 100644 (file)
index 0000000..51b73f9
--- /dev/null
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Puppet module for OpenStack Barbican"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=fce88ac1cd1315adf28a52502c9f7f6b"
+
+RDEPENDS_${PN} += " \
+       puppetlabs-inifile \
+       "
+
+PV = "11.3.0"
+SRCREV = "8241a1d13be6c3ee6344fa46dcfc045439044e76"
+PROTOCOL = "https"
+BRANCH = "stable/pike"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/puppet-barbican.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-barbican/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-barbican-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/barbican
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/barbican
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-barbican_git.bbappend b/meta-stx/recipes-support/puppet/puppet-barbican_git.bbappend
new file mode 100644 (file)
index 0000000..d81808b
--- /dev/null
@@ -0,0 +1,21 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/puppet-barbican-fix-the-pkg-and-service-names-for-poky-stx.patch \
+       file://${BPN}/puppet-barbican-do-not-fail-for-poky-stx.patch \
+       "
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-boolean_git.bb b/meta-stx/recipes-support/puppet/puppet-boolean_git.bb
new file mode 100644 (file)
index 0000000..05221ad
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A Puppet module providing flexible handling of boolean parameters."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=8cbd44f4ffbc81632df22fdd4ae87811"
+
+PV = "1.0.1"
+SRCREV = "22b726dd78b0a60a224cc7054aebbf28e9306f62"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/adrienthebo/puppet-boolean;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/boolean
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/boolean/
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-ceph_git.bb b/meta-stx/recipes-support/puppet/puppet-ceph_git.bb
new file mode 100644 (file)
index 0000000..66def5e
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Community Developed Ceph Module."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=0e5ccf641e613489e66aa98271dbe798"
+
+PV = "2.4.1"
+SRCREV = "ebea4b703d002d64d0b623cc51d42890b187ab97"
+PROTOCOL = "https"
+BRANCH = "stable/jewel"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/puppet-ceph.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-ceph/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-ceph-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/ceph
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/ceph
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-ceph_git.bbappend b/meta-stx/recipes-support/puppet/puppet-ceph_git.bbappend
new file mode 100644 (file)
index 0000000..0088901
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/0001-Roll-up-TIS-patches.patch \
+       file://${BPN}/0002-Newton-rebase-fixes.patch \
+       file://${BPN}/0003-Ceph-Jewel-rebase.patch \
+       file://${BPN}/0004-US92424-Add-OSD-support-for-persistent-naming.patch \
+       file://${BPN}/0005-Remove-puppetlabs-apt-as-ceph-requirement.patch \
+       file://${BPN}/0006-ceph-disk-prepare-invalid-data-disk-value.patch \
+       file://${BPN}/0007-Add-StarlingX-specific-restart-command-for-Ceph-moni.patch \
+       file://${BPN}/0008-ceph-mimic-prepare-activate-osd.patch \
+       file://${BPN}/0009-fix-ceph-osd-disk-partition-for-nvme-disks.patch \
+       file://${BPN}/0010-wipe-unprepared-disks.patch \
+       file://${BPN}/0011-puppet-ceph-changes-for-poky-stx.patch \
+       "
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-certmonger_git.bb b/meta-stx/recipes-support/puppet/puppet-certmonger_git.bb
new file mode 100644 (file)
index 0000000..9bb7674
--- /dev/null
@@ -0,0 +1,55 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Deprecated: Use saltedsignal/puppet-certmonger instead."
+DESCRIPTION = " \
+       This puppet module allows you to request and manage certificates using certmonger. \
+       "
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=5e8f921d6b978d3605270db1f39e199f"
+
+PV = "1.1.1"
+SRCREV = "1157a7e552d87696e80ed4ab54bf0608a1c5ffff"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/earsdown/puppet-certmonger.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-certmonger/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-certmonger-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/certmonger
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/certmonger
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-certmonger_git.bbappend b/meta-stx/recipes-support/puppet/puppet-certmonger_git.bbappend
new file mode 100644 (file)
index 0000000..2d22d75
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += "file://${PN}/0001-puppet-certmonger-adjust-path-to-poky-rootfs.patch"
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-collectd_git.bb b/meta-stx/recipes-support/puppet/puppet-collectd_git.bb
new file mode 100644 (file)
index 0000000..773098c
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Puppet module for configuring collectd and plugins."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=03ff50a0bc4a15eb462d4129e89f9656"
+
+PV = "5.1.0"
+SRCREV = "f5a44db033c7cea9990c405519f70d18fc2b05c0"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/voxpupuli/puppet-collectd;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/collectd
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/collectd
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-dnsmasq_git.bb b/meta-stx/recipes-support/puppet/puppet-dnsmasq_git.bb
new file mode 100644 (file)
index 0000000..103fa2a
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Puppet module for managing the dnsmasq DNS/DHCP/TFTP server"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://README.md;md5=71e3bfa9ffc5e93324727bbffae917f5"
+
+PV = "1.1.0"
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "cff07e90890662972c97684a2baee964f68ff3ed"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/netmanagers/puppet-dnsmasq;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       file://${PN}/metadata.json.patch \
+       "
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append () {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/dnsmasq
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/dnsmasq/
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-dnsmasq_git.bbappend b/meta-stx/recipes-support/puppet/puppet-dnsmasq_git.bbappend
new file mode 100644 (file)
index 0000000..d699ce5
--- /dev/null
@@ -0,0 +1,25 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/0001-puppet-dnsmasq-Kilo-quilt-patches.patch;striplevel=5 \
+       file://${BPN}/0002-Fixing-mismatched-permission-on-dnsmasq-conf.patch;striplevel=5 \
+       file://${BPN}/0003-Support-management-of-tftp_max-option.patch;striplevel=5 \
+       file://${BPN}/0004-Enable-clear-DNS-cache-on-reload.patch;striplevel=5 \
+       file://${BPN}/0005-puppet-dnsmasq-updates-for-poky-stx.patch;striplevel=5 \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-drbd_git.bb b/meta-stx/recipes-support/puppet/puppet-drbd_git.bb
new file mode 100644 (file)
index 0000000..ad70d04
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Puppet module for managing DRBD (Distributed Replicated Block Device) resources."
+
+PV = "0.3.1"
+SRCREV = "496b3ba9cd74a2d12636f9e90a718739a5451169"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=6089b6bd1f0d807edb8bdfd76da0b038 "
+
+SRC_URI = " \
+       git://github.com/voxpupuli/puppet-drbd;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/drbd
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/drbd
+}
+
+FILES_${PN} += "${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-drbd_git.bbappend b/meta-stx/recipes-support/puppet/puppet-drbd_git.bbappend
new file mode 100644 (file)
index 0000000..b076d02
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${PN}/0001-TIS-Patches.patch \
+       file://${PN}/0002-Disable-timeout-for-mkfs-command.patch \
+       file://${PN}/0003-drbd-parallel-to-serial-synchronization.patch \
+       file://${PN}/0004-US-96914-reuse-existing-drbd-cinder-resource.patch \
+       file://${PN}/0005-Add-PausedSync-states-to-acceptable-cstate.patch \
+       file://${PN}/0006-CGTS-7164-Add-resource-options-cpu-mask-to-affine-drbd-kernel-threads.patch \
+       file://${PN}/0007-Add-disk-by-path-test.patch \
+       file://${PN}/0008-CGTS-7953-support-for-new-drbd-resources.patch \
+       file://${PN}/0009-drbd-slow-before-swact.patch \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-etcd_git.bb b/meta-stx/recipes-support/puppet/puppet-etcd_git.bb
new file mode 100644 (file)
index 0000000..d2849c9
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Installs and configures etcd"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=e3fc50a88d0a364313df4b21ef20c29e"
+
+PV = "1.11.0"
+SRCREV = "f43e1292a9554766f799cd5a14b67cc19ce5b00e"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/cristifalcas/puppet-etcd;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-etcd/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-etcd-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/etcd
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/etcd
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-etcd_git.bbappend b/meta-stx/recipes-support/puppet/puppet-etcd_git.bbappend
new file mode 100644 (file)
index 0000000..bdaed2f
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/puppet-etcd-changes-for-poky-stx.patch \
+       "
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-filemapper_git.bb b/meta-stx/recipes-support/puppet/puppet-filemapper_git.bb
new file mode 100644 (file)
index 0000000..38afafa
--- /dev/null
@@ -0,0 +1,55 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A Puppet module to map files to resources and back."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=b43880e14353cbc12cf3981fb93a0944"
+
+PV = "1.1.3"
+SRCREV = "9b53310278e76827bbe12a36cc6470d77071abb2"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/adrienthebo/puppet-filemapper;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       file://${PN}/metadata.json.patch \
+       " 
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_BUILD_GEMS = "adrien-filemapper.gemspec"
+RUBY_INSTALL_GEMS = "adrien-filemapper-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/filemapper
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/filemapper
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-horizon_git.bb b/meta-stx/recipes-support/puppet/puppet-horizon_git.bb
new file mode 100644 (file)
index 0000000..8e39f4c
--- /dev/null
@@ -0,0 +1,58 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Intended managing the entirety of horizon."
+DESCRIPTION = " \
+       The horizon module is a thorough attempt to make Puppet capable of \
+       managing the entirety of horizon. Horizon is a fairly classic django \
+       application, which results in a fairly simply Puppet module.\
+       "
+
+# HOMEPAGE = "https://github.com/openstack/puppet-horizon"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=12a15a9ebddda7d856c783f745e5ee47"
+
+PV = "11.5.0"
+SRCREV = "d75706e38fdf63f9c3174a526a7d07799390dfeb"
+PROTOCOL = "https"
+BRANCH = "stable/pike"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/puppet-horizon.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-horizon-${PV}.gem"
+
+do_install_append() { 
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/horizon
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/horizon
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-horizon_git.bbappend b/meta-stx/recipes-support/puppet/puppet-horizon_git.bbappend
new file mode 100644 (file)
index 0000000..2100667
--- /dev/null
@@ -0,0 +1,21 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/0001-Update-memcached-dependency.patch \
+       file://${BPN}/0002-puppet-horizon-changes-for-poky-stx.patch \
+       "
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-keystone_git.bb b/meta-stx/recipes-support/puppet/puppet-keystone_git.bb
new file mode 100644 (file)
index 0000000..59bba4b
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Puppet module for OpenStack Keystone"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=12a15a9ebddda7d856c783f745e5ee47"
+
+PV = "11.3.0"
+SRCREV = "305c91cac00f720ad6461b442e71b52b12f9ae57"
+PROTOCOL = "https"
+BRANCH = "stable/pike"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/puppet-keystone.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-keystone/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-keystone-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/keystone
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/keystone
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-keystone_git.bbappend b/meta-stx/recipes-support/puppet/puppet-keystone_git.bbappend
new file mode 100644 (file)
index 0000000..51a45f7
--- /dev/null
@@ -0,0 +1,37 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${PN}/0001-pike-rebase-squash-titanium-patches.patch \
+       file://${PN}/0002-remove-the-Keystone-admin-app.patch \
+       file://${PN}/0003-remove-eventlet_bindhost-from-Keystoneconf.patch \
+       file://${PN}/0004-escape-special-characters-in-bootstrap.patch \
+       file://${PN}/0005-Add-support-for-fernet-receipts.patch \
+       file://${PN}/0007-puppet-keystone-specify-full-path-to-openrc.patch \
+       file://${PN}/0008-params.pp-fix-the-service-name-of-openstack-keystone.patch \
+       "
+
+do_install_append () {
+       # fix the name of python-memcached
+       sed -i -e 's/python-memcache\b/python-memcached/' ${D}/${datadir}/puppet/modules/keystone/manifests/params.pp
+}
+
+RDEPENDS_${PN} += " \
+       python-memcached \
+       python-ldappool \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-kmod_git.bb b/meta-stx/recipes-support/puppet/puppet-kmod_git.bb
new file mode 100644 (file)
index 0000000..d14e47d
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Manage Linux kernel modules with Puppet"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=0e5ccf641e613489e66aa98271dbe798"
+
+PV = "2.1.0"
+SRCREV = "0d69a96e8d0d3a08da0d5f476c733134df4fb9ee"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/camptocamp/puppet-kmod;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_BUILD_GEMS = "camptocamp-kmod.gemspec"
+RUBY_INSTALL_GEMS = "camptocamp-kmod-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/kmod
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/kmod
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-ldap_git.bb b/meta-stx/recipes-support/puppet/puppet-ldap_git.bb
new file mode 100644 (file)
index 0000000..b99b031
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "OpenLDAP module for Puppet."
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://metadata.json;md5=4244fe391bee02e9ee7259aa7f8dda8b"
+
+PV = "0.2.4"
+SRCREV = "480f13af6d17d1d3fcf0dc7b4bd04b49fa4099e1"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/torian/puppet-ldap;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-ldap/Add-gemspec.patch \
+       "
+inherit ruby 
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/ldap
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/ldap
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-ldap_git.bbappend b/meta-stx/recipes-support/puppet/puppet-ldap_git.bbappend
new file mode 100644 (file)
index 0000000..1060e41
--- /dev/null
@@ -0,0 +1,22 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/0001-puppet-ldap-add-os-poky-stx.patch \
+       file://${BPN}/0002-puppet-ldap-poky-stx-fix-pkg-name.patch \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-memcached_git.bb b/meta-stx/recipes-support/puppet/puppet-memcached_git.bb
new file mode 100644 (file)
index 0000000..cbe4713
--- /dev/null
@@ -0,0 +1,59 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Manage the memcached service with Puppet."
+DESCRIPTION = " \
+	The memcached module manages the installation and configuration \
+	of the memcached daemon: package installation, configuration file \
+	contents and service state.\
+	"
+
+HOMEPAGE = "https://github.com/saz/puppet-memcached.git"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=1322039bbc8e3ff4e74252ec65303861"
+
+PV = "3.0.2"
+SRCREV = "c2a0b543dc28f34ab68e905ede3173b00246ddca"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/saz/puppet-memcached.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-memcached/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-memcached-${PV}.gem"
+
+do_install_append() { 
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/memcached
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/memcached
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-network_git.bb b/meta-stx/recipes-support/puppet/puppet-network_git.bb
new file mode 100644 (file)
index 0000000..d9e5240
--- /dev/null
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "A Puppet module to manage non volatile network and route configuration."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=b43880e14353cbc12cf3981fb93a0944"
+
+
+PV = "0.5.0"
+SRCREV = "7deacd5fdc22c0543455878a8d1872f2f5417c1d"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/voxpupuli/puppet-network;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-network/Add-gemspec.patch \
+       file://puppet-network/metadata.json.patch \
+       " 
+
+inherit ruby 
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/network
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/network
+       # Remove broken symlinks for now
+       rm -f ${D}/${libdir}/ruby/gems/2.5.0/gems/puppet-network-0.5.0/spec/fixtures/modules/network/manifests
+       rm -f ${D}/${libdir}/ruby/gems/2.5.0/gems/puppet-network-0.5.0/spec/fixtures/modules/network/templates
+       rm -f ${D}/${libdir}/ruby/gems/2.5.0/gems/puppet-network-0.5.0/spec/fixtures/modules/network/files
+       rm -f ${D}/${libdir}/ruby/gems/2.5.0/gems/puppet-network-0.5.0/spec/fixtures/modules/network/lib
+       rm -f ${D}/${datadir}/puppet/modules/network/spec/fixtures/modules/network/files
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-network_git.bbappend b/meta-stx/recipes-support/puppet/puppet-network_git.bbappend
new file mode 100644 (file)
index 0000000..b0ee2e6
--- /dev/null
@@ -0,0 +1,38 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/puppet-network-Kilo-quilt-changes.patch;striplevel=5 \
+       file://${BPN}/puppet-network-support-ipv6.patch;striplevel=5 \
+       file://${BPN}/Don-t-write-absent-to-redhat-route-files-and-test-fo.patch;striplevel=5 \
+       file://${BPN}/fix-absent-options.patch;striplevel=5 \
+       file://${BPN}/permit-inservice-update-of-static-routes.patch;striplevel=5 \
+       file://${BPN}/ipv6-static-route-support.patch;striplevel=5 \
+       file://${BPN}/route-options-support.patch;striplevel=5 \
+       file://${BPN}/0001-Stx-uses-puppet-boolean-instead-of-adrien-boolean.patch \
+       file://${BPN}/puppet-network-updates-for-poky-stx.patch \
+       file://${BPN}/puppet-network-config-poky-provider.patch \
+       " 
+
+inherit openssl10
+
+do_configure_append() {
+       rm -f spec/fixtures/modules/network/files
+}
+
+RDEPENDS_${PN} += "\
+       vlan \
+"
diff --git a/meta-stx/recipes-support/puppet/puppet-nslcd_git.bb b/meta-stx/recipes-support/puppet/puppet-nslcd_git.bb
new file mode 100644 (file)
index 0000000..ba81276
--- /dev/null
@@ -0,0 +1,54 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "A Puppet module to manage the nslcd daemon which provides authentication via LDAP"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://Modulefile;md5=674f57ad12dfafcf6c3943f34d459ded"
+
+PV = "0.0.1"
+SRCREV = "b8c19b1ada89865f2e50758e054583798ad8011a"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/jlyheden/puppet-nslcd;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       file://${PN}/metadata.json.patch \
+       " 
+
+inherit ruby 
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/nslcd
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/nslcd
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-nslcd_git.bbappend b/meta-stx/recipes-support/puppet/puppet-nslcd_git.bbappend
new file mode 100644 (file)
index 0000000..0c15937
--- /dev/null
@@ -0,0 +1,23 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/puppet-nslcd-updates-for-poky-stx.patch \
+       "
+
+RDEPENDS_${PN} += "nss-pam-ldapd"
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-nssdb_git.bb b/meta-stx/recipes-support/puppet/puppet-nssdb_git.bb
new file mode 100644 (file)
index 0000000..ca8bbc2
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "NSS database Puppet Module"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=0409d65ae3bec182108fd45c64bd0ef2"
+
+PV = "1.0.1"
+SRCREV = "2e163a21fb80d828afede2d4be6214f1171c4887"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/rcritten/puppet-nssdb.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       file://${PN}/metadata.json.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-nssdb-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/nssdb
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/nssdb
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-openstacklib_git.bb b/meta-stx/recipes-support/puppet/puppet-openstacklib_git.bb
new file mode 100644 (file)
index 0000000..3867d92
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Puppet OpenStack Libraries."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=12a15a9ebddda7d856c783f745e5ee47"
+
+PV = "11.3.0"
+SRCREV = "79a799f5d78667b5eee81e71782e8591f2e62ecc"
+PROTOCOL = "https"
+BRANCH = "stable/pike"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/puppet-openstacklib.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-openstacklib/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-openstacklib-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/openstacklib
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/openstacklib
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-openstacklib_git.bbappend b/meta-stx/recipes-support/puppet/puppet-openstacklib_git.bbappend
new file mode 100644 (file)
index 0000000..c96f9b6
--- /dev/null
@@ -0,0 +1,22 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/0001-Roll-up-TIS-patches.patch \
+       file://${BPN}/0002-puppet-openstacklib-updates-for-poky-stx.patch \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-oslo_git.bb b/meta-stx/recipes-support/puppet/puppet-oslo_git.bb
new file mode 100644 (file)
index 0000000..6a0ec5a
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "The oslo module is a part of OpenStack intended to provide continuous integration testing and code review."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=86e96afa80a52eedda5dccc1af36b4f2"
+
+PV = "11.3.0"
+SRCREV = "5ad200e9d8af200a8f50f86a2db4cf3f36ab2a4c"
+PROTOCOL = "https"
+BRANCH = "stable/pike"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/openstack/puppet-oslo.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-oslo/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppet-oslo-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/oslo
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/oslo
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-oslo_git.bbappend b/meta-stx/recipes-support/puppet/puppet-oslo_git.bbappend
new file mode 100644 (file)
index 0000000..4553bb1
--- /dev/null
@@ -0,0 +1,29 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://puppet-oslo/0001-Remove-log_dir-from-conf-files.patch \
+       file://puppet-oslo/0002-add-psycopg2-drivername-to-postgresql-settings.patch \
+       "
+
+do_install_append () {
+       # fix the name of python-memcached
+       sed -i -e 's/python-memcache\b/python-memcached/' ${D}/${datadir}/puppet/modules/oslo/manifests/params.pp
+}
+
+RDEPENDS_${PN} += "python-memcached"
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-puppi_git.bb b/meta-stx/recipes-support/puppet/puppet-puppi_git.bb
new file mode 100644 (file)
index 0000000..8057900
--- /dev/null
@@ -0,0 +1,57 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Installs and configures Puppi."
+DESCRIPTION = " \
+	This module provides the Puppi libraries required by Example42 modules and, if explicitly included, the \
+       puppi command, its working environment, the defines and procedures to deploy applications.\
+       "
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=a300b604c66de62cf6e923cca89c9d83"
+
+PV = "2.2.3"
+SRCREV = "c1c47f4edfd761d1bbde32a75da0c3fa7cc93a81"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/example42/puppi;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       " 
+
+inherit ruby 
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/puppi
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/puppi
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-puppi_git.bbappend b/meta-stx/recipes-support/puppet/puppet-puppi_git.bbappend
new file mode 100644 (file)
index 0000000..ff545ef
--- /dev/null
@@ -0,0 +1,22 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/puppet-puppi-updates-for-poky-stx.patch \
+       file://${BPN}/puppet-puppi-adjust-path.patch \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppet-staging_git.bb b/meta-stx/recipes-support/puppet/puppet-staging_git.bb
new file mode 100644 (file)
index 0000000..98a81f7
--- /dev/null
@@ -0,0 +1,54 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Manages staging directory, along with download/extraction of compressed files."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=cdbf8d74b765504fbdf8e154bb4458a1"
+
+PV = "1.0.4"
+SRCREV = "bc434a71e19aae54223d57c274e2e1a7f9546d5e"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/nanliu/puppet-staging;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppet-staging/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_BUILD_GEMS = "nanliu-staging.gemspec"
+RUBY_INSTALL_GEMS = "nanliu-staging-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/staging
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/staging
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-sysctl_git.bb b/meta-stx/recipes-support/puppet/puppet-sysctl_git.bb
new file mode 100644 (file)
index 0000000..cfea440
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "This modules allows you to configure sysctl."
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://README.md;md5=b5335702ab6b120493cf88faaf9be346"
+
+PV = "0.0.11"
+SRCREV = "65ffe839a4ce785bc3901452488197a0ef158cd8"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/duritong/puppet-sysctl;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_BUILD_GEMS = "duritong-sysctl.gemspec"
+RUBY_INSTALL_GEMS = "duritong-sysctl-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/sysctl
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/sysctl
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-vlan_git.bb b/meta-stx/recipes-support/puppet/puppet-vlan_git.bb
new file mode 100644 (file)
index 0000000..b3c422e
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Puppet module to manage VLAN interface configuration"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=f05b73a1f91c0e30dece85ed11819aca"
+
+PV = "0.1.0"
+SRCREV = "c937de75c28e63fba8d8738ad6a5f2ede517e53d"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/derekhiggins/puppet-vlan.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       file://${PN}/metadata.json.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/vlan
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/vlan
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppet-vswitch_%.bbappend b/meta-stx/recipes-support/puppet/puppet-vswitch_%.bbappend
new file mode 100644 (file)
index 0000000..e4cec84
--- /dev/null
@@ -0,0 +1,25 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/vswitch
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/vswitch
+}
+
+FILES_${PN} += " ${datadir}"
+
+inherit openssl10
+RDEPENDS_${PN}_append = " perl"
diff --git a/meta-stx/recipes-support/puppet/puppet_4.8.2.bb b/meta-stx/recipes-support/puppet/puppet_4.8.2.bb
new file mode 100644 (file)
index 0000000..677faba
--- /dev/null
@@ -0,0 +1,79 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Open source Puppet is a configuration management system"
+HOMEPAGE = "https://puppetlabs.com/puppet/puppet-open-source"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=7c9045ec00cc0d6b6e0e09ee811da4a0"
+
+SRC_URI = " \
+    https://downloads.puppetlabs.com/puppet/puppet-${PV}.tar.gz \
+    file://${BPN}/${PV}/add_puppet_gemspec.patch \
+    file://${BPN}/${PV}/puppet-updates-for-poky-stx.patch \
+    file://${BPN}/${PV}/puppet-poky-dnf.patch \
+    file://${BPN}/${PV}/puppet.conf \
+    file://${BPN}/${PV}/puppet.init \
+    file://${BPN}/${PV}/puppet.service \
+"
+SRC_URI[md5sum] = "095ef8bddd94dd1ee0562a3c816ab05f"
+SRC_URI[sha256sum] = "fc71ca0be64b4b1282e0064b8fbf115c547cb87ca6b209da56e1b9569567404a"
+
+inherit ruby update-rc.d systemd
+inherit openssl10
+
+DEPENDS += " \
+        ruby \
+        facter \
+       libffi \
+"
+
+RDEPENDS_${PN} += " \
+        ruby \
+        facter \
+        ruby-shadow \
+        bash \
+"
+
+RUBY_INSTALL_GEMS = "puppet-${PV}.gem"
+
+INITSCRIPT_NAME = "${BPN}"
+INITSCRIPT_PARAMS = "start 02 5 3 2 . stop 20 0 1 6 ."
+
+SYSTEMD_AUTO_ENABLE = "enable"
+SYSTEMD_PACKAGES = "${PN}"
+SYSTEMD_SERVICE_${PN} = "${BPN}.service"
+
+do_install_append() {
+    install -d ${D}${sysconfdir}/puppet
+    install -d ${D}${sysconfdir}/puppet/manifests
+    install -d ${D}${sysconfdir}/puppet/modules
+
+    install -m 655 ${S}/conf/auth.conf ${D}${sysconfdir}/puppet/
+    install -m 655 ${S}/conf/fileserver.conf ${D}${sysconfdir}/puppet/
+    install -m 655 ${S}/conf/environment.conf ${D}${sysconfdir}/puppet/
+    install -m 655 ${WORKDIR}/${BPN}/${PV}/puppet.conf ${D}${sysconfdir}/puppet/
+
+    install -d ${D}${systemd_unitdir}/system
+    install -m 0644 ${WORKDIR}/${BPN}/${PV}/puppet.service ${D}${systemd_unitdir}/system
+
+    install -d ${D}${sysconfdir}/init.d
+    install -m 0755 ${WORKDIR}/${BPN}/${PV}/puppet.init ${D}${sysconfdir}/init.d/puppet
+
+    # Install puppet environment and modulepath
+
+    install -m 0755 -d ${D}/${sysconfdir}/puppetlabs/code/environments/production
+    echo "modulepath = /usr/share/puppet/modules:/usr/share/openstack-puppet/modules" >  \
+       ${D}/${sysconfdir}/puppetlabs/code/environments/production/environment.conf
+}
diff --git a/meta-stx/recipes-support/puppet/puppet_5.4.0.bbappend b/meta-stx/recipes-support/puppet/puppet_5.4.0.bbappend
new file mode 100644 (file)
index 0000000..42c3a23
--- /dev/null
@@ -0,0 +1,20 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/puppet-updates-for-poky-stx.patch \
+       file://${BPN}/puppet-poky-yum.patch \
+       "
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-apache_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-apache_git.bb
new file mode 100644 (file)
index 0000000..23294a6
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Installs, configures, and manages Apache virtual hosts, web services, and modules."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "1.10.0"
+SRCREV = "410309f5facd0df7d836ea66c27ca9514031b6e3"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-apache.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppetlabs-apache-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/apache
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/apache
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-apache_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-apache_git.bbappend
new file mode 100644 (file)
index 0000000..eef009d
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/puppetlabs-apache-updates-for-poky-stx.patch \
+       "
+#SRC_URI += " \
+#      file://${BPN}/0001-maint-Fix-conditional-in-vhost-ssl-template.patch \
+#      file://${BPN}/0002-maint-Fix-the-vhost-ssl-template-correctly-this-time.patch \
+#      "
+
+RDEPENDS_${PN} += " \
+       apache2 \
+       mod-wsgi \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-concat_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-concat_git.bb
new file mode 100644 (file)
index 0000000..8fea71b
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Construct files from multiple fragments."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "2.2.0"
+SRCREV = "fdf4a84534ccb1b9ae8ffb654c7e160a31e294ee"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-concat.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+    file://puppetlabs-concat/Add-gemspec.patch \
+    "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppetlabs-concat-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/concat
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/concat
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-create-resources_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-create-resources_git.bb
new file mode 100644 (file)
index 0000000..a20bcb2
--- /dev/null
@@ -0,0 +1,55 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Function to dynamically create resources from hashes."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=6089b6bd1f0d807edb8bdfd76da0b038 "
+
+PV = "0.0.1"
+STABLE = "master"
+PROTOCOL = "https"
+BRANCH = "master"
+SRCREV = "4639819a7f3a4fa9310d2ba583c63e467df7e2c3"
+S = "${WORKDIR}/git"
+
+
+SRC_URI = " \
+       git://github.com/puppetlabs/puppetlabs-create_resources.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       file://${PN}/metadata.json.patch \
+       "
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append () {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/create_resources
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/create_resources
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-firewall_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-firewall_git.bb
new file mode 100644 (file)
index 0000000..627707e
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Manages Firewalls such as iptables"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "1.8.2"
+SRCREV = "23016934d23c5c2f3f3edbc2ec8279f8faac2457"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-firewall;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppetlabs-firewall/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppetlabs-firewall-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/firewall
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/firewall
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-firewall_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-firewall_git.bbappend
new file mode 100644 (file)
index 0000000..895a495
--- /dev/null
@@ -0,0 +1,23 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+       file://${BPN}/puppet-firewall-updates-for-poky-stx.patch \
+       file://${BPN}/puppet-firewall-random-fully-support.patch \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-haproxy_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-haproxy_git.bb
new file mode 100644 (file)
index 0000000..2b31c89
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Configures HAProxy servers and manages the configuration of backend member servers."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "1.5.0"
+SRCREV = "3ac513c0ceb3bcfe35dd2936875189ccfc991a34"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-haproxy;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppetlabs-haproxy/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppetlabs-haproxy-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/haproxy
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/haproxy
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-haproxy_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-haproxy_git.bbappend
new file mode 100644 (file)
index 0000000..d44a8ad
--- /dev/null
@@ -0,0 +1,24 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://puppetlabs-haproxy/0001-Roll-up-TIS-patches.patch \
+       file://puppetlabs-haproxy/0002-disable-config-validation-prechecks.patch \
+       file://puppetlabs-haproxy/0003-Fix-global_options-log-default-value.patch \
+       file://puppetlabs-haproxy/0004-Stop-invalid-warning-message.patch \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-inifile.bb b/meta-stx/recipes-support/puppet/puppetlabs-inifile.bb
new file mode 100644 (file)
index 0000000..4115dfc
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Resource types for managing settings in INI files"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "1.6.0"
+SRCREV = "88bf9868b532ddf556bdb617f67eda9de0b8dc0f"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-inifile;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppetlabs-inifile/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppetlabs-inifile-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/inifile
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/inifile
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-lvm_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-lvm_git.bb
new file mode 100644 (file)
index 0000000..02c7d7c
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Provides Puppet types and providers to manage Logical Resource Management (LVM) features."
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=751419260aa954499f7abaabaa882bbe"
+
+PV = "0.5.0"
+SRCREV = "d0283da637ae24550fb4ba109a48ef8d5d8c8b84"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-lvm;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby 
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/lvm
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/lvm/
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-lvm_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-lvm_git.bbappend
new file mode 100644 (file)
index 0000000..7c5b27b
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${PN}/0001-puppet-lvm-kilo-quilt-changes.patch;striplevel=5 \
+       file://${PN}/0002-UEFI-pvcreate-fix.patch;striplevel=5 \
+       file://${PN}/0003-US94222-Persistent-Dev-Naming.patch;striplevel=5 \
+       file://${PN}/0004-extendind-nuke_fs_on_resize_failure-functionality.patch;striplevel=5 \
+       file://${PN}/Fix-the-logical-statement-for-nuke_fs_on_resize.patch;striplevel=5 \
+       "
+RDEPENDS_${PN} += " \
+       lvm2 \
+       lvm2-scripts \
+       lvm2-udevrules \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-mysql_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-mysql_git.bb
new file mode 100644 (file)
index 0000000..cb8668c
--- /dev/null
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "The mysql module installs, configures, and manages the MySQL service."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "3.11.0"
+SRCREV = "920dd76214d87d9b26f0db105886ee89ac266c4e"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-mysql.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+    file://puppetlabs-mysql/Add-gemspec.patch \
+    "
+#   file://puppetlabs-mysql/0001-Fix-ruby-path.patch
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppetlabs-mysql-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/mysql
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/mysql
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-mysql_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-mysql_git.bbappend
new file mode 100644 (file)
index 0000000..d18958c
--- /dev/null
@@ -0,0 +1,23 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/0001-Stx-uses-nanliu-staging-module.patch \
+       file://${BPN}/0002-puppet-mysql-changes-for-poky-stx.patch \
+       file://${BPN}/0003-puppet-mysqltuner-adjust-path.patch \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-postgresql_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-postgresql_git.bb
new file mode 100644 (file)
index 0000000..d8ca7de
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "A Puppet module for managing PostgreSQL databases."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "4.8.0"
+SRCREV = "d022a56b28b2174456fc0f6adc51a4b54493afad"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+       git://github.com/puppetlabs/puppetlabs-postgresql;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby 
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       "
+
+RUBY_INSTALL_GEMS = "${PN}-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/postgresql
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/postgresql
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-postgresql_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-postgresql_git.bbappend
new file mode 100644 (file)
index 0000000..26466d6
--- /dev/null
@@ -0,0 +1,49 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+SRC_URI += " \
+       file://${BPN}/0001-Roll-up-TIS-patches.patch \
+       file://${BPN}/0002-remove-puppetlabs-apt-as-a-requirement.patch \
+       file://${BPN}/0003-puppetlabs-postgresql-account-for-naming-diffs.patch \
+       file://${BPN}/0004-poky-postgresql-updates.patch \
+       file://${BPN}/0005-puppetlabs-postgresql-poky.patch \
+       file://${BPN}/0006-adjust_path-remove-refs-to-local-bin.patch \
+       file://${BPN}/postgresql.service \
+       "
+
+#      file://${PN}/0004-postgresql-service-restart-with-systemctl.patch 
+
+RDEPENDS_${PN}_append = " \
+       postgresql \
+       postgresql-contrib \
+       postgresql-client \
+       postgresql-timezone \
+       postgresql-plperl \
+       postgresql-plpython \
+       "
+#postgresql-dev
+#postgresql-pltcl
+#postgresql-setup
+
+
+do_install_append() {
+       install -d -m0755 ${D}/usr/lib/systemd/system
+	install -m0644 ${WORKDIR}/${BPN}/postgresql.service ${D}/usr/lib/systemd/system
+}
+
+FILES_${PN}_append = " /usr/lib/systemd/system/postgresql.service"
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-rabbitmq_git.bb b/meta-stx/recipes-support/puppet/puppetlabs-rabbitmq_git.bb
new file mode 100644 (file)
index 0000000..422e1a8
--- /dev/null
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+SUMMARY = "Installs, configures, and manages RabbitMQ."
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+PV = "5.6.0"
+SRCREV = "5ac45dedd9b409c9efac654724bc74867cb9233b"
+PROTOCOL = "https"
+BRANCH = "master"
+S = "${WORKDIR}/git"
+
+SRC_URI = "git://github.com/puppetlabs/puppetlabs-rabbitmq;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${PN}/Add-gemspec.patch \
+       "
+
+inherit ruby
+
+DEPENDS += " \
+       ruby \
+       facter \
+       "
+
+RDEPENDS_${PN} += " \
+       ruby \
+       facter \
+       puppet \
+       perl \
+       "
+
+RUBY_INSTALL_GEMS = "puppetlabs-rabbitmq-${PV}.gem"
+
+do_install_append() {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/rabbitmq
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/rabbitmq
+}
+
+FILES_${PN} += " ${datadir}"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-rabbitmq_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-rabbitmq_git.bbappend
new file mode 100644 (file)
index 0000000..a45bf1c
--- /dev/null
@@ -0,0 +1,30 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+SRC_URI += " \
+       file://${BPN}/0001-Roll-up-TIS-patches.patch \
+       file://${BPN}/0002-Changed-cipher-specification-to-openssl-format.patch \
+       file://${BPN}/0004-Partially-revert-upstream-commit-f7c3a4a637d59f3065d.patch \
+       file://${BPN}/0005-Remove-the-rabbitmq_nodename-fact.patch \
+       file://${BPN}/0007-init.pp-do-not-check-the-apt-resource.patch \
+       file://${BPN}/0008-puppet-rabbitmq-poky.patch \
+       file://${BPN}/0009-remove-apt-requirement.patch \
+       "
+
+inherit openssl10
+
+DEPENDS_append = " puppet-staging"
diff --git a/meta-stx/recipes-support/puppet/puppetlabs-stdlib_git.bbappend b/meta-stx/recipes-support/puppet/puppetlabs-stdlib_git.bbappend
new file mode 100644 (file)
index 0000000..7003fce
--- /dev/null
@@ -0,0 +1,58 @@
+#
+## Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRCREV = "b89d5f388ca701e38a0e0337408f5ccb7e68565f"
+PROTOCOL = "https"
+BRANCH = "master"
+PV = "4.18.0"
+
+SRC_URI = " \
+       git://github.com/puppetlabs/puppetlabs-stdlib.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://puppetlabs-stdlib/Add-gemspec.patch \
+       file://puppetlabs-stdlib/0001-Filter-password-in-logs.patch \
+       "
+
+S = "${WORKDIR}/git"
+
+RUBY_INSTALL_GEMS = "puppetlabs-stdlib-${PV}.gem"
+
+do_install_append () {
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/stdlib
+       tar -C ${S} -cf - --exclude "patches" --exclude "*.gem*" . | tar --no-same-owner -xf - -C ${D}/${datadir}/puppet/modules/stdlib
+}
+
+FILES_${PN} += " ${datadir}"
+
+RDEPENDS_${PN}_append = " perl"
diff --git a/meta-stx/recipes-support/puppet/stx-puppet_git.bb b/meta-stx/recipes-support/puppet/stx-puppet_git.bb
new file mode 100644 (file)
index 0000000..e58d2dd
--- /dev/null
@@ -0,0 +1,272 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "stx-puppet modules"
+
+STABLE = "starlingx/master"
+PROTOCOL = "https"
+SRCNAME = "stx-puppet"
+BRANCH = "r/stx.3.0"
+SRCREV = "678fe78b72b70e213eae32b1932afe97cc8c16b4"
+S = "${WORKDIR}/git"
+PV = "1.0.0"
+
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = " \
+       file://modules/puppet-dcdbsync/src/LICENSE;md5=0e5ccf641e613489e66aa98271dbe798 \
+       file://modules/puppet-dcmanager/src/LICENSE;md5=0e5ccf641e613489e66aa98271dbe798 \
+       file://modules/puppet-dcorch/src/LICENSE;md5=0e5ccf641e613489e66aa98271dbe798 \
+       file://modules/puppet-fm/src/LICENSE;md5=0e5ccf641e613489e66aa98271dbe798 \
+       file://modules/puppet-mtce/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://modules/puppet-nfv/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://modules/puppet-patching/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://modules/puppet-smapi/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://modules/puppet-sshd/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       file://modules/puppet-sysinv/src/LICENSE;md5=0e5ccf641e613489e66aa98271dbe798 \
+       file://puppet-manifests/src/LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57 \
+       "
+
+# Disabled patch, kept for reference: file://${BPN}/use-cast-operator.patch
+SRC_URI = " \
+       git://opendev.org/starlingx/${SRCNAME}.git;protocol=${PROTOCOL};rev=${SRCREV};branch=${BRANCH} \
+       file://${BPN}/0001-puppet-manifest-apply-rebase-adjust-path.patch \
+       file://${BPN}/0002-puppet-manifests-port-Adjust-path-default-bindir.patch \
+       file://${BPN}/0003-puppet-dcmanager-updates-for-poky-stx.patch \
+       file://${BPN}/0004-puppet-dcorch-updates-for-poky-stx.patch \
+       file://${BPN}/0005-puppet-sysinv-updates-for-poky-stx.patch \
+       file://${BPN}/0006-puppet-manifest-apply-do-not-treat-warnings-as-block.patch \
+       file://${BPN}/0007-puppet-manifests-etcd-override-typo-and-journalctl.patch \
+       file://${BPN}/0008-puppet-manifests-keystone-include-platform-client.patch \
+       file://${BPN}/0009-puppet-manifests-lvm-remove-lvmetad.patch \
+       file://${BPN}/0010-puppet-manifest-apply-workaround-to-ignore-known-err.patch \
+       file://${BPN}/get-boot-device-from-cmdline.patch \
+       file://${BPN}/poky-specific-apply-network-config-script.patch \
+       file://${BPN}/apply_network_config_poky.sh \
+       "
+
+RDEPENDS_${PN} += " \
+       bash puppet \
+       e2fsprogs-resize2fs \
+       hiera \
+       kpartx \
+       multipath-tools \
+       multipath-tools-libs \
+       ntpdate \
+       puppet-staging \
+       puppet-oslo \
+       puppetlabs-apache \
+       puppetlabs-mysql \
+       "
+
+# WRS puppet modules
+RDEPENDS_puppet-manifests += " \
+       puppet-dcorch \
+       puppet-dcmanager \
+       puppet-mtce \
+       puppet-nfv \
+       puppet-patching \
+       puppet-sysinv \
+       puppet-sshd \
+       puppet-smapi \
+       puppet-fm \
+       puppet-dcdbsync \
+       "
+# Openstack puppet modules
+RDEPENDS_puppet-manifests += " \
+       puppet-barbican \
+       puppet-ceph \
+       puppet-horizon \
+       puppet-keystone \
+       puppet-openstacklib \
+       puppet-vswitch \
+       puppet-memcached \
+       "
+
+# Puppetlabs puppet modules 
+RDEPENDS_puppet-manifests += " \
+        puppetlabs-concat \
+        puppetlabs-create-resources \
+        puppet-drbd \
+        puppetlabs-firewall \
+        puppetlabs-haproxy \
+        puppetlabs-inifile \
+        puppetlabs-lvm \
+        puppetlabs-postgresql \
+        puppetlabs-rabbitmq \
+        puppetlabs-stdlib \
+        puppet-sysctl \
+        puppet-etcd \
+       "
+
+# 3rd party puppet modules
+RDEPENDS_puppet-manifests += " \
+        puppet-boolean \
+        puppet-certmonger \
+        puppet-dnsmasq \
+        puppet-filemapper \
+        puppet-kmod \
+        puppet-ldap \
+        puppet-network \
+        puppet-nslcd \
+        puppet-nssdb \
+        puppet-puppi \
+        puppet-vlan \
+        puppet-collectd \
+       "
+
+
+RDEPENDS_puppet-mtce += " puppet"
+RDEPENDS_puppet-dcdbsync += " puppet" 
+RDEPENDS_puppet-dcmanager += " puppet"
+RDEPENDS_puppet-dcorch += " puppet"
+RDEPENDS_puppet-fm += " puppet"
+RDEPENDS_puppet-nfv += " puppet"
+RDEPENDS_puppet-patching += " puppet"
+RDEPENDS_puppet-smapi += " puppet"
+RDEPENDS_puppet-sshd += " puppet"
+RDEPENDS_puppet-sysinv += " puppet"
+
+PACKAGES += " puppet-dcdbsync"
+PACKAGES += " puppet-dcmanager"
+PACKAGES += " puppet-dcorch"
+PACKAGES += " puppet-fm"
+PACKAGES += " puppet-mtce"
+PACKAGES += " puppet-nfv"
+PACKAGES += " puppet-patching"
+PACKAGES += " puppet-smapi"
+PACKAGES += " puppet-sshd"
+PACKAGES += " puppet-sysinv"
+PACKAGES += " puppet-manifests"
+
+do_install() {
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/dcdbsync
+       cp -R ${S}/modules/puppet-dcdbsync/src/dcdbsync ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/dcmanager
+       cp -R ${S}/modules/puppet-dcmanager/src/dcmanager ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/dcorch
+       cp -R ${S}/modules/puppet-dcorch/src/dcorch/ ${D}/${datadir}/puppet/modules/
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/fm
+       cp -R ${S}/modules/puppet-fm/src/fm ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/mtce
+       cp -R ${S}/modules/puppet-mtce/src/mtce ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/nfv
+       cp -R ${S}/modules/puppet-nfv/src/nfv ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/patching
+       cp -R ${S}/modules/puppet-patching/src/patching ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/smapi
+       cp -R ${S}/modules/puppet-smapi/src/smapi ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/sshd
+       cp -R ${S}/modules/puppet-sshd/src/sshd ${D}/${datadir}/puppet/modules
+
+       install -d -m 0755 ${D}/${datadir}/puppet/modules/sysinv
+       cp -R ${S}/modules/puppet-sysinv/src/sysinv ${D}/${datadir}/puppet/modules
+
+       cd ${S}/puppet-manifests/src
+       oe_runmake BINDIR=${D}/${bindir} \
+               CONFIGDIR=${D}/${sysconfdir}/puppet/ \
+               MODULEDIR=${D}/${datadir}/puppet/modules -f Makefile install
+
+       # fix the path for systemctl
+       sed -i -e 's|${bindir}/systemctl|${base_bindir}/systemctl|' ${D}/${datadir}/puppet/modules/platform/manifests/*.pp
+       sed -i -e 's|${bindir}|${bindir}:${base_bindir}|' ${D}/${datadir}/puppet/modules/sysinv/manifests/api.pp
+
+       # fix the path for mount and awk
+       sed -i -e 's|${bindir}|${bindir}:${base_bindir}|' ${D}/${datadir}/puppet/modules/platform/manifests/filesystem.pp
+
+       # fix the path for slapd.conf
+       sed -i -e '/\/usr\/lib64\/openldap/d' ${D}/${datadir}/puppet/modules/platform/manifests/ldap.pp
+
+       # fix the libdir for collectd
+       sed -i -e 's|/usr/lib64|${libdir}|' ${D}/${datadir}/puppet/modules/platform/templates/collectd.conf.erb
+
+       install -m 0755 ${WORKDIR}/${PN}/apply_network_config_poky.sh  ${D}/${bindir}/apply_network_config_poky.sh
+}
+
+FILES_puppet-sysinv += " \
+       ${datadir}/puppet/modules/sysinv \
+       "
+
+FILES_puppet-sshd += " \
+       ${datadir}/puppet/modules/sshd \
+       "
+
+FILES_puppet-smapi += " \
+       ${datadir}/puppet/modules/smapi \
+       "
+FILES_puppet-patching += " \
+       ${datadir}/puppet/modules/patching \
+       "
+
+FILES_puppet-nfv += " \
+       ${datadir}/puppet/modules/nfv \
+       "
+
+FILES_puppet-mtce = "\
+       ${datadir}/puppet/modules/mtce \
+       "
+
+FILES_puppet-manifests = "\
+       ${sysconfdir}/puppet/hiera.yaml \
+       ${sysconfdir}/puppet/hieradata \
+       ${sysconfdir}/puppet/hieradata/worker.yaml \
+       ${sysconfdir}/puppet/hieradata/storage.yaml \
+       ${sysconfdir}/puppet/hieradata/global.yaml \
+       ${sysconfdir}/puppet/hieradata/controller.yaml \
+       ${sysconfdir}/puppet/manifests/worker.pp \
+       ${sysconfdir}/puppet/manifests/ansible_bootstrap.pp \
+       ${sysconfdir}/puppet/manifests/bootstrap.pp \
+       ${sysconfdir}/puppet/manifests/runtime.pp \
+       ${sysconfdir}/puppet/manifests/storage.pp \
+       ${sysconfdir}/puppet/manifests/upgrade.pp \
+       ${sysconfdir}/puppet/manifests/controller.pp \
+       ${datadir}/puppet/modules/openstack/manifests \
+       ${datadir}/puppet/modules/openstack/templates \
+       ${datadir}/puppet/modules/platform/manifests \
+       ${datadir}/puppet/modules/platform/templates \
+       ${datadir}/puppet/modules/platform/files \
+       ${datadir}/puppet/modules/platform/lib/facter \
+       ${datadir}/puppet/modules/platform/lib/puppet \
+       ${bindir}/puppet-manifest-apply.sh \
+       ${bindir}/apply_network_config.sh \
+       ${bindir}/apply_network_config_poky.sh \
+       "
+FILES_puppet-fm += " \
+       ${datadir}/puppet/modules/fm \
+       "
+
+FILES_puppet-dcorch += " \
+       ${datadir}/puppet/modules/dcorch \
+       "
+
+FILES_puppet-dcmanager += " \
+       ${datadir}/puppet/modules/dcmanager \
+       "
+
+FILES_puppet-dcdbsync += " \
+       ${datadir}/puppet/modules/dcdbsync \
+       "
+
+inherit openssl10
diff --git a/meta-stx/recipes-support/qpid-proton/qpid-proton/fix-missing-libary-for-cpp-binding.patch b/meta-stx/recipes-support/qpid-proton/qpid-proton/fix-missing-libary-for-cpp-binding.patch
new file mode 100644 (file)
index 0000000..4c6eaf2
--- /dev/null
@@ -0,0 +1,97 @@
+diff -urN qpid-proton-0.28.0~/CMakeLists.txt qpid-proton-0.28.0/CMakeLists.txt
+--- qpid-proton-0.28.0~/CMakeLists.txt 2019-08-20 15:03:13.807056363 +0800
++++ qpid-proton-0.28.0/CMakeLists.txt  2019-08-21 11:15:26.282136822 +0800
+@@ -222,15 +222,6 @@
+ # So make these cached variables and the specific variables non cached
+ # and derived from them.
+-if (NOT DEFINED LIB_SUFFIX)
+-    get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
+-    if ("${LIB64}" STREQUAL "TRUE" AND ${CMAKE_SIZEOF_VOID_P} STREQUAL "8")
+-        set(LIB_SUFFIX 64)
+-    else()
+-        set(LIB_SUFFIX "")
+-    endif()
+-endif()
+-
+ # Start of variables used during install
+ set (INCLUDE_INSTALL_DIR include CACHE PATH "Include file directory")
+ set (LIB_INSTALL_DIR "lib${LIB_SUFFIX}" CACHE PATH "Library object file directory")
+@@ -354,7 +345,7 @@
+   # DEFAULT_{uppercase name of binding} to ON
+   # Prerequisites for Python wrapper:
+-  find_package (PythonLibs ${PYTHON_VERSION_STRING} EXACT)
++  find_package (PythonLibs 2.7 REQUIRED)
+   # TODO aconway 2018-09-07: get python binding tests working with sanitizers
+   if (PYTHONLIBS_FOUND AND NOT SANITIZE_FLAGS)
+     set (DEFAULT_PYTHON ON)
+@@ -393,10 +384,6 @@
+ unset(BUILD_BINDINGS CACHE) # Remove from cache, only relevant when creating the initial cache.
+-install (FILES LICENSE.txt README.md tests/share/CMakeLists.txt DESTINATION ${PROTON_SHARE})
+-install (FILES tests/share/examples-README.md RENAME README.md DESTINATION ${PROTON_SHARE}/examples)
+-install (DIRECTORY tests DESTINATION ${PROTON_SHARE} PATTERN share EXCLUDE)
+-
+ # Generate test environment settings
+ configure_file(${CMAKE_SOURCE_DIR}/misc/config.sh.in
+                ${CMAKE_BINARY_DIR}/config.sh @ONLY)
+diff -urN qpid-proton-0.28.0~/cpp/CMakeLists.txt qpid-proton-0.28.0/cpp/CMakeLists.txt
+--- qpid-proton-0.28.0~/cpp/CMakeLists.txt     2019-08-20 15:03:13.763056364 +0800
++++ qpid-proton-0.28.0/cpp/CMakeLists.txt      2019-08-20 15:06:51.195053615 +0800
+@@ -269,7 +269,7 @@
+ macro(add_cpp_test test)
+   add_executable (${test} src/${test}.cpp)
+-  target_link_libraries (${test} qpid-proton-cpp ${PLATFORM_LIBS})
++  target_link_libraries (${test} qpid-proton-cpp qpid-proton-core qpid-proton-proactor ${PLATFORM_LIBS})
+   add_test (NAME cpp-${test}
+     COMMAND ${PN_ENV_SCRIPT} -- ${test_env}  ${TEST_EXE_PREFIX_CMD} $<TARGET_FILE:${test}> ${ARGN})
+ endmacro(add_cpp_test)
+@@ -287,7 +287,7 @@
+ add_cpp_test(credit_test)
+ if (ENABLE_JSONCPP)
+   add_cpp_test(connect_config_test)
+-  target_link_libraries(connect_config_test qpid-proton-core) # For pn_sasl_enabled
++  target_link_libraries(connect_config_test qpid-proton-core qpid-proton-proactor) # For pn_sasl_enabled
+   set_tests_properties(cpp-connect_config_test PROPERTIES WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}")
+   # Test data and output directories for connect_config_test
+   file(COPY  "${CMAKE_CURRENT_SOURCE_DIR}/testdata" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+@@ -300,7 +300,7 @@
+ include_directories(${CMAKE_SOURCE_DIR}/tests/include)
+ add_executable(cpp-test src/cpp-test.cpp src/url_test.cpp)
+-target_link_libraries(cpp-test qpid-proton-cpp ${PLATFORM_LIBS})
++target_link_libraries(cpp-test qpid-proton-cpp qpid-proton-core qpid-proton-proactor ${PLATFORM_LIBS})
+ macro(add_catch_test tag)
+   add_test (
+diff -urN qpid-proton-0.28.0~/cpp/examples/CMakeLists.txt qpid-proton-0.28.0/cpp/examples/CMakeLists.txt
+--- qpid-proton-0.28.0~/cpp/examples/CMakeLists.txt    2019-08-20 15:03:13.767056363 +0800
++++ qpid-proton-0.28.0/cpp/examples/CMakeLists.txt     2019-08-20 15:26:18.731038854 +0800
+@@ -82,6 +82,7 @@
+     ssl_client_cert
+     encode_decode)
+   add_executable(${example} ${example}.cpp)
++  target_link_libraries(${example} ${CMAKE_THREAD_LIBS_INIT} qpid-proton-core qpid-proton-proactor)
+ endforeach()
+ if(HAS_ENOUGH_CPP11)
+@@ -90,6 +91,7 @@
+       scheduled_send
+       service_bus)
+     add_executable(${example} ${example}.cpp)
++    target_link_libraries(${example} ${CMAKE_THREAD_LIBS_INIT} qpid-proton-core qpid-proton-proactor)
+   endforeach()
+   # Examples that use threads directly
+@@ -98,7 +100,7 @@
+         multithreaded_client
+         multithreaded_client_flow_control)
+       add_executable(${example} ${example}.cpp)
+-      target_link_libraries(${example} ${CMAKE_THREAD_LIBS_INIT})
++      target_link_libraries(${example} ${CMAKE_THREAD_LIBS_INIT} qpid-proton-core qpid-proton-proactor)
+     endforeach()
+   endif()
+ endif()
diff --git a/meta-stx/recipes-support/qpid-proton/qpid-proton_0.28.0.bb b/meta-stx/recipes-support/qpid-proton/qpid-proton_0.28.0.bb
new file mode 100644 (file)
index 0000000..9587e43
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Qpid Proton is a high-performance, lightweight messaging library."
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=b26578452df1dcf3b9a63978282b72d0"
+
+SRC_URI = "http://archive.apache.org/dist/qpid/proton/0.28.0/qpid-proton-0.28.0.tar.gz \
+           file://fix-missing-libary-for-cpp-binding.patch \
+          "
+
+SRC_URI[sha256sum] = "224e599a4e965a016087b6ce683e55ca918493e12cdd6d91dac1c17d64a7dafe"
+
+DEPENDS = "openssl swig-native python"
+
+inherit cmake pkgconfig
+
+EXTRA_OECMAKE = "-DCMAKE_INSTALL_PREFIX=/usr -DSYSINSTALL_BINDINGS=ON -DPYTHON_EXECUTABLE=`which python`"
+BBCLASSEXTEND += "native"
+
+PACKAGES =+ "\
+         ${PN}-cpp \
+         python-${PN} \
+         "
+
+#FILES_qpid-proton-c = "${libdir}/libqpid-proton.so.*" 
+#FILES_qpid-proton-c += "${libdir}/libqpid-proton-core.so.*"
+#FILES_qpid-proton-c += "${libdir}/libqpid-proton-proactor.so.*"
+
+FILES_qpid-proton-cpp = "${libdir}/libqpid-proton-cpp.so.*" 
+FILES_python-${PN} = "${libdir}/python*" 
+
+RPROVIDES_${PN} = "qpid-proton-c"
+
+do_install_append() {
+         rm -fr ${D}${datadir}
+}
diff --git a/meta-stx/recipes-support/ruby-shadow/ruby-shadow_git.bbappend b/meta-stx/recipes-support/ruby-shadow/ruby-shadow_git.bbappend
new file mode 100644 (file)
index 0000000..9e0da34
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DEPENDS += " libxcrypt"
diff --git a/meta-stx/recipes-support/sshpass/sshpass.inc b/meta-stx/recipes-support/sshpass/sshpass.inc
new file mode 100644 (file)
index 0000000..608ccce
--- /dev/null
@@ -0,0 +1,26 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "Non-interactive ssh password auth"
+HOMEPAGE = "http://sshpass.sourceforge.net/"
+SECTION = "console/network"
+LICENSE = "GPLv2"
+
+# SRC_URI = "${SOURCEFORGE_MIRROR}/sshpass/sshpass-${PV}.tar.gz"
+SRC_URI = "https://fossies.org/linux/privat/sshpass-${PV}.tar.gz"
+
+INC_PR = "r0"
+
+inherit autotools
diff --git a/meta-stx/recipes-support/sshpass/sshpass_1.06.bb b/meta-stx/recipes-support/sshpass/sshpass_1.06.bb
new file mode 100644 (file)
index 0000000..1b0b201
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+PR = "${INC_PR}.0"
+
+require sshpass.inc
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f"
+
+SRC_URI[md5sum] = "f59695e3b9761fb51be7d795819421f9"
+SRC_URI[sha256sum] = "c6324fcee608b99a58f9870157dfa754837f8c48be3df0f5e2f3accf145dee60"
+
diff --git a/meta-stx/recipes-support/syslog-ng/syslog-ng_%.bbappend b/meta-stx/recipes-support/syslog-ng/syslog-ng_%.bbappend
new file mode 100644 (file)
index 0000000..2caca32
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+do_install_append () {
+    rm -rf ${D}${systemd_unitdir}/system/multi-user.target.wants
+}
+
+SYSTEMD_AUTO_ENABLE = "disable"
diff --git a/meta-stx/recipes-upstream/python/python-boto3.inc b/meta-stx/recipes-upstream/python/python-boto3.inc
new file mode 100644 (file)
index 0000000..81d8e11
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+DESCRIPTION = "The AWS SDK for Python"
+HOMEPAGE = "https://aws.amazon.com/sdk-for-python/"
+AUTHOR = "Amazon Web Services"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=2ee41112a44fe7014dce33e26468ba93"
+
+SRC_URI[md5sum] = "a18e37ff05d0dd0a59e43e7bcfd79469"
+SRC_URI[sha256sum] = "b9c930982891229fe32c670c940835e4d5afcb52f60a5e512de8e5cba409900b"
+
+RDEPENDS_${PN} = "\
+    ${PYTHON_PN}-botocore \
+    ${PYTHON_PN}-jmespath \
+    ${PYTHON_PN}-s3transfer \
+"
+
+inherit pypi
diff --git a/meta-stx/recipes-upstream/python/python-boto3_1.10.25.bb b/meta-stx/recipes-upstream/python/python-boto3_1.10.25.bb
new file mode 100644 (file)
index 0000000..d777f82
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2019 Wind River Systems, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+
+inherit setuptools
+require python-boto3.inc