1 # ============LICENSE_START=======================================================
3 # ================================================================================
4 # Copyright © 2017 AT&T Intellectual Property. All rights reserved.
5 # ================================================================================
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # ============LICENSE_END=========================================================
18 # ECOMP is a trademark and service mark of AT&T Intellectual Property.
20 ###############################################################################
21 ###############################################################################
23 ## Cambria API Server config
25 ## Default values are shown as commented settings.
27 ###############################################################################
31 ## 3904 is standard as of 7/29/14.
33 ## Zookeeper Connection
35 ## Both Cambria and Kafka make use of Zookeeper.
37 config.zk.servers=zookeeper:2181
39 ###############################################################################
43 ## Items below are passed through to Kafka's producer and consumer
44 ## configurations (after removing "kafka.")
45 ## If you want to change request.required.acks, it can take the value shown below.
46 #kafka.metadata.broker.list=localhost:9092,localhost:9093
47 #kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
48 kafka.metadata.broker.list=kafka:9092
49 ##kafka.request.required.acks=-1
50 #kafka.client.zookeeper=${config.zk.servers}
51 consumer.timeout.ms=100
52 zookeeper.connection.timeout.ms=6000
53 zookeeper.session.timeout.ms=20000
54 zookeeper.sync.time.ms=2000
55 auto.commit.interval.ms=1000
56 fetch.message.max.bytes=1000000
57 auto.commit.enable=false
59 #(backoff*retries > zksessiontimeout)
60 kafka.rebalance.backoff.ms=10000
61 kafka.rebalance.max.retries=6
64 ###############################################################################
68 ## Some data stored in the config system is sensitive -- API keys and secrets,
69 ## for example. To protect it, we use an encryption layer for this section.
72 ## The key is a base64-encoded AES key. This must be created/configured for
72 ## each installation.
74 #cambria.secureConfig.key=
76 ## The initialization vector is a 16 byte value specific to the secured store.
77 ## This must be created/configured for each installation.
78 #cambria.secureConfig.iv=
# NOTE(review): the key, IV, and admin secret below are committed in plaintext;
# they should be supplied per deployment (e.g. via a secret store or templating)
# rather than checked into source control — confirm with the deployment owner.
81 cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
82 cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
83 authentication.adminSecret=fe3cCompound
86 ###############################################################################
90 ## Kafka expects live connections from the consumer to the broker, which
91 ## obviously doesn't work over connectionless HTTP requests. The Cambria
92 ## server proxies HTTP requests into Kafka consumer sessions that are kept
93 ## around for later re-use. Not doing so is costly for setup per request,
94 ## which would substantially impact a high volume consumer's performance.
96 ## This complicates Cambria server failover, because we often need server
97 ## A to close its connection before server B brings up the replacement.
100 ## The consumer cache is normally enabled.
101 #cambria.consumer.cache.enabled=true
103 ## Cached consumers are cleaned up after a period of disuse. The server inspects
104 ## consumers every sweepFreqSeconds and will clean up any connections that are
105 ## dormant for touchFreqMs.
106 #cambria.consumer.cache.sweepFreqSeconds=15
107 cambria.consumer.cache.touchFreqMs=120000
108 ##stickforallconsumerrequests=false
109 ## The cache is managed through ZK. The default value for the ZK connection
110 ## string is the same as config.zk.servers.
111 #cambria.consumer.cache.zkConnect=${config.zk.servers}
114 ## Shared cache information is associated with this node's name. The default
115 ## name is the hostname plus the HTTP service port this host runs on. (The
116 ## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
117 ## which is not always adequate.) You can set this value explicitly here.
119 #cambria.api.node.identifier=<use-something-unique-to-this-instance>
121 #cambria.rateLimit.maxEmptyPollsPerMinute=30
122 #cambria.rateLimitActual.delay.ms=10
124 ###############################################################################
128 ## This server can report its metrics periodically on a topic.
130 #metrics.send.cambria.enabled=true
131 #metrics.send.cambria.topic=cambria.apinode.metrics
132 #msgrtr.apinode.metrics.dmaap
133 #metrics.send.cambria.sendEverySeconds=60
135 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
139 ##############################################################################
141 maxcontentlength=10000
144 ##############################################################################
146 msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
147 msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
148 enforced.topic.name.AAF=org.onap.dmaap.mr
150 transidUEBtopicreqd=false
151 defaultNSforUEB=org.onap.dmaap.mr
152 ##############################################################################
155 msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
156 msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
157 msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
158 msgRtr.mirrormaker.timeout=15000
159 msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
160 msgRtr.mirrormaker.consumergroup=mmagentserver
161 msgRtr.mirrormaker.consumerid=1
163 kafka.max.poll.interval.ms=300000
164 kafka.heartbeat.interval.ms=60000
165 kafka.session.timeout.ms=240000
166 kafka.max.poll.records=1000