# ============LICENSE_START=======================================================
# ================================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# Modifications Copyright © 2021-2023 Nordix Foundation
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
###############################################################################
###############################################################################
##
## Items below are passed through to Kafka's producer and consumer
## configurations (after removing "kafka.").
## If you want to change request.required.acks, it can take this one value:
#kafka.request.required.acks=-1
kafka.metadata.broker.list=kafka-1:9092
config.zk.servers=zookeeper-1:2181
consumer.timeout.ms=100
zookeeper.connection.timeout.ms=6000
zookeeper.session.timeout.ms=20000
zookeeper.sync.time.ms=2000
auto.commit.interval.ms=1000
fetch.message.max.bytes=1000000
auto.commit.enable=false

#(backoff*retries > zksessiontimeout)
kafka.rebalance.backoff.ms=10000
kafka.rebalance.max.retries=6
###############################################################################
##
## Some data stored in the config system is sensitive -- API keys and secrets,
## for example. To protect it, we use an encryption layer for this section
## of the config.
##
## The key is a base64-encoded AES key. This must be created/configured for
## each installation.
#cambria.secureConfig.key=
##
## The initialization vector is a 16 byte value specific to the secured store.
## This must be created/configured for each installation.
#cambria.secureConfig.iv=

cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
authentication.adminSecret=fe3cCompound
#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
###############################################################################
##
## Kafka expects live connections from the consumer to the broker, which
## obviously doesn't work over connectionless HTTP requests. The Cambria
## server proxies HTTP requests into Kafka consumer sessions that are kept
## around for later re-use. Not doing so is costly for setup per request,
## which would substantially impact a high volume consumer's performance.
##
## This complicates Cambria server failover, because we often need server
## A to close its connection before server B brings up the replacement.
##
## The consumer cache is normally enabled.
#cambria.consumer.cache.enabled=true

## Cached consumers are cleaned up after a period of disuse. The server inspects
## consumers every sweepFreqSeconds and will clean up any connections that are
## dormant for touchFreqMs.
#cambria.consumer.cache.sweepFreqSeconds=15
cambria.consumer.cache.touchFreqMs=120000
##stickforallconsumerrequests=false
## The cache is managed through ZK. The default value for the ZK connection
## string is the same as config.zk.servers.
#cambria.consumer.cache.zkConnect=${config.zk.servers}

## Shared cache information is associated with this node's name. The default
## name is the hostname plus the HTTP service port this host runs on. (The
## hostname is determined via InetAddress.getLocalHost().getCanonicalHostName(),
## which is not always adequate.) You can set this value explicitly here.
#cambria.api.node.identifier=<use-something-unique-to-this-instance>

#cambria.rateLimit.maxEmptyPollsPerMinute=30
#cambria.rateLimitActual.delay.ms=10

###############################################################################
##
## This server can report its metrics periodically on a topic.
##
#metrics.send.cambria.enabled=true
#metrics.send.cambria.topic=cambria.apinode.metrics
#metrics.send.cambria.sendEverySeconds=60

cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
##############################################################################
maxcontentlength=10000

##############################################################################
kafka.max.poll.interval.ms=300000
kafka.heartbeat.interval.ms=60000
kafka.session.timeout.ms=240000
kafka.max.poll.records=1000