+ jh.mu.Unlock()
+}
+
// pollingAgent abstracts the source a job's input messages are polled from:
// either DMaaP Message Router over HTTP or a Kafka topic.
type pollingAgent interface {
	// pollMessages fetches the next chunk of messages as a raw byte payload.
	pollMessages() ([]byte, error)
}
+
+func createPollingAgent(typeDef config.TypeDefinition, mRAddress string, pollClient restclient.HTTPClient, kafkaFactory kafkaclient.KafkaFactory, topicID string) pollingAgent {
+ if typeDef.DMaaPTopicURL != "" {
+ return dMaaPPollingAgent{
+ messageRouterURL: mRAddress + typeDef.DMaaPTopicURL,
+ pollClient: pollClient,
+ }
+ } else {
+ return newKafkaPollingAgent(kafkaFactory, typeDef.KafkaInputTopic)
+ }
+}
+
// dMaaPPollingAgent polls messages from DMaaP Message Router over HTTP.
type dMaaPPollingAgent struct {
	messageRouterURL string                // full URL of the topic to GET messages from
	pollClient       restclient.HTTPClient // HTTP client used for the polling requests
}
+
+func (pa dMaaPPollingAgent) pollMessages() ([]byte, error) {
+ return restclient.Get(pa.messageRouterURL, pa.pollClient)
+}
+
// kafkaPollingAgent polls messages from a Kafka topic via a KafkaClient.
type kafkaPollingAgent struct {
	kafkaClient kafkaclient.KafkaClient
}
+
+func newKafkaPollingAgent(kafkaFactory kafkaclient.KafkaFactory, topicID string) kafkaPollingAgent {
+ c, err := kafkaclient.NewKafkaClient(kafkaFactory, topicID)
+ if err != nil {
+ log.Fatalf("Cannot create Kafka client for topic: %v, error details: %v\n", topicID, err)
+ }
+ return kafkaPollingAgent{
+ kafkaClient: c,
+ }
+}
+
+func (pa kafkaPollingAgent) pollMessages() ([]byte, error) {
+ msg, err := pa.kafkaClient.ReadMessage()
+ if err == nil {
+ return msg, nil
+ } else {
+ if isKafkaTimedOutError(err) {
+ return []byte(""), nil
+ }
+ return nil, err
+ }
+}
+
+func isKafkaTimedOutError(err error) bool {
+ kafkaErr, ok := err.(kafka.Error)
+ return ok && kafkaErr.Code() == kafka.ErrTimedOut
+}
+
// job distributes incoming messages to one consumer. Messages are pushed into
// messagesChannel by the producer side and forwarded to the consumer's target
// URI by the job's distribution loop.
type job struct {
	jobInfo         JobInfo
	client          restclient.HTTPClient // HTTP client used to POST messages to the consumer
	messagesChannel chan []byte           // incoming messages; buffered (capacity 10, see newJob)
	controlChannel  chan struct{}         // a send on this channel stops the distribution loop
}
+
+func newJob(j JobInfo, c restclient.HTTPClient) job {
+
+ return job{
+ jobInfo: j,
+ client: c,
+ messagesChannel: make(chan []byte, 10),
+ controlChannel: make(chan struct{}),
+ }
+}
+
// Parameters holds the optional per-job configuration carried in the job's
// info-job data.
type Parameters struct {
	BufferTimeout BufferTimeout `json:"bufferTimeout"`
} // @name Parameters
+
// BufferTimeout controls message buffering: messages are accumulated until
// MaxSize messages have arrived or MaxTimeMiliseconds has elapsed, whichever
// comes first. Buffering is enabled only when both values are positive.
// NOTE(review): "Miliseconds" is misspelled, but the spelling is part of the
// public JSON contract and must be kept as-is.
type BufferTimeout struct {
	MaxSize            int   `json:"maxSize"`
	MaxTimeMiliseconds int64 `json:"maxTimeMiliseconds"`
} // @name BufferTimeout
+
+func (j *job) start() {
+ if j.isJobBuffered() {
+ j.startReadingMessagesBuffered()
+ } else {
+ j.startReadingSingleMessages()
+ }
+}
+
+func (j *job) startReadingSingleMessages() {
+out:
+ for {
+ select {
+ case <-j.controlChannel:
+ log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+ break out
+ case msg := <-j.messagesChannel:
+ j.sendMessagesToConsumer(msg)
+ }
+ }
+}
+
+func (j *job) startReadingMessagesBuffered() {
+out:
+ for {
+ select {
+ case <-j.controlChannel:
+ log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+ break out
+ default:
+ msgs := j.read(j.jobInfo.InfoJobData.BufferTimeout)
+ if len(msgs) > 0 {
+ j.sendMessagesToConsumer(msgs)
+ }
+ }
+ }
+}
+
+func (j *job) read(bufferParams BufferTimeout) []byte {
+ wg := sync.WaitGroup{}
+ wg.Add(bufferParams.MaxSize)
+ rawMsgs := make([][]byte, 0, bufferParams.MaxSize)
+ c := make(chan struct{})
+ go func() {
+ i := 0
+ out:
+ for {
+ select {
+ case <-c:
+ break out
+ case msg := <-j.messagesChannel:
+ rawMsgs = append(rawMsgs, msg)
+ i++
+ wg.Done()
+ if i == bufferParams.MaxSize {
+ break out
+ }
+ }
+ }
+ }()
+ j.waitTimeout(&wg, time.Duration(bufferParams.MaxTimeMiliseconds)*time.Millisecond)
+ close(c)
+ return getAsJSONArray(rawMsgs)
+}
+
// getAsJSONArray encodes the raw messages as a JSON array of strings, with
// embedded double quotes escaped. An empty input yields an empty (non-JSON)
// byte slice, which callers use as the "nothing to send" marker.
//
// Fix: the previous version built the result with string concatenation in a
// loop (quadratic); this version pre-sizes a slice and joins once.
func getAsJSONArray(rawMsgs [][]byte) []byte {
	if len(rawMsgs) == 0 {
		return []byte("")
	}
	quoted := make([]string, 0, len(rawMsgs))
	for _, rawMsg := range rawMsgs {
		// Same escaping as before: only double quotes are escaped.
		quoted = append(quoted, `"`+strings.ReplaceAll(string(rawMsg), `"`, `\"`)+`"`)
	}
	return []byte("[" + strings.Join(quoted, ",") + "]")
}
+
// makeIntoString turns a raw message into a double-quoted JSON string token,
// escaping any embedded double quotes.
func makeIntoString(rawMsg []byte) string {
	escaped := strings.ReplaceAll(string(rawMsg), `"`, `\"`)
	return `"` + escaped + `"`
}
+
// addSeparatorIfNeeded appends a comma to s unless the element at position is
// the last one of length elements.
func addSeparatorIfNeeded(s string, position, length int) string {
	if position >= length-1 {
		return s
	}
	return s + ","
}
+
// wrapInJSONArray surrounds the given comma-separated elements with JSON
// array brackets.
func wrapInJSONArray(elements string) string {
	return "[" + elements + "]"
}
+
// waitTimeout waits until the WaitGroup completes or the timeout elapses,
// whichever comes first. It returns false when the WaitGroup finished
// normally and true when the timeout fired.
//
// NOTE(review): if the timeout fires and wg never subsequently reaches zero,
// the helper goroutine below stays blocked in wg.Wait() forever — a goroutine
// leak. Callers must guarantee the WaitGroup is eventually fully Done()-ed.
func (j *job) waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	c := make(chan struct{})
	go func() {
		defer close(c)
		wg.Wait()
	}()
	select {
	case <-c:
		return false // completed normally
	case <-time.After(timeout):
		return true // timed out
	}
}
+
+func (j *job) sendMessagesToConsumer(messages []byte) {
+ log.Debug("Processing job: ", j.jobInfo.InfoJobIdentity)
+ contentType := restclient.ContentTypeJSON
+ if j.isJobKafka() && !j.isJobBuffered() {
+ contentType = restclient.ContentTypePlain
+ }
+ if postErr := restclient.Post(j.jobInfo.TargetUri, messages, contentType, j.client); postErr != nil {
+ log.Warnf("Error posting data for job: %v. Cause: %v", j.jobInfo, postErr)
+ return
+ }
+ log.Debugf("Messages for job: %v distributed to consumer: %v", j.jobInfo.InfoJobIdentity, j.jobInfo.Owner)
+}
+
+func (j *job) isJobBuffered() bool {
+ return j.jobInfo.InfoJobData.BufferTimeout.MaxSize > 0 && j.jobInfo.InfoJobData.BufferTimeout.MaxTimeMiliseconds > 0
+}
+
+func (j *job) isJobKafka() bool {
+ return j.jobInfo.sourceType == kafkaSource