+ kafkaTypeDef := config.TypeDefinition{
+ Identity: "type2",
+ KafkaInputTopic: "topic",
+ }
+ kafkaFactoryMock := mocks.KafkaFactory{}
+ kafkaConsumerMock := mocks.KafkaConsumer{}
+ kafkaConsumerMock.On("Commit").Return([]kafka.TopicPartition{}, error(nil))
+ kafkaConsumerMock.On("Subscribe", mock.Anything).Return(error(nil))
+ kafkaConsumerMock.On("ReadMessage", mock.Anything).Return(&kafka.Message{
+ Value: []byte(kafkaMessages),
+ }, error(nil)).Once()
+ kafkaConsumerMock.On("ReadMessage", mock.Anything).Return(nil, fmt.Errorf("Just to stop"))
+ kafkaFactoryMock.On("NewKafkaConsumer", mock.Anything).Return(kafkaConsumerMock, nil)
+ kafkaJobsHandler := newJobsHandler(kafkaTypeDef, "", kafkaFactoryMock, nil, distributeClientMock)
+
+ jobsManager := NewJobsManagerImpl(nil, "", kafkaFactoryMock, distributeClientMock)
+ jobsManager.allTypes["type2"] = TypeData{
+ Identity: "type2",
+ jobsHandler: kafkaJobsHandler,
+ }
+
+ jobsManager.StartJobsForAllTypes()
+
+ kafkaJobInfo := JobInfo{
+ InfoTypeIdentity: "type2",
+ InfoJobIdentity: "job2",
+ TargetUri: "http://consumerHost/kafkatarget",
+ }
+
+ wg.Add(1) // Wait till the distribution has happened
+ err := jobsManager.AddJobFromRESTCall(kafkaJobInfo)
+ assertions.Nil(err)
+
+ if waitTimeout(&wg, 2*time.Second) {
+ t.Error("Not all calls to server were made")
+ t.Fail()
+ }
+}
+
+// TestJobsHandlerDeleteJob_shouldDeleteJobFromJobsMap verifies that sending a
+// job id on deleteJobCh makes the handler's management goroutine remove the
+// job from its jobs map.
+func TestJobsHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) {
+	jobUnderTest := newJob(JobInfo{}, nil)
+	go jobUnderTest.start()
+
+	typeDefinition := config.TypeDefinition{
+		Identity:      "type1",
+		DMaaPTopicURL: "/topicUrl",
+	}
+	handlerUnderTest := newJobsHandler(typeDefinition, "http://mrAddr", kafkaclient.KafkaFactoryImpl{}, nil, nil)
+	handlerUnderTest.jobs["job1"] = jobUnderTest
+
+	go handlerUnderTest.monitorManagementChannels()
+
+	handlerUnderTest.deleteJobCh <- "job1"
+
+	// Poll until the handler goroutine has emptied the map, giving up after
+	// 100 attempts. NOTE(review): this reads the jobs map concurrently with
+	// the handler goroutine — presumably safe only if access is synchronized
+	// inside jobsHandler; confirm with the implementation.
+	removed := false
+	for attempt := 0; attempt < 100 && !removed; attempt++ {
+		if len(handlerUnderTest.jobs) == 0 {
+			removed = true
+		} else {
+			time.Sleep(time.Microsecond) // Yield so the handler goroutine can run
+		}
+	}
+	require.New(t).True(removed, "Job not deleted")
+}
+
+// TestJobsHandlerEmptyJobMessageBufferWhenItIsFull verifies that distributing
+// a message to a job whose buffer is already full drains the buffer instead
+// of blocking or piling up messages.
+func TestJobsHandlerEmptyJobMessageBufferWhenItIsFull(t *testing.T) {
+	jobUnderTest := newJob(JobInfo{
+		InfoJobIdentity: "job",
+	}, nil)
+
+	typeDefinition := config.TypeDefinition{
+		Identity:      "type1",
+		DMaaPTopicURL: "/topicUrl",
+	}
+	handlerUnderTest := newJobsHandler(typeDefinition, "http://mrAddr", kafkaclient.KafkaFactoryImpl{}, nil, nil)
+	handlerUnderTest.jobs["job1"] = jobUnderTest
+
+	// Saturate the job's buffer so the next distribution cannot enqueue.
+	fillMessagesBuffer(jobUnderTest.messagesChannel)
+
+	handlerUnderTest.distributeMessages([]byte("sent msg"))
+
+	// The full buffer is expected to have been emptied.
+	require.New(t).Len(jobUnderTest.messagesChannel, 0)
+}
+
+// TestKafkaPollingAgentTimedOut_shouldResultInEMptyMessages verifies that a
+// Kafka read timeout is treated as "no data" rather than as an error.
+// NOTE(review): "EMpty" in the test name looks like a typo for "Empty".
+func TestKafkaPollingAgentTimedOut_shouldResultInEMptyMessages(t *testing.T) {
+	assertions := require.New(t)
+
+	factoryMock := mocks.KafkaFactory{}
+	consumerMock := mocks.KafkaConsumer{}
+	consumerMock.On("Commit").Return([]kafka.TopicPartition{}, error(nil))
+	consumerMock.On("Subscribe", mock.Anything).Return(error(nil))
+	// Simulate a poll that times out without delivering any message.
+	consumerMock.On("ReadMessage", mock.Anything).Return(nil, kafka.NewError(kafka.ErrTimedOut, "", false))
+	factoryMock.On("NewKafkaConsumer", mock.Anything).Return(consumerMock, nil)
+
+	agentUnderTest := newKafkaPollingAgent(factoryMock, "")
+	messages, err := agentUnderTest.pollMessages()
+
+	// A timeout yields an empty payload and no error.
+	assertions.Equal([]byte(""), messages)
+	assertions.Nil(err)
+}
+
+func TestJobWithoutParameters_shouldSendOneMessageAtATime(t *testing.T) {
+ assertions := require.New(t)
+