package jobs
import (
- "encoding/json"
"fmt"
- "os"
"sync"
+ "time"
+ "github.com/confluentinc/confluent-kafka-go/kafka"
log "github.com/sirupsen/logrus"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/config"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/kafkaclient"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/restclient"
)
// TypeData holds the runtime state for one configured info type: its
// identity and the handler that polls and distributes its messages.
type TypeData struct {
	Identity    string `json:"id"`
	jobsHandler *jobsHandler // per-type worker; owns the job registry for this type
}
// JobInfo mirrors an information-job registration received over the REST API.
type JobInfo struct {
	Owner            string     `json:"owner"`
	LastUpdated      string     `json:"last_updated"`
	InfoJobIdentity  string     `json:"info_job_identity"`
	TargetUri        string     `json:"target_uri"` // consumer endpoint messages are POSTed to
	InfoJobData      Parameters `json:"info_job_data"` // optional buffering parameters
	InfoTypeIdentity string     `json:"info_type_identity"`
}
// JobTypesManager handles the type configuration of the producer.
type JobTypesManager interface {
	// LoadTypesFromConfiguration registers the given type definitions and
	// returns the types that were registered.
	LoadTypesFromConfiguration(types []config.TypeDefinition) []config.TypeDefinition
	// GetSupportedTypes returns the identities of the supported types.
	GetSupportedTypes() []string
}
// JobsManager handles the lifecycle of info jobs created through the REST API.
type JobsManager interface {
	AddJobFromRESTCall(JobInfo) error
	DeleteJobFromRESTCall(jobId string)
}
// JobsManagerImpl is the production implementation of JobsManager and
// JobTypesManager.
type JobsManagerImpl struct {
	allTypes         map[string]TypeData // registered types, keyed by type identity
	pollClient       restclient.HTTPClient
	mrAddress        string // base URL of the DMaaP Message Router
	kafkaFactory     kafkaclient.KafkaFactory
	distributeClient restclient.HTTPClient // client used to push messages to consumers
}
-type jobHandler struct {
- mu sync.Mutex
- typeId string
- topicUrl string
- jobs map[string]JobInfo
- addJobCh chan JobInfo
- deleteJobCh chan string
- pollClient restclient.HTTPClient
- distributeClient restclient.HTTPClient
-}
-
-func NewJobsManagerImpl(typeConfigFilePath string, pollClient restclient.HTTPClient, mrAddr string, distributeClient restclient.HTTPClient) *JobsManagerImpl {
+func NewJobsManagerImpl(pollClient restclient.HTTPClient, mrAddr string, kafkaFactory kafkaclient.KafkaFactory, distributeClient restclient.HTTPClient) *JobsManagerImpl {
return &JobsManagerImpl{
- configFile: typeConfigFilePath,
allTypes: make(map[string]TypeData),
pollClient: pollClient,
mrAddress: mrAddr,
+ kafkaFactory: kafkaFactory,
distributeClient: distributeClient,
}
}
-func (jm *JobsManagerImpl) AddJob(ji JobInfo) error {
+func (jm *JobsManagerImpl) AddJobFromRESTCall(ji JobInfo) error {
if err := jm.validateJobInfo(ji); err == nil {
typeData := jm.allTypes[ji.InfoTypeIdentity]
- typeData.jobHandler.addJobCh <- ji
+ typeData.jobsHandler.addJobCh <- ji
log.Debug("Added job: ", ji)
return nil
} else {
}
}
-func (jm *JobsManagerImpl) DeleteJob(jobId string) {
+func (jm *JobsManagerImpl) DeleteJobFromRESTCall(jobId string) {
for _, typeData := range jm.allTypes {
- log.Debugf("Deleting job %v from type %v", jobId, typeData.TypeId)
- typeData.jobHandler.deleteJobCh <- jobId
+ log.Debugf("Deleting job %v from type %v", jobId, typeData.Identity)
+ typeData.jobsHandler.deleteJobCh <- jobId
}
log.Debug("Deleted job: ", jobId)
}
return nil
}
-func (jm *JobsManagerImpl) LoadTypesFromConfiguration() ([]config.TypeDefinition, error) {
- typeDefsByte, err := os.ReadFile(jm.configFile)
- if err != nil {
- return nil, err
- }
- typeDefs := struct {
- Types []config.TypeDefinition `json:"types"`
- }{}
- err = json.Unmarshal(typeDefsByte, &typeDefs)
- if err != nil {
- return nil, err
- }
- for _, typeDef := range typeDefs.Types {
- addCh := make(chan JobInfo)
- deleteCh := make(chan string)
- jh := jobHandler{
- typeId: typeDef.Id,
- topicUrl: typeDef.DmaapTopicURL,
- jobs: make(map[string]JobInfo),
- addJobCh: addCh,
- deleteJobCh: deleteCh,
- pollClient: jm.pollClient,
- distributeClient: jm.distributeClient,
+func (jm *JobsManagerImpl) LoadTypesFromConfiguration(types []config.TypeDefinition) []config.TypeDefinition {
+ for _, typeDef := range types {
+ if typeDef.DMaaPTopicURL == "" && typeDef.KafkaInputTopic == "" {
+ log.Fatal("DMaaPTopicURL or KafkaInputTopic must be defined for type: ", typeDef.Identity)
}
- jm.allTypes[typeDef.Id] = TypeData{
- TypeId: typeDef.Id,
- DMaaPTopicURL: typeDef.DmaapTopicURL,
- jobHandler: &jh,
+ jm.allTypes[typeDef.Identity] = TypeData{
+ Identity: typeDef.Identity,
+ jobsHandler: newJobsHandler(typeDef, jm.mrAddress, jm.kafkaFactory, jm.pollClient, jm.distributeClient),
}
}
- return typeDefs.Types, nil
+ return types
}
// GetSupportedTypes returns the identities of the types this producer
// supports.
func (jm *JobsManagerImpl) GetSupportedTypes() []string {
	// NOTE(review): supportedTypes is not defined in this chunk — presumably a
	// value built from jm.allTypes or a package-level variable; confirm in the
	// full file.
	return supportedTypes
}
-func (jm *JobsManagerImpl) StartJobs() {
+func (jm *JobsManagerImpl) StartJobsForAllTypes() {
for _, jobType := range jm.allTypes {
- go jobType.jobHandler.start(jm.mrAddress)
+ go jobType.jobsHandler.startPollingAndDistribution()
}
}
// jobsHandler owns all jobs registered for one type. It polls the type's
// input source and fans the received messages out to every job's channel.
type jobsHandler struct {
	mu               sync.Mutex // guards jobs
	typeId           string
	pollingAgent     pollingAgent   // DMaaP or Kafka input source
	jobs             map[string]job // active jobs, keyed by job identity
	addJobCh         chan JobInfo   // management: register a job
	deleteJobCh      chan string    // management: remove a job by identity
	distributeClient restclient.HTTPClient
}
+
+func newJobsHandler(typeDef config.TypeDefinition, mRAddress string, kafkaFactory kafkaclient.KafkaFactory, pollClient restclient.HTTPClient, distributeClient restclient.HTTPClient) *jobsHandler {
+ pollingAgent := createPollingAgent(typeDef, mRAddress, pollClient, kafkaFactory, typeDef.KafkaInputTopic)
+ return &jobsHandler{
+ typeId: typeDef.Identity,
+ pollingAgent: pollingAgent,
+ jobs: make(map[string]job),
+ addJobCh: make(chan JobInfo),
+ deleteJobCh: make(chan string),
+ distributeClient: distributeClient,
+ }
+}
+
+func (jh *jobsHandler) startPollingAndDistribution() {
go func() {
for {
- jh.pollAndDistributeMessages(mRAddress)
+ jh.pollAndDistributeMessages()
}
}()
}()
}
-func (jh *jobHandler) pollAndDistributeMessages(mRAddress string) {
- jh.mu.Lock()
- defer jh.mu.Unlock()
+func (jh *jobsHandler) pollAndDistributeMessages() {
log.Debugf("Processing jobs for type: %v", jh.typeId)
- messagesBody, error := restclient.Get(mRAddress+jh.topicUrl, jh.pollClient)
+ messagesBody, error := jh.pollingAgent.pollMessages()
if error != nil {
- log.Warnf("Error getting data from MR. Cause: %v", error)
+ log.Warn("Error getting data from source. Cause: ", error)
+ time.Sleep(time.Minute) // Must wait before trying to call data source again
+ return
}
- log.Debugf("Received messages: %v", string(messagesBody))
jh.distributeMessages(messagesBody)
}
-func (jh *jobHandler) distributeMessages(messages []byte) {
- if len(messages) > 2 {
- for _, jobInfo := range jh.jobs {
- go jh.sendMessagesToConsumer(messages, jobInfo)
+func (jh *jobsHandler) distributeMessages(messages []byte) {
+ if string(messages) != "[]" && len(messages) > 0 { // MR returns an ampty array if there are no messages.
+ log.Debug("Distributing messages: ", string(messages))
+ jh.mu.Lock()
+ defer jh.mu.Unlock()
+ for _, job := range jh.jobs {
+ if len(job.messagesChannel) < cap(job.messagesChannel) {
+ job.messagesChannel <- messages
+ } else {
+ jh.emptyMessagesBuffer(job)
+ }
}
}
}
-func (jh *jobHandler) sendMessagesToConsumer(messages []byte, jobInfo JobInfo) {
- log.Debugf("Processing job: %v", jobInfo.InfoJobIdentity)
- if postErr := restclient.Post(jobInfo.TargetUri, messages, jh.distributeClient); postErr != nil {
- log.Warnf("Error posting data for job: %v. Cause: %v", jobInfo, postErr)
+func (jh *jobsHandler) emptyMessagesBuffer(job job) {
+ log.Debug("Emptying message queue for job: ", job.jobInfo.InfoJobIdentity)
+out:
+ for {
+ select {
+ case <-job.messagesChannel:
+ default:
+ break out
+ }
}
- log.Debugf("Messages distributed to consumer: %v.", jobInfo.Owner)
}
-func (jh *jobHandler) monitorManagementChannels() {
+func (jh *jobsHandler) monitorManagementChannels() {
select {
case addedJob := <-jh.addJobCh:
- jh.mu.Lock()
- log.Debugf("received %v from addJobCh\n", addedJob)
- jh.jobs[addedJob.InfoJobIdentity] = addedJob
- jh.mu.Unlock()
+ jh.addJob(addedJob)
case deletedJob := <-jh.deleteJobCh:
- jh.mu.Lock()
- log.Debugf("received %v from deleteJobCh\n", deletedJob)
+ jh.deleteJob(deletedJob)
+ }
+}
+
+func (jh *jobsHandler) addJob(addedJob JobInfo) {
+ jh.mu.Lock()
+ log.Debug("Add job: ", addedJob)
+ newJob := newJob(addedJob, jh.distributeClient)
+ go newJob.start()
+ jh.jobs[addedJob.InfoJobIdentity] = newJob
+ jh.mu.Unlock()
+}
+
+func (jh *jobsHandler) deleteJob(deletedJob string) {
+ jh.mu.Lock()
+ log.Debug("Delete job: ", deletedJob)
+ j, exist := jh.jobs[deletedJob]
+ if exist {
+ j.controlChannel <- struct{}{}
delete(jh.jobs, deletedJob)
- jh.mu.Unlock()
}
+ jh.mu.Unlock()
+}
+
// pollingAgent abstracts the input source of a type: one poll returns the
// next batch of raw message bytes.
type pollingAgent interface {
	pollMessages() ([]byte, error)
}
+
+func createPollingAgent(typeDef config.TypeDefinition, mRAddress string, pollClient restclient.HTTPClient, kafkaFactory kafkaclient.KafkaFactory, topicID string) pollingAgent {
+ if typeDef.DMaaPTopicURL != "" {
+ return dMaaPPollingAgent{
+ messageRouterURL: mRAddress + typeDef.DMaaPTopicURL,
+ pollClient: pollClient,
+ }
+ } else {
+ return newKafkaPollingAgent(kafkaFactory, typeDef.KafkaInputTopic)
+ }
+}
+
// dMaaPPollingAgent polls a DMaaP Message Router topic over HTTP.
type dMaaPPollingAgent struct {
	messageRouterURL string // full URL of the topic to poll
	pollClient       restclient.HTTPClient
}
+
+func (pa dMaaPPollingAgent) pollMessages() ([]byte, error) {
+ return restclient.Get(pa.messageRouterURL, pa.pollClient)
+}
+
// kafkaPollingAgent polls a Kafka topic through the project's Kafka client.
type kafkaPollingAgent struct {
	kafkaClient kafkaclient.KafkaClient
}
+
+func newKafkaPollingAgent(kafkaFactory kafkaclient.KafkaFactory, topicID string) kafkaPollingAgent {
+ c, err := kafkaclient.NewKafkaClient(kafkaFactory, topicID)
+ if err != nil {
+ log.Fatalf("Cannot create Kafka client for topic: %v, error details: %v\n", topicID, err)
+ }
+ return kafkaPollingAgent{
+ kafkaClient: c,
+ }
+}
+
+func (pa kafkaPollingAgent) pollMessages() ([]byte, error) {
+ msg, err := pa.kafkaClient.ReadMessage()
+ if err == nil {
+ return msg, nil
+ } else {
+ if isKafkaTimedOutError(err) {
+ return []byte(""), nil
+ }
+ return nil, err
+ }
+}
+
+func isKafkaTimedOutError(err error) bool {
+ kafkaErr, ok := err.(kafka.Error)
+ return ok && kafkaErr.Code() == kafka.ErrTimedOut
+}
+
// job is one consumer registration: payloads arriving on messagesChannel are
// POSTed to the consumer's target URI until a stop signal arrives on
// controlChannel (sent by deleteJob).
type job struct {
	jobInfo         JobInfo
	client          restclient.HTTPClient
	messagesChannel chan []byte   // buffered queue of polled payloads
	controlChannel  chan struct{} // stop signal for the read loop
}
+
+func newJob(j JobInfo, c restclient.HTTPClient) job {
+
+ return job{
+ jobInfo: j,
+ client: c,
+ messagesChannel: make(chan []byte, 10),
+ controlChannel: make(chan struct{}),
+ }
+}
+
// Parameters is the job-specific configuration carried in a registration's
// info_job_data field.
type Parameters struct {
	BufferTimeout BufferTimeout `json:"bufferTimeout"`
}
+
// BufferTimeout controls message batching: up to MaxSize payloads are
// combined into one delivery, waiting at most MaxTimeMiliseconds. A zero
// MaxSize disables buffering (see job.start). The "Miliseconds" spelling in
// the JSON key is part of the external API and must not be corrected.
type BufferTimeout struct {
	MaxSize            int   `json:"maxSize"`
	MaxTimeMiliseconds int64 `json:"maxTimeMiliseconds"`
}
+
+func (j *job) start() {
+ if j.jobInfo.InfoJobData.BufferTimeout.MaxSize == 0 {
+ j.startReadingSingleMessages()
+ } else {
+ j.startReadingMessagesBuffered()
+ }
+}
+
+func (j *job) startReadingSingleMessages() {
+out:
+ for {
+ select {
+ case <-j.controlChannel:
+ log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+ break out
+ case msg := <-j.messagesChannel:
+ j.sendMessagesToConsumer(msg)
+ }
+ }
+}
+
+func (j *job) startReadingMessagesBuffered() {
+out:
+ for {
+ select {
+ case <-j.controlChannel:
+ log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+ break out
+ default:
+ msgs := j.read(j.jobInfo.InfoJobData.BufferTimeout)
+ if len(msgs) > 0 {
+ j.sendMessagesToConsumer(msgs)
+ }
+ }
+ }
+}
+
+func (j *job) read(bufferParams BufferTimeout) []byte {
+ wg := sync.WaitGroup{}
+ wg.Add(bufferParams.MaxSize)
+ var msgs []byte
+ c := make(chan struct{})
+ go func() {
+ i := 0
+ out:
+ for {
+ select {
+ case <-c:
+ break out
+ case msg := <-j.messagesChannel:
+ i++
+ msgs = append(msgs, msg...)
+ wg.Done()
+ if i == bufferParams.MaxSize {
+ break out
+ }
+ }
+ }
+ }()
+ j.waitTimeout(&wg, time.Duration(bufferParams.MaxTimeMiliseconds)*time.Millisecond)
+ close(c)
+ return msgs
+}
+
+func (j *job) waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
+ c := make(chan struct{})
+ go func() {
+ defer close(c)
+ wg.Wait()
+ }()
+ select {
+ case <-c:
+ return false // completed normally
+ case <-time.After(timeout):
+ return true // timed out
+ }
+}
+
+func (j *job) sendMessagesToConsumer(messages []byte) {
+ log.Debug("Processing job: ", j.jobInfo.InfoJobIdentity)
+ if postErr := restclient.Post(j.jobInfo.TargetUri, messages, j.client); postErr != nil {
+ log.Warnf("Error posting data for job: %v. Cause: %v", j.jobInfo, postErr)
+ return
+ }
+ log.Debugf("Messages for job: %v distributed to consumer: %v", j.jobInfo.InfoJobIdentity, j.jobInfo.Owner)
}