X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?a=blobdiff_plain;f=dmaap-mediator-producer%2Finternal%2Fjobs%2Fjobs.go;h=c84e2773e1c42447270a7fc03c9f3b980b9b2506;hb=4a9589b4743667175a584e628e4fd0f97499482a;hp=09d389166ac3b855f4e2ed4b92e9937e171d8782;hpb=b65d86fc9b02415e1adf2415f8c4a257378e9c09;p=nonrtric.git

diff --git a/dmaap-mediator-producer/internal/jobs/jobs.go b/dmaap-mediator-producer/internal/jobs/jobs.go
index 09d38916..c84e2773 100644
--- a/dmaap-mediator-producer/internal/jobs/jobs.go
+++ b/dmaap-mediator-producer/internal/jobs/jobs.go
@@ -21,65 +21,71 @@ package jobs
 
 import (
-	"encoding/json"
 	"fmt"
-	"os"
+	"strings"
 	"sync"
+	"time"
 
+	"github.com/confluentinc/confluent-kafka-go/kafka"
 	log "github.com/sirupsen/logrus"
+	"oransc.org/nonrtric/dmaapmediatorproducer/internal/config"
+	"oransc.org/nonrtric/dmaapmediatorproducer/internal/kafkaclient"
 	"oransc.org/nonrtric/dmaapmediatorproducer/internal/restclient"
 )
 
-type TypeDefinitions struct {
-	Types []TypeDefinition `json:"types"`
-}
-type TypeDefinition struct {
-	Id            string `json:"id"`
-	DmaapTopicURL string `json:"dmaapTopicUrl"`
-}
-
 type TypeData struct {
-	TypeId        string `json:"id"`
-	DMaaPTopicURL string `json:"dmaapTopicUrl"`
-	Jobs          map[string]JobInfo
+	Identity    string `json:"id"`
+	jobsHandler *jobsHandler
 }
 
+type sourceType string
+
+const dMaaPSource = sourceType("dmaap")
+const kafkaSource = sourceType("kafka")
+
 type JobInfo struct {
-	Owner            string      `json:"owner"`
-	LastUpdated      string      `json:"last_updated"`
-	InfoJobIdentity  string      `json:"info_job_identity"`
-	TargetUri        string      `json:"target_uri"`
-	InfoJobData      interface{} `json:"info_job_data"`
-	InfoTypeIdentity string      `json:"info_type_identity"`
+	Owner            string     `json:"owner"`
+	LastUpdated      string     `json:"last_updated"`
+	InfoJobIdentity  string     `json:"info_job_identity"`
+	TargetUri        string     `json:"target_uri"`
+	InfoJobData      Parameters `json:"info_job_data"`
+	InfoTypeIdentity string     `json:"info_type_identity"`
+	sourceType       sourceType
 }
 
-type JobHandler interface {
-	AddJob(JobInfo) error
+type JobTypesManager interface {
+	LoadTypesFromConfiguration(types []config.TypeDefinition) []config.TypeDefinition
+	GetSupportedTypes() []string
 }
 
-var (
-	mu         sync.Mutex
-	configFile = "configs/type_config.json"
-	Handler    JobHandler
-	allTypes   = make(map[string]TypeData)
-)
-
-func init() {
-	Handler = newJobHandlerImpl()
+type JobsManager interface {
+	AddJobFromRESTCall(JobInfo) error
+	DeleteJobFromRESTCall(jobId string)
 }
 
-type jobHandlerImpl struct{}
+type JobsManagerImpl struct {
+	allTypes         map[string]TypeData
+	pollClient       restclient.HTTPClient
+	mrAddress        string
+	kafkaFactory     kafkaclient.KafkaFactory
+	distributeClient restclient.HTTPClient
+}
 
-func newJobHandlerImpl() *jobHandlerImpl {
-	return &jobHandlerImpl{}
+func NewJobsManagerImpl(pollClient restclient.HTTPClient, mrAddr string, kafkaFactory kafkaclient.KafkaFactory, distributeClient restclient.HTTPClient) *JobsManagerImpl {
+	return &JobsManagerImpl{
+		allTypes:         make(map[string]TypeData),
+		pollClient:       pollClient,
+		mrAddress:        mrAddr,
+		kafkaFactory:     kafkaFactory,
+		distributeClient: distributeClient,
+	}
 }
 
-func (jh *jobHandlerImpl) AddJob(ji JobInfo) error {
-	mu.Lock()
-	defer mu.Unlock()
-	if err := validateJobInfo(ji); err == nil {
-		jobs := allTypes[ji.InfoTypeIdentity].Jobs
-		jobs[ji.InfoJobIdentity] = ji
+func (jm *JobsManagerImpl) AddJobFromRESTCall(ji JobInfo) error {
+	if err := jm.validateJobInfo(ji); err == nil {
+		typeData := jm.allTypes[ji.InfoTypeIdentity]
+		ji.sourceType = typeData.jobsHandler.sourceType
+		typeData.jobsHandler.addJobCh <- ji
 		log.Debug("Added job: ", ji)
 		return nil
 	} else {
@@ -87,8 +93,16 @@ func (jh *jobHandlerImpl) AddJob(ji JobInfo) error {
 	}
 }
 
-func validateJobInfo(ji JobInfo) error {
-	if _, ok := allTypes[ji.InfoTypeIdentity]; !ok {
+func (jm *JobsManagerImpl) DeleteJobFromRESTCall(jobId string) {
+	for _, typeData := range jm.allTypes {
+		log.Debugf("Deleting job %v from type %v", jobId, typeData.Identity)
+		typeData.jobsHandler.deleteJobCh <- jobId
+	}
+	log.Debug("Deleted job: ", jobId)
+}
+
+func (jm *JobsManagerImpl) validateJobInfo(ji JobInfo) error {
+	if _, ok := jm.allTypes[ji.InfoTypeIdentity]; !ok {
 		return fmt.Errorf("type not supported: %v", ji.InfoTypeIdentity)
 	}
 	if ji.InfoJobIdentity == "" {
@@ -101,82 +115,347 @@ func validateJobInfo(ji JobInfo) error {
 	return nil
 }
 
-func GetTypes() ([]TypeData, error) {
-	mu.Lock()
-	defer mu.Unlock()
-	types := make([]TypeData, 0, 1)
-	typeDefsByte, err := os.ReadFile(configFile)
-	if err != nil {
-		return nil, err
-	}
-	typeDefs := TypeDefinitions{}
-	err = json.Unmarshal(typeDefsByte, &typeDefs)
-	if err != nil {
-		return nil, err
-	}
-	for _, typeDef := range typeDefs.Types {
-		typeInfo := TypeData{
-			TypeId:        typeDef.Id,
-			DMaaPTopicURL: typeDef.DmaapTopicURL,
-			Jobs:          make(map[string]JobInfo),
+func (jm *JobsManagerImpl) LoadTypesFromConfiguration(types []config.TypeDefinition) []config.TypeDefinition {
+	for _, typeDef := range types {
+		if typeDef.DMaaPTopicURL == "" && typeDef.KafkaInputTopic == "" {
+			log.Fatal("DMaaPTopicURL or KafkaInputTopic must be defined for type: ", typeDef.Identity)
 		}
-		if _, ok := allTypes[typeInfo.TypeId]; !ok {
-			allTypes[typeInfo.TypeId] = typeInfo
+		jm.allTypes[typeDef.Identity] = TypeData{
+			Identity:    typeDef.Identity,
+			jobsHandler: newJobsHandler(typeDef, jm.mrAddress, jm.kafkaFactory, jm.pollClient, jm.distributeClient),
 		}
-		types = append(types, typeInfo)
 	}
-	return types, nil
+	return types
 }
 
-func GetSupportedTypes() []string {
-	mu.Lock()
-	defer mu.Unlock()
+func (jm *JobsManagerImpl) GetSupportedTypes() []string {
 	supportedTypes := []string{}
-	for k := range allTypes {
+	for k := range jm.allTypes {
 		supportedTypes = append(supportedTypes, k)
 	}
 	return supportedTypes
 }
 
-func AddJob(job JobInfo) error {
-	return Handler.AddJob(job)
+func (jm *JobsManagerImpl) StartJobsForAllTypes() {
+	for _, jobType := range jm.allTypes {
+		go jobType.jobsHandler.startPollingAndDistribution()
+	}
+}
+
+type jobsHandler struct {
+	mu               sync.Mutex
+	typeId           string
+	sourceType       sourceType
+	pollingAgent     pollingAgent
+	jobs             map[string]job
+	addJobCh         chan JobInfo
+	deleteJobCh      chan string
+	distributeClient restclient.HTTPClient
+}
+
+func newJobsHandler(typeDef config.TypeDefinition, mRAddress string, kafkaFactory kafkaclient.KafkaFactory, pollClient restclient.HTTPClient, distributeClient restclient.HTTPClient) *jobsHandler {
+	pollingAgent := createPollingAgent(typeDef, mRAddress, pollClient, kafkaFactory, typeDef.KafkaInputTopic)
+	sourceType := kafkaSource
+	if typeDef.DMaaPTopicURL != "" {
+		sourceType = dMaaPSource
+	}
+	return &jobsHandler{
+		typeId:           typeDef.Identity,
+		sourceType:       sourceType,
+		pollingAgent:     pollingAgent,
+		jobs:             make(map[string]job),
+		addJobCh:         make(chan JobInfo),
+		deleteJobCh:      make(chan string),
+		distributeClient: distributeClient,
+	}
+}
+
+func (jh *jobsHandler) startPollingAndDistribution() {
+	go func() {
+		for {
+			jh.pollAndDistributeMessages()
+		}
+	}()
+
+	go func() {
+		for {
+			jh.monitorManagementChannels()
+		}
+	}()
+}
+
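+// pollAndDistributeMessages performs one poll of this type's data source and
+// fans the result out to all registered jobs; on error it backs off for one
+// minute before the caller's loop polls again.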
+func (jh *jobsHandler) pollAndDistributeMessages() {
+	log.Debugf("Processing jobs for type: %v", jh.typeId)
+	messagesBody, err := jh.pollingAgent.pollMessages()
+	if err != nil {
+		log.Warn("Error getting data from source. Cause: ", err)
+		time.Sleep(time.Minute) // Must wait before trying to call the data source again
+		return
+	}
+	jh.distributeMessages(messagesBody)
+}
+
+func (jh *jobsHandler) distributeMessages(messages []byte) {
+	if string(messages) != "[]" && len(messages) > 0 { // MR returns an empty array if there are no messages.
+		log.Debug("Distributing messages: ", string(messages))
+		jh.mu.Lock()
+		defer jh.mu.Unlock()
+		for _, job := range jh.jobs {
+			if len(job.messagesChannel) < cap(job.messagesChannel) {
+				job.messagesChannel <- messages
+			} else {
+				jh.emptyMessagesBuffer(job)
+			}
+		}
+	}
+}
+
+func (jh *jobsHandler) emptyMessagesBuffer(job job) {
+	log.Debug("Emptying message queue for job: ", job.jobInfo.InfoJobIdentity)
+out:
+	for {
+		select {
+		case <-job.messagesChannel:
+		default:
+			break out
+		}
+	}
+}
+
+func (jh *jobsHandler) monitorManagementChannels() {
+	select {
+	case addedJob := <-jh.addJobCh:
+		jh.addJob(addedJob)
+	case deletedJob := <-jh.deleteJobCh:
+		jh.deleteJob(deletedJob)
+	}
+}
+
+func (jh *jobsHandler) addJob(addedJob JobInfo) {
+	jh.mu.Lock()
+	log.Debug("Add job: ", addedJob)
+	newJob := newJob(addedJob, jh.distributeClient)
+	go newJob.start()
+	jh.jobs[addedJob.InfoJobIdentity] = newJob
+	jh.mu.Unlock()
+}
+
+func (jh *jobsHandler) deleteJob(deletedJob string) {
+	jh.mu.Lock()
+	log.Debug("Delete job: ", deletedJob)
+	j, exist := jh.jobs[deletedJob]
+	if exist {
+		j.controlChannel <- struct{}{}
+		delete(jh.jobs, deletedJob)
+	}
+	jh.mu.Unlock()
+}
+
+type pollingAgent interface {
+	pollMessages() ([]byte, error)
+}
+
+func createPollingAgent(typeDef config.TypeDefinition, mRAddress string, pollClient restclient.HTTPClient, kafkaFactory kafkaclient.KafkaFactory, topicID string) pollingAgent {
+	if typeDef.DMaaPTopicURL != "" {
+		return dMaaPPollingAgent{
+			messageRouterURL: mRAddress + typeDef.DMaaPTopicURL,
+			pollClient:       pollClient,
+		}
+	} else {
+		return newKafkaPollingAgent(kafkaFactory, typeDef.KafkaInputTopic)
+	}
+}
+
+type dMaaPPollingAgent struct {
+	messageRouterURL string
+	pollClient       restclient.HTTPClient
+}
+
+func (pa dMaaPPollingAgent) pollMessages() ([]byte, error) {
+	return restclient.Get(pa.messageRouterURL, pa.pollClient)
+}
+
+type kafkaPollingAgent struct {
+	kafkaClient kafkaclient.KafkaClient
+}
+
+func newKafkaPollingAgent(kafkaFactory kafkaclient.KafkaFactory, topicID string) kafkaPollingAgent {
+	c, err := kafkaclient.NewKafkaClient(kafkaFactory, topicID)
+	if err != nil {
+		log.Fatalf("Cannot create Kafka client for topic: %v, error details: %v\n", topicID, err)
+	}
+	return kafkaPollingAgent{
+		kafkaClient: c,
+	}
+}
+
+func (pa kafkaPollingAgent) pollMessages() ([]byte, error) {
+	msg, err := pa.kafkaClient.ReadMessage()
+	if err == nil {
+		return msg, nil
+	} else {
+		if isKafkaTimedOutError(err) {
+			return []byte(""), nil
+		}
+		return nil, err
+	}
+}
+
+func isKafkaTimedOutError(err error) bool {
+	kafkaErr, ok := err.(kafka.Error)
+	return ok && kafkaErr.Code() == kafka.ErrTimedOut
+}
+
+type job struct {
+	jobInfo         JobInfo
+	client          restclient.HTTPClient
+	messagesChannel chan []byte
+	controlChannel  chan struct{}
 }
 
-func RunJobs(mRAddress string) {
+func newJob(j JobInfo, c restclient.HTTPClient) job {
+	return job{
+		jobInfo:         j,
+		client:          c,
+		messagesChannel: make(chan []byte, 10),
+		controlChannel:  make(chan struct{}),
+	}
+}
+
+type Parameters struct {
+	BufferTimeout BufferTimeout `json:"bufferTimeout"`
+}
+
+type BufferTimeout struct {
+	MaxSize            int   `json:"maxSize"`
+	MaxTimeMiliseconds int64 `json:"maxTimeMiliseconds"`
+}
+
+func (j *job) start() {
+	if j.isJobBuffered() {
+		j.startReadingMessagesBuffered()
+	} else {
+		j.startReadingSingleMessages()
+	}
+}
+
+func (j *job) startReadingSingleMessages() {
+out:
 	for {
-		pollAndDistributeMessages(mRAddress)
+		select {
+		case <-j.controlChannel:
+			log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+			break out
+		case msg := <-j.messagesChannel:
+			j.sendMessagesToConsumer(msg)
+		}
 	}
 }
 
-func pollAndDistributeMessages(mRAddress string) {
-	for typeId, typeInfo := range allTypes {
-		log.Debugf("Processing jobs for type: %v", typeId)
-		messagesBody, error := restclient.Get(fmt.Sprintf("%v/%v", mRAddress, typeInfo.DMaaPTopicURL))
-		if error != nil {
-			log.Warnf("Error getting data from MR. Cause: %v", error)
-			continue
+func (j *job) startReadingMessagesBuffered() {
+out:
+	for {
+		select {
+		case <-j.controlChannel:
+			log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+			break out
+		default:
+			msgs := j.read(j.jobInfo.InfoJobData.BufferTimeout)
+			if len(msgs) > 0 {
+				j.sendMessagesToConsumer(msgs)
+			}
 		}
-		distributeMessages(messagesBody, typeInfo)
 	}
 }
 
-func distributeMessages(messages []byte, typeInfo TypeData) {
-	if len(messages) > 2 {
-		mu.Lock()
-		for _, jobInfo := range typeInfo.Jobs {
-			go sendMessagesToConsumer(messages, jobInfo)
+func (j *job) read(bufferParams BufferTimeout) []byte {
+	wg := sync.WaitGroup{}
+	wg.Add(bufferParams.MaxSize)
+	rawMsgs := make([][]byte, 0, bufferParams.MaxSize)
+	c := make(chan struct{})
+	go func() {
+		i := 0
+	out:
+		for {
+			select {
+			case <-c:
+				break out
+			case msg := <-j.messagesChannel:
+				rawMsgs = append(rawMsgs, msg)
+				i++
+				wg.Done()
+				if i == bufferParams.MaxSize {
+					break out
+				}
+			}
 		}
-		mu.Unlock()
+	}()
+	j.waitTimeout(&wg, time.Duration(bufferParams.MaxTimeMiliseconds)*time.Millisecond)
+	close(c)
+	return getAsJSONArray(rawMsgs)
+}
+
+func getAsJSONArray(rawMsgs [][]byte) []byte {
+	if len(rawMsgs) == 0 {
+		return []byte("")
+	}
+	strings := ""
+	for i := 0; i < len(rawMsgs); i++ {
+		strings = strings + makeIntoString(rawMsgs[i])
+		strings = addSeparatorIfNeeded(strings, i, len(rawMsgs))
 	}
+	return []byte(wrapInJSONArray(strings))
 }
 
-func sendMessagesToConsumer(messages []byte, jobInfo JobInfo) {
-	log.Debugf("Processing job: %v", jobInfo.InfoJobIdentity)
-	if postErr := restclient.Post(jobInfo.TargetUri, messages); postErr != nil {
-		log.Warnf("Error posting data for job: %v. Cause: %v", jobInfo, postErr)
+func makeIntoString(rawMsg []byte) string {
+	return `"` + strings.ReplaceAll(string(rawMsg), "\"", "\\\"") + `"`
+}
+
+func addSeparatorIfNeeded(strings string, position, length int) string {
+	if position < length-1 {
+		strings = strings + ","
+	}
+	return strings
+}
+
+func wrapInJSONArray(strings string) string {
+	return "[" + strings + "]"
+}
+
+func (j *job) waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
+	c := make(chan struct{})
+	go func() {
+		defer close(c)
+		wg.Wait()
+	}()
+	select {
+	case <-c:
+		return false // completed normally
+	case <-time.After(timeout):
+		return true // timed out
 	}
 }
 
-func clearAll() {
-	allTypes = make(map[string]TypeData)
+func (j *job) sendMessagesToConsumer(messages []byte) {
+	log.Debug("Processing job: ", j.jobInfo.InfoJobIdentity)
+	contentType := restclient.ContentTypeJSON
+	if j.isJobKafka() && !j.isJobBuffered() {
+		contentType = restclient.ContentTypePlain
+	}
+	if postErr := restclient.Post(j.jobInfo.TargetUri, messages, contentType, j.client); postErr != nil {
+		log.Warnf("Error posting data for job: %v. Cause: %v", j.jobInfo, postErr)
+		return
+	}
+	log.Debugf("Messages for job: %v distributed to consumer: %v", j.jobInfo.InfoJobIdentity, j.jobInfo.Owner)
+}
+
+func (j *job) isJobBuffered() bool {
+	return j.jobInfo.InfoJobData.BufferTimeout.MaxSize > 0 && j.jobInfo.InfoJobData.BufferTimeout.MaxTimeMiliseconds > 0
+}
+
+func (j *job) isJobKafka() bool {
+	return j.jobInfo.sourceType == kafkaSource
 }
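
The buffered path added above collects messages per job until either BufferTimeout.MaxSize messages have arrived or MaxTimeMiliseconds has elapsed, then posts the batch to the consumer as one JSON array. Below is a minimal standalone sketch of the same collect-or-time-out semantics; the names are hypothetical, and it uses a single-goroutine deadline loop rather than the patch's WaitGroup-plus-waitTimeout construction.

package main

import (
	"fmt"
	"time"
)

// collect drains up to maxSize messages from ch, waiting at most timeout in
// total. It returns early once the batch is full, and returns whatever
// partial batch has accumulated when the deadline expires.
func collect(ch <-chan []byte, maxSize int, timeout time.Duration) [][]byte {
	msgs := make([][]byte, 0, maxSize)
	deadline := time.After(timeout)
	for len(msgs) < maxSize {
		select {
		case m := <-ch:
			msgs = append(msgs, m)
		case <-deadline:
			return msgs // timed out with a partial batch
		}
	}
	return msgs // batch full
}

func main() {
	ch := make(chan []byte, 10)
	ch <- []byte(`{"seq":1}`)
	ch <- []byte(`{"seq":2}`)
	// Only two messages are available, so this waits out the 100 ms
	// deadline and returns a partial batch of two.
	for _, m := range collect(ch, 5, 100*time.Millisecond) {
		fmt.Println(string(m))
	}
}

With MaxSize 5 and two queued messages, the patch's read behaves the same way: waitTimeout expires after MaxTimeMiliseconds, the collector goroutine is stopped via close(c), and the partial batch is handed to getAsJSONArray, which renders it as ["{\"seq\":1}","{\"seq\":2}"].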