2023-10-04 20:58:42 +00:00
|
|
|
package ctrl
|
2021-02-24 09:58:02 +00:00
|
|
|
|
|
|
|
import (
|
2021-04-07 16:54:08 +00:00
|
|
|
"context"
|
2021-02-24 09:58:02 +00:00
|
|
|
"fmt"
|
2021-03-12 11:08:11 +00:00
|
|
|
"log"
|
2021-08-11 08:11:57 +00:00
|
|
|
"sync"
|
2021-03-09 10:58:50 +00:00
|
|
|
"time"
|
2021-03-04 15:27:55 +00:00
|
|
|
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2021-02-24 09:58:02 +00:00
|
|
|
)
|
|
|
|
|
2021-08-11 08:11:57 +00:00
|
|
|
// processes holds all the information about running processes
type processes struct {
	// mu guards the mutable bookkeeping fields of processes
	// (e.g. lastProcessID).
	mu sync.Mutex
	// The main context for subscriber processes.
	ctx context.Context
	// cancel func to send cancel signal to the subscriber processes context.
	cancel context.CancelFunc
	// Reference to the owning server instance; gives the spawned
	// processes access to server-wide resources.
	server *server
	// The active spawned processes, keyed by their process name.
	// procsMap carries its own mutex for map access.
	active procsMap
	// The last processID created.
	lastProcessID int
	// The instance global prometheus registry.
	metrics *metrics
	// Waitgroup to keep track of all the processes started.
	wg sync.WaitGroup
	// errorKernel used for sending and logging errors.
	errorKernel *errorKernel
	// The server's configuration.
	configuration *Configuration

	// nodeAuth holds the signing keys (signatures) for this node.
	nodeAuth *nodeAuth
}
|
|
|
|
|
|
|
|
// newProcesses will prepare and return a *processes which
|
|
|
|
// is map containing all the currently running processes.
|
2022-04-01 05:09:55 +00:00
|
|
|
func newProcesses(ctx context.Context, server *server) *processes {
|
2021-08-11 08:11:57 +00:00
|
|
|
p := processes{
|
2022-04-01 05:09:55 +00:00
|
|
|
server: server,
|
2022-02-02 07:54:36 +00:00
|
|
|
active: *newProcsMap(),
|
2022-04-01 05:09:55 +00:00
|
|
|
errorKernel: server.errorKernel,
|
|
|
|
configuration: server.configuration,
|
2022-04-21 11:21:36 +00:00
|
|
|
nodeAuth: server.nodeAuth,
|
2022-04-01 06:51:14 +00:00
|
|
|
metrics: server.metrics,
|
2021-08-11 08:11:57 +00:00
|
|
|
}
|
|
|
|
|
2021-08-16 11:01:12 +00:00
|
|
|
// Prepare the parent context for the subscribers.
|
2021-08-11 10:23:37 +00:00
|
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
|
|
|
2021-10-08 11:42:19 +00:00
|
|
|
// // Start the processes map.
|
|
|
|
// go func() {
|
|
|
|
// p.active.run(ctx)
|
|
|
|
// }()
|
2021-10-08 10:07:10 +00:00
|
|
|
|
2021-08-11 10:23:37 +00:00
|
|
|
p.ctx = ctx
|
|
|
|
p.cancel = cancel
|
|
|
|
|
2021-08-11 08:11:57 +00:00
|
|
|
return &p
|
|
|
|
}
|
|
|
|
|
2021-10-08 06:16:12 +00:00
|
|
|
// ----------------------
|
|
|
|
|
2022-02-02 07:54:36 +00:00
|
|
|
// ----------------------
|
|
|
|
|
2021-10-08 06:16:12 +00:00
|
|
|
// procsMap is a mutex-protected map holding all the currently
// running processes, keyed by their processName.
type procsMap struct {
	// procNames maps a process' name to the process itself.
	procNames map[processName]process
	// mu guards all access to procNames.
	mu sync.Mutex
}
|
|
|
|
|
|
|
|
func newProcsMap() *procsMap {
|
|
|
|
cM := procsMap{
|
2021-11-16 18:07:24 +00:00
|
|
|
procNames: make(map[processName]process),
|
2021-10-08 06:16:12 +00:00
|
|
|
}
|
|
|
|
return &cM
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------
|
|
|
|
|
2021-08-11 08:11:57 +00:00
|
|
|
// Start all the subscriber processes.
|
|
|
|
// Takes an initial process as it's input. All processes
|
|
|
|
// will be tied to this single process's context.
|
|
|
|
func (p *processes) Start(proc process) {
|
2021-08-11 10:23:37 +00:00
|
|
|
// Set the context for the initial process.
|
|
|
|
proc.ctx = p.ctx
|
2021-03-26 08:08:47 +00:00
|
|
|
|
|
|
|
// --- Subscriber services that can be started via flags
|
|
|
|
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQOpProcessList, nil)
|
|
|
|
proc.startup.subscriber(proc, REQOpProcessStart, nil)
|
|
|
|
proc.startup.subscriber(proc, REQOpProcessStop, nil)
|
|
|
|
proc.startup.subscriber(proc, REQTest, nil)
|
2022-05-22 04:36:02 +00:00
|
|
|
|
2021-09-08 16:56:23 +00:00
|
|
|
if proc.configuration.StartSubREQToFileAppend {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQToFileAppend, nil)
|
2021-02-24 09:58:02 +00:00
|
|
|
}
|
|
|
|
|
2021-09-08 16:56:23 +00:00
|
|
|
if proc.configuration.StartSubREQToFile {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQToFile, nil)
|
2021-04-06 17:42:03 +00:00
|
|
|
}
|
|
|
|
|
2022-03-04 14:02:43 +00:00
|
|
|
if proc.configuration.StartSubREQToFileNACK {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQToFileNACK, nil)
|
2022-03-04 14:02:43 +00:00
|
|
|
}
|
|
|
|
|
2022-06-09 03:59:37 +00:00
|
|
|
if proc.configuration.StartSubREQCopySrc {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQCopySrc, nil)
|
2022-06-09 03:59:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if proc.configuration.StartSubREQCopyDst {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQCopyDst, nil)
|
2022-06-09 03:59:37 +00:00
|
|
|
}
|
|
|
|
|
2021-09-08 16:56:23 +00:00
|
|
|
if proc.configuration.StartSubREQHello {
|
2023-04-18 13:47:40 +00:00
|
|
|
// subREQHello is the handler that is triggered when we are receiving a hello
|
|
|
|
// message. To keep the state of all the hello's received from nodes we need
|
|
|
|
// to also start a procFunc that will live as a go routine tied to this process,
|
|
|
|
// where the procFunc will receive messages from the handler when a message is
|
|
|
|
// received, the handler will deliver the message to the procFunc on the
|
|
|
|
// proc.procFuncCh, and we can then read that message from the procFuncCh in
|
|
|
|
// the procFunc running.
|
|
|
|
pf := func(ctx context.Context, procFuncCh chan Message) error {
|
|
|
|
// sayHelloNodes := make(map[Node]struct{})
|
|
|
|
|
|
|
|
for {
|
|
|
|
// Receive a copy of the message sent from the method handler.
|
|
|
|
var m Message
|
|
|
|
|
|
|
|
select {
|
|
|
|
case m = <-procFuncCh:
|
|
|
|
case <-ctx.Done():
|
|
|
|
er := fmt.Errorf("info: stopped handleFunc for: subscriber %v", proc.subject.name())
|
|
|
|
// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
|
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
proc.centralAuth.addPublicKey(proc, m)
|
|
|
|
|
|
|
|
// update the prometheus metrics
|
|
|
|
|
|
|
|
proc.server.centralAuth.pki.nodesAcked.mu.Lock()
|
|
|
|
mapLen := len(proc.server.centralAuth.pki.nodesAcked.keysAndHash.Keys)
|
|
|
|
proc.server.centralAuth.pki.nodesAcked.mu.Unlock()
|
|
|
|
proc.metrics.promHelloNodesTotal.Set(float64(mapLen))
|
|
|
|
proc.metrics.promHelloNodesContactLast.With(prometheus.Labels{"nodeName": string(m.FromNode)}).SetToCurrentTime()
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
proc.startup.subscriber(proc, REQHello, pf)
|
2021-02-24 09:58:02 +00:00
|
|
|
}
|
2021-02-24 14:43:31 +00:00
|
|
|
|
2022-06-03 04:02:27 +00:00
|
|
|
if proc.configuration.IsCentralErrorLogger {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQErrorLog, nil)
|
2021-02-24 14:43:31 +00:00
|
|
|
}
|
2021-03-09 10:58:50 +00:00
|
|
|
|
2021-09-08 16:56:23 +00:00
|
|
|
if proc.configuration.StartSubREQCliCommand {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQCliCommand, nil)
|
2021-03-26 08:08:47 +00:00
|
|
|
}
|
|
|
|
|
2021-09-08 16:56:23 +00:00
|
|
|
if proc.configuration.StartSubREQToConsole {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQToConsole, nil)
|
2021-03-26 08:08:47 +00:00
|
|
|
}
|
|
|
|
|
2021-08-11 08:11:57 +00:00
|
|
|
if proc.configuration.StartPubREQHello != 0 {
|
2023-04-18 13:47:40 +00:00
|
|
|
pf := func(ctx context.Context, procFuncCh chan Message) error {
|
|
|
|
ticker := time.NewTicker(time.Second * time.Duration(p.configuration.StartPubREQHello))
|
|
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
|
|
|
|
|
|
// d := fmt.Sprintf("Hello from %v\n", p.node)
|
|
|
|
// Send the ed25519 public key used for signing as the payload of the message.
|
|
|
|
d := proc.server.nodeAuth.SignPublicKey
|
|
|
|
|
|
|
|
m := Message{
|
|
|
|
FileName: "hello.log",
|
|
|
|
Directory: "hello-messages",
|
|
|
|
ToNode: Node(p.configuration.CentralNodeName),
|
|
|
|
FromNode: Node(proc.node),
|
|
|
|
Data: []byte(d),
|
|
|
|
Method: REQHello,
|
|
|
|
ACKTimeout: proc.configuration.DefaultMessageTimeout,
|
|
|
|
Retries: 1,
|
|
|
|
}
|
|
|
|
|
|
|
|
sam, err := newSubjectAndMessage(m)
|
|
|
|
if err != nil {
|
|
|
|
// In theory the system should drop the message before it reaches here.
|
|
|
|
er := fmt.Errorf("error: ProcessesStart: %v", err)
|
|
|
|
p.errorKernel.errSend(proc, m, er, logError)
|
|
|
|
}
|
|
|
|
proc.toRingbufferCh <- []subjectAndMessage{sam}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
case <-ctx.Done():
|
|
|
|
er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
|
|
|
|
// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
|
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
proc.startup.publisher(proc, REQHello, pf)
|
2021-03-09 10:58:50 +00:00
|
|
|
}
|
2021-04-06 17:42:03 +00:00
|
|
|
|
2022-05-30 05:14:15 +00:00
|
|
|
if proc.configuration.EnableKeyUpdates {
|
2023-04-18 13:47:40 +00:00
|
|
|
// pubREQKeysRequestUpdate defines the startup of a publisher that will send REQREQKeysRequestUpdate
|
|
|
|
// to central server and ask for publics keys, and to get them deliver back with a request
|
|
|
|
// of type pubREQKeysDeliverUpdate.
|
|
|
|
pf := func(ctx context.Context, procFuncCh chan Message) error {
|
|
|
|
ticker := time.NewTicker(time.Second * time.Duration(p.configuration.REQKeysRequestUpdateInterval))
|
|
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
|
|
|
|
|
|
// Send a message with the hash of the currently stored keys,
|
|
|
|
// so we would know on the subscriber at central if it should send
|
|
|
|
// and update with new keys back.
|
|
|
|
|
|
|
|
proc.nodeAuth.publicKeys.mu.Lock()
|
|
|
|
er := fmt.Errorf(" ----> publisher REQKeysRequestUpdate: sending our current hash: %v", []byte(proc.nodeAuth.publicKeys.keysAndHash.Hash[:]))
|
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
|
|
|
|
|
|
|
m := Message{
|
|
|
|
FileName: "publickeysget.log",
|
|
|
|
Directory: "publickeysget",
|
|
|
|
ToNode: Node(p.configuration.CentralNodeName),
|
|
|
|
FromNode: Node(proc.node),
|
|
|
|
Data: []byte(proc.nodeAuth.publicKeys.keysAndHash.Hash[:]),
|
|
|
|
Method: REQKeysRequestUpdate,
|
|
|
|
ReplyMethod: REQKeysDeliverUpdate,
|
|
|
|
ACKTimeout: proc.configuration.DefaultMessageTimeout,
|
|
|
|
Retries: 1,
|
|
|
|
}
|
|
|
|
proc.nodeAuth.publicKeys.mu.Unlock()
|
|
|
|
|
|
|
|
sam, err := newSubjectAndMessage(m)
|
|
|
|
if err != nil {
|
|
|
|
// In theory the system should drop the message before it reaches here.
|
|
|
|
p.errorKernel.errSend(proc, m, err, logError)
|
|
|
|
}
|
|
|
|
proc.toRingbufferCh <- []subjectAndMessage{sam}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
case <-ctx.Done():
|
|
|
|
er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
|
|
|
|
// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
|
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
proc.startup.publisher(proc, REQKeysRequestUpdate, pf)
|
|
|
|
proc.startup.subscriber(proc, REQKeysDeliverUpdate, nil)
|
2022-05-26 06:07:23 +00:00
|
|
|
}
|
|
|
|
|
2022-05-30 05:14:15 +00:00
|
|
|
if proc.configuration.EnableAclUpdates {
|
2023-04-18 13:47:40 +00:00
|
|
|
pf := func(ctx context.Context, procFuncCh chan Message) error {
|
|
|
|
ticker := time.NewTicker(time.Second * time.Duration(p.configuration.REQAclRequestUpdateInterval))
|
|
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
|
|
|
|
|
|
// Send a message with the hash of the currently stored acl's,
|
|
|
|
// so we would know for the subscriber at central if it should send
|
|
|
|
// and update with new keys back.
|
|
|
|
|
|
|
|
proc.nodeAuth.nodeAcl.mu.Lock()
|
|
|
|
er := fmt.Errorf(" ----> publisher REQAclRequestUpdate: sending our current hash: %v", []byte(proc.nodeAuth.nodeAcl.aclAndHash.Hash[:]))
|
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
|
|
|
|
|
|
|
m := Message{
|
|
|
|
FileName: "aclRequestUpdate.log",
|
|
|
|
Directory: "aclRequestUpdate",
|
|
|
|
ToNode: Node(p.configuration.CentralNodeName),
|
|
|
|
FromNode: Node(proc.node),
|
|
|
|
Data: []byte(proc.nodeAuth.nodeAcl.aclAndHash.Hash[:]),
|
|
|
|
Method: REQAclRequestUpdate,
|
|
|
|
ReplyMethod: REQAclDeliverUpdate,
|
|
|
|
ACKTimeout: proc.configuration.DefaultMessageTimeout,
|
|
|
|
Retries: 1,
|
|
|
|
}
|
|
|
|
proc.nodeAuth.nodeAcl.mu.Unlock()
|
|
|
|
|
|
|
|
sam, err := newSubjectAndMessage(m)
|
|
|
|
if err != nil {
|
|
|
|
// In theory the system should drop the message before it reaches here.
|
|
|
|
p.errorKernel.errSend(proc, m, err, logError)
|
|
|
|
log.Printf("error: ProcessesStart: %v\n", err)
|
|
|
|
}
|
|
|
|
proc.toRingbufferCh <- []subjectAndMessage{sam}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
case <-ctx.Done():
|
|
|
|
er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
|
|
|
|
// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
|
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
proc.startup.publisher(proc, REQAclRequestUpdate, pf)
|
|
|
|
proc.startup.subscriber(proc, REQAclDeliverUpdate, nil)
|
2022-04-07 07:34:06 +00:00
|
|
|
}
|
|
|
|
|
2022-04-07 12:18:28 +00:00
|
|
|
if proc.configuration.IsCentralAuth {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQKeysRequestUpdate, nil)
|
|
|
|
proc.startup.subscriber(proc, REQKeysAllow, nil)
|
|
|
|
proc.startup.subscriber(proc, REQKeysDelete, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclRequestUpdate, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclAddCommand, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclDeleteCommand, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclDeleteSource, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclGroupNodesAddNode, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclGroupNodesDeleteNode, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclGroupNodesDeleteGroup, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclGroupCommandsAddCommand, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclGroupCommandsDeleteCommand, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclGroupCommandsDeleteGroup, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclExport, nil)
|
|
|
|
proc.startup.subscriber(proc, REQAclImport, nil)
|
2022-04-07 07:34:06 +00:00
|
|
|
}
|
|
|
|
|
2021-09-08 16:56:23 +00:00
|
|
|
if proc.configuration.StartSubREQHttpGet {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQHttpGet, nil)
|
2021-04-06 17:42:03 +00:00
|
|
|
}
|
2021-04-13 09:28:52 +00:00
|
|
|
|
2022-02-11 06:27:51 +00:00
|
|
|
if proc.configuration.StartSubREQHttpGetScheduled {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQHttpGetScheduled, nil)
|
2022-02-11 06:27:51 +00:00
|
|
|
}
|
|
|
|
|
2021-09-08 16:56:23 +00:00
|
|
|
if proc.configuration.StartSubREQTailFile {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQTailFile, nil)
|
2021-04-13 09:28:52 +00:00
|
|
|
}
|
2021-07-01 08:05:34 +00:00
|
|
|
|
2021-09-17 08:17:10 +00:00
|
|
|
if proc.configuration.StartSubREQCliCommandCont {
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQCliCommandCont, nil)
|
2021-08-10 10:49:42 +00:00
|
|
|
}
|
|
|
|
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.startup.subscriber(proc, REQPublicKey, nil)
|
2021-02-24 09:58:02 +00:00
|
|
|
}
|
2021-04-08 11:43:47 +00:00
|
|
|
|
2021-08-11 10:23:37 +00:00
|
|
|
// Stop all subscriber processes.
|
|
|
|
func (p *processes) Stop() {
|
2021-08-12 10:27:47 +00:00
|
|
|
log.Printf("info: canceling all subscriber processes...\n")
|
2021-08-11 10:23:37 +00:00
|
|
|
p.cancel()
|
2021-08-12 10:27:47 +00:00
|
|
|
p.wg.Wait()
|
|
|
|
log.Printf("info: done canceling all subscriber processes.\n")
|
|
|
|
|
2021-08-11 10:23:37 +00:00
|
|
|
}
|
|
|
|
|
2021-04-09 09:30:40 +00:00
|
|
|
// ---------------------------------------------------------------------------------------
|
2021-04-08 11:43:47 +00:00
|
|
|
|
2021-08-11 08:11:57 +00:00
|
|
|
// Startup holds all the startup methods for subscribers.
type startup struct {
	// Reference to the owning server instance.
	server *server
	// centralAuth, taken from the server at construction time.
	centralAuth *centralAuth
	// The instance global prometheus registry, taken from the server.
	metrics *metrics
}
|
|
|
|
|
2022-04-01 05:09:55 +00:00
|
|
|
func newStartup(server *server) *startup {
|
2022-04-01 07:21:50 +00:00
|
|
|
s := startup{
|
2022-04-05 08:35:59 +00:00
|
|
|
server: server,
|
|
|
|
centralAuth: server.centralAuth,
|
|
|
|
metrics: server.metrics,
|
2022-04-01 07:21:50 +00:00
|
|
|
}
|
2021-08-18 10:16:21 +00:00
|
|
|
|
|
|
|
return &s
|
|
|
|
}
|
2021-04-09 09:30:40 +00:00
|
|
|
|
2023-04-19 03:46:38 +00:00
|
|
|
// subscriber will start a subscriber process. It takes the initial process, request method,
|
|
|
|
// and a procFunc as it's input arguments. If a procFunc os not needed, use the value nil.
|
2023-04-18 13:47:40 +00:00
|
|
|
func (s *startup) subscriber(p process, m Method, pf func(ctx context.Context, procFuncCh chan Message) error) {
|
|
|
|
er := fmt.Errorf("starting %v subscriber: %#v", m, p.node)
|
2023-01-12 11:03:10 +00:00
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
2023-06-01 04:15:39 +00:00
|
|
|
|
|
|
|
var sub Subject
|
|
|
|
switch {
|
|
|
|
case m == REQErrorLog:
|
|
|
|
sub = newSubject(m, "errorCentral")
|
|
|
|
default:
|
|
|
|
sub = newSubject(m, string(p.node))
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Printf("DEBUG:::startup subscriber, subject: %v\n", sub)
|
2024-03-08 21:55:21 +00:00
|
|
|
proc := newProcess(p.ctx, p.processes.server, sub, processKindSubscriber)
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.procFunc = pf
|
2021-11-09 13:01:42 +00:00
|
|
|
|
2022-04-01 05:09:55 +00:00
|
|
|
go proc.spawnWorker()
|
2021-04-09 09:30:40 +00:00
|
|
|
}
|
|
|
|
|
2023-04-18 13:47:40 +00:00
|
|
|
func (s *startup) publisher(p process, m Method, pf func(ctx context.Context, procFuncCh chan Message) error) {
|
|
|
|
er := fmt.Errorf("starting %v publisher: %#v", m, p.node)
|
2023-01-12 11:03:10 +00:00
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
2023-04-18 13:47:40 +00:00
|
|
|
sub := newSubject(m, string(p.node))
|
2024-03-08 21:55:21 +00:00
|
|
|
proc := newProcess(p.ctx, p.processes.server, sub, processKindPublisher)
|
2023-04-18 13:47:40 +00:00
|
|
|
proc.procFunc = pf
|
2023-01-13 14:09:23 +00:00
|
|
|
proc.isLongRunningPublisher = true
|
2022-04-07 07:34:06 +00:00
|
|
|
|
2022-04-01 05:09:55 +00:00
|
|
|
go proc.spawnWorker()
|
2022-02-08 10:49:32 +00:00
|
|
|
}
|
|
|
|
|
2021-08-11 10:23:37 +00:00
|
|
|
// ---------------------------------------------------------------
|
|
|
|
|
|
|
|
// Print the content of the processes map.
|
|
|
|
func (p *processes) printProcessesMap() {
|
2023-01-12 07:48:01 +00:00
|
|
|
er := fmt.Errorf("output of processes map : ")
|
2023-01-12 11:03:10 +00:00
|
|
|
p.errorKernel.logDebug(er, p.configuration)
|
2021-10-08 10:07:10 +00:00
|
|
|
|
2021-11-16 09:21:44 +00:00
|
|
|
{
|
|
|
|
p.active.mu.Lock()
|
2021-10-08 10:07:10 +00:00
|
|
|
|
2021-11-16 18:07:24 +00:00
|
|
|
for pName, proc := range p.active.procNames {
|
2022-10-05 07:16:22 +00:00
|
|
|
er := fmt.Errorf("info: proc - pub/sub: %v, procName in map: %v , id: %v, subject: %v", proc.processKind, pName, proc.processID, proc.subject.name())
|
2023-01-12 11:03:10 +00:00
|
|
|
proc.errorKernel.logDebug(er, proc.configuration)
|
2021-08-11 10:23:37 +00:00
|
|
|
}
|
|
|
|
|
2022-04-01 07:21:50 +00:00
|
|
|
p.metrics.promProcessesTotal.Set(float64(len(p.active.procNames)))
|
2021-11-16 09:21:44 +00:00
|
|
|
|
|
|
|
p.active.mu.Unlock()
|
|
|
|
}
|
2021-08-11 10:23:37 +00:00
|
|
|
|
|
|
|
}
|