package ctrl

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/nats-io/nats.go/jetstream"
	"github.com/prometheus/client_golang/prometheus"
)

// processes holds all the information about running processes.
type processes struct {
	// mutex for processes
	mu sync.Mutex
	// The main context for subscriber processes.
	ctx context.Context
	// cancel func to send cancel signal to the subscriber processes context.
	cancel context.CancelFunc
	// Reference to the main server.
	server *server
	// The active spawned processes.
	active procsMap
	// mutex to lock the map
	// mu sync.RWMutex
	// The last processID created.
	lastProcessID int
	// The instance global prometheus registry.
	metrics *metrics
	// Waitgroup to keep track of all the processes started.
	wg sync.WaitGroup
	// errorKernel
	errorKernel *errorKernel
	// configuration
	configuration *Configuration

	// Signatures
	nodeAuth *nodeAuth
}

// newProcesses will prepare and return a *processes which
// holds a map of all the currently running processes.
func newProcesses(ctx context.Context, server *server) *processes {
	p := processes{
		server:        server,
		active:        *newProcsMap(),
		errorKernel:   server.errorKernel,
		configuration: server.configuration,
		nodeAuth:      server.nodeAuth,
		metrics:       server.metrics,
	}

	// Prepare the parent context for the subscribers.
	ctx, cancel := context.WithCancel(ctx)

	// // Start the processes map.
	// go func() {
	// 	p.active.run(ctx)
	// }()

	p.ctx = ctx
	p.cancel = cancel

	return &p
}
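
// A minimal usage sketch (the *server value and the initial process are
// assumed to be created elsewhere):
//
//	procs := newProcesses(context.Background(), srv)
//	procs.Start(initialProc)
//	// ...
//	procs.Stop()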

// ----------------------

// ----------------------

type procsMap struct {
	procNames map[processName]process
	mu        sync.Mutex
}

func newProcsMap() *procsMap {
	cM := procsMap{
		procNames: make(map[processName]process),
	}

	return &cM
}
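
// A minimal sketch of how a running process would typically be registered in
// the map (the processName pn and the process proc are assumed values):
//
//	p.active.mu.Lock()
//	p.active.procNames[pn] = proc
//	p.active.mu.Unlock()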

// ----------------------

// Start all the subscriber processes.
// Takes an initial process as its input. All processes
// will be tied to this single process's context.
func (p *processes) Start(proc process) {
	// Set the context for the initial process.
	proc.ctx = p.ctx

	// --- Subscriber services that can be started via flags
	proc.startup.subscriber(proc, OpProcessList, nil)
	proc.startup.subscriber(proc, OpProcessStart, nil)
	proc.startup.subscriber(proc, OpProcessStop, nil)
	proc.startup.subscriber(proc, Test, nil)

	if proc.configuration.StartSubFileAppend {
		proc.startup.subscriber(proc, FileAppend, nil)
	}

	if proc.configuration.StartSubFile {
		proc.startup.subscriber(proc, File, nil)
	}

	if proc.configuration.StartSubCopySrc {
		proc.startup.subscriber(proc, CopySrc, nil)
	}

	if proc.configuration.StartSubCopyDst {
		proc.startup.subscriber(proc, CopyDst, nil)
	}

	if proc.configuration.StartSubHello {
		// The handler for the Hello method is triggered when we receive a hello
		// message. To keep the state of all the hellos received from nodes, we
		// also start a procFunc that will live as a go routine tied to this
		// process. When a message is received, the handler delivers it to the
		// procFunc on proc.procFuncCh, and the procFunc then reads it from that
		// channel.
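		//
		// A minimal sketch of the handler side of that hand-off (assuming the
		// Hello method handler has access to the same proc):
		//
		//	select {
		//	case proc.procFuncCh <- message:
		//	case <-proc.ctx.Done():
		//	}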
		pf := func(ctx context.Context, procFuncCh chan Message) error {
			// sayHelloNodes := make(map[Node]struct{})

			for {
				// Receive a copy of the message sent from the method handler.
				var m Message

				select {
				case m = <-procFuncCh:
				case <-ctx.Done():
					er := fmt.Errorf("info: stopped handleFunc for: subscriber %v", proc.subject.name())
					// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
					p.errorKernel.logDebug(er)
					return nil
				}

				proc.centralAuth.addPublicKey(proc, m)

				// update the prometheus metrics
				proc.server.centralAuth.pki.nodesAcked.mu.Lock()
				mapLen := len(proc.server.centralAuth.pki.nodesAcked.keysAndHash.Keys)
				proc.server.centralAuth.pki.nodesAcked.mu.Unlock()

				proc.metrics.promHelloNodesTotal.Set(float64(mapLen))
				proc.metrics.promHelloNodesContactLast.With(prometheus.Labels{"nodeName": string(m.FromNode)}).SetToCurrentTime()
			}
		}

		proc.startup.subscriber(proc, Hello, pf)
	}

	fmt.Printf("--------------------------------IsCentralErrorLogger = %v------------------------------\n", proc.configuration.IsCentralErrorLogger)

	if proc.configuration.IsCentralErrorLogger {
		proc.startup.subscriber(proc, ErrorLog, nil)
	}

	if proc.configuration.StartSubCliCommand {
		proc.startup.subscriber(proc, CliCommand, nil)
	}

	if proc.configuration.StartSubConsole {
		proc.startup.subscriber(proc, Console, nil)
	}

	if proc.configuration.StartPubHello != 0 {
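		// The Hello publisher periodically sends a hello message to the central
		// node with this node's public signing key as the payload, so central
		// can register the key and track when the node was last in contact.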
		pf := func(ctx context.Context, procFuncCh chan Message) error {
			ticker := time.NewTicker(time.Second * time.Duration(p.configuration.StartPubHello))
			defer ticker.Stop()

			for {
				// d := fmt.Sprintf("Hello from %v\n", p.node)
				// Send the ed25519 public key used for signing as the payload of the message.
				d := proc.server.nodeAuth.SignPublicKey

				m := Message{
					FileName:   "hello.log",
					Directory:  "hello-messages",
					ToNode:     Node(p.configuration.CentralNodeName),
					FromNode:   Node(proc.node),
					Data:       []byte(d),
					Method:     Hello,
					ACKTimeout: proc.configuration.DefaultMessageTimeout,
					Retries:    1,
				}

				sam, err := newSubjectAndMessage(m)
				if err != nil {
					// In theory the system should drop the message before it reaches here.
					er := fmt.Errorf("error: ProcessesStart: %v", err)
					p.errorKernel.errSend(proc, m, er, logError)
				}

				proc.newMessagesCh <- []subjectAndMessage{sam}

				select {
				case <-ticker.C:
				case <-ctx.Done():
					er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
					// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
					p.errorKernel.logDebug(er)
					return nil
				}
			}
		}

		proc.startup.publisher(proc, Hello, pf)
	}

	if proc.configuration.EnableKeyUpdates {
		// Define the startup of a publisher that will send KeysRequestUpdate
		// to the central server to ask for public keys, and have them delivered
		// back with a request of type KeysDeliverUpdate.
		pf := func(ctx context.Context, procFuncCh chan Message) error {
			ticker := time.NewTicker(time.Second * time.Duration(p.configuration.KeysUpdateInterval))
			defer ticker.Stop()

			for {
				// Send a message with the hash of the currently stored keys,
				// so the subscriber at central can decide whether it should
				// send an update with new keys back.
				proc.nodeAuth.publicKeys.mu.Lock()
				er := fmt.Errorf(" ----> publisher KeysRequestUpdate: sending our current hash: %v", []byte(proc.nodeAuth.publicKeys.keysAndHash.Hash[:]))
				p.errorKernel.logDebug(er)

				m := Message{
					FileName:    "publickeysget.log",
					Directory:   "publickeysget",
					ToNode:      Node(p.configuration.CentralNodeName),
					FromNode:    Node(proc.node),
					Data:        []byte(proc.nodeAuth.publicKeys.keysAndHash.Hash[:]),
					Method:      KeysRequestUpdate,
					ReplyMethod: KeysDeliverUpdate,
					ACKTimeout:  proc.configuration.DefaultMessageTimeout,
					Retries:     1,
				}
				proc.nodeAuth.publicKeys.mu.Unlock()

				sam, err := newSubjectAndMessage(m)
				if err != nil {
					// In theory the system should drop the message before it reaches here.
					p.errorKernel.errSend(proc, m, err, logError)
				}

				proc.newMessagesCh <- []subjectAndMessage{sam}

				select {
				case <-ticker.C:
				case <-ctx.Done():
					er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
					// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
					p.errorKernel.logDebug(er)
					return nil
				}
			}
		}

		proc.startup.publisher(proc, KeysRequestUpdate, pf)
		proc.startup.subscriber(proc, KeysDeliverUpdate, nil)
	}

	if proc.configuration.EnableAclUpdates {
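		// Define the startup of a publisher that will send AclRequestUpdate
		// with the hash of the locally stored ACLs to the central server, and
		// have updated ACLs delivered back with a request of type AclDeliverUpdate.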
		pf := func(ctx context.Context, procFuncCh chan Message) error {
			ticker := time.NewTicker(time.Second * time.Duration(p.configuration.AclUpdateInterval))
			defer ticker.Stop()

			for {
				// Send a message with the hash of the currently stored acl's,
				// so the subscriber at central can decide whether it should
				// send an update with new acl's back.
				proc.nodeAuth.nodeAcl.mu.Lock()
				er := fmt.Errorf(" ----> publisher AclRequestUpdate: sending our current hash: %v", []byte(proc.nodeAuth.nodeAcl.aclAndHash.Hash[:]))
				p.errorKernel.logDebug(er)

				m := Message{
					FileName:    "aclRequestUpdate.log",
					Directory:   "aclRequestUpdate",
					ToNode:      Node(p.configuration.CentralNodeName),
					FromNode:    Node(proc.node),
					Data:        []byte(proc.nodeAuth.nodeAcl.aclAndHash.Hash[:]),
					Method:      AclRequestUpdate,
					ReplyMethod: AclDeliverUpdate,
					ACKTimeout:  proc.configuration.DefaultMessageTimeout,
					Retries:     1,
				}
				proc.nodeAuth.nodeAcl.mu.Unlock()

				sam, err := newSubjectAndMessage(m)
				if err != nil {
					// In theory the system should drop the message before it reaches here.
					p.errorKernel.errSend(proc, m, err, logError)
					log.Printf("error: ProcessesStart: %v\n", err)
				}

				proc.newMessagesCh <- []subjectAndMessage{sam}

				select {
				case <-ticker.C:
				case <-ctx.Done():
					er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
					// sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
					p.errorKernel.logDebug(er)
					return nil
				}
			}
		}

		proc.startup.publisher(proc, AclRequestUpdate, pf)
		proc.startup.subscriber(proc, AclDeliverUpdate, nil)
	}

	if proc.configuration.IsCentralAuth {
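		// When running as the central auth, start the subscribers that handle
		// the key and ACL management requests coming from the nodes.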
		proc.startup.subscriber(proc, KeysRequestUpdate, nil)
		proc.startup.subscriber(proc, KeysAllow, nil)
		proc.startup.subscriber(proc, KeysDelete, nil)
		proc.startup.subscriber(proc, AclRequestUpdate, nil)
		proc.startup.subscriber(proc, AclAddCommand, nil)
		proc.startup.subscriber(proc, AclDeleteCommand, nil)
		proc.startup.subscriber(proc, AclDeleteSource, nil)
		proc.startup.subscriber(proc, AclGroupNodesAddNode, nil)
		proc.startup.subscriber(proc, AclGroupNodesDeleteNode, nil)
		proc.startup.subscriber(proc, AclGroupNodesDeleteGroup, nil)
		proc.startup.subscriber(proc, AclGroupCommandsAddCommand, nil)
		proc.startup.subscriber(proc, AclGroupCommandsDeleteCommand, nil)
		proc.startup.subscriber(proc, AclGroupCommandsDeleteGroup, nil)
		proc.startup.subscriber(proc, AclExport, nil)
		proc.startup.subscriber(proc, AclImport, nil)
	}

	if proc.configuration.StartSubHttpGet {
		proc.startup.subscriber(proc, HttpGet, nil)
	}

	if proc.configuration.StartSubTailFile {
		proc.startup.subscriber(proc, TailFile, nil)
	}

	if proc.configuration.StartSubCliCommandCont {
		proc.startup.subscriber(proc, CliCommandCont, nil)
	}

	proc.startup.subscriber(proc, PublicKey, nil)

	// --------------------------------------------------
	// ProcFunc for Jetstream publishers.
	// --------------------------------------------------
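	// pfJetstreamPublishers creates or updates the "nodes" stream on the
	// connected NATS server, then reads messages from proc.jetstreamOut,
	// JSON encodes them, and publishes them on the subject "nodes.<JetstreamToNode>".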
	if proc.configuration.StartJetstreamPublisher {
		pfJetstreamPublishers := func(ctx context.Context, procFuncCh chan Message) error {
			fmt.Printf("######## DEBUG: Publisher: beginning of jetstream publisher: %v\n", "#######")
			js, err := jetstream.New(proc.natsConn)
			if err != nil {
				log.Fatalf("error: jetstream new failed: %v\n", err)
			}

			_, err = js.CreateOrUpdateStream(proc.ctx, jetstream.StreamConfig{
				Name:        "nodes",
				Description: "nodes stream",
				Subjects:    []string{"nodes.>"},
				// Discard older messages and keep only the last one.
				// MaxMsgsPerSubject: 1,
			})
			fmt.Printf("######## DEBUG: Publisher: CreateOrUpdateStream: %v\n", "#######")
			if err != nil {
				log.Fatalf("error: jetstream create or update failed: %v\n", err)
			}

			for {
				// TODO:
				select {
				case msg := <-proc.jetstreamOut:
					fmt.Printf("######## DEBUG: Publisher: received on <-proc.jetstreamOut: %v\n", msg)

					// b, err := proc.messageSerializeAndCompress(msg)
					// if err != nil {
					// 	log.Fatalf("error: pfJetstreamPublishers: js failed to marshal message: %v\n", err)
					// }
					b, err := json.Marshal(msg)
					if err != nil {
						log.Fatalf("error: pfJetstreamPublishers: js failed to marshal message: %v\n", err)
					}

					subject := fmt.Sprintf("nodes.%v", msg.JetstreamToNode)
					fmt.Printf("######## DEBUG: Publisher: before publish: %v\n", "###")
					_, err = js.Publish(proc.ctx, subject, b)
					if err != nil {
						log.Fatalf("error: pfJetstreamPublishers: js failed to publish message: %v\n", err)
					}
					fmt.Printf("######## DEBUG: Publisher: after publish: %v\n", "###")

				case <-ctx.Done():
					return fmt.Errorf("%v", "info: pfJetstreamPublishers: got <-ctx.done")
				}
			}
		}

		proc.startup.publisher(proc, JetStreamPublishers, pfJetstreamPublishers)
	}

	// --------------------------------------------------
	// ProcFunc for Jetstream consumers.
	// --------------------------------------------------
	// pfJetstreamConsumers connects to the NATS JetStream and consumes messages
	// for this node on the specified subjects within that stream.
	// After a jetstream message is picked up from the stream, the ctrl message
	// is extracted from the data field and put on the local delivery channel,
	// where it is handled as a normal ctrl message.
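	// Note that the stream name "nodes" opened below matches the stream that
	// the Jetstream publisher procFunc above creates, so both sides operate on
	// the same stream.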
	if proc.configuration.StartJetstreamConsumer {
		pfJetstreamConsumers := func(ctx context.Context, procFuncCh chan Message) error {
			fmt.Println("---------------------------------------------------------------")
			fmt.Printf("--- DEBUG: consumer: starting up jetstream consumer %v\n", "---")
			fmt.Println("---------------------------------------------------------------")
			js, err := jetstream.New(proc.natsConn)
			if err != nil {
				log.Fatalf("error: jetstream new failed: %v\n", err)
			}

			stream, err := js.Stream(proc.ctx, "nodes")
			if err != nil {
				log.Printf("error: js.Stream failed: %v\n", err)
			}

			// stream, err := js.CreateOrUpdateStream(proc.ctx, jetstream.StreamConfig{
			// 	Name:        "nodes",
			// 	Description: "nodes stream",
			// 	Subjects:    []string{"nodes.>"},
			// 	// Discard older messages and keep only the last one.
			// 	MaxMsgsPerSubject: 1,
			// })

			// Check for more subjects to listen to via flags, and if defined,
			// prefix all the values with "nodes."
			filterSubjectValues := []string{
				fmt.Sprintf("nodes.%v", proc.server.nodeName),
				//"nodes.all",
			}
			fmt.Printf("--- DEBUG: consumer: filterSubjectValues: %v\n", filterSubjectValues)

			//// Check if there are more to consume defined in flags/env.
			//if proc.configuration.JetstreamsConsume != "" {
			//	splitValues := strings.Split(proc.configuration.JetstreamsConsume, ",")
			//	for i, v := range splitValues {
			//		filterSubjectValues[i] = fmt.Sprintf("nodes.%v", v)
			//	}
			//}

			consumer, err := stream.CreateOrUpdateConsumer(proc.ctx, jetstream.ConsumerConfig{
				Name:           "nodes_processor",
				Durable:        "nodes_processor",
				FilterSubjects: filterSubjectValues,
			})
			if err != nil {
				log.Fatalf("error: create or update consumer failed: %v\n", err)
			}

			consumerInfo := consumer.CachedInfo()
			fmt.Printf("--- DEBUG: consumer: created consumer: %v\n", consumerInfo)
			cctx, err := consumer.Consume(func(msg jetstream.Msg) {
				fmt.Printf("--- DEBUG: consumer: got jetstream msg to consume: %v\n", msg)
				msg.Ack()

				stewardMessage := Message{}
				// stewardMessage, err := proc.messageDeserializeAndUncompress(msg)
				// if err != nil {
				// 	log.Fatalf("error: pfJetstreamConsumers: json.Unmarshal failed: %v\n", err)
				// }
				err := json.Unmarshal(msg.Data(), &stewardMessage)
				if err != nil {
					log.Fatalf("error: pfJetstreamConsumers: json.Unmarshal failed: %v\n", err)
				}

				log.Printf("----- Received jetstream message to convert and handle as normal nats message: %v, with ctrl method: %v\n", string(msg.Subject()), string(stewardMessage.Method))

				// Messages received here via jetstream are for this node. Put the message into
				// a SubjectAndMessage structure, and we use the deliver local from here.
				sam, err := newSubjectAndMessage(stewardMessage)
				if err != nil {
					log.Fatalf("error: pfJetstreamConsumers: newSubjectAndMessage failed: %v\n", err)
				}

				fmt.Print("--- DEBUG: consumer: before putting on samSendLocalCh\n")
				proc.server.samSendLocalCh <- []subjectAndMessage{sam}
				fmt.Print("--- DEBUG: consumer: after putting on samSendLocalCh\n")
			})
			if err != nil {
				log.Fatalf("error: consumer.Consume failed: %v\n", err)
			}

			defer cctx.Stop()

			<-proc.ctx.Done()

			return nil
		}

		proc.startup.subscriber(proc, JetstreamConsumers, pfJetstreamConsumers)
	}
}

// --------------------------------------------------

// Stop all subscriber processes.
func (p *processes) Stop() {
	log.Printf("info: canceling all subscriber processes...\n")
	p.cancel()
	p.wg.Wait()
	log.Printf("info: done canceling all subscriber processes.\n")
}

// ---------------------------------------------------------------------------------------
// Helper functions, and other
// ---------------------------------------------------------------------------------------

// startup holds all the startup methods for subscribers and publishers.
type startup struct {
	server      *server
	centralAuth *centralAuth
	metrics     *metrics
}

func newStartup(server *server) *startup {
	s := startup{
		server:      server,
		centralAuth: server.centralAuth,
		metrics:     server.metrics,
	}

	return &s
}
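
// The *startup returned here is what Start uses above through
// proc.startup.subscriber and proc.startup.publisher; newStartup is
// presumably called once when the server is created, with the resulting
// value attached to each process.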

// subscriber will start a subscriber process. It takes the initial process, request method,
// and a procFunc as its input arguments. If a procFunc is not needed, use the value nil.
func (s *startup) subscriber(p process, m Method, pf func(ctx context.Context, procFuncCh chan Message) error) {
	er := fmt.Errorf("starting %v subscriber: %#v", m, p.node)
	p.errorKernel.logDebug(er)

	var sub Subject
	switch {
	case m == ErrorLog:
		sub = newSubject(m, "errorCentral")
	default:
		sub = newSubject(m, string(p.node))
	}
	fmt.Printf("DEBUG:::startup subscriber, subject: %v\n", sub)

	proc := newProcess(p.ctx, p.processes.server, sub, streamInfo{}, processKindSubscriberNats)
	proc.procFunc = pf

	go proc.Start()
}

// publisher will start a publisher process. It takes the initial process, request method,
// and a procFunc as its input arguments. If a procFunc is not needed, use the value nil.
func (s *startup) publisher(p process, m Method, pf func(ctx context.Context, procFuncCh chan Message) error) {
	er := fmt.Errorf("starting %v publisher: %#v", m, p.node)
	p.errorKernel.logDebug(er)

	sub := newSubject(m, string(p.node))
	proc := newProcess(p.ctx, p.processes.server, sub, streamInfo{}, processKindPublisherNats)
	proc.procFunc = pf
	proc.isLongRunningPublisher = true

	go proc.Start()
}

// ---------------------------------------------------------------

// Print the content of the processes map.
func (p *processes) printProcessesMap() {
	er := fmt.Errorf("output of processes map:")
	p.errorKernel.logDebug(er)

	{
		p.active.mu.Lock()

		for pName, proc := range p.active.procNames {
			er := fmt.Errorf("info: proc - pub/sub: %v, procName in map: %v, id: %v, subject: %v", proc.processKind, pName, proc.processID, proc.subject.name())
			proc.errorKernel.logDebug(er)
		}

		p.metrics.promProcessesTotal.Set(float64(len(p.active.procNames)))

		p.active.mu.Unlock()
	}
}