Mirror of https://github.com/postmannen/ctrl.git, synced 2024-12-14 12:37:31 +00:00

removed configuration as input argument from all the loggers

commit 118f6bab98, parent 8c68b87fbe
15 changed files with 194 additions and 194 deletions
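The change is the same across all fifteen files: every call to the errorKernel loggers used to take the active *Configuration as a trailing argument, and after this commit the kernel reads the configuration from its own struct field instead. A minimal runnable sketch of the pattern, assuming a stubbed-down errorKernel (the constructor wiring below is an assumption; only the method shape and the e.configuration access are taken from the diff):

    // Sketch of the refactor pattern, not code from the repository.
    package main

    import (
        "fmt"
        "log/slog"
    )

    type Configuration struct{ LogLevel string }

    // Old shape (for contrast): logInfo(err error, c *Configuration)
    // forced every caller to have a *Configuration in scope.

    type errorKernel struct {
        configuration *Configuration // assumed field, implied by e.configuration in the diff
    }

    // New shape: the receiver supplies the configuration.
    func (e *errorKernel) logInfo(err error) {
        if e.configuration.LogLevel == "info" {
            slog.Info(err.Error())
        }
    }

    func main() {
        ek := &errorKernel{configuration: &Configuration{LogLevel: "info"}}
        ek.logInfo(fmt.Errorf("info: something happened"))
    }

Holding the configuration on the kernel removes a parameter from every call site in the diff below and keeps the log-level policy in one place.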
@@ -82,7 +82,7 @@ func newSchemaMain(configuration *Configuration, errorKernel *errorKernel) *sche
     func() {
         if _, err := os.Stat(s.ACLMapFilePath); os.IsNotExist(err) {
             er := fmt.Errorf("info: newSchemaMain: no file for ACLMap found, will create new one, %v: %v", s.ACLMapFilePath, err)
-            errorKernel.logInfo(er, configuration)
+            errorKernel.logInfo(er)

             // If no aclmap is present on disk we just return from this
             // function without loading any values.
@@ -92,20 +92,20 @@ func newSchemaMain(configuration *Configuration, errorKernel *errorKernel) *sche
         fh, err := os.Open(s.ACLMapFilePath)
         if err != nil {
             er := fmt.Errorf("error: newSchemaMain: failed to open file for reading %v: %v", s.ACLMapFilePath, err)
-            errorKernel.logError(er, configuration)
+            errorKernel.logError(er)
         }

         b, err := io.ReadAll(fh)
         if err != nil {
             er := fmt.Errorf("error: newSchemaMain: failed to ReadAll file %v: %v", s.ACLMapFilePath, err)
-            errorKernel.logError(er, configuration)
+            errorKernel.logError(er)
         }

         // Unmarshal the data read from disk.
         err = json.Unmarshal(b, &s.ACLMap)
         if err != nil {
             er := fmt.Errorf("error: newSchemaMain: failed to unmarshal content from file %v: %v", s.ACLMapFilePath, err)
-            errorKernel.logError(er, configuration)
+            errorKernel.logError(er)
         }

         // Generate the aclGenerated map happens in the function where this function is called.
@@ -225,7 +225,7 @@ func (c *centralAuth) aclAddCommand(host Node, source Node, cmd command) {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: addCommandForFromNode: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

     // fmt.Printf(" * DEBUG: aclNodeFromnodeCommandAdd: a.schemaMain.ACLMap=%v\n", a.schemaMain.ACLMap)
@@ -255,7 +255,7 @@ func (c *centralAuth) aclDeleteCommand(host Node, source Node, cmd command) erro
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: aclNodeFromNodeCommandDelete: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

     return nil
@@ -280,7 +280,7 @@ func (c *centralAuth) aclDeleteSource(host Node, source Node) error {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: aclNodeFromnodeDelete: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

     return nil
@@ -299,7 +299,7 @@ func (c *centralAuth) generateACLsForAllNodes() error {
         fh, err := os.OpenFile(c.accessLists.schemaMain.ACLMapFilePath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
         if err != nil {
             er := fmt.Errorf("error: generateACLsForAllNodes: opening file for writing: %v, err: %v", c.accessLists.schemaMain.ACLMapFilePath, err)
-            c.errorKernel.logError(er, c.configuration)
+            c.errorKernel.logError(er)
             return
         }
         defer fh.Close()
@@ -311,7 +311,7 @@ func (c *centralAuth) generateACLsForAllNodes() error {
         err = enc.Encode(c.accessLists.schemaMain.ACLMap)
         if err != nil {
             er := fmt.Errorf("error: generateACLsForAllNodes: encoding json to file failed: %v, err: %v", c.accessLists.schemaMain.ACLMapFilePath, err)
-            c.errorKernel.logError(er, c.configuration)
+            c.errorKernel.logError(er)
             return
         }
     }()
@@ -334,7 +334,7 @@ func (c *centralAuth) generateACLsForAllNodes() error {
     }

     inf := fmt.Errorf("generateACLsFor all nodes, ACLsToConvert contains: %#v", c.accessLists.schemaGenerated.ACLsToConvert)
-    c.accessLists.errorKernel.logDebug(inf, c.accessLists.configuration)
+    c.accessLists.errorKernel.logDebug(inf)

     // ACLsToConvert got the complete picture of what ACL's that
     // are defined for each individual host node.
@@ -355,7 +355,7 @@ func (c *centralAuth) generateACLsForAllNodes() error {
         cb, err := cbor.Marshal(m)
         if err != nil {
             er := fmt.Errorf("error: generateACLsForAllNodes: failed to generate cbor for host in schemaGenerated: %v", err)
-            c.errorKernel.logError(er, c.configuration)
+            c.errorKernel.logError(er)
             os.Exit(1)
         }

@@ -366,7 +366,7 @@ func (c *centralAuth) generateACLsForAllNodes() error {
         b, err := cbor.Marshal(sns)
         if err != nil {
             er := fmt.Errorf("error: generateACLsForAllNodes: failed to generate cbor for hash: %v", err)
-            c.errorKernel.logError(er, c.configuration)
+            c.errorKernel.logError(er)
             return [32]byte{}
         }

@@ -387,7 +387,7 @@ func (c *centralAuth) generateACLsForAllNodes() error {
     }()

     inf = fmt.Errorf("generateACLsFor all nodes, GeneratedACLsMap contains: %#v", c.accessLists.schemaGenerated.GeneratedACLsMap)
-    c.accessLists.errorKernel.logDebug(inf, c.accessLists.configuration)
+    c.accessLists.errorKernel.logDebug(inf)

     return nil
 }
@@ -456,7 +456,7 @@ func (c *centralAuth) groupNodesAddNode(ng nodeGroup, n Node) {

     if !strings.HasPrefix(string(ng), "grp_nodes_") {
         er := fmt.Errorf("error: group name do not start with grp_nodes_")
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
         return
     }

@@ -473,7 +473,7 @@ func (c *centralAuth) groupNodesAddNode(ng nodeGroup, n Node) {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: groupNodesAddNode: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

 }
@@ -484,7 +484,7 @@ func (c *centralAuth) groupNodesDeleteNode(ng nodeGroup, n Node) {
     defer c.accessLists.schemaMain.mu.Unlock()
     if _, ok := c.accessLists.schemaMain.NodeGroupMap[ng][n]; !ok {
         er := fmt.Errorf("info: no such node with name=%v found in group=%v", ng, n)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
         return
     }

@@ -495,7 +495,7 @@ func (c *centralAuth) groupNodesDeleteNode(ng nodeGroup, n Node) {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: groupNodesDeleteNode: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

 }
@@ -506,7 +506,7 @@ func (c *centralAuth) groupNodesDeleteGroup(ng nodeGroup) {
     defer c.accessLists.schemaMain.mu.Unlock()
     if _, ok := c.accessLists.schemaMain.NodeGroupMap[ng]; !ok {
         er := fmt.Errorf("info: no such group found: %v", ng)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
         return
     }

@@ -517,7 +517,7 @@ func (c *centralAuth) groupNodesDeleteGroup(ng nodeGroup) {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: groupNodesDeleteGroup: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

 }
@@ -535,7 +535,7 @@ func (c *centralAuth) groupCommandsAddCommand(cg commandGroup, cmd command) {

     if !strings.HasPrefix(string(cg), "grp_commands_") {
         er := fmt.Errorf("error: group name do not start with grp_commands_")
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
         return
     }

@@ -552,7 +552,7 @@ func (c *centralAuth) groupCommandsAddCommand(cg commandGroup, cmd command) {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: groupCommandsAddCommand: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

 }
@@ -563,7 +563,7 @@ func (c *centralAuth) groupCommandsDeleteCommand(cg commandGroup, cmd command) {
     defer c.accessLists.schemaMain.mu.Unlock()
     if _, ok := c.accessLists.schemaMain.CommandGroupMap[cg][cmd]; !ok {
         er := fmt.Errorf("info: no such command with name=%v found in group=%v", c, cg)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
         return
     }

@@ -574,7 +574,7 @@ func (c *centralAuth) groupCommandsDeleteCommand(cg commandGroup, cmd command) {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: groupCommandsDeleteCommand: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

 }
@@ -585,7 +585,7 @@ func (c *centralAuth) groupCommandDeleteGroup(cg commandGroup) {
     defer c.accessLists.schemaMain.mu.Unlock()
     if _, ok := c.accessLists.schemaMain.CommandGroupMap[cg]; !ok {
         er := fmt.Errorf("info: no such group found: %v", cg)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
         return
     }

@@ -596,7 +596,7 @@ func (c *centralAuth) groupCommandDeleteGroup(cg commandGroup) {
     err := c.generateACLsForAllNodes()
     if err != nil {
         er := fmt.Errorf("error: groupCommandDeleteGroup: %v", err)
-        c.errorKernel.logError(er, c.configuration)
+        c.errorKernel.logError(er)
     }

 }
@@ -81,7 +81,7 @@ func newPKI(configuration *Configuration, errorKernel *errorKernel) *pki {
     db, err := bolt.Open(databaseFilepath, 0660, nil)
     if err != nil {
         er := fmt.Errorf("newPKI: error: failed to open db: %v", err)
-        errorKernel.logDebug(er, configuration)
+        errorKernel.logDebug(er)
         return &p
     }

@@ -91,7 +91,7 @@ func newPKI(configuration *Configuration, errorKernel *errorKernel) *pki {
     keys, err := p.dbDumpPublicKey()
     if err != nil {
         er := fmt.Errorf("newPKI: dbPublicKeyDump failed, probably empty db: %v", err)
-        errorKernel.logDebug(er, configuration)
+        errorKernel.logDebug(er)
     }

     // Only assign from storage to in memory map if the storage contained any values.
@@ -99,7 +99,7 @@ func newPKI(configuration *Configuration, errorKernel *errorKernel) *pki {
         p.nodesAcked.keysAndHash.Keys = keys
         for k, v := range keys {
             er := fmt.Errorf("newPKI: public keys db contains: %v, %v", k, []byte(v))
-            errorKernel.logDebug(er, configuration)
+            errorKernel.logDebug(er)
         }
     }

@@ -128,7 +128,7 @@ func (c *centralAuth) addPublicKey(proc process, msg Message) {

     if ok && bytes.Equal(existingKey, msg.Data) {
         er := fmt.Errorf("info: public key value for REGISTERED node %v is the same, doing nothing", msg.FromNode)
-        proc.errorKernel.logDebug(er, proc.configuration)
+        proc.errorKernel.logDebug(er)
         return
     }

@@ -147,7 +147,7 @@ func (c *centralAuth) addPublicKey(proc process, msg Message) {

     er := fmt.Errorf("info: detected new public key for node: %v. This key will need to be authorized by operator to be allowed into the system", msg.FromNode)
     c.pki.errorKernel.infoSend(proc, msg, er)
-    c.pki.errorKernel.logDebug(er, c.pki.configuration)
+    c.pki.errorKernel.logDebug(er)
 }

 // deletePublicKeys to the db if the node do not exist, or if it is a new value.
@@ -169,7 +169,7 @@ func (c *centralAuth) deletePublicKeys(proc process, msg Message, nodes []string
     }

     er := fmt.Errorf("info: detected new public key for node: %v. This key will need to be authorized by operator to be allowed into the system", msg.FromNode)
-    proc.errorKernel.logDebug(er, proc.configuration)
+    proc.errorKernel.logDebug(er)
     c.pki.errorKernel.infoSend(proc, msg, er)
 }

@@ -230,7 +230,7 @@ func (p *pki) dbDeletePublicKeys(bucket string, nodes []string) error {
         err := bu.Delete([]byte(n))
         if err != nil {
             er := fmt.Errorf("error: delete key in bucket %v failed: %v", bucket, err)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
             return er
         }
     }
@@ -324,14 +324,14 @@ func (p *pki) dbViewHash() ([]byte, error) {
     bu := tx.Bucket([]byte("hash"))
     if bu == nil {
         er := fmt.Errorf("info: no db hash bucket exist")
-        p.errorKernel.logWarn(er, p.configuration)
+        p.errorKernel.logWarn(er)
         return nil
     }

     v := bu.Get([]byte("hash"))
     if len(v) == 0 {
         er := fmt.Errorf("info: view: hash key not found")
-        p.errorKernel.logWarn(er, p.configuration)
+        p.errorKernel.logWarn(er)
         return nil
     }

@@ -276,26 +276,26 @@ func (e *errorKernel) infoSend(proc process, msg Message, err error) {
     e.errorCh <- ev
 }

-func (e *errorKernel) logError(err error, c *Configuration) {
-    if c.LogLevel == string(logError) {
+func (e *errorKernel) logError(err error) {
+    if e.configuration.LogLevel == string(logError) {
         slog.Error("error", err)
     }
 }

-func (e *errorKernel) logInfo(err error, c *Configuration) {
-    if c.LogLevel == string(logInfo) {
+func (e *errorKernel) logInfo(err error) {
+    if e.configuration.LogLevel == string(logInfo) {
         slog.Info(err.Error())
     }
 }

-func (e *errorKernel) logWarn(err error, c *Configuration) {
-    if c.LogLevel == string(logWarning) {
+func (e *errorKernel) logWarn(err error) {
+    if e.configuration.LogLevel == string(logWarning) {
         slog.Warn(err.Error())
     }
 }

-func (e *errorKernel) logDebug(err error, c *Configuration) {
-    if c.LogLevel == string(logDebug) {
+func (e *errorKernel) logDebug(err error) {
+    if e.configuration.LogLevel == string(logDebug) {
         slog.Debug(err.Error())
     }
 }
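Two things are worth noting about the new logger signatures above. First, dropping the parameter implies errorKernel carries a configuration field of its own (visible as e.configuration); the diff does not show where that field is populated, so any wiring is an assumption. Second, the level checks are exact string comparisons rather than thresholds, so a node configured with LogLevel "debug" emits only logDebug output. A runnable sketch of both points (constructor and stub types are assumed; only the method bodies mirror the diff):

    // Sketch only: demonstrates exact-match log levels and the
    // slog.Error("error", err) call, where err has no attribute key
    // and slog renders it as a !BADKEY attribute.
    package main

    import (
        "fmt"
        "log/slog"
        "os"
    )

    type logLevel string

    const (
        logError logLevel = "error"
        logDebug logLevel = "debug"
    )

    type Configuration struct{ LogLevel string }

    type errorKernel struct{ configuration *Configuration }

    func (e *errorKernel) logError(err error) {
        if e.configuration.LogLevel == string(logError) {
            slog.Error("error", err) // err becomes !BADKEY: no key given
        }
    }

    func (e *errorKernel) logDebug(err error) {
        if e.configuration.LogLevel == string(logDebug) {
            slog.Debug(err.Error())
        }
    }

    func main() {
        // Make the default handler emit debug records so logDebug is visible.
        slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr,
            &slog.HandlerOptions{Level: slog.LevelDebug})))

        e := &errorKernel{configuration: &Configuration{LogLevel: string(logDebug)}}
        e.logError(fmt.Errorf("suppressed: LogLevel is %q, not %q", logDebug, logError))
        e.logDebug(fmt.Errorf("emitted: LogLevel matches %q", logDebug))
    }
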
@@ -41,12 +41,12 @@ func (s *server) readStartupFolder() {

     for _, fp := range filePaths {
         er := fmt.Errorf("info: ranging filepaths, current filePath contains: %v", fp)
-        s.errorKernel.logInfo(er, s.configuration)
+        s.errorKernel.logInfo(er)
     }

     for _, filePath := range filePaths {
         er := fmt.Errorf("info: reading and working on file from startup folder %v", filePath)
-        s.errorKernel.logInfo(er, s.configuration)
+        s.errorKernel.logInfo(er)

         // Read the content of each file.
         readBytes, err := func(filePath string) ([]byte, error) {
@@ -225,7 +225,7 @@ func (s *server) readFolder() {
         err := os.MkdirAll(s.configuration.ReadFolder, 0770)
         if err != nil {
             er := fmt.Errorf("error: failed to create readfolder folder: %v", err)
-            s.errorKernel.logError(er, s.configuration)
+            s.errorKernel.logError(er)
             os.Exit(1)
         }
     }
@@ -233,7 +233,7 @@ func (s *server) readFolder() {
     watcher, err := fsnotify.NewWatcher()
     if err != nil {
         er := fmt.Errorf("main: failed to create new logWatcher: %v", err)
-        s.errorKernel.logError(er, s.configuration)
+        s.errorKernel.logError(er)
         os.Exit(1)
     }

@@ -248,7 +248,7 @@ func (s *server) readFolder() {

                 if event.Op == fsnotify.Create || event.Op == fsnotify.Chmod {
                     er := fmt.Errorf("readFolder: got file event, name: %v, op: %v", event.Name, event.Op)
-                    s.errorKernel.logDebug(er, s.configuration)
+                    s.errorKernel.logDebug(er)

                     func() {
                         fh, err := os.Open(event.Name)
@@ -290,7 +290,7 @@ func (s *server) readFolder() {
                     }

                     er := fmt.Errorf("readFolder: read new message in readfolder and putting it on s.samToSendCh: %#v", sams)
-                    s.errorKernel.logDebug(er, s.configuration)
+                    s.errorKernel.logDebug(er)

                     // Send the SAM struct to be picked up by the ring buffer.
                     s.samToSendCh <- sams
@@ -321,7 +321,7 @@ func (s *server) readFolder() {
     err = watcher.Add(s.configuration.ReadFolder)
     if err != nil {
         er := fmt.Errorf("startLogsWatcher: failed to add watcher: %v", err)
-        s.errorKernel.logError(er, s.configuration)
+        s.errorKernel.logError(er)
         os.Exit(1)
     }
 }
@@ -334,7 +334,7 @@ func (s *server) readTCPListener() {
     ln, err := net.Listen("tcp", s.configuration.TCPListener)
     if err != nil {
         er := fmt.Errorf("error: readTCPListener: failed to start tcp listener: %v", err)
-        s.errorKernel.logError(er, s.configuration)
+        s.errorKernel.logError(er)
         os.Exit(1)
     }
     // Loop, and wait for new connections.
@@ -441,7 +441,7 @@ func (s *server) readHttpListener() {
     n, err := net.Listen("tcp", s.configuration.HTTPListener)
     if err != nil {
         er := fmt.Errorf("error: startMetrics: failed to open prometheus listen port: %v", err)
-        s.errorKernel.logError(er, s.configuration)
+        s.errorKernel.logError(er)
         os.Exit(1)
     }
     mux := http.NewServeMux()
@@ -450,7 +450,7 @@ func (s *server) readHttpListener() {
     err = http.Serve(n, mux)
     if err != nil {
         er := fmt.Errorf("error: startMetrics: failed to start http.Serve: %v", err)
-        s.errorKernel.logError(er, s.configuration)
+        s.errorKernel.logError(er)
         os.Exit(1)
     }
 }()
node_auth.go (32 changed lines)
@@ -55,7 +55,7 @@ func newNodeAuth(configuration *Configuration, errorKernel *errorKernel) *nodeAu
     err := n.loadSigningKeys()
     if err != nil {
         er := fmt.Errorf("newNodeAuth: %v", err)
-        errorKernel.logError(er, configuration)
+        errorKernel.logError(er)
         os.Exit(1)
     }

@@ -97,7 +97,7 @@ func newNodeAcl(c *Configuration, errorKernel *errorKernel) *nodeAcl {
     err := n.loadFromFile()
     if err != nil {
         er := fmt.Errorf("error: newNodeAcl: loading acl's from file: %v", err)
-        errorKernel.logError(er, c)
+        errorKernel.logError(er)
         // os.Exit(1)
     }

@@ -112,7 +112,7 @@ func (n *nodeAcl) loadFromFile() error {
         // Just logging the error since it is not crucial that a key file is missing,
         // since a new one will be created on the next update.
         er := fmt.Errorf("acl: loadFromFile: no acl file found at %v", n.filePath)
-        n.errorKernel.logDebug(er, n.configuration)
+        n.errorKernel.logDebug(er)
         return nil
     }

@@ -135,7 +135,7 @@ func (n *nodeAcl) loadFromFile() error {
     }

     er := fmt.Errorf("nodeAcl: loadFromFile: Loaded existing acl's from file: %v", n.aclAndHash.Hash)
-    n.errorKernel.logDebug(er, n.configuration)
+    n.errorKernel.logDebug(er)

     return nil
 }
@@ -203,7 +203,7 @@ func newPublicKeys(c *Configuration, errorKernel *errorKernel) *publicKeys {
     err := p.loadFromFile()
     if err != nil {
         er := fmt.Errorf("error: newPublicKeys: loading public keys from file: %v", err)
-        errorKernel.logError(er, c)
+        errorKernel.logError(er)
         // os.Exit(1)
     }

@@ -218,7 +218,7 @@ func (p *publicKeys) loadFromFile() error {
         // Just logging the error since it is not crucial that a key file is missing,
         // since a new one will be created on the next update.
         er := fmt.Errorf("no public keys file found at %v, new file will be created", p.filePath)
-        p.errorKernel.logInfo(er, p.configuration)
+        p.errorKernel.logInfo(er)
         return nil
     }

@@ -241,7 +241,7 @@ func (p *publicKeys) loadFromFile() error {
     }

     er := fmt.Errorf("nodeAuth: loadFromFile: Loaded existing keys from file: %v", p.keysAndHash.Hash)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     return nil
 }
@@ -322,7 +322,7 @@ func (n *nodeAuth) loadSigningKeys() error {
     n.SignPrivateKey = priv

     er := fmt.Errorf("info: no signing keys found, generating new keys")
-    n.errorKernel.logInfo(er, n.configuration)
+    n.errorKernel.logInfo(er)

     // We got the new generated keys now, so we can return.
     return nil
@@ -394,7 +394,7 @@ func (n *nodeAuth) verifySignature(m Message) bool {
     // NB: Only enable signature checking for REQCliCommand for now.
     if m.Method != REQCliCommand {
         er := fmt.Errorf("verifySignature: not REQCliCommand and will not do signature check, method: %v", m.Method)
-        n.errorKernel.logInfo(er, n.configuration)
+        n.errorKernel.logInfo(er)
         return true
     }

@@ -417,11 +417,11 @@ func (n *nodeAuth) verifySignature(m Message) bool {
     }()

     if err != nil {
-        n.errorKernel.logError(err, n.configuration)
+        n.errorKernel.logError(err)
     }

     er := fmt.Errorf("info: verifySignature, result: %v, fromNode: %v, method: %v", ok, m.FromNode, m.Method)
-    n.errorKernel.logInfo(er, n.configuration)
+    n.errorKernel.logInfo(er)

     return ok
 }
@@ -431,7 +431,7 @@ func (n *nodeAuth) verifyAcl(m Message) bool {
     // NB: Only enable acl checking for REQCliCommand for now.
     if m.Method != REQCliCommand {
         er := fmt.Errorf("verifyAcl: not REQCliCommand and will not do acl check, method: %v", m.Method)
-        n.errorKernel.logInfo(er, n.configuration)
+        n.errorKernel.logInfo(er)
         return true
     }

@@ -444,26 +444,26 @@ func (n *nodeAuth) verifyAcl(m Message) bool {
     cmdMap, ok := n.nodeAcl.aclAndHash.Acl[m.FromNode]
     if !ok {
         er := fmt.Errorf("verifyAcl: The fromNode=%v was not found in the acl", m.FromNode)
-        n.errorKernel.logError(er, n.configuration)
+        n.errorKernel.logError(er)
         return false
     }

     _, ok = cmdMap[command("*")]
     if ok {
         er := fmt.Errorf("verifyAcl: The acl said \"*\", all commands allowed from node=%v", m.FromNode)
-        n.errorKernel.logInfo(er, n.configuration)
+        n.errorKernel.logInfo(er)
         return true
     }

     _, ok = cmdMap[command(argsStringified)]
     if !ok {
         er := fmt.Errorf("verifyAcl: The command=%v was NOT FOUND in the acl", m.MethodArgs)
-        n.errorKernel.logInfo(er, n.configuration)
+        n.errorKernel.logInfo(er)
         return false
     }

     er := fmt.Errorf("verifyAcl: the command was FOUND in the acl, verifyAcl, result: %v, fromNode: %v, method: %v", ok, m.FromNode, m.Method)
-    n.errorKernel.logInfo(er, n.configuration)
+    n.errorKernel.logInfo(er)

     return true
 }
process.go (72 changed lines)
@@ -205,7 +205,7 @@ func (p process) spawnWorker() {
     p.processes.active.mu.Unlock()

     er := fmt.Errorf("successfully started process: %v", p.processName)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)
 }

 func (p process) startPublisher() {
@@ -257,7 +257,7 @@ func (p process) startSubscriber() {
         if err != nil {
             er := fmt.Errorf("error: spawnWorker: got <-ctx.Done, but unable to unsubscribe natsSubscription failed: %v", err)
             p.errorKernel.errSend(p, Message{}, er, logError)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
         }

         p.processes.active.mu.Lock()
@@ -265,7 +265,7 @@ func (p process) startSubscriber() {
         p.processes.active.mu.Unlock()

         er := fmt.Errorf("successfully stopped process: %v", p.processName)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

     }()
 }
@@ -299,7 +299,7 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He
     }

     er := fmt.Errorf("info: preparing to send nats message with subject %v, id: %v", msg.Subject, message.ID)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     var err error

@@ -311,7 +311,7 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He
         err := natsConn.PublishMsg(msg)
         if err != nil {
             er := fmt.Errorf("error: nats publish for message with subject failed: %v", err)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
             return ErrACKSubscribeRetry
         }
         p.metrics.promNatsDeliveredTotal.Inc()
@@ -346,7 +346,7 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He
     }

     er := fmt.Errorf("send attempt:%v, max retries: %v, ack timeout: %v, message.ID: %v, method: %v, toNode: %v", retryAttempts, message.Retries, message.ACKTimeout, message.ID, message.Method, message.ToNode)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     // The SubscribeSync used in the subscriber, will get messages that
     // are sent after it started subscribing.
@@ -357,14 +357,14 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He
         err := subReply.Unsubscribe()
         if err != nil {
             er := fmt.Errorf("error: nats SubscribeSync: failed when unsubscribing for ACK: %v", err)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
         }
     }()
     if err != nil {
         er := fmt.Errorf("error: nats SubscribeSync failed: failed to create reply message for subject: %v, error: %v", msg.Reply, err)
         // sendErrorLogMessage(p.toRingbufferCh, node(p.node), er)
         er = fmt.Errorf("%v, waiting equal to RetryWait %ds before retrying", er, message.RetryWait)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         time.Sleep(time.Second * time.Duration(message.RetryWait))

@@ -376,7 +376,7 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He
     if err != nil {
         er := fmt.Errorf("error: nats publish failed: %v, waiting equal to RetryWait of %ds before retrying", err, message.RetryWait)
         // sendErrorLogMessage(p.toRingbufferCh, node(p.node), er)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         time.Sleep(time.Second * time.Duration(message.RetryWait))

         return ErrACKSubscribeRetry
@@ -394,7 +394,7 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He
     switch {
     case err == nats.ErrNoResponders || err == nats.ErrTimeout:
         er := fmt.Errorf("error: ack receive failed: waiting for %v seconds before retrying: subject=%v: %v", message.RetryWait, p.subject.name(), err)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         time.Sleep(time.Second * time.Duration(message.RetryWait))
         p.metrics.promNatsMessagesMissedACKsTotal.Inc()
@@ -403,13 +403,13 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He

     case err == nats.ErrBadSubscription || err == nats.ErrConnectionClosed:
         er := fmt.Errorf("error: ack receive failed: conneciton closed or bad subscription, will not retry message: subject=%v: %v", p.subject.name(), err)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         return er

     default:
         er := fmt.Errorf("error: ack receive failed: the error was not defined, check if nats client have been updated with new error values, and update ctrl to handle the new error type: subject=%v: %v", p.subject.name(), err)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         return er
     }
@@ -434,7 +434,7 @@ func (p process) messageDeliverNats(natsMsgPayload []byte, natsMsgHeader nats.He
     p.metrics.promNatsDeliveredTotal.Inc()

     er = fmt.Errorf("info: sent nats message with subject %v, id: %v", msg.Subject, message.ID)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     return
 }
@@ -462,7 +462,7 @@ func (p process) messageSubscriberHandler(natsConn *nats.Conn, thisNode string,
     // If debugging is enabled, print the source node name of the nats messages received.
     if val, ok := msg.Header["fromNode"]; ok {
         er := fmt.Errorf("info: nats message received from %v, with subject %v ", val, subject)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
     }

     // If compression is used, decompress it to get the gob data. If
@@ -567,7 +567,7 @@ func (p process) messageSubscriberHandler(natsConn *nats.Conn, thisNode string,
     // Check for ACK type Event.
     case message.ACKTimeout >= 1:
         er := fmt.Errorf("subscriberHandler: received ACK message: %v, from: %v, id:%v", message.Method, message.FromNode, message.ID)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         // When spawning sub processes we can directly assign handlers to the process upon
         // creation. We here check if a handler is already assigned, and if it is nil, we
         // lookup and find the correct handler to use if available.
@@ -592,7 +592,7 @@ func (p process) messageSubscriberHandler(natsConn *nats.Conn, thisNode string,

     case message.ACKTimeout < 1:
         er := fmt.Errorf("subscriberHandler: received NACK message: %v, from: %v, id:%v", message.Method, message.FromNode, message.ID)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         // When spawning sub processes we can directly assign handlers to the process upon
         // creation. We here check if a handler is already assigned, and if it is nil, we
         // lookup and find the correct handler to use if available.
@@ -635,7 +635,7 @@ func (p process) callHandler(message Message, thisNode string) []byte {
             // ACL/Signature checking failed.
             er := fmt.Errorf("error: subscriberHandler: ACL were verified not-OK, doing nothing")
             p.errorKernel.errSend(p, message, er, logWarning)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
         }
     }()

@@ -665,7 +665,7 @@ func executeHandler(p process, message Message, thisNode string) {
     if p.configuration.EnableAclCheck {
         // Either ACL were verified OK, or ACL/Signature check was not enabled, so we call the handler.
         er := fmt.Errorf("info: subscriberHandler: Either ACL were verified OK, or ACL/Signature check was not enabled, so we call the handler: %v", true)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
     }

     switch {
@@ -676,7 +676,7 @@ func executeHandler(p process, message Message, thisNode string) {
         if err != nil {
             er := fmt.Errorf("error: subscriberHandler: handler method failed: %v", err)
             p.errorKernel.errSend(p, message, er, logError)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
         }
     }()

@@ -700,7 +700,7 @@ func executeHandler(p process, message Message, thisNode string) {
         if err != nil {
             er := fmt.Errorf("error: subscriberHandler: handler method failed: %v", err)
             p.errorKernel.errSend(p, message, er, logError)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
         }
     }()

@@ -708,7 +708,7 @@ func executeHandler(p process, message Message, thisNode string) {
     select {
     case <-p.ctx.Done():
         er := fmt.Errorf("info: subscriberHandler: proc ctx done: toNode=%v, fromNode=%v, method=%v, methodArgs=%v", message.ToNode, message.FromNode, message.Method, message.MethodArgs)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         //cancel()
         return
@@ -716,7 +716,7 @@ func executeHandler(p process, message Message, thisNode string) {
         // Total time reached. End the process.
         //cancel()
         er := fmt.Errorf("info: subscriberHandler: schedule totalTime done: toNode=%v, fromNode=%v, method=%v, methodArgs=%v", message.ToNode, message.FromNode, message.Method, message.MethodArgs)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         return

@@ -726,7 +726,7 @@ func executeHandler(p process, message Message, thisNode string) {
         if err != nil {
             er := fmt.Errorf("error: subscriberHandler: handler method failed: %v", err)
             p.errorKernel.errSend(p, message, er, logError)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
         }
     }()
 }
@@ -754,7 +754,7 @@ func (p process) verifySigOrAclFlag(message Message) bool {
     sigOK := p.nodeAuth.verifySignature(message)

     er := fmt.Errorf("verifySigOrAclFlag: verify acl/sig: Only signature checking enabled, ALLOW the message if sigOK, sigOK=%v, method %v", sigOK, message.Method)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     if sigOK {
         doHandler = true
@@ -766,7 +766,7 @@ func (p process) verifySigOrAclFlag(message Message) bool {
     aclOK := p.nodeAuth.verifyAcl(message)

     er := fmt.Errorf("verifySigOrAclFlag: verify acl/sig:both signature and acl checking enabled, allow the message if sigOK and aclOK, or method is not REQCliCommand, sigOK=%v, aclOK=%v, method=%v", sigOK, aclOK, message.Method)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     if sigOK && aclOK {
         doHandler = true
@@ -776,7 +776,7 @@ func (p process) verifySigOrAclFlag(message Message) bool {
     // of doHandler=false, so the handler is not done.
     default:
         er := fmt.Errorf("verifySigOrAclFlag: verify acl/sig: None of the verify flags matched, not doing handler for message, method=%v", message.Method)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
     }

     return doHandler
@@ -796,7 +796,7 @@ func (p process) subscribeMessages() *nats.Subscription {
     })
     if err != nil {
         er := fmt.Errorf("error: Subscribe failed: %v", err)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         return nil
     }

@@ -819,7 +819,7 @@ func (p process) publishMessages(natsConn *nats.Conn) {
     enc, err := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1))
     if err != nil {
         er := fmt.Errorf("error: zstd new encoder failed: %v", err)
-        p.errorKernel.logError(er, p.configuration)
+        p.errorKernel.logError(er)
         os.Exit(1)
     }
     zEnc = enc
@@ -843,7 +843,7 @@ func (p process) publishMessages(natsConn *nats.Conn) {
     if p.isLongRunningPublisher {
         er := fmt.Errorf("info: isLongRunningPublisher, will not cancel publisher: %v", p.processName)
         //sendErrorLogMessage(p.toRingbufferCh, Node(p.node), er)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         continue
     }
@@ -858,7 +858,7 @@ func (p process) publishMessages(natsConn *nats.Conn) {

     er := fmt.Errorf("info: canceled publisher: %v", p.processName)
     //sendErrorLogMessage(p.toRingbufferCh, Node(p.node), er)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     return
     //}
@@ -873,7 +873,7 @@ func (p process) publishMessages(natsConn *nats.Conn) {
     case <-p.ctx.Done():
         er := fmt.Errorf("info: canceling publisher: %v", p.processName)
         //sendErrorLogMessage(p.toRingbufferCh, Node(p.node), er)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         return
     }
 }
@@ -903,7 +903,7 @@ func (p process) publishAMessage(m Message, zEnc *zstd.Encoder, once *sync.Once,
     b, err := cbor.Marshal(m)
     if err != nil {
         er := fmt.Errorf("error: messageDeliverNats: cbor encode message failed: %v", err)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         return
     }

@@ -916,7 +916,7 @@ func (p process) publishAMessage(m Message, zEnc *zstd.Encoder, once *sync.Once,
     err := gobEnc.Encode(m)
     if err != nil {
         er := fmt.Errorf("error: messageDeliverNats: gob encode message failed: %v", err)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         return
     }

@@ -955,7 +955,7 @@ func (p process) publishAMessage(m Message, zEnc *zstd.Encoder, once *sync.Once,
     _, err := gzipW.Write(natsMsgPayloadSerialized)
     if err != nil {
         er := fmt.Errorf("error: failed to write gzip: %v", err)
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)
         return
     }

@@ -971,11 +971,11 @@ func (p process) publishAMessage(m Message, zEnc *zstd.Encoder, once *sync.Once,
     default: // no compression
         // Allways log the error to console.
         er := fmt.Errorf("error: publishing: compression type not defined, setting default to no compression")
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         // We only wan't to send the error message to errorCentral once.
         once.Do(func() {
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
         })

         // No compression, so we just assign the value of the serialized
processes.go (20 changed lines)
@@ -137,7 +137,7 @@ func (p *processes) Start(proc process) {
         case <-ctx.Done():
             er := fmt.Errorf("info: stopped handleFunc for: subscriber %v", proc.subject.name())
             // sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
             return nil
         }

@@ -202,7 +202,7 @@ func (p *processes) Start(proc process) {
         case <-ctx.Done():
             er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
             // sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
             return nil
         }
     }
@@ -225,7 +225,7 @@ func (p *processes) Start(proc process) {

         proc.nodeAuth.publicKeys.mu.Lock()
         er := fmt.Errorf(" ----> publisher REQKeysRequestUpdate: sending our current hash: %v", []byte(proc.nodeAuth.publicKeys.keysAndHash.Hash[:]))
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         m := Message{
             FileName: "publickeysget.log",
@@ -252,7 +252,7 @@ func (p *processes) Start(proc process) {
         case <-ctx.Done():
             er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
             // sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
             return nil
         }
     }
@@ -273,7 +273,7 @@ func (p *processes) Start(proc process) {

         proc.nodeAuth.nodeAcl.mu.Lock()
         er := fmt.Errorf(" ----> publisher REQAclRequestUpdate: sending our current hash: %v", []byte(proc.nodeAuth.nodeAcl.aclAndHash.Hash[:]))
-        p.errorKernel.logDebug(er, p.configuration)
+        p.errorKernel.logDebug(er)

         m := Message{
             FileName: "aclRequestUpdate.log",
@@ -301,7 +301,7 @@ func (p *processes) Start(proc process) {
         case <-ctx.Done():
             er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
             // sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
-            p.errorKernel.logDebug(er, p.configuration)
+            p.errorKernel.logDebug(er)
             return nil
         }
     }
@@ -379,7 +379,7 @@ func newStartup(server *server) *startup {
 // and a procFunc as it's input arguments. If a procFunc os not needed, use the value nil.
 func (s *startup) subscriber(p process, m Method, pf func(ctx context.Context, procFuncCh chan Message) error) {
     er := fmt.Errorf("starting %v subscriber: %#v", m, p.node)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     var sub Subject
     switch {
@@ -398,7 +398,7 @@ func (s *startup) subscriber(p process, m Method, pf func(ctx context.Context, p

 func (s *startup) publisher(p process, m Method, pf func(ctx context.Context, procFuncCh chan Message) error) {
     er := fmt.Errorf("starting %v publisher: %#v", m, p.node)
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)
     sub := newSubject(m, string(p.node))
     proc := newProcess(p.ctx, p.processes.server, sub, processKindPublisher)
     proc.procFunc = pf
@@ -412,14 +412,14 @@ func (s *startup) publisher(p process, m Method, pf func(ctx context.Context, pr
 // Print the content of the processes map.
 func (p *processes) printProcessesMap() {
     er := fmt.Errorf("output of processes map : ")
-    p.errorKernel.logDebug(er, p.configuration)
+    p.errorKernel.logDebug(er)

     {
         p.active.mu.Lock()

         for pName, proc := range p.active.procNames {
             er := fmt.Errorf("info: proc - pub/sub: %v, procName in map: %v , id: %v, subject: %v", proc.processKind, pName, proc.processID, proc.subject.name())
-            proc.errorKernel.logDebug(er, proc.configuration)
+            proc.errorKernel.logDebug(er)
         }

         p.metrics.promProcessesTotal.Set(float64(len(p.active.procNames)))
@@ -12,8 +12,8 @@ import (

 // Handler to get all acl's from a central server.
 func methodREQAclRequestUpdate(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- subscriber methodREQAclRequestUpdate received from: %v, hash data = %v", message.FromNode, message.Data)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- subscriber methodREQAclRequestUpdate received from: %v, hash data = %v", message.FromNode, message.Data)
+    proc.errorKernel.logDebug(er)

     // fmt.Printf("\n --- subscriber methodREQAclRequestUpdate: the message brought to handler : %+v\n", message)

@@ -45,22 +45,22 @@ func methodREQAclRequestUpdate(proc process, message Message, node string) ([]by
     defer proc.centralAuth.accessLists.schemaGenerated.mu.Unlock()

     er := fmt.Errorf("info: subscriber methodREQAclRequestUpdate: got acl hash from NODE=%v, HASH data =%v", message.FromNode, message.Data)
-    proc.errorKernel.logDebug(er, proc.configuration)
+    proc.errorKernel.logDebug(er)

     // Check if the received hash is the same as the one currently active,
     // If it is the same we exit the handler immediately.
     hash32 := proc.centralAuth.accessLists.schemaGenerated.GeneratedACLsMap[message.FromNode].Hash
     hash := hash32[:]
     er = fmt.Errorf("info: subscriber methodREQAclRequestUpdate: the central acl hash=%v", hash32)
-    proc.errorKernel.logDebug(er, proc.configuration)
+    proc.errorKernel.logDebug(er)
     if bytes.Equal(hash, message.Data) {
         er := fmt.Errorf("info: subscriber methodREQAclRequestUpdate: NODE AND CENTRAL HAVE EQUAL ACL HASH, NOTHING TO DO, EXITING HANDLER")
-        proc.errorKernel.logDebug(er, proc.configuration)
+        proc.errorKernel.logDebug(er)
         return
     }

     er = fmt.Errorf("info: subscriber methodREQAclRequestUpdate: NODE AND CENTRAL HAD NOT EQUAL ACL, PREPARING TO SEND NEW VERSION OF Acl")
-    proc.errorKernel.logDebug(er, proc.configuration)
+    proc.errorKernel.logDebug(er)

     // Generate JSON for Message.Data

@@ -77,7 +77,7 @@ func methodREQAclRequestUpdate(proc process, message Message, node string) ([]by
     }

     er = fmt.Errorf("----> subscriber methodREQAclRequestUpdate: SENDING ACL'S TO NODE=%v, serializedAndHash=%+v", message.FromNode, hdh)
-    proc.errorKernel.logDebug(er, proc.configuration)
+    proc.errorKernel.logDebug(er)

     newReplyMessage(proc, message, js)
 }()
@@ -93,7 +93,7 @@ func methodREQAclRequestUpdate(proc process, message Message, node string) ([]by
 // Handler to receive the acls from a central server.
 func methodREQAclDeliverUpdate(proc process, message Message, node string) ([]byte, error) {
     inf := fmt.Errorf("<--- subscriber methodREQAclDeliverUpdate received from: %v, containing: %v", message.FromNode, message.Data)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    proc.errorKernel.logDebug(inf)

     // fmt.Printf("\n --- subscriber methodREQAclRequestUpdate: the message received on handler : %+v\n\n", message)

@@ -169,8 +169,8 @@ func methodREQAclDeliverUpdate(proc process, message Message, node string) ([]by
 // ---

 func methodREQAclAddCommand(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclAddCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclAddCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -231,8 +231,8 @@ func methodREQAclAddCommand(proc process, message Message, node string) ([]byte,
 // ---

 func methodREQAclDeleteCommand(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclDeleteCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclDeleteCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -293,8 +293,8 @@ func methodREQAclDeleteCommand(proc process, message Message, node string) ([]by
 // ---

 func methodREQAclDeleteSource(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclDeleteSource received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclDeleteSource received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -354,8 +354,8 @@ func methodREQAclDeleteSource(proc process, message Message, node string) ([]byt
 // ---

 func methodREQAclGroupNodesAddNode(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclGroupNodesAddNode received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclGroupNodesAddNode received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -415,8 +415,8 @@ func methodREQAclGroupNodesAddNode(proc process, message Message, node string) (
 // ---

 func methodREQAclGroupNodesDeleteNode(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclGroupNodesDeleteNode received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclGroupNodesDeleteNode received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -476,8 +476,8 @@ func methodREQAclGroupNodesDeleteNode(proc process, message Message, node string
 // ---

 func methodREQAclGroupNodesDeleteGroup(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclGroupNodesDeleteGroup received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclGroupNodesDeleteGroup received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -536,8 +536,8 @@ func methodREQAclGroupNodesDeleteGroup(proc process, message Message, node strin
 // ---

 func methodREQAclGroupCommandsAddCommand(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclGroupCommandsAddCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclGroupCommandsAddCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -597,8 +597,8 @@ func methodREQAclGroupCommandsAddCommand(proc process, message Message, node str
 // ---

 func methodREQAclGroupCommandsDeleteCommand(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclGroupCommandsDeleteCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclGroupCommandsDeleteCommand received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -658,8 +658,8 @@ func methodREQAclGroupCommandsDeleteCommand(proc process, message Message, node
 // ---

 func methodREQAclGroupCommandsDeleteGroup(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclGroupCommandsDeleteGroup received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclGroupCommandsDeleteGroup received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -718,8 +718,8 @@ func methodREQAclGroupCommandsDeleteGroup(proc process, message Message, node st
 // ---

 func methodREQAclExport(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclExport received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclExport received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -774,8 +774,8 @@ func methodREQAclExport(proc process, message Message, node string) ([]byte, err
 // ---

 func methodREQAclImport(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- methodREQAclImport received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- methodREQAclImport received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     proc.processes.wg.Add(1)
     go func() {
@@ -13,8 +13,8 @@ import (
 // return the output of the command run back to the calling publisher
 // as a new message.
 func methodREQCliCommand(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- CLICommandREQUEST received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- CLICommandREQUEST received from: %v, containing: %v", message.FromNode, message.MethodArgs)
+    proc.errorKernel.logDebug(er)

     msgForErrors := message
     msgForErrors.FileName = msgForErrors.FileName + ".error"
@@ -128,8 +128,8 @@ func methodREQCliCommand(proc process, message Message, node string) ([]byte, er
 // longer time and you want to send the output of the command continually
 // back as it is generated, and not just when the command is finished.
 func methodREQCliCommandCont(proc process, message Message, node string) ([]byte, error) {
-    inf := fmt.Errorf("<--- CLInCommandCont REQUEST received from: %v, containing: %v", message.FromNode, message.Data)
-    proc.errorKernel.logDebug(inf, proc.configuration)
+    er := fmt.Errorf("<--- CLInCommandCont REQUEST received from: %v, containing: %v", message.FromNode, message.Data)
+    proc.errorKernel.logDebug(er)

     msgForErrors := message
     msgForErrors.FileName = msgForErrors.FileName + ".error"
@ -123,7 +123,7 @@ func methodREQCopySrc(proc process, message Message, node string) ([]byte, error
|
|||
folderPermission := uint64(0755)
|
||||
|
||||
er := fmt.Errorf("info: before switch: FolderPermission defined in message for socket: %04o", folderPermission)
|
||||
proc.errorKernel.logDebug(er, proc.configuration)
|
||||
proc.errorKernel.logDebug(er)
|
||||
// Verify and check the methodArgs
|
||||
|
||||
if len(message.MethodArgs) < 3 {
|
||||
|
@ -158,11 +158,11 @@ func methodREQCopySrc(proc process, message Message, node string) ([]byte, error
|
|||
folderPermission, err = strconv.ParseUint(message.MethodArgs[5], 8, 32)
|
||||
if err != nil {
|
||||
er := fmt.Errorf("methodREQCopySrc: failed to parse uint, %v", err)
|
||||
proc.errorKernel.logError(er, proc.configuration)
|
||||
proc.errorKernel.logError(er)
|
||||
}
|
||||
|
||||
er := fmt.Errorf("info: FolderPermission defined in message for socket: %v, converted = %v", message.MethodArgs[5], folderPermission)
|
||||
proc.errorKernel.logDebug(er, proc.configuration)
|
||||
proc.errorKernel.logDebug(er)
|
||||
if err != nil {
|
||||
er := fmt.Errorf("error: methodREQCopySrc: unable to convert folderPermission into int value: %v", err)
|
||||
proc.errorKernel.errSend(proc, message, er, logWarning)
|
||||
|
@ -200,7 +200,7 @@ func methodREQCopySrc(proc process, message Message, node string) ([]byte, error
|
|||
if err != nil {
|
||||
// errCh <- fmt.Errorf("error: methodREQCopySrc: failed to open file: %v, %v", SrcFilePath, err)
|
||||
er := fmt.Errorf("error: copySrcSubProcFunc: failed to stat file: %v", err)
|
||||
proc.errorKernel.logDebug(er, proc.configuration)
|
||||
proc.errorKernel.logDebug(er)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -342,7 +342,7 @@ func methodREQCopyDst(proc process, message Message, node string) ([]byte, error
|
|||
|
||||
if ok {
|
||||
er := fmt.Errorf("methodREQCopyDst: subprocesses already existed, will not start another subscriber for %v", pn)
|
||||
proc.errorKernel.logDebug(er, proc.configuration)
|
||||
proc.errorKernel.logDebug(er)
|
||||
|
||||
// HERE!!!
|
||||
// If the process name already existed we return here before any
|
||||
|
@@ -384,10 +384,10 @@ func copySrcSubHandler() func(process, Message, string) ([]byte, error) {
 		select {
 		case <-proc.ctx.Done():
 			er := fmt.Errorf(" * copySrcHandler ended: %v", proc.processName)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)
 		case proc.procFuncCh <- message:
 			er := fmt.Errorf("copySrcHandler: passing message over to procFunc: %v", proc.processName)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)
 		}

 		return nil, nil
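The src and dst sub-handlers in this and the next hunk share one pattern: forward the message to the procFunc channel, but bail out cleanly if the process context is cancelled first. A self-contained sketch of that pattern; Message and the channel here are stand-ins for the real ctrl types:

package main

import (
	"context"
	"fmt"
	"time"
)

// Message is a stand-in for the real ctrl Message type.
type Message struct{ Data string }

// forward mirrors the handler shape in the diff: deliver the message,
// or give up when the context ends before a reader shows up.
func forward(ctx context.Context, procFuncCh chan<- Message, m Message) {
	select {
	case <-ctx.Done():
		fmt.Println("handler ended before delivery")
	case procFuncCh <- m:
		fmt.Println("passing message over to procFunc")
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	procFuncCh := make(chan Message) // unbuffered: a rendezvous with the procFunc
	go forward(ctx, procFuncCh, Message{Data: "chunk 1"})

	fmt.Println("procFunc received:", (<-procFuncCh).Data)
}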
@@ -402,10 +402,10 @@ func copyDstSubHandler() func(process, Message, string) ([]byte, error) {
 		select {
 		case <-proc.ctx.Done():
 			er := fmt.Errorf(" * copyDstHandler ended: %v", proc.processName)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)
 		case proc.procFuncCh <- message:
 			er := fmt.Errorf("copyDstHandler: passing message over to procFunc: %v", proc.processName)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)
 		}

 		return nil, nil
@@ -486,7 +486,7 @@ func copySrcSubProcFunc(proc process, cia copyInitialData, cancel context.Cancel
 		select {
 		case <-ctx.Done():
 			er := fmt.Errorf(" info: canceling copySrcProcFunc : %v", proc.processName)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)
 			return nil

 		// Pick up the message received by the copySrcSubHandler.
@@ -723,7 +723,7 @@ func copyDstSubProcFunc(proc process, cia copyInitialData, message Message, canc
 		select {
 		case <-ctx.Done():
 			er := fmt.Errorf(" * copyDstProcFunc ended: %v", proc.processName)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)
 			return nil
 		case message := <-procFuncCh:
 			var csa copySubData
@@ -739,7 +739,7 @@ func copyDstSubProcFunc(proc process, cia copyInitialData, message Message, canc
 			hash := sha256.Sum256(csa.CopyData)
 			if hash != csa.Hash {
 				er := fmt.Errorf("error: copyDstSubProcFunc: hash of received message is not correct for: %v", cia.DstMethod)
-				proc.errorKernel.logDebug(er, proc.configuration)
+				proc.errorKernel.logDebug(er)

 				csa.CopyStatus = copyResendLast
 			}
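The hash check above works because sha256.Sum256 returns a fixed-size [32]byte array, which Go lets you compare directly with != when both sides are arrays. A standalone illustration (the payload is made up):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	data := []byte("chunk payload")

	// Sum256 returns [32]byte, so two hashes compare directly with ==/!=,
	// as the chunk verification in the diff does.
	want := sha256.Sum256(data)
	got := sha256.Sum256(data)

	if got != want {
		fmt.Println("hash mismatch: ask the source to resend the last chunk")
		return
	}
	fmt.Printf("hash ok: %x\n", got)
}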
@@ -844,7 +844,7 @@ func copyDstSubProcFunc(proc process, cia copyInitialData, message Message, canc

 			// HERE:
 			er := fmt.Errorf("info: Before creating folder: cia.FolderPermission: %04o", cia.FolderPermission)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)

 			if _, err := os.Stat(cia.DstDir); os.IsNotExist(err) {
 				// TODO: Add option to set permission here ???
@@ -853,7 +853,7 @@ func copyDstSubProcFunc(proc process, cia copyInitialData, message Message, canc
 					return fmt.Errorf("error: failed to create destination directory for file copying %v: %v", cia.DstDir, err)
 				}
 				er := fmt.Errorf("info: Created folder: with cia.FolderPermission: %04o", cia.FolderPermission)
-				proc.errorKernel.logDebug(er, proc.configuration)
+				proc.errorKernel.logDebug(er)
 			}

 			// Rename the file so we get a backup.
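The create-if-missing logic around these two hunks follows a common Go pattern: stat the destination first, and only create the directory tree when the stat error reports absence. A minimal sketch; the path and mode here are illustrative:

package main

import (
	"fmt"
	"os"
)

func main() {
	dstDir := "/tmp/ctrl-copy-example" // illustrative destination

	// Stat first; only create the tree when the error says it is absent.
	if _, err := os.Stat(dstDir); os.IsNotExist(err) {
		if err := os.MkdirAll(dstDir, 0o755); err != nil {
			fmt.Println("failed to create destination directory:", err)
			return
		}
		fmt.Printf("created folder with permission %04o\n", 0o755)
	}
}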
@@ -952,7 +952,7 @@ func copyDstSubProcFunc(proc process, cia copyInitialData, message Message, canc
 			}

 			er = fmt.Errorf("info: copy: successfully wrote all split chunk files into file=%v", filePath)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)

 			// Signal back to src that we are done, so it can cancel the process.
 			{
@@ -26,7 +26,7 @@ func reqWriteFileOrSocket(isAppend bool, proc process, message Message) error {
 	fi, err := os.Stat(file)
 	if err == nil && !os.IsNotExist(err) {
 		er := fmt.Errorf("info: reqWriteFileOrSocket: failed to stat file, but will continue: %v", folderTree)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 	}

 	if fi != nil && fi.Mode().Type() == fs.ModeSocket {
@@ -56,7 +56,7 @@ func reqWriteFileOrSocket(isAppend bool, proc process, message Message) error {
 		}

 		er := fmt.Errorf("info: Creating subscribers data folder at %v", folderTree)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 	}

 	var fileFlag int
@@ -115,7 +115,7 @@ func methodREQToFile(proc process, message Message, node string) ([]byte, error)
 // as a new message.
 func methodREQTailFile(proc process, message Message, node string) ([]byte, error) {
 	inf := fmt.Errorf("<--- TailFile REQUEST received from: %v, containing: %v", message.FromNode, message.Data)
-	proc.errorKernel.logDebug(inf, proc.configuration)
+	proc.errorKernel.logDebug(inf)

 	proc.processes.wg.Add(1)
 	go func() {
@@ -11,8 +11,8 @@ import (

 // handler to do a Http Get.
 func methodREQHttpGet(proc process, message Message, node string) ([]byte, error) {
-	inf := fmt.Errorf("<--- REQHttpGet received from: %v, containing: %v", message.FromNode, message.Data)
-	proc.errorKernel.logDebug(inf, proc.configuration)
+	er := fmt.Errorf("<--- REQHttpGet received from: %v, containing: %v", message.FromNode, message.Data)
+	proc.errorKernel.logDebug(er)

 	msgForErrors := message
 	msgForErrors.FileName = msgForErrors.FileName + ".error"
@@ -112,8 +112,8 @@ func methodREQHttpGet(proc process, message Message, node string) ([]byte, error
 // handler to do a Http Get Scheduled.
 // The second element of the MethodArgs slice holds the timer defined in seconds.
 func methodREQHttpGetScheduled(proc process, message Message, node string) ([]byte, error) {
-	inf := fmt.Errorf("<--- REQHttpGetScheduled received from: %v, containing: %v", message.FromNode, message.Data)
-	proc.errorKernel.logDebug(inf, proc.configuration)
+	er := fmt.Errorf("<--- REQHttpGetScheduled received from: %v, containing: %v", message.FromNode, message.Data)
+	proc.errorKernel.logDebug(er)

 	proc.processes.wg.Add(1)
 	go func() {
@@ -86,21 +86,21 @@ func methodREQKeysRequestUpdate(proc process, message Message, node string) ([]b
 		defer proc.centralAuth.pki.nodesAcked.mu.Unlock()

 		er := fmt.Errorf(" <---- methodREQKeysRequestUpdate: received hash from NODE=%v, HASH=%v", message.FromNode, message.Data)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)

 		// Check if the received hash is the same as the one currently active,
 		if bytes.Equal(proc.centralAuth.pki.nodesAcked.keysAndHash.Hash[:], message.Data) {
 			er := fmt.Errorf("info: methodREQKeysRequestUpdate: node %v and central have equal keys, nothing to do, exiting key update handler", message.FromNode)
 			// proc.errorKernel.infoSend(proc, message, er)
-			proc.errorKernel.logDebug(er, proc.configuration)
+			proc.errorKernel.logDebug(er)
 			return
 		}

 		er = fmt.Errorf("info: methodREQKeysRequestUpdate: node %v and central had not equal keys, preparing to send new version of keys", message.FromNode)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)

 		er = fmt.Errorf("info: methodREQKeysRequestUpdate: marshalling new keys and hash to send: map=%v, hash=%v", proc.centralAuth.pki.nodesAcked.keysAndHash.Keys, proc.centralAuth.pki.nodesAcked.keysAndHash.Hash)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)

 		b, err := json.Marshal(proc.centralAuth.pki.nodesAcked.keysAndHash)
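The equality check above compares a stored [32]byte hash against the raw []byte carried in the message, which is why the array is sliced with [:] before bytes.Equal. A small self-contained version with sample data only:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	// The central store keeps the hash as a [32]byte array, while the
	// message carries []byte, so the array is sliced before comparing.
	stored := sha256.Sum256([]byte("keys v1"))
	received := stored[:] // what a node would send back

	if bytes.Equal(stored[:], received) {
		fmt.Println("node and central have equal keys, nothing to do")
	} else {
		fmt.Println("keys differ, sending updated key map")
	}
}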
@@ -109,7 +109,7 @@ func methodREQKeysRequestUpdate(proc process, message Message, node string) ([]b
 			proc.errorKernel.errSend(proc, message, er, logWarning)
 		}
 		er = fmt.Errorf("----> methodREQKeysRequestUpdate: SENDING KEYS TO NODE=%v", message.FromNode)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 		newReplyMessage(proc, message, b)
 	}()
 }
@@ -163,7 +163,7 @@ func methodREQKeysDeliverUpdate(proc process, message Message, node string) ([]b
 		}

 		er := fmt.Errorf("<---- REQKeysDeliverUpdate: after unmarshal, nodeAuth keysAndhash contains: %+v", keysAndHash)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)

 		// If the received map was empty we also want to delete all the locally stored keys,
 		// else we copy the marshaled keysAndHash we received from central into our map.
@@ -292,7 +292,7 @@ func methodREQKeysAllow(proc process, message Message, node string) ([]byte, err
 func pushKeys(proc process, message Message, nodes []Node) error {
 	er := fmt.Errorf("info: beginning of pushKeys, nodes=%v", nodes)
 	var knh []byte
-	proc.errorKernel.logDebug(er, proc.configuration)
+	proc.errorKernel.logDebug(er)

 	err := func() error {
 		proc.centralAuth.pki.nodesAcked.mu.Lock()
@@ -319,7 +319,7 @@ func pushKeys(proc process, message Message, nodes []Node) error {
 	// For all nodes that is not ack'ed we try to send an update once.
 	for n := range proc.centralAuth.pki.nodeNotAckedPublicKeys.KeyMap {
 		er := fmt.Errorf("info: node to send REQKeysDeliverUpdate to:%v ", n)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 		msg := Message{
 			ToNode: n,
 			Method: REQKeysDeliverUpdate,
@@ -337,7 +337,7 @@ func pushKeys(proc process, message Message, nodes []Node) error {
 		proc.toRingbufferCh <- []subjectAndMessage{sam}

 		er = fmt.Errorf("----> methodREQKeysAllow: SENDING KEYS TO NODE=%v", message.FromNode)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 	}

 	// Create the data payload of the current allowed keys.
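The loop pattern in the pushKeys hunks is: build a Message per node and queue it on the ring-buffer channel. A stripped-down sketch with stand-in types; the real code wraps messages in subjectAndMessage and carries more fields, so this is illustrative only:

package main

import "fmt"

// Stand-ins for the real ctrl types; field names follow the diff.
type Node string
type Method string

const REQKeysDeliverUpdate Method = "REQKeysDeliverUpdate"

type Message struct {
	ToNode Node
	Method Method
	Data   []byte
}

func main() {
	// Buffered stand-in for proc.toRingbufferCh.
	toRingbufferCh := make(chan []Message, 2)

	// Queue one key-update message per node, as the loops in the diff do.
	for _, n := range []Node{"ship1", "ship2"} {
		msg := Message{ToNode: n, Method: REQKeysDeliverUpdate, Data: []byte("keys+hash")}
		toRingbufferCh <- []Message{msg}
	}
	close(toRingbufferCh)

	for batch := range toRingbufferCh {
		fmt.Println("queued key update for node:", batch[0].ToNode)
	}
}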
@@ -365,7 +365,7 @@ func pushKeys(proc process, message Message, nodes []Node) error {
 	// For all nodes that is ack'ed we try to send an update once.
 	for n := range nodeMap {
 		er := fmt.Errorf("info: node to send REQKeysDeliverUpdate to:%v ", n)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 		msg := Message{
 			ToNode: n,
 			Method: REQKeysDeliverUpdate,
@@ -384,7 +384,7 @@ func pushKeys(proc process, message Message, nodes []Node) error {
 		proc.toRingbufferCh <- []subjectAndMessage{sam}

 		er = fmt.Errorf("----> methodREQKeysAllow: sending keys update to node=%v", message.FromNode)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 	}

 	return nil
@@ -393,7 +393,7 @@ func pushKeys(proc process, message Message, nodes []Node) error {

 func methodREQKeysDelete(proc process, message Message, node string) ([]byte, error) {
 	inf := fmt.Errorf("<--- methodREQKeysDelete received from: %v, containing: %v", message.FromNode, message.MethodArgs)
-	proc.errorKernel.logDebug(inf, proc.configuration)
+	proc.errorKernel.logDebug(inf)

 	proc.processes.wg.Add(1)
 	go func() {
@@ -423,13 +423,13 @@ func methodREQKeysDelete(proc process, message Message, node string) ([]byte, er

 		proc.centralAuth.deletePublicKeys(proc, message, message.MethodArgs)
 		er := fmt.Errorf("info: Deleted public keys: %v", message.MethodArgs)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)

 		// All new elements are now added, and we can create a new hash
 		// representing the current keys in the allowed map.
 		proc.centralAuth.updateHash(proc, message)
 		er = fmt.Errorf(" * DEBUG updated hash for public keys")
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)

 		var nodes []Node
@@ -25,7 +25,7 @@ func methodREQHello(proc process, message Message, node string) ([]byte, error)
 		}

 		er := fmt.Errorf("info: Creating subscribers data folder at %v", folderTree)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 	}

 	// Open file and write data.
@@ -74,7 +74,7 @@ func methodREQErrorLog(proc process, message Message, node string) ([]byte, erro
 		}

 		er := fmt.Errorf("info: Creating subscribers data folder at %v", folderTree)
-		proc.errorKernel.logDebug(er, proc.configuration)
+		proc.errorKernel.logDebug(er)
 	}

 	// Open file and write data.
server.go (16 changes)
@@ -242,7 +242,7 @@ func NewServer(configuration *Configuration, version string) (*server, error) {
 		}

 		er := fmt.Errorf("info: creating subscribers data folder at %v", configuration.SubscribersDataFolder)
-		s.errorKernel.logDebug(er, s.configuration)
+		s.errorKernel.logDebug(er)
 	}

 	return &s, nil
@@ -538,7 +538,7 @@ func (s *server) routeMessagesToProcess() {
 			}
 			if ok && ctxCanceled {
 				er := fmt.Errorf(" ** routeMessagesToProcess: context is already ended for process %v, will not try to reuse existing publisher, deleting it, and creating a new publisher !!! ", proc.processName)
-				s.errorKernel.logDebug(er, s.configuration)
+				s.errorKernel.logDebug(er)
 				delete(proc.processes.active.procNames, proc.processName)
 				return false
 			}
@@ -549,10 +549,10 @@ func (s *server) routeMessagesToProcess() {
 			select {
 			case proc.subject.messageCh <- m:
 				er := fmt.Errorf(" ** routeMessagesToProcess: passed message: %v to existing process: %v", m.ID, proc.processName)
-				s.errorKernel.logDebug(er, s.configuration)
+				s.errorKernel.logDebug(er)
 			case <-proc.ctx.Done():
 				er := fmt.Errorf(" ** routeMessagesToProcess: got ctx.done for process %v", proc.processName)
-				s.errorKernel.logDebug(er, s.configuration)
+				s.errorKernel.logDebug(er)
 			}

 			return true
@@ -568,7 +568,7 @@ func (s *server) routeMessagesToProcess() {
 			}

 			er := fmt.Errorf("info: processNewMessages: did not find publisher process for subject %v, starting new", subjName)
-			s.errorKernel.logDebug(er, s.configuration)
+			s.errorKernel.logDebug(er)

 			sub := newSubject(sam.Subject.Method, sam.Subject.ToNode)
 			var proc process
@@ -581,17 +581,17 @@ func (s *server) routeMessagesToProcess() {

 			proc.spawnWorker()
 			er = fmt.Errorf("info: processNewMessages: new process started, subject: %v, processID: %v", subjName, proc.processID)
-			s.errorKernel.logDebug(er, s.configuration)
+			s.errorKernel.logDebug(er)

 			// Now when the process is spawned we continue,
 			// and send the message to that new process.
 			select {
 			case proc.subject.messageCh <- m:
 				er := fmt.Errorf(" ** routeMessagesToProcess: passed message: %v to the new process: %v", m.ID, proc.processName)
-				s.errorKernel.logDebug(er, s.configuration)
+				s.errorKernel.logDebug(er)
 			case <-proc.ctx.Done():
 				er := fmt.Errorf(" ** routeMessagesToProcess: got ctx.done for process %v", proc.processName)
-				s.errorKernel.logDebug(er, s.configuration)
+				s.errorKernel.logDebug(er)
 			}
 		}(sam)
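The routing logic in these server.go hunks boils down to: look up a publisher process by name, reuse it when its context is still live, otherwise spawn a new one and hand the message over. A toy version of that lookup-or-spawn flow; the names and types are illustrative, not the real ctrl structures:

package main

import "fmt"

// process is a toy stand-in for a running publisher.
type process struct{ name string }

type server struct {
	procNames map[string]*process
}

// route hands the subject to an existing publisher when one is registered,
// otherwise it spawns and registers a new one, mirroring the diff's flow.
func (s *server) route(subjName string) *process {
	if proc, ok := s.procNames[subjName]; ok {
		fmt.Println("passed message to existing process:", proc.name)
		return proc
	}
	proc := &process{name: subjName}
	s.procNames[subjName] = proc
	fmt.Println("did not find publisher process for subject, starting new:", subjName)
	return proc
}

func main() {
	s := &server{procNames: map[string]*process{}}
	s.route("ship1.REQCliCommand")
	s.route("ship1.REQCliCommand") // reuses the existing publisher
}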