mirror of https://github.com/postmannen/ctrl.git
added initial acl distribution, but it is not working yet
parent aafa11aa25
commit e7ae3d893b
5 changed files with 160 additions and 12 deletions
@@ -280,7 +280,7 @@ func (a *accessLists) generateACLsForAllNodes() error {
     // cbor marshal the data of the ACL map to store for the host node.
     cb, err := cbor.Marshal(m)
     if err != nil {
-        er := fmt.Errorf("error: failed to generate json for host in schemaGenerated: %v", err)
+        er := fmt.Errorf("error: failed to generate cbor for host in schemaGenerated: %v", err)
         log.Printf("%v\n", er)
         os.Exit(1)
     }
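For context on the hunk above: the map is serialized with github.com/fxamacker/cbor/v2, which is why the error text now says cbor instead of json. Below is a minimal, self-contained sketch of that serialization step, written so the error is returned to the caller instead of calling os.Exit(1); the type and function names (node, command, serializeACL) are illustrative and not the project's.

package main

import (
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

// Illustrative stand-ins for the project's node and command types.
type node string
type command string

// serializeACL cbor-encodes a per-node ACL map. Returning the error keeps the
// decision about aborting with the caller, rather than exiting inside the helper.
func serializeACL(m map[node]map[command]struct{}) ([]byte, error) {
    cb, err := cbor.Marshal(m)
    if err != nil {
        return nil, fmt.Errorf("error: failed to generate cbor for host in schemaGenerated: %v", err)
    }
    return cb, nil
}

func main() {
    cb, err := serializeACL(map[node]map[command]struct{}{"node1": {"ls -l": {}}})
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("serialized %d bytes of acl data\n", len(cb))
}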
processes.go (58 changed lines)
@@ -175,6 +175,9 @@ func (p *processes) Start(proc process) {
 
     if proc.configuration.StartPubREQKeysRequestUpdate {
         proc.startup.pubREQKeysRequestUpdate(proc)
+        // TODO: Putting the acl publisher here.
+        // Maybe we should also change the name of the configuration flag to something auth related?
+        proc.startup.pubREQAclRequestUpdate(proc)
     }
 
     if proc.configuration.IsCentralAuth {

@@ -375,6 +378,61 @@ func (s startup) pubREQKeysRequestUpdate(p process) {
     go proc.spawnWorker()
 }
 
+// pubREQAclRequestUpdate defines the startup of a publisher that will send REQAclRequestUpdate
+// to the central server to ask for the ACLs, and have them delivered back with a request
+// of type REQAclDeliverUpdate.
+func (s startup) pubREQAclRequestUpdate(p process) {
+    log.Printf("Starting REQAclRequestUpdate Publisher: %#v\n", p.node)
+
+    sub := newSubject(REQAclRequestUpdate, p.configuration.CentralNodeName)
+    proc := newProcess(p.ctx, s.server, sub, processKindPublisher, nil)
+
+    // Define the procFunc to be used for the process.
+    proc.procFunc = func(ctx context.Context, procFuncCh chan Message) error {
+        ticker := time.NewTicker(time.Second * time.Duration(p.configuration.PublicKeysGetInterval))
+        for {
+
+            // TODO: We could send the hash of the currently stored ACL,
+            // so the subscriber at central would know if it should send
+            // an update with new ACLs back.
+
+            proc.nodeAuth.nodeAcl.mu.Lock()
+            fmt.Printf("\n ----> REQAclRequestUpdate: sending our current hash: %v\n\n", []byte(proc.nodeAuth.nodeAcl.aclAndHash.Hash[:]))
+
+            m := Message{
+                FileName:    "aclRequestUpdate.log",
+                Directory:   "aclRequestUpdate",
+                ToNode:      Node(p.configuration.CentralNodeName),
+                FromNode:    Node(p.node),
+                Data:        []byte(proc.nodeAuth.nodeAcl.aclAndHash.Hash[:]),
+                Method:      REQAclRequestUpdate,
+                ReplyMethod: REQAclDeliverUpdate,
+                ACKTimeout:  proc.configuration.DefaultMessageTimeout,
+                Retries:     1,
+            }
+            proc.nodeAuth.nodeAcl.mu.Unlock()
+
+            sam, err := newSubjectAndMessage(m)
+            if err != nil {
+                // In theory the system should drop the message before it reaches here.
+                p.errorKernel.errSend(p, m, err)
+                log.Printf("error: ProcessesStart: %v\n", err)
+            }
+            proc.toRingbufferCh <- []subjectAndMessage{sam}
+
+            select {
+            case <-ticker.C:
+            case <-ctx.Done():
+                er := fmt.Errorf("info: stopped handleFunc for: publisher %v", proc.subject.name())
+                // sendErrorLogMessage(proc.toRingbufferCh, proc.node, er)
+                log.Printf("%v\n", er)
+                return nil
+            }
+        }
+    }
+    go proc.spawnWorker()
+}
+
 func (s startup) subREQKeysRequestUpdate(p process) {
     log.Printf("Starting Public keys request update subscriber: %#v\n", p.node)
     sub := newSubject(REQKeysRequestUpdate, string(p.node))
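The procFunc added above follows a ticker-driven publisher pattern: build a message carrying the node's current ACL hash, queue it, then block on either the next tick or context cancellation. A stripped-down sketch of that loop, with the ring-buffer send replaced by a plain callback; requestLoop, currentHash and send are illustrative names, not the project's API.

package main

import (
    "context"
    "fmt"
    "time"
)

// requestLoop periodically emits the current hash and stops when ctx is cancelled.
// send stands in for queueing a message onto the ring buffer.
func requestLoop(ctx context.Context, interval time.Duration, currentHash func() []byte, send func(hash []byte)) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        send(currentHash())

        select {
        case <-ticker.C:
        case <-ctx.Done():
            fmt.Println("info: stopped publisher loop")
            return
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3500*time.Millisecond)
    defer cancel()

    hash := []byte{0x01, 0x02}
    requestLoop(ctx, time.Second,
        func() []byte { return hash },
        func(h []byte) { fmt.Printf("sending request with hash %x\n", h) },
    )
}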
@@ -133,6 +133,8 @@ const (
 
     // REQAclRequestUpdate will get all node acl's from central if an update is available.
     REQAclRequestUpdate Method = "REQAclRequestUpdate"
+    // REQAclDeliverUpdate will deliver the acl from central to a node.
+    REQAclDeliverUpdate Method = "REQAclDeliverUpdate"
 
     // REQAclAddCommand
     REQAclAddCommand = "REQAclAddCommand"

@@ -255,6 +257,9 @@ func (m Method) GetMethodsAvailable() MethodsAvailable {
     REQAclRequestUpdate: methodREQAclRequestUpdate{
         event: EventNACK,
     },
+    REQAclDeliverUpdate: methodREQAclDeliverUpdate{
+        event: EventNACK,
+    },
 
     REQAclAddCommand: methodREQAclAddCommand{
         event: EventACK,
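These hunks register the new REQAclDeliverUpdate method name next to a handler value carrying its event kind. A minimal sketch of that registration pattern, using simplified stand-in types; the real Method, Event, and handler interface in the project differ from what is shown here.

package main

import "fmt"

// Simplified stand-ins for the real Method, Event and handler types.
type Method string
type Event string

const (
    EventACK  Event = "EventACK"
    EventNACK Event = "EventNACK"
)

type methodHandler interface {
    getKind() Event
    handle(data []byte) ([]byte, error)
}

type aclDeliverUpdate struct{ event Event }

func (m aclDeliverUpdate) getKind() Event { return m.event }
func (m aclDeliverUpdate) handle(data []byte) ([]byte, error) {
    fmt.Printf("received %d bytes of acl data\n", len(data))
    return nil, nil
}

func main() {
    // Each request method name maps to a handler value carrying its event kind,
    // mirroring the GetMethodsAvailable map extended in the hunk above.
    methods := map[Method]methodHandler{
        "REQAclDeliverUpdate": aclDeliverUpdate{event: EventNACK},
    }

    h := methods["REQAclDeliverUpdate"]
    fmt.Println("kind:", h.getKind())
    h.handle([]byte("example"))
}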
@@ -2,7 +2,10 @@ package steward
 
 import (
     "bytes"
+    "encoding/json"
     "fmt"
+
+    "github.com/fxamacker/cbor/v2"
 )
 
 // ----
@@ -44,7 +47,7 @@ func (m methodREQAclRequestUpdate) handler(proc process, message Message, node s
     proc.centralAuth.accessLists.schemaGenerated.mu.Lock()
     defer proc.centralAuth.accessLists.schemaGenerated.mu.Unlock()
 
-    fmt.Printf(" <---- methodREQKeysRequestUpdate: received acl hash from NODE=%v, HASH=%v\n", message.FromNode, message.Data)
+    fmt.Printf(" <---- methodREQAclRequestUpdate: received acl hash from NODE=%v, HASH=%v\n", message.FromNode, message.Data)
 
     // Check if the received hash is the same as the one currently active,
     // If it is the same we exit the handler immediately.

@@ -55,11 +58,17 @@ func (m methodREQAclRequestUpdate) handler(proc process, message Message, node s
         return
     }
 
-    fmt.Printf("\n ------------ NODE AND CENTRAL WERE NOT EQUAL ACL, PREPARING TO SEND NEW VERSION OF KEYS\n\n")
+    fmt.Printf("\n ------------ NODE AND CENTRAL DID NOT HAVE EQUAL ACLS, PREPARING TO SEND NEW VERSION OF ACL\n\n")
 
-    fmt.Printf("\n ----> methodREQKeysRequestUpdate: SENDING ACL'S TO NODE=%v\n", message.FromNode)
+    // TODO: PUT THE BELOW LINE BACK AGAIN WHEN DONE TESTING!
+    // newReplyMessage(proc, message, proc.centralAuth.accessLists.schemaGenerated.GeneratedACLsMap[message.FromNode].Data)
+    fmt.Printf("\n ----> methodREQAclRequestUpdate: SENDING ACL'S TO NODE=%v\n", message.FromNode)
+
+    js, err := json.Marshal(proc.centralAuth.accessLists.schemaGenerated.GeneratedACLsMap[message.FromNode])
+    if err != nil {
+        er := fmt.Errorf("error: REQAclRequestUpdate: json marshal failed: %v, message: %v", err, message)
+        proc.errorKernel.errSend(proc, message, er)
+    }
+
+    newReplyMessage(proc, message, js)
     }()
 }
 }()
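The handler above returns early when the hash sent by the node equals the hash of the ACLs central holds for it, and only marshals and replies when they differ. A small sketch of that comparison follows; the choice of SHA-256 over the cbor-encoded ACL map is an assumption made for illustration, not something this diff confirms.

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

// aclChanged reports whether the hash a node sent differs from the hash of the
// ACL data currently held for it. SHA-256 is assumed here for illustration.
func aclChanged(storedACL []byte, nodeHash []byte) bool {
    sum := sha256.Sum256(storedACL)
    return !bytes.Equal(sum[:], nodeHash)
}

func main() {
    acl := map[string]map[string]struct{}{"node1": {"ls -l": {}}}
    cb, err := cbor.Marshal(acl)
    if err != nil {
        panic(err)
    }

    current := sha256.Sum256(cb)

    fmt.Println("changed with up-to-date hash:", aclChanged(cb, current[:])) // false
    fmt.Println("changed with stale hash:", aclChanged(cb, []byte("stale"))) // true
}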
@@ -68,6 +77,84 @@ func (m methodREQAclRequestUpdate) handler(proc process, message Message, node s
     return nil, nil
 }
 
+// ----
+
+type methodREQAclDeliverUpdate struct {
+    event Event
+}
+
+func (m methodREQAclDeliverUpdate) getKind() Event {
+    return m.event
+}
+
+// Handler to receive the acls from a central server.
+func (m methodREQAclDeliverUpdate) handler(proc process, message Message, node string) ([]byte, error) {
+    // Get a context with the timeout specified in message.MethodTimeout.
+    ctx, _ := getContextForMethodTimeout(proc.ctx, message)
+
+    proc.processes.wg.Add(1)
+    go func() {
+        defer proc.processes.wg.Done()
+        outCh := make(chan []byte)
+
+        go func() {
+            // Normally we would do some logic here, where the result is passed to outCh when done,
+            // so we can split up the working logic, and f.ex. sending a reply logic.
+            // In this case this go func and the below select is not needed, but keeping it so the
+            // structure is the same as the other handlers.
+            select {
+            case <-ctx.Done():
+
+            case outCh <- []byte{}:
+            }
+        }()
+
+        select {
+        // case proc.toRingbufferCh <- []subjectAndMessage{sam}:
+        case <-ctx.Done():
+        case <-outCh:
+
+            proc.nodeAuth.nodeAcl.mu.Lock()
+
+            hdh := HostACLsSerializedWithHash{}
+
+            err := json.Unmarshal(message.Data, &hdh)
+            if err != nil {
+                er := fmt.Errorf("error: REQAclDeliverUpdate: json unmarshal failed: %v, message: %v", err, message)
+                proc.errorKernel.errSend(proc, message, er)
+            }
+
+            mapOfFromNodeCommands := make(map[Node]map[command]struct{})
+            err = cbor.Unmarshal(hdh.Data, &mapOfFromNodeCommands)
+            if err != nil {
+                er := fmt.Errorf("error: REQAclDeliverUpdate: cbor unmarshal failed: %v, message: %v", err, message)
+                proc.errorKernel.errSend(proc, message, er)
+            }
+
+            proc.nodeAuth.nodeAcl.aclAndHash.Hash = hdh.Hash
+            proc.nodeAuth.nodeAcl.aclAndHash.Acl = mapOfFromNodeCommands
+
+            fmt.Printf("\n <---- REQAclDeliverUpdate: after unmarshal, nodeAuth aclAndHash contains: %+v\n\n", proc.nodeAuth.nodeAcl.aclAndHash)
+
+            proc.nodeAuth.nodeAcl.mu.Unlock()
+
+            err = proc.nodeAuth.nodeAcl.saveToFile()
+            if err != nil {
+                er := fmt.Errorf("error: REQAclDeliverUpdate: save to file failed: %v, message: %v", err, message)
+                proc.errorKernel.errSend(proc, message, er)
+            }
+
+            // Prepare and queue for sending a new message with the output
+            // of the action executed.
+            // newReplyMessage(proc, message, out)
+        }
+    }()
+
+    // Send back an ACK message.
+    // ackMsg := []byte("confirmed from: " + node + ": " + fmt.Sprint(message.ID))
+    return nil, nil
+}
+
 // ---
 
 type methodREQAclAddCommand struct {
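The delivery handler above decodes in two layers: message.Data is JSON-unmarshalled into a struct holding a hash plus cbor-encoded ACL bytes (HostACLsSerializedWithHash), and those bytes are then cbor-unmarshalled into the per-node command map. A self-contained sketch of that round trip; the envelope type, its field types, and the node/command types here are illustrative and only mirror what the diff suggests.

package main

import (
    "encoding/json"
    "fmt"

    "github.com/fxamacker/cbor/v2"
)

type node string
type command string

// envelope mirrors the shape suggested by HostACLsSerializedWithHash:
// cbor-encoded ACL bytes plus a hash over them. Field types are assumed.
type envelope struct {
    Data []byte
    Hash [32]byte
}

func main() {
    acls := map[node]map[command]struct{}{
        "node2": {"ls -l": {}, "cat /etc/hostname": {}},
    }

    // Central side: cbor-encode the ACL map and wrap it in a JSON envelope.
    // The hash is left at its zero value in this sketch.
    cb, err := cbor.Marshal(acls)
    if err != nil {
        panic(err)
    }
    js, err := json.Marshal(envelope{Data: cb})
    if err != nil {
        panic(err)
    }

    // Node side: unwrap the JSON envelope, then cbor-decode the ACL map.
    var env envelope
    if err := json.Unmarshal(js, &env); err != nil {
        panic(err)
    }
    decoded := make(map[node]map[command]struct{})
    if err := cbor.Unmarshal(env.Data, &decoded); err != nil {
        panic(err)
    }

    fmt.Printf("decoded acls: %+v\n", decoded)
}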
@@ -117,11 +117,11 @@ func (m methodREQKeysRequestUpdate) handler(proc process, message Message, node
 
     // Check if the received hash is the same as the one currently active,
     if bytes.Equal(proc.centralAuth.pki.nodesAcked.keysAndHash.Hash[:], message.Data) {
-        fmt.Printf("\n ------------ NODE AND CENTRAL ARE EQUAL, NOTHING TO DO, EXITING HANDLER\n\n")
+        fmt.Printf("\n ------------ NODE AND CENTRAL HAVE EQUAL KEYS, NOTHING TO DO, EXITING HANDLER\n\n")
         return
     }
 
-    fmt.Printf("\n ------------ NODE AND CENTRAL WERE NOT EQUAL, PREPARING TO SEND NEW VERSION OF KEYS\n\n")
+    fmt.Printf("\n ------------ NODE AND CENTRAL DID NOT HAVE EQUAL KEYS, PREPARING TO SEND NEW VERSION OF KEYS\n\n")
 
     fmt.Printf(" * methodREQKeysRequestUpdate: marshalling new keys and hash to send: map=%v, hash=%v\n\n", proc.centralAuth.pki.nodesAcked.keysAndHash.Keys, proc.centralAuth.pki.nodesAcked.keysAndHash.Hash)

@@ -195,10 +195,8 @@ func (m methodREQKeysDeliverUpdate) handler(proc process, message Message, node
         proc.errorKernel.errSend(proc, message, er)
     }
 
-    // TODO TOMORROW: The hash is not sent with the requests to get public keys, and
-    // the reason is that the hash is not stored on the nodes ?
-    // Idea: We need to also persist the hash on the receiving nodes. We can then load
-    // that key upon startup, and send it along when we do a public keys get.
+    // We need to also persist the hash on the receiving nodes. We can then load
+    // that key upon startup.
 
     err = proc.nodeAuth.publicKeys.saveToFile()
     if err != nil {
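The comment rewritten above notes that the hash should also be persisted on the receiving nodes so it can be loaded at startup and sent with the next request. A rough sketch of what such persistence could look like; the keysAndHash type, the JSON file format, and the path are assumptions for illustration and not the project's saveToFile implementation.

package main

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"
)

// keysAndHash pairs the received public keys with the hash they were delivered
// with, so both survive a restart. Names and layout are illustrative.
type keysAndHash struct {
    Keys map[string][]byte
    Hash [32]byte
}

func saveToFile(path string, kh keysAndHash) error {
    b, err := json.Marshal(kh)
    if err != nil {
        return fmt.Errorf("error: marshal of keys and hash failed: %v", err)
    }
    return os.WriteFile(path, b, 0600)
}

func loadFromFile(path string) (keysAndHash, error) {
    var kh keysAndHash
    b, err := os.ReadFile(path)
    if err != nil {
        return kh, fmt.Errorf("error: read of keys and hash file failed: %v", err)
    }
    err = json.Unmarshal(b, &kh)
    return kh, err
}

func main() {
    path := filepath.Join(os.TempDir(), "publickeys.json")

    kh := keysAndHash{Keys: map[string][]byte{"node1": []byte("pubkey")}}
    if err := saveToFile(path, kh); err != nil {
        fmt.Println(err)
        return
    }

    loaded, err := loadFromFile(path)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("loaded %d keys, hash=%x\n", len(loaded.Keys), loaded.Hash)
}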