// Notes:
package steward

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/nats-io/nats.go"
)

var mu sync.Mutex

type messageType int

// TODO: Figure out if it makes sense to have these types at all.
// It might make more sense to implement these as two
// individual subjects.
const (
	// shellCommand, a command that will just wait for an
	// ack, where none of the output of the command is
	// delivered back in the reply ack message.
	// The message should contain the unique ID of the
	// command.
	commandReturnOutput messageType = iota
	// shellCommand, wait for and return the output
	// of the command in the ACK message. This means
	// that the command should be executed immediately,
	// and that we should get the confirmation that it
	// was successful or not.
	eventReturnAck messageType = iota
	// eventCommand, just wait for the ACK that the
	// message is received. What action happens on the
	// receiving side is up to the receiver to decide.
)

type Message struct {
	// The Unique ID of the message
	ID int
	// The actual data in the message
	// TODO: Change this to a slice instead...or maybe use an
	// interface type here to handle several data types ?
	Data []string
	// The type of the message being sent
	MessageType messageType
}

// server is the structure that will hold the state about spawned
// processes on a local instance.
type server struct {
	natsConn *nats.Conn
	// TODO: sessions should probably hold a slice/map of processes ?
	processes map[node]process
	// The last processID created
	lastProcessID int
	nodeName      string
}

// NewServer will prepare and return a server type.
func NewServer(brokerAddress string, nodeName string) (*server, error) {
	conn, err := nats.Connect(brokerAddress, nil)
	if err != nil {
		return nil, fmt.Errorf("error: nats.Connect failed: %v", err)
	}

	s := &server{
		nodeName:  nodeName,
		natsConn:  conn,
		processes: make(map[node]process),
	}

	// Check the error channel of each registered process, and print
	// any errors received. The processes map is guarded by the package
	// level mutex since spawnProcess also writes to it.
	go func() {
		for {
			mu.Lock()
			for k := range s.processes {
				select {
				case e := <-s.processes[k].errorCh:
					fmt.Printf("*** %v\n", e)
				default:
				}
			}
			mu.Unlock()

			time.Sleep(time.Millisecond * 100)
		}
	}()

	return s, nil
}
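
// A minimal usage sketch, not part of this package: assuming a separate
// main package imports steward, the server could be wired up roughly as
// below. The import path "example.com/steward", the broker address
// "localhost:4222", and the node name "btdev1" are placeholders, not
// values taken from this file.
//
//	package main
//
//	import (
//		"log"
//
//		"example.com/steward"
//	)
//
//	func main() {
//		s, err := steward.NewServer("localhost:4222", "btdev1")
//		if err != nil {
//			log.Fatalf("error: failed to create server: %v\n", err)
//		}
//
//		s.RunPublisher()
//	}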

func (s *server) RunPublisher() {
	// Start the checking of files for input messages.
	fileReadCh := make(chan []byte)
	go getMessagesFromFile("./", "inmsg.txt", fileReadCh)

	// TODO: For now we just print the content of the files read.
	// Replace this with a broker function that will know how to
	// send it on to the correct publisher.
	go func() {
		for b := range fileReadCh {
			// Check if there is new content read from the file input.
			fmt.Printf("received: %s\n", b)
		}
	}()

	proc := s.prepareNewProcess("btship1")
	// fmt.Printf("*** %#v\n", proc)
	go s.spawnProcess(proc)

	proc = s.prepareNewProcess("btship2")
	// fmt.Printf("*** %#v\n", proc)
	go s.spawnProcess(proc)

	select {}
}
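
// getMessagesFromFile is called in RunPublisher above but is not defined in
// this file, so it is presumably defined elsewhere in the package. A rough
// sketch of what it might look like, assuming it simply polls the file and
// sends its content on the channel (it would need the "os", "path/filepath",
// "log", and "time" imports, and a real implementation would also have to
// track which content has already been delivered):
//
//	func getMessagesFromFile(directory string, fileName string, fileContentCh chan []byte) {
//		filePath := filepath.Join(directory, fileName)
//
//		for {
//			b, err := os.ReadFile(filePath)
//			if err != nil {
//				log.Printf("error: failed to read %v: %v\n", filePath, err)
//			} else if len(b) > 0 {
//				fileContentCh <- b
//			}
//
//			time.Sleep(time.Second * 2)
//		}
//	}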

type node string

// process represents the communication with one individual host.
type process struct {
	messageID int
	subject   string
	// Put a node here to be able to know the node a process is at.
	// NB: Might not be needed later on.
	node node
	// The processID for the current process
	processID int
	// errorCh is used to report errors from a process.
	// NB: Implemented as a string channel for testing.
	errorCh chan string
	// subject
}

type subject struct {
	// node, the name of the node
	node string
	// messageType, command/event
	messageType string
	// method, what is this message doing, e.g. shellcommand, syslog, etc.
	method string
	// description, a short description of the subject
	description string
}
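
// The subject struct above mirrors the NATS subject layout used in
// messageDeliver further down: <node>.<message type>.<method>, with replies
// prefixed by "reply.". A hypothetical helper like the one sketched below
// could build those strings from the struct instead of hand-formatting them;
// subjectName and replySubjectName are assumptions, not existing functions
// in this package.
//
//	func (s subject) subjectName() string {
//		return fmt.Sprintf("%s.%s.%s", s.node, s.messageType, s.method)
//	}
//
//	func (s subject) replySubjectName() string {
//		return "reply." + s.subjectName()
//	}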

// prepareNewProcess will set the provided values and the default
// values for a process.
func (s *server) prepareNewProcess(nodeName string) process {
	// Create the initial configuration for a session communicating with one host.
	s.lastProcessID++
	proc := process{
		messageID: 0,
		node:      node(nodeName),
		processID: s.lastProcessID,
		errorCh:   make(chan string),
	}

	return proc
}

// spawnProcess will spawn a new process. It will give the process
// the next available ID, and also add the process to the processes
// map.
func (s *server) spawnProcess(proc process) {
	mu.Lock()
	s.processes[proc.node] = proc
	mu.Unlock()

	// Loop creating one new message every second to simulate getting new
	// messages to deliver.
	//
	// TODO: I think it makes most sense that the messages would come to
	// here from some other message-pickup-process, and that process will
	// give the message to the correct publisher process. A channel that
	// is listened on in the for loop below could be used to receive the
	// messages from the message-pickup-process. A sketch of such a
	// channel driven loop is given after this function.
	for {
		m := getMessageToDeliver()

		mu.Lock()
		m.ID = s.processes[proc.node].messageID
		mu.Unlock()

		messageDeliver(proc, m, s.natsConn)

		// Increment the counter for the next message to be sent.
		proc.messageID++
		mu.Lock()
		s.processes[proc.node] = proc
		mu.Unlock()

		time.Sleep(time.Second * 1)

		// Simulate that we get an error, and that we can send that
		// out of the process and receive it in another thread.
		proc.errorCh <- "received an error from process: " + fmt.Sprintf("%v", proc.processID)

		//fmt.Printf("%#v\n", s.processes[proc.node])
	}
}
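
// Regarding the TODO in spawnProcess above: if messages were handed over on a
// channel by a separate message-pickup process, the publishing loop could look
// roughly like the sketch below. The deliverCh field is hypothetical and does
// not exist on the process struct above.
//
//	// deliverCh chan Message  // hypothetical field on the process struct
//
//	for m := range proc.deliverCh {
//		mu.Lock()
//		m.ID = s.processes[proc.node].messageID
//		mu.Unlock()
//
//		messageDeliver(proc, m, s.natsConn)
//
//		// Increment the counter for the next message to be sent.
//		proc.messageID++
//		mu.Lock()
//		s.processes[proc.node] = proc
//		mu.Unlock()
//	}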

// getMessageToDeliver will pick up the next message to be created.
// TODO: read this from a local file or REST or....?
func getMessageToDeliver() Message {
	return Message{
		Data:        []string{"uname", "-a"},
		MessageType: eventReturnAck,
	}
}

func messageDeliver(proc process, message Message, natsConn *nats.Conn) {
	for {
		dataPayload, err := gobEncodePayload(message)
		if err != nil {
			log.Printf("error: gobEncodePayload failed: %v\n", err)
		}

		msg := &nats.Msg{
			Subject: fmt.Sprintf("%s.%s.%s", proc.node, "command", "shellcommand"),
			// Structure of the reply message is:
			// reply.<nodename>.<message type>.<method>
			Reply: fmt.Sprintf("reply.%s.%s.%s", proc.node, "command", "shellcommand"),
			Data:  dataPayload,
		}

		// The SubscribeSync used here will only get messages that are
		// sent after it started subscribing, so we create the subscriber
		// for the reply message before publishing.
		subReply, err := natsConn.SubscribeSync(msg.Reply)
		if err != nil {
			log.Printf("error: nc.SubscribeSync failed: %v\n", err)
			continue
		}

		// Publish the message.
		err = natsConn.PublishMsg(msg)
		if err != nil {
			log.Printf("error: publish failed: %v\n", err)
			continue
		}

		// Wait up to 10 seconds for a reply,
		// continue and resend if no reply is received.
		msgReply, err := subReply.NextMsg(time.Second * 10)
		if err != nil {
			log.Printf("error: subReply.NextMsg failed for node=%v pid=%v: %v\n", proc.node, proc.processID, err)
			// Did not receive a reply, continuing from the top again.
			continue
		}

		fmt.Printf("publisher: received: %s\n", msgReply.Data)
		return
	}
}
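
// messageDeliver above publishes on <node>.command.shellcommand and waits for
// a message on the reply subject. The receiving side is not part of this
// file; a rough sketch of what a subscriber could look like is given below,
// assuming it gob-decodes the Message and answers on msg.Reply. The function
// name runSubscriber, the handler body, and the "ack" payload are
// illustrative assumptions only.
//
//	func (s *server) runSubscriber() {
//		subject := fmt.Sprintf("%s.%s.%s", s.nodeName, "command", "shellcommand")
//
//		_, err := s.natsConn.Subscribe(subject, func(msg *nats.Msg) {
//			var m Message
//			if err := gob.NewDecoder(bytes.NewReader(msg.Data)).Decode(&m); err != nil {
//				log.Printf("error: gob decode failed: %v\n", err)
//				return
//			}
//
//			// Here the command in m.Data would be handled according to
//			// m.MessageType. For now, just acknowledge receipt.
//			if err := s.natsConn.Publish(msg.Reply, []byte("ack")); err != nil {
//				log.Printf("error: publish of reply failed: %v\n", err)
//			}
//		})
//		if err != nil {
//			log.Printf("error: nc.Subscribe failed: %v\n", err)
//		}
//	}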

// gobEncodePayload will encode the message structure along with its
// values in gob binary format.
// TODO: Check if it adds value to compress with gzip.
func gobEncodePayload(m Message) ([]byte, error) {
	var buf bytes.Buffer
	gobEnc := gob.NewEncoder(&buf)
	err := gobEnc.Encode(m)
	if err != nil {
		return nil, fmt.Errorf("error: gob.Encode failed: %v", err)
	}

	return buf.Bytes(), nil
}
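
// Regarding the gzip TODO above: a possible variant that compresses the gob
// output could look roughly like the sketch below. It would require the
// "compress/gzip" import, and whether the compression actually pays off for
// such small messages has not been measured.
//
//	func gobEncodePayloadGzip(m Message) ([]byte, error) {
//		var buf bytes.Buffer
//		gzWriter := gzip.NewWriter(&buf)
//
//		if err := gob.NewEncoder(gzWriter).Encode(m); err != nil {
//			return nil, fmt.Errorf("error: gob.Encode failed: %v", err)
//		}
//		// Close flushes any remaining compressed data to the buffer.
//		if err := gzWriter.Close(); err != nil {
//			return nil, fmt.Errorf("error: gzip writer close failed: %v", err)
//		}
//
//		return buf.Bytes(), nil
//	}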