package networkdb

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	rnd "math/rand"
	"net"
	"strings"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/sirupsen/logrus"
)

const (
	reapInterval     = 30 * time.Minute
	reapPeriod       = 5 * time.Second
	retryInterval    = 1 * time.Second
	nodeReapInterval = 24 * time.Hour
	nodeReapPeriod   = 2 * time.Hour
)
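
// logWriter adapts memberlist's log output to logrus: it strips the
// [WARN]/[DEBUG]/[INFO]/[ERR] level prefixes that memberlist emits and
// forwards each message at the corresponding logrus level (note that
// memberlist errors are reported as warnings).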
type logWriter struct{}

func (l *logWriter) Write(p []byte) (int, error) {
	str := string(p)
	str = strings.TrimSuffix(str, "\n")

	switch {
	case strings.HasPrefix(str, "[WARN] "):
		str = strings.TrimPrefix(str, "[WARN] ")
		logrus.Warn(str)
	case strings.HasPrefix(str, "[DEBUG] "):
		str = strings.TrimPrefix(str, "[DEBUG] ")
		logrus.Debug(str)
	case strings.HasPrefix(str, "[INFO] "):
		str = strings.TrimPrefix(str, "[INFO] ")
		logrus.Info(str)
	case strings.HasPrefix(str, "[ERR] "):
		str = strings.TrimPrefix(str, "[ERR] ")
		logrus.Warn(str)
	}

	return len(p), nil
}

// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
	logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			return
		}
	}
	nDB.config.Keys = append(nDB.config.Keys, key)
	if nDB.keyring != nil {
		nDB.keyring.AddKey(key)
	}
}

// SetPrimaryKey sets the given key as the primary key. The key should
// already have been added to the key ring through SetKey.
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
	logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5])
	nDB.RLock()
	defer nDB.RUnlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			if nDB.keyring != nil {
				nDB.keyring.UseKey(dbKey)
			}
			break
		}
	}
}

// RemoveKey removes a key from the key ring. The key being removed
// can't be the primary key
func (nDB *NetworkDB) RemoveKey(key []byte) {
	logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for i, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
			if nDB.keyring != nil {
				nDB.keyring.RemoveKey(dbKey)
			}
			break
		}
	}
}
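
// clusterInit creates the memberlist instance for this node, wires up the
// gossip delegates, keyring and broadcast queues, and starts the periodic
// background tasks (state reaping, gossip, bulk sync, reconnect and dead
// node reaping).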
func (nDB *NetworkDB) clusterInit() error {
	nDB.lastStatsTimestamp = time.Now()
	nDB.lastHealthTimestamp = nDB.lastStatsTimestamp

	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeName
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr
	config.UDPBufferSize = nDB.config.PacketBufferSize

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersion2Compatible
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// custom logger that does not add time or date, so they are not
	// duplicated by logrus
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.stopCh = make(chan struct{})
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, nDB.stopCh, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
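
// retryJoin keeps trying to join the given members once per retryInterval
// until a join and the subsequent node join event succeed, or until stop is
// closed.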
func (nDB *NetworkDB) retryJoin(members []string, stop <-chan struct{}) {
	t := time.NewTicker(retryInterval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if _, err := nDB.memberlist.Join(members); err != nil {
				logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err)
				continue
			}
			if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
				logrus.Errorf("failed to send node join on retry: %v", err)
				continue
			}
			return
		case <-stop:
			return
		}
	}
}
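
// clusterJoin attempts to join the cluster formed by the given members and
// announces this node with a join event. On failure it keeps retrying in the
// background via retryJoin.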
func (nDB *NetworkDB) clusterJoin(members []string) error {
	mlist := nDB.memberlist

	if _, err := mlist.Join(members); err != nil {
		// In case of failure, keep retrying the join until it succeeds or the cluster is shut down.
		go nDB.retryJoin(members, nDB.stopCh)
		return fmt.Errorf("could not join node to memberlist: %v", err)
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return fmt.Errorf("failed to send node join: %v", err)
	}

	return nil
}
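
// clusterLeave broadcasts a node leave event, leaves the memberlist cluster,
// stops the periodic background tasks and shuts down the memberlist
// instance.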
func (nDB *NetworkDB) clusterLeave() error {
	mlist := nDB.memberlist

	if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil {
		logrus.Errorf("failed to send node leave: %v", err)
	}

	if err := mlist.Leave(time.Second); err != nil {
		return err
	}

	close(nDB.stopCh)

	for _, t := range nDB.tickers {
		t.Stop()
	}

	return mlist.Shutdown()
}
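
// triggerFunc invokes f every time C fires, after an initial random delay of
// up to stagger, and returns when stop is closed.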
func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, stop <-chan struct{}, f func()) {
	// Use a random stagger to avoid synchronizing
	randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger))
	select {
	case <-time.After(randStagger):
	case <-stop:
		return
	}
	for {
		select {
		case <-C:
			f()
		case <-stop:
			return
		}
	}
}
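
// reapDeadNode removes failed nodes from the failed node table once their
// reapTime has been counted down to zero.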
func (nDB *NetworkDB) reapDeadNode() {
	nDB.Lock()
	defer nDB.Unlock()
	for id, n := range nDB.failedNodes {
		if n.reapTime > 0 {
			n.reapTime -= nodeReapPeriod
			continue
		}
		logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
		delete(nDB.failedNodes, id)
	}
}
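
// reconnectNode picks one random failed node, tries to rejoin it into the
// memberlist cluster and, on success, re-announces this node and initiates a
// full bulk sync with it.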
func (nDB *NetworkDB) reconnectNode() {
	nDB.RLock()
	if len(nDB.failedNodes) == 0 {
		nDB.RUnlock()
		return
	}

	nodes := make([]*node, 0, len(nDB.failedNodes))
	for _, n := range nDB.failedNodes {
		nodes = append(nodes, n)
	}
	nDB.RUnlock()

	node := nodes[randomOffset(len(nodes))]
	addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)}

	if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil {
		return
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return
	}

	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
	nDB.bulkSync([]string{node.Name}, true)
}

// For timing entry deletion, the reaper should avoid APIs that do not use a
// monotonic clock source (time.Now, Sub, etc.). Hence every entry carries a
// reapTime field, set initially to reapInterval and decremented by reapPeriod
// every time the reaper runs. NOTE: nDB.reapTableEntries updates reapTime
// while holding only a read lock. This is safe as long as no other concurrent
// path touches the reapTime field.
func (nDB *NetworkDB) reapState() {
	nDB.reapNetworks()
	nDB.reapTableEntries()
}
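
// reapNetworks deletes network attachments that are marked as leaving once
// their reapTime has expired, decrementing reapTime on each run until then.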
func (nDB *NetworkDB) reapNetworks() {
	nDB.Lock()
	for _, nn := range nDB.networks {
		for id, n := range nn {
			if n.leaving {
				if n.reapTime <= 0 {
					delete(nn, id)
					continue
				}
				n.reapTime -= reapPeriod
			}
		}
	}
	nDB.Unlock()
}
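
// reapTableEntries removes table entries that are marked for deletion once
// their reapTime has expired. The candidate paths are collected under the
// read lock and then deleted from both the byTable and byNetwork indexes
// under the write lock.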
func (nDB *NetworkDB) reapTableEntries() {
	var paths []string

	nDB.RLock()
	nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
		entry, ok := v.(*entry)
		if !ok {
			return false
		}

		if !entry.deleting {
			return false
		}
		if entry.reapTime > 0 {
			entry.reapTime -= reapPeriod
			return false
		}
		paths = append(paths, path)
		return false
	})
	nDB.RUnlock()

	nDB.Lock()
	for _, path := range paths {
		params := strings.Split(path[1:], "/")
		tname := params[0]
		nid := params[1]
		key := params[2]

		if _, ok := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key)); !ok {
			logrus.Errorf("Could not delete entry in table %s with network id %s and key %s as it does not exist", tname, nid, key)
		}

		if _, ok := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key)); !ok {
			logrus.Errorf("Could not delete entry in network %s with table name %s and key %s as it does not exist", nid, tname, key)
		}
	}
	nDB.Unlock()
}
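
// gossip sends the pending table broadcasts of every network this node is
// attached to, as compound messages, to up to three random peers per network.
// It also periodically logs per-network queue statistics and warns when the
// memberlist health score indicates connectivity issues.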
func (nDB *NetworkDB) gossip() {
	networkNodes := make(map[string][]string)
	nDB.RLock()
	thisNodeNetworks := nDB.networks[nDB.config.NodeName]
	for nid := range thisNodeNetworks {
		networkNodes[nid] = nDB.networkNodes[nid]
	}
	printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
	printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod
	nDB.RUnlock()

	if printHealth {
		healthScore := nDB.memberlist.GetHealthScore()
		if healthScore != 0 {
			logrus.Warnf("NetworkDB stats - healthscore:%d (connectivity issues)", healthScore)
		}
		nDB.lastHealthTimestamp = time.Now()
	}

	for nid, nodes := range networkNodes {
		mNodes := nDB.mRandomNodes(3, nodes)
		bytesAvail := nDB.config.PacketBufferSize - compoundHeaderOverhead

		nDB.RLock()
		network, ok := thisNodeNetworks[nid]
		nDB.RUnlock()
		if !ok || network == nil {
			// It is normal for the network to be removed
			// between the time we collect the network
			// attachments of this node and processing
			// them here.
			continue
		}

		broadcastQ := network.tableBroadcasts

		if broadcastQ == nil {
			logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
			continue
		}

		msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail)
		// Collect stats and print the queue info; this runs even when no
		// messages were pulled so that empty queues are also visible.
		network.qMessagesSent += len(msgs)
		if printStats {
			logrus.Infof("NetworkDB stats - Queue net:%s qLen:%d netPeers:%d netMsg/s:%d",
				nid, broadcastQ.NumQueued(), broadcastQ.NumNodes(), network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
			network.qMessagesSent = 0
		}

		if len(msgs) == 0 {
			continue
		}

		// Create a compound message
		compound := makeCompoundMessage(msgs)

		for _, node := range mNodes {
			nDB.RLock()
			mnode := nDB.nodes[node]
			nDB.RUnlock()

			if mnode == nil {
				break
			}

			// Send the compound message
			if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil {
				logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
			}
		}
	}
	// Reset the stats
	if printStats {
		nDB.lastStatsTimestamp = time.Now()
	}
}
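
// bulkSyncTables performs a periodic bulk sync for every network this node
// participates in, dropping from the work list any networks that were already
// covered by an earlier sync in the same iteration.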
func (nDB *NetworkDB) bulkSyncTables() {
	var networks []string
	nDB.RLock()
	for nid, network := range nDB.networks[nDB.config.NodeName] {
		if network.leaving {
			continue
		}
		networks = append(networks, nid)
	}
	nDB.RUnlock()

	for {
		if len(networks) == 0 {
			break
		}

		nid := networks[0]
		networks = networks[1:]

		nDB.RLock()
		nodes := nDB.networkNodes[nid]
		nDB.RUnlock()

		// No peer nodes on this network. Move on.
		if len(nodes) == 0 {
			continue
		}

		completed, err := nDB.bulkSync(nodes, false)
		if err != nil {
			logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err)
			continue
		}

		// Remove all the networks for which we have
		// successfully completed bulk sync in this iteration.
		updatedNetworks := make([]string, 0, len(networks))
		for _, nid := range networks {
			var found bool
			for _, completedNid := range completed {
				if nid == completedNid {
					found = true
					break
				}
			}

			if !found {
				updatedNetworks = append(updatedNetworks, nid)
			}
		}

		networks = updatedNetworks
	}
}
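
// bulkSync syncs the table state of the common networks with the given nodes.
// If all is false, only up to two random nodes are tried and the sync stops
// after the first success; otherwise every node in the list is synced. It
// returns the networks that were synced with the last peer attempted.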
func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
	if !all {
		// Get 2 random nodes. 2nd node will be tried if the bulk sync to
		// 1st node fails.
		nodes = nDB.mRandomNodes(2, nodes)
	}

	if len(nodes) == 0 {
		return nil, nil
	}

	var err error
	var networks []string
	for _, node := range nodes {
		if node == nDB.config.NodeName {
			continue
		}
		logrus.Debugf("%s: Initiating bulk sync with node %v", nDB.config.NodeName, node)
		networks = nDB.findCommonNetworks(node)
		err = nDB.bulkSyncNode(networks, node, true)
		// if this is a periodic bulk sync, stop after the first successful sync
		if !all && err == nil {
			break
		}
		if err != nil {
			err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
			logrus.Warn(err.Error())
		}
	}

	if err != nil {
		return nil, err
	}

	return networks, nil
}

// Bulk sync all the table entries belonging to a set of networks to a
// single peer node. It can be unsolicited or can be in response to an
// unsolicited bulk sync
func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error {
	var msgs [][]byte

	var unsolMsg string
	if unsolicited {
		unsolMsg = "unsolicited"
	}

	logrus.Debugf("%s: Initiating %s bulk sync for networks %v with node %s", nDB.config.NodeName, unsolMsg, networks, node)

	nDB.RLock()
	mnode := nDB.nodes[node]
	if mnode == nil {
		nDB.RUnlock()
		return nil
	}

	for _, nid := range networks {
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			entry, ok := v.(*entry)
			if !ok {
				return false
			}

			eType := TableEventTypeCreate
			if entry.deleting {
				eType = TableEventTypeDelete
			}

			params := strings.Split(path[1:], "/")
			tEvent := TableEvent{
				Type:      eType,
				LTime:     entry.ltime,
				NodeName:  entry.node,
				NetworkID: nid,
				TableName: params[1],
				Key:       params[2],
				Value:     entry.value,
			}

			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
			if err != nil {
				logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
				return false
			}

			msgs = append(msgs, msg)
			return false
		})
	}
	nDB.RUnlock()

	// Create a compound message
	compound := makeCompoundMessage(msgs)

	bsm := BulkSyncMessage{
		LTime:       nDB.tableClock.Time(),
		Unsolicited: unsolicited,
		NodeName:    nDB.config.NodeName,
		Networks:    networks,
		Payload:     compound,
	}

	buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
	if err != nil {
		return fmt.Errorf("failed to encode bulk sync message: %v", err)
	}

	nDB.Lock()
	ch := make(chan struct{})
	nDB.bulkSyncAckTbl[node] = ch
	nDB.Unlock()

	err = nDB.memberlist.SendReliable(&mnode.Node, buf)
	if err != nil {
		nDB.Lock()
		delete(nDB.bulkSyncAckTbl, node)
		nDB.Unlock()

		return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
	}

	// Wait on a response only if it is unsolicited.
	if unsolicited {
		startTime := time.Now()
		t := time.NewTimer(30 * time.Second)
		select {
		case <-t.C:
			logrus.Errorf("Bulk sync to node %s timed out", node)
		case <-ch:
			logrus.Debugf("%s: Bulk sync to node %s took %s", nDB.config.NodeName, node, time.Since(startTime))
		}
		t.Stop()
	}

	return nil
}

// randomOffset returns a random offset in the range [0, n).
func randomOffset(n int) int {
	if n == 0 {
		return 0
	}

	val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		logrus.Errorf("Failed to get a random offset: %v", err)
		return 0
	}

	return int(val.Int64())
}

// mRandomNodes is used to select up to m random nodes, excluding this node.
// It is possible that fewer than m nodes are returned.
func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string {
	n := len(nodes)
	mNodes := make([]string, 0, m)
OUTER:
	// Probe up to 3*n times; with large n this is not necessary
	// since m << n, but with small n we want the search to be
	// exhaustive
	for i := 0; i < 3*n && len(mNodes) < m; i++ {
		// Get random node
		idx := randomOffset(n)
		node := nodes[idx]

		if node == nDB.config.NodeName {
			continue
		}

		// Check if we have this node already
		for j := 0; j < len(mNodes); j++ {
			if node == mNodes[j] {
				continue OUTER
			}
		}

		// Append the node
		mNodes = append(mNodes, node)
	}

	return mNodes
}