cluster.go

package networkdb

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	rnd "math/rand"
	"net"
	"strings"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/hashicorp/memberlist"
)

const (
	reapInterval     = 30 * time.Minute
	reapPeriod       = 5 * time.Second
	retryInterval    = 1 * time.Second
	nodeReapInterval = 24 * time.Hour
	nodeReapPeriod   = 2 * time.Hour
)
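
// logWriter is an io.Writer that forwards memberlist's log output to logrus,
// translating the bracketed level prefixes into the matching logrus calls.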
type logWriter struct{}

func (l *logWriter) Write(p []byte) (int, error) {
	str := string(p)
	str = strings.TrimSuffix(str, "\n")

	switch {
	case strings.HasPrefix(str, "[WARN] "):
		str = strings.TrimPrefix(str, "[WARN] ")
		logrus.Warn(str)
	case strings.HasPrefix(str, "[DEBUG] "):
		str = strings.TrimPrefix(str, "[DEBUG] ")
		logrus.Debug(str)
	case strings.HasPrefix(str, "[INFO] "):
		str = strings.TrimPrefix(str, "[INFO] ")
		logrus.Info(str)
	case strings.HasPrefix(str, "[ERR] "):
		str = strings.TrimPrefix(str, "[ERR] ")
		logrus.Warn(str)
	}

	return len(p), nil
}

// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
	logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			return
		}
	}
	nDB.config.Keys = append(nDB.config.Keys, key)
	if nDB.keyring != nil {
		nDB.keyring.AddKey(key)
	}
}

// SetPrimaryKey sets the given key as the primary key. The key must
// already have been added to the key ring via SetKey.
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
	logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5])
	nDB.RLock()
	defer nDB.RUnlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			if nDB.keyring != nil {
				nDB.keyring.UseKey(dbKey)
			}
			break
		}
	}
}

// RemoveKey removes a key from the key ring. The key being removed
// can't be the primary key.
func (nDB *NetworkDB) RemoveKey(key []byte) {
	logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for i, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
			if nDB.keyring != nil {
				nDB.keyring.RemoveKey(dbKey)
			}
			break
		}
	}
}
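
// clusterInit creates and configures the memberlist instance for this node,
// sets up the gossip keyring when encryption keys are configured, initializes
// the broadcast queues, and starts the periodic reap, gossip, bulk sync,
// reconnect, and dead-node reap loops.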
func (nDB *NetworkDB) clusterInit() error {
	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeName
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersion2Compatible
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// custom logger that does not add time or date, so they are not
	// duplicated by logrus
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.stopCh = make(chan struct{})
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, nDB.stopCh, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
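
// retryJoin keeps retrying to join the given members every retryInterval
// until the join succeeds or the stop channel is closed, then announces this
// node to the cluster with a join event.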
func (nDB *NetworkDB) retryJoin(members []string, stop <-chan struct{}) {
	t := time.NewTicker(retryInterval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if _, err := nDB.memberlist.Join(members); err != nil {
				logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err)
				continue
			}
			if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
				logrus.Errorf("failed to send node join on retry: %v", err)
				continue
			}
			return
		case <-stop:
			return
		}
	}
}
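
// clusterJoin joins this node to the given cluster members and announces it
// with a join event. If the initial join fails, it keeps retrying in the
// background.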
func (nDB *NetworkDB) clusterJoin(members []string) error {
	mlist := nDB.memberlist

	if _, err := mlist.Join(members); err != nil {
		// In case of failure, keep retrying the join until it succeeds or the
		// cluster is shut down.
		go nDB.retryJoin(members, nDB.stopCh)

		return fmt.Errorf("could not join node to memberlist: %v", err)
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return fmt.Errorf("failed to send node join: %v", err)
	}

	return nil
}
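
// clusterLeave announces a leave event, leaves the memberlist cluster, stops
// the periodic tickers, and shuts down the memberlist instance.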
func (nDB *NetworkDB) clusterLeave() error {
	mlist := nDB.memberlist

	if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil {
		logrus.Errorf("failed to send node leave: %v", err)
	}

	if err := mlist.Leave(time.Second); err != nil {
		return err
	}

	close(nDB.stopCh)

	for _, t := range nDB.tickers {
		t.Stop()
	}

	return mlist.Shutdown()
}
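
// triggerFunc waits for a random stagger of at most one interval, then runs f
// every time the ticker channel C fires, until the stop channel is closed.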
func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, stop <-chan struct{}, f func()) {
	// Use a random stagger to avoid synchronizing
	randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger))
	select {
	case <-time.After(randStagger):
	case <-stop:
		return
	}
	for {
		select {
		case <-C:
			f()
		case <-stop:
			return
		}
	}
}
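
// reapDeadNode ticks down the reap timer of every failed node and removes a
// node from the failed-node list once its timer expires.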
func (nDB *NetworkDB) reapDeadNode() {
	nDB.Lock()
	defer nDB.Unlock()
	for id, n := range nDB.failedNodes {
		if n.reapTime > 0 {
			n.reapTime -= nodeReapPeriod
			continue
		}
		logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
		delete(nDB.failedNodes, id)
	}
}
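
// reconnectNode picks one random failed node and tries to rejoin it to the
// gossip cluster. On success it bumps the local table time and initiates a
// bulk sync with that node to speed up convergence.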
func (nDB *NetworkDB) reconnectNode() {
	nDB.RLock()
	if len(nDB.failedNodes) == 0 {
		nDB.RUnlock()
		return
	}

	nodes := make([]*node, 0, len(nDB.failedNodes))
	for _, n := range nDB.failedNodes {
		nodes = append(nodes, n)
	}
	nDB.RUnlock()

	node := nodes[randomOffset(len(nodes))]
	addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)}

	if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil {
		return
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return
	}

	// Update all the local table state to a new time to force an update on
	// the node we are trying to rejoin, just in case that node still has
	// these entries in the deleting state. This facilitates fast convergence
	// after recovering from a gossip failure.
	nDB.updateLocalTableTime()

	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
	nDB.bulkSync([]string{node.Name}, true)
}

// For timing entry deletion, the reaper avoids APIs that use a monotonic
// clock source (time.Now, Sub, etc.). Instead, every entry carries a reapTime
// field which is initially set to reapInterval and decremented by reapPeriod
// every time the reaper runs. NOTE: nDB.reapTableEntries updates reapTime
// while holding only a read lock. This is safe as long as no other concurrent
// path touches the reapTime field.
func (nDB *NetworkDB) reapState() {
	nDB.reapNetworks()
	nDB.reapTableEntries()
}
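
// reapNetworks removes network attachments that are marked as leaving once
// their reap timer has expired.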
func (nDB *NetworkDB) reapNetworks() {
	nDB.Lock()
	for name, nn := range nDB.networks {
		for id, n := range nn {
			if n.leaving {
				if n.reapTime <= 0 {
					delete(nn, id)
					nDB.deleteNetworkNode(id, name)
					continue
				}
				n.reapTime -= reapPeriod
			}
		}
	}
	nDB.Unlock()
}
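
// reapTableEntries walks the table index, ticks down the reap timer of
// entries marked for deletion, and removes expired entries from both the
// by-table and by-network indexes.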
func (nDB *NetworkDB) reapTableEntries() {
	var paths []string

	nDB.RLock()
	nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
		entry, ok := v.(*entry)
		if !ok {
			return false
		}

		if !entry.deleting {
			return false
		}
		if entry.reapTime > 0 {
			entry.reapTime -= reapPeriod
			return false
		}
		paths = append(paths, path)
		return false
	})
	nDB.RUnlock()

	nDB.Lock()
	for _, path := range paths {
		params := strings.Split(path[1:], "/")
		tname := params[0]
		nid := params[1]
		key := params[2]

		if _, ok := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key)); !ok {
			logrus.Errorf("Could not delete entry in table %s with network id %s and key %s as it does not exist", tname, nid, key)
		}

		if _, ok := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key)); !ok {
			logrus.Errorf("Could not delete entry in network %s with table name %s and key %s as it does not exist", nid, tname, key)
		}
	}
	nDB.Unlock()
}
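
// gossip picks up to three random peers for every network this node is
// attached to and sends them the pending table broadcasts for that network as
// a compound UDP message.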
func (nDB *NetworkDB) gossip() {
	networkNodes := make(map[string][]string)
	nDB.RLock()
	thisNodeNetworks := nDB.networks[nDB.config.NodeName]
	for nid := range thisNodeNetworks {
		networkNodes[nid] = nDB.networkNodes[nid]
	}
	nDB.RUnlock()

	for nid, nodes := range networkNodes {
		mNodes := nDB.mRandomNodes(3, nodes)
		bytesAvail := udpSendBuf - compoundHeaderOverhead

		nDB.RLock()
		network, ok := thisNodeNetworks[nid]
		nDB.RUnlock()
		if !ok || network == nil {
			// It is normal for the network to be removed between the time we
			// collect the network attachments of this node and processing
			// them here.
			continue
		}

		broadcastQ := network.tableBroadcasts

		if broadcastQ == nil {
			logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
			continue
		}

		msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail)
		if len(msgs) == 0 {
			continue
		}

		// Create a compound message
		compound := makeCompoundMessage(msgs)

		for _, node := range mNodes {
			nDB.RLock()
			mnode := nDB.nodes[node]
			nDB.RUnlock()

			if mnode == nil {
				break
			}

			// Send the compound message
			if err := nDB.memberlist.SendToUDP(&mnode.Node, compound); err != nil {
				logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
			}
		}
	}
}
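
// bulkSyncTables runs the periodic bulk sync: for every network this node is
// attached to (and not leaving), it bulk syncs table state with the network's
// peers, skipping networks already covered by an earlier sync in the same
// pass.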
func (nDB *NetworkDB) bulkSyncTables() {
	var networks []string
	nDB.RLock()
	for nid, network := range nDB.networks[nDB.config.NodeName] {
		if network.leaving {
			continue
		}
		networks = append(networks, nid)
	}
	nDB.RUnlock()

	for {
		if len(networks) == 0 {
			break
		}

		nid := networks[0]
		networks = networks[1:]

		nDB.RLock()
		nodes := nDB.networkNodes[nid]
		nDB.RUnlock()

		// No peer nodes on this network. Move on.
		if len(nodes) == 0 {
			continue
		}

		completed, err := nDB.bulkSync(nodes, false)
		if err != nil {
			logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err)
			continue
		}

		// Remove all the networks for which we have successfully completed
		// bulk sync in this iteration.
		updatedNetworks := make([]string, 0, len(networks))
		for _, nid := range networks {
			var found bool
			for _, completedNid := range completed {
				if nid == completedNid {
					found = true
					break
				}
			}

			if !found {
				updatedNetworks = append(updatedNetworks, nid)
			}
		}

		networks = updatedNetworks
	}
}
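
// bulkSync initiates a bulk sync of table state with the given peer nodes.
// When all is false it picks up to two random peers and stops after the first
// successful sync; otherwise it tries every node in the list. It returns the
// networks that were synced with the last peer contacted.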
func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
	if !all {
		// Get 2 random nodes. 2nd node will be tried if the bulk sync to
		// 1st node fails.
		nodes = nDB.mRandomNodes(2, nodes)
	}

	if len(nodes) == 0 {
		return nil, nil
	}

	var err error
	var networks []string
	for _, node := range nodes {
		if node == nDB.config.NodeName {
			continue
		}

		logrus.Debugf("%s: Initiating bulk sync with node %v", nDB.config.NodeName, node)
		networks = nDB.findCommonNetworks(node)
		err = nDB.bulkSyncNode(networks, node, true)
		// If this is a periodic bulk sync, stop after the first successful sync.
		if !all && err == nil {
			break
		}

		if err != nil {
			err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
			logrus.Warn(err.Error())
		}
	}

	if err != nil {
		return nil, err
	}

	return networks, nil
}

// Bulk sync all the table entries belonging to a set of networks to a
// single peer node. It can be unsolicited or can be in response to an
// unsolicited bulk sync.
func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error {
	var msgs [][]byte

	var unsolMsg string
	if unsolicited {
		unsolMsg = "unsolicited"
	}

	logrus.Debugf("%s: Initiating %s bulk sync for networks %v with node %s", nDB.config.NodeName, unsolMsg, networks, node)

	nDB.RLock()
	mnode := nDB.nodes[node]
	if mnode == nil {
		nDB.RUnlock()
		return nil
	}

	for _, nid := range networks {
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			entry, ok := v.(*entry)
			if !ok {
				return false
			}

			eType := TableEventTypeCreate
			if entry.deleting {
				eType = TableEventTypeDelete
			}

			params := strings.Split(path[1:], "/")
			tEvent := TableEvent{
				Type:      eType,
				LTime:     entry.ltime,
				NodeName:  entry.node,
				NetworkID: nid,
				TableName: params[1],
				Key:       params[2],
				Value:     entry.value,
			}

			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
			if err != nil {
				logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
				return false
			}

			msgs = append(msgs, msg)
			return false
		})
	}
	nDB.RUnlock()

	// Create a compound message
	compound := makeCompoundMessage(msgs)

	bsm := BulkSyncMessage{
		LTime:       nDB.tableClock.Time(),
		Unsolicited: unsolicited,
		NodeName:    nDB.config.NodeName,
		Networks:    networks,
		Payload:     compound,
	}

	buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
	if err != nil {
		return fmt.Errorf("failed to encode bulk sync message: %v", err)
	}

	nDB.Lock()
	ch := make(chan struct{})
	nDB.bulkSyncAckTbl[node] = ch
	nDB.Unlock()

	err = nDB.memberlist.SendToTCP(&mnode.Node, buf)
	if err != nil {
		nDB.Lock()
		delete(nDB.bulkSyncAckTbl, node)
		nDB.Unlock()

		return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
	}

	// Wait on a response only if it is unsolicited.
	if unsolicited {
		startTime := time.Now()
		t := time.NewTimer(30 * time.Second)
		select {
		case <-t.C:
			logrus.Errorf("Bulk sync to node %s timed out", node)
		case <-ch:
			logrus.Debugf("%s: Bulk sync to node %s took %s", nDB.config.NodeName, node, time.Now().Sub(startTime))
		}
		t.Stop()
	}

	return nil
}

// randomOffset returns a cryptographically random offset in [0, n).
func randomOffset(n int) int {
	if n == 0 {
		return 0
	}

	val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		logrus.Errorf("Failed to get a random offset: %v", err)
		return 0
	}

	return int(val.Int64())
}

// mRandomNodes selects up to m random nodes, skipping this node itself. It is
// possible that fewer than m nodes are returned.
func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string {
	n := len(nodes)
	mNodes := make([]string, 0, m)
OUTER:
	// Probe up to 3*n times. With large n this is not necessary
	// since m << n, but with small n we want the search to be
	// exhaustive.
	for i := 0; i < 3*n && len(mNodes) < m; i++ {
		// Get random node
		idx := randomOffset(n)
		node := nodes[idx]

		if node == nDB.config.NodeName {
			continue
		}

		// Check if we have this node already
		for j := 0; j < len(mNodes); j++ {
			if node == mNodes[j] {
				continue OUTER
			}
		}

		// Append the node
		mNodes = append(mNodes, node)
	}

	return mNodes
}