package networkdb

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	rnd "math/rand"
	"net"
	"strings"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/sirupsen/logrus"
)

const (
	// The garbage collection logic for entries leverages the presence of the network.
	// For this reason the expiration time of the network is set slightly higher than the entry
	// expiration, so that there are at least 5 extra cycles to make sure that all the entries
	// are properly deleted before the network itself is deleted.
	reapEntryInterval   = 30 * time.Minute
	reapNetworkInterval = reapEntryInterval + 5*reapPeriod
	reapPeriod          = 5 * time.Second
	retryInterval       = 1 * time.Second
	nodeReapInterval    = 24 * time.Hour
	nodeReapPeriod      = 2 * time.Hour
)

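// logWriter adapts memberlist's standard-library logger output to logrus,
// mapping the memberlist log-level prefixes to logrus levels.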
type logWriter struct{}

func (l *logWriter) Write(p []byte) (int, error) {
	str := string(p)
	str = strings.TrimSuffix(str, "\n")

	switch {
	case strings.HasPrefix(str, "[WARN] "):
		str = strings.TrimPrefix(str, "[WARN] ")
		logrus.Warn(str)
	case strings.HasPrefix(str, "[DEBUG] "):
		str = strings.TrimPrefix(str, "[DEBUG] ")
		logrus.Debug(str)
	case strings.HasPrefix(str, "[INFO] "):
		str = strings.TrimPrefix(str, "[INFO] ")
		logrus.Info(str)
	case strings.HasPrefix(str, "[ERR] "):
		str = strings.TrimPrefix(str, "[ERR] ")
		logrus.Warn(str)
	}

	return len(p), nil
}

// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
	logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			return
		}
	}
	nDB.config.Keys = append(nDB.config.Keys, key)
	if nDB.keyring != nil {
		nDB.keyring.AddKey(key)
	}
}

// SetPrimaryKey sets the given key as the primary key. This should have
// been added a priori through SetKey
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
	logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5])
	nDB.RLock()
	defer nDB.RUnlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			if nDB.keyring != nil {
				nDB.keyring.UseKey(dbKey)
			}
			break
		}
	}
}

// RemoveKey removes a key from the key ring. The key being removed
// can't be the primary key
func (nDB *NetworkDB) RemoveKey(key []byte) {
	logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for i, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
			if nDB.keyring != nil {
				nDB.keyring.RemoveKey(dbKey)
			}
			break
		}
	}
}

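// clusterInit bootstraps the memberlist cluster for this node: it builds the
// memberlist configuration from nDB.config, sets up the encryption keyring when
// keys are configured, creates the network and node broadcast queues, and starts
// the periodic background tasks (state reaping, gossip, bulk sync, reconnect and
// dead-node reaping).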
func (nDB *NetworkDB) clusterInit() error {
	nDB.lastStatsTimestamp = time.Now()
	nDB.lastHealthTimestamp = nDB.lastStatsTimestamp

	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeName
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr
	config.UDPBufferSize = nDB.config.PacketBufferSize

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersion2Compatible
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// custom logger that does not add time or date, so they are not
	// duplicated by logrus
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.stopCh = make(chan struct{})
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, nDB.stopCh, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}

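// retryJoin keeps trying to join the given members every retryInterval until
// both the memberlist join and the follow-up node-join event succeed, or the
// stop channel is closed.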
func (nDB *NetworkDB) retryJoin(members []string, stop <-chan struct{}) {
	t := time.NewTicker(retryInterval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if _, err := nDB.memberlist.Join(members); err != nil {
				logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err)
				continue
			}
			if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
				logrus.Errorf("failed to send node join on retry: %v", err)
				continue
			}
			return
		case <-stop:
			return
		}
	}
}

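// clusterJoin attempts to join the memberlist cluster formed by the given
// members and announces this node to its peers. On failure it keeps retrying
// in the background until the join succeeds or the cluster is shut down.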
func (nDB *NetworkDB) clusterJoin(members []string) error {
	mlist := nDB.memberlist

	if _, err := mlist.Join(members); err != nil {
		// In case of failure, keep retrying the join until it succeeds or the cluster is shut down.
		go nDB.retryJoin(members, nDB.stopCh)
		return fmt.Errorf("could not join node to memberlist: %v", err)
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return fmt.Errorf("failed to send node join: %v", err)
	}

	return nil
}

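// clusterLeave broadcasts a node-leave event, leaves the memberlist cluster,
// stops all periodic background tasks and shuts down the memberlist transport.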
func (nDB *NetworkDB) clusterLeave() error {
	mlist := nDB.memberlist

	if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil {
		logrus.Errorf("failed to send node leave: %v", err)
	}

	if err := mlist.Leave(time.Second); err != nil {
		return err
	}

	close(nDB.stopCh)

	for _, t := range nDB.tickers {
		t.Stop()
	}

	return mlist.Shutdown()
}

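// triggerFunc runs f every time the ticker channel C fires, after an initial
// random stagger of up to one interval so that nodes do not all fire in
// lockstep, and returns when the stop channel is closed.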
func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, stop <-chan struct{}, f func()) {
	// Use a random stagger to avoid synchronizing
	randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger))
	select {
	case <-time.After(randStagger):
	case <-stop:
		return
	}
	for {
		select {
		case <-C:
			f()
		case <-stop:
			return
		}
	}
}

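// reapDeadNode decrements the reap timer of every failed node and removes a
// node from the failed-node table once its timer has expired.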
func (nDB *NetworkDB) reapDeadNode() {
	nDB.Lock()
	defer nDB.Unlock()
	for id, n := range nDB.failedNodes {
		if n.reapTime > 0 {
			n.reapTime -= nodeReapPeriod
			continue
		}
		logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
		delete(nDB.failedNodes, id)
	}
}

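// reconnectNode picks one random node from the failed-node list, tries to
// rejoin it through memberlist and, on success, announces this node again and
// initiates a full bulk sync with the reconnected peer.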
func (nDB *NetworkDB) reconnectNode() {
	nDB.RLock()
	if len(nDB.failedNodes) == 0 {
		nDB.RUnlock()
		return
	}

	nodes := make([]*node, 0, len(nDB.failedNodes))
	for _, n := range nDB.failedNodes {
		nodes = append(nodes, n)
	}
	nDB.RUnlock()

	node := nodes[randomOffset(len(nodes))]
	addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)}

	if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil {
		return
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return
	}

	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
	nDB.bulkSync([]string{node.Name}, true)
}

// For timing the entry deletion in the reaper, APIs that don't use a monotonic clock
// source (time.Now, Sub, etc.) should be avoided. Hence we use a reapTime in every
// entry which is set initially to the reap interval and decremented by reapPeriod every
// time the reaper runs. NOTE: nDB.reapTableEntries updates the reapTime with a read lock.
// This is safe as long as no other concurrent path touches the reapTime field.
func (nDB *NetworkDB) reapState() {
	// reapTableEntries leverages the presence of the network, so garbage collect entries first
	nDB.reapTableEntries()
	nDB.reapNetworks()
}

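// reapNetworks walks all known network attachments and, for those marked as
// leaving, decrements the reap timer and deletes the attachment once the timer
// has expired.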
func (nDB *NetworkDB) reapNetworks() {
	nDB.Lock()
	for _, nn := range nDB.networks {
		for id, n := range nn {
			if n.leaving {
				if n.reapTime <= 0 {
					delete(nn, id)
					continue
				}
				n.reapTime -= reapPeriod
			}
		}
	}
	nDB.Unlock()
}

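// reapTableEntries walks, network by network, the table entries that are marked
// for deletion, decrements their reap timer and removes them from both the
// table and network trees once the timer has expired.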
func (nDB *NetworkDB) reapTableEntries() {
	var nodeNetworks []string
	// This is best effort; if the list of networks changes it will be picked up in the next cycle
	nDB.RLock()
	for nid := range nDB.networks[nDB.config.NodeName] {
		nodeNetworks = append(nodeNetworks, nid)
	}
	nDB.RUnlock()

	cycleStart := time.Now()
	// In order to avoid blocking the database for a long time, apply the garbage collection logic by network
	// The lock is taken at the beginning of the cycle and the deletion is inline
	for _, nid := range nodeNetworks {
		nDB.Lock()
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			// timeCompensation compensates in case the lock took some time to be released
			timeCompensation := time.Since(cycleStart)
			entry, ok := v.(*entry)
			if !ok || !entry.deleting {
				return false
			}

			// In this check we are adding an extra 1 second to guarantee that when the number is truncated to int32
			// to fit the packet for the tableEvent, the number is always strictly > 1 and never 0
			if entry.reapTime > reapPeriod+timeCompensation+time.Second {
				entry.reapTime -= reapPeriod + timeCompensation
				return false
			}

			params := strings.Split(path[1:], "/")
			nid := params[0]
			tname := params[1]
			key := params[2]

			okTable, okNetwork := nDB.deleteEntry(nid, tname, key)
			if !okTable {
				logrus.Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid)
			}
			if !okNetwork {
				logrus.Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname)
			}

			return false
		})
		nDB.Unlock()
	}
}

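// gossip sends, for each network this node participates in, the pending table
// broadcasts to up to 3 random peers of that network as a single compound
// message, and periodically logs queue statistics and the memberlist health
// score.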
func (nDB *NetworkDB) gossip() {
	networkNodes := make(map[string][]string)
	nDB.RLock()
	thisNodeNetworks := nDB.networks[nDB.config.NodeName]
	for nid := range thisNodeNetworks {
		networkNodes[nid] = nDB.networkNodes[nid]
	}
	printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
	printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod
	nDB.RUnlock()

	if printHealth {
		healthScore := nDB.memberlist.GetHealthScore()
		if healthScore != 0 {
			logrus.Warnf("NetworkDB stats - healthscore:%d (connectivity issues)", healthScore)
		}
		nDB.lastHealthTimestamp = time.Now()
	}

	for nid, nodes := range networkNodes {
		mNodes := nDB.mRandomNodes(3, nodes)
		bytesAvail := nDB.config.PacketBufferSize - compoundHeaderOverhead

		nDB.RLock()
		network, ok := thisNodeNetworks[nid]
		nDB.RUnlock()
		if !ok || network == nil {
			// It is normal for the network to be removed
			// between the time we collect the network
			// attachments of this node and processing
			// them here.
			continue
		}

		broadcastQ := network.tableBroadcasts

		if broadcastQ == nil {
			logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
			continue
		}

		msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail)
		// Collect stats and print the queue info; note this code is here also to have a view of the queues when they are empty
		network.qMessagesSent += len(msgs)
		if printStats {
			logrus.Infof("NetworkDB stats - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d",
				nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(),
				network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
			network.qMessagesSent = 0
		}

		if len(msgs) == 0 {
			continue
		}

		// Create a compound message
		compound := makeCompoundMessage(msgs)

		for _, node := range mNodes {
			nDB.RLock()
			mnode := nDB.nodes[node]
			nDB.RUnlock()

			if mnode == nil {
				break
			}

			// Send the compound message
			if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil {
				logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
			}
		}
	}
	// Reset the stats
	if printStats {
		nDB.lastStatsTimestamp = time.Now()
	}
}

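// bulkSyncTables runs the periodic bulk sync: for each network this node has
// joined (and is not leaving) it bulk syncs the table entries with a random
// peer, skipping networks already covered by an earlier sync in the same
// iteration.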
func (nDB *NetworkDB) bulkSyncTables() {
	var networks []string
	nDB.RLock()
	for nid, network := range nDB.networks[nDB.config.NodeName] {
		if network.leaving {
			continue
		}
		networks = append(networks, nid)
	}
	nDB.RUnlock()

	for {
		if len(networks) == 0 {
			break
		}

		nid := networks[0]
		networks = networks[1:]

		nDB.RLock()
		nodes := nDB.networkNodes[nid]
		nDB.RUnlock()

		// No peer nodes on this network. Move on.
		if len(nodes) == 0 {
			continue
		}

		completed, err := nDB.bulkSync(nodes, false)
		if err != nil {
			logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err)
			continue
		}

		// Remove all the networks for which we have
		// successfully completed bulk sync in this iteration.
		updatedNetworks := make([]string, 0, len(networks))
		for _, nid := range networks {
			var found bool
			for _, completedNid := range completed {
				if nid == completedNid {
					found = true
					break
				}
			}

			if !found {
				updatedNetworks = append(updatedNetworks, nid)
			}
		}

		networks = updatedNetworks
	}
}

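// bulkSync syncs the table entries of the networks shared with the given nodes.
// When all is false only up to 2 random nodes are considered and the sync stops
// after the first successful one. It returns the list of networks that were
// synced.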
func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
	if !all {
		// Get 2 random nodes. 2nd node will be tried if the bulk sync to
		// 1st node fails.
		nodes = nDB.mRandomNodes(2, nodes)
	}

	if len(nodes) == 0 {
		return nil, nil
	}

	var err error
	var networks []string
	for _, node := range nodes {
		if node == nDB.config.NodeName {
			continue
		}
		logrus.Debugf("%s: Initiating bulk sync with node %v", nDB.config.NodeName, node)
		networks = nDB.findCommonNetworks(node)
		err = nDB.bulkSyncNode(networks, node, true)
		// if it's a periodic bulk sync, stop after the first successful sync
		if !all && err == nil {
			break
		}
		if err != nil {
			err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
			logrus.Warn(err.Error())
		}
	}

	if err != nil {
		return nil, err
	}

	return networks, nil
}

// Bulk sync all the table entries belonging to a set of networks to a
// single peer node. It can be unsolicited or can be in response to an
// unsolicited bulk sync
func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error {
	var msgs [][]byte

	var unsolMsg string
	if unsolicited {
		unsolMsg = "unsolicited"
	}

	logrus.Debugf("%s: Initiating %s bulk sync for networks %v with node %s",
		nDB.config.NodeName, unsolMsg, networks, node)

	nDB.RLock()
	mnode := nDB.nodes[node]
	if mnode == nil {
		nDB.RUnlock()
		return nil
	}

	for _, nid := range networks {
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			entry, ok := v.(*entry)
			if !ok {
				return false
			}

			eType := TableEventTypeCreate
			if entry.deleting {
				eType = TableEventTypeDelete
			}

			params := strings.Split(path[1:], "/")
			tEvent := TableEvent{
				Type:      eType,
				LTime:     entry.ltime,
				NodeName:  entry.node,
				NetworkID: nid,
				TableName: params[1],
				Key:       params[2],
				Value:     entry.value,
				// The duration in seconds is a float; below it is truncated to an int32
				ResidualReapTime: int32(entry.reapTime.Seconds()),
			}

			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
			if err != nil {
				logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
				return false
			}

			msgs = append(msgs, msg)
			return false
		})
	}
	nDB.RUnlock()

	// Create a compound message
	compound := makeCompoundMessage(msgs)

	bsm := BulkSyncMessage{
		LTime:       nDB.tableClock.Time(),
		Unsolicited: unsolicited,
		NodeName:    nDB.config.NodeName,
		Networks:    networks,
		Payload:     compound,
	}

	buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
	if err != nil {
		return fmt.Errorf("failed to encode bulk sync message: %v", err)
	}

	nDB.Lock()
	ch := make(chan struct{})
	nDB.bulkSyncAckTbl[node] = ch
	nDB.Unlock()

	err = nDB.memberlist.SendReliable(&mnode.Node, buf)
	if err != nil {
		nDB.Lock()
		delete(nDB.bulkSyncAckTbl, node)
		nDB.Unlock()

		return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
	}

	// Wait on a response only if it is unsolicited.
	if unsolicited {
		startTime := time.Now()
		t := time.NewTimer(30 * time.Second)
		select {
		case <-t.C:
			logrus.Errorf("Bulk sync to node %s timed out", node)
		case <-ch:
			logrus.Debugf("%s: Bulk sync to node %s took %s", nDB.config.NodeName, node, time.Since(startTime))
		}
		t.Stop()
	}

	return nil
}

// randomOffset returns a random offset in the range [0, n)
func randomOffset(n int) int {
	if n == 0 {
		return 0
	}

	val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		logrus.Errorf("Failed to get a random offset: %v", err)
		return 0
	}

	return int(val.Int64())
}

// mRandomNodes is used to select up to m random nodes. It is possible
// that fewer than m nodes are returned.
func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string {
	n := len(nodes)
	mNodes := make([]string, 0, m)
OUTER:
	// Probe up to 3*n times; with large n this is not necessary
	// since m << n, but with small n we want the search to be
	// exhaustive
	for i := 0; i < 3*n && len(mNodes) < m; i++ {
		// Get random node
		idx := randomOffset(n)
		node := nodes[idx]

		if node == nDB.config.NodeName {
			continue
		}

		// Check if we have this node already
		for j := 0; j < len(mNodes); j++ {
			if node == mNodes[j] {
				continue OUTER
			}
		}

		// Append the node
		mNodes = append(mNodes, node)
	}

	return mNodes
}