package networkdb

import (
	"bytes"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	rnd "math/rand"
	"net"
	"strings"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/sirupsen/logrus"
)

const (
	reapPeriod       = 5 * time.Second
	retryInterval    = 1 * time.Second
	nodeReapInterval = 24 * time.Hour
	nodeReapPeriod   = 2 * time.Hour
)
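
// logWriter adapts memberlist's standard-library log output to logrus,
// mapping the [WARN], [DEBUG], [INFO] and [ERR] prefixes to the
// corresponding logrus levels ([ERR] is logged at Warn level).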
type logWriter struct{}

func (l *logWriter) Write(p []byte) (int, error) {
	str := string(p)
	str = strings.TrimSuffix(str, "\n")

	switch {
	case strings.HasPrefix(str, "[WARN] "):
		str = strings.TrimPrefix(str, "[WARN] ")
		logrus.Warn(str)
	case strings.HasPrefix(str, "[DEBUG] "):
		str = strings.TrimPrefix(str, "[DEBUG] ")
		logrus.Debug(str)
	case strings.HasPrefix(str, "[INFO] "):
		str = strings.TrimPrefix(str, "[INFO] ")
		logrus.Info(str)
	case strings.HasPrefix(str, "[ERR] "):
		str = strings.TrimPrefix(str, "[ERR] ")
		logrus.Warn(str)
	}

	return len(p), nil
}

// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
	logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			return
		}
	}
	nDB.config.Keys = append(nDB.config.Keys, key)
	if nDB.keyring != nil {
		nDB.keyring.AddKey(key)
	}
}

// SetPrimaryKey sets the given key as the primary key. The key must have
// been added beforehand through SetKey.
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
	logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5])
	nDB.RLock()
	defer nDB.RUnlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			if nDB.keyring != nil {
				nDB.keyring.UseKey(dbKey)
			}
			break
		}
	}
}

// RemoveKey removes a key from the key ring. The key being removed
// can't be the primary key
func (nDB *NetworkDB) RemoveKey(key []byte) {
	logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for i, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
			if nDB.keyring != nil {
				nDB.keyring.RemoveKey(dbKey)
			}
			break
		}
	}
}
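
// clusterInit creates the memberlist instance for this node, wires up the
// NetworkDB delegates, keyring and broadcast queues, and starts the periodic
// background tasks (state reaping, gossip, bulk sync, reconnection and dead
// node reaping).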
func (nDB *NetworkDB) clusterInit() error {
	nDB.lastStatsTimestamp = time.Now()
	nDB.lastHealthTimestamp = nDB.lastStatsTimestamp

	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeID
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr
	config.UDPBufferSize = nDB.config.PacketBufferSize

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersion2Compatible
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// custom logger that does not add time or date, so they are not
	// duplicated by logrus
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.stopCh = make(chan struct{})
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, nDB.stopCh, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
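
// retryJoin keeps attempting to join the given members every retryInterval
// until a join and the subsequent node-join event both succeed, or until
// stop is closed.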
func (nDB *NetworkDB) retryJoin(members []string, stop <-chan struct{}) {
	t := time.NewTicker(retryInterval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if _, err := nDB.memberlist.Join(members); err != nil {
				logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err)
				continue
			}
			if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
				logrus.Errorf("failed to send node join on retry: %v", err)
				continue
			}
			return
		case <-stop:
			return
		}
	}
}
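
// clusterJoin joins this node to the given cluster members and advertises
// the join to the cluster. On failure it keeps retrying in the background.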
func (nDB *NetworkDB) clusterJoin(members []string) error {
	mlist := nDB.memberlist

	if _, err := mlist.Join(members); err != nil {
		// In case of failure, keep retrying the join until it succeeds or the cluster is shut down.
		go nDB.retryJoin(members, nDB.stopCh)
		return fmt.Errorf("could not join node to memberlist: %v", err)
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return fmt.Errorf("failed to send node join: %v", err)
	}

	return nil
}
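
// clusterLeave advertises the departure of this node, leaves the memberlist
// cluster, stops all periodic background tasks and shuts down memberlist.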
func (nDB *NetworkDB) clusterLeave() error {
	mlist := nDB.memberlist

	if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil {
		logrus.Errorf("failed to send node leave: %v", err)
	}

	if err := mlist.Leave(time.Second); err != nil {
		return err
	}

	close(nDB.stopCh)

	for _, t := range nDB.tickers {
		t.Stop()
	}

	return mlist.Shutdown()
}
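
// triggerFunc invokes f every time the ticker channel C fires, after an
// initial random delay in [0, stagger) so that the periodic tasks of
// different nodes do not run in lockstep. It returns when stop is closed.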
func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, stop <-chan struct{}, f func()) {
	// Use a random stagger to avoid synchronizing
	randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger))
	select {
	case <-time.After(randStagger):
	case <-stop:
		return
	}
	for {
		select {
		case <-C:
			f()
		case <-stop:
			return
		}
	}
}
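
// reapDeadNode runs every nodeReapPeriod and removes a failed node from the
// gossip cluster once its reapTime has counted down to zero.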
func (nDB *NetworkDB) reapDeadNode() {
	nDB.Lock()
	defer nDB.Unlock()
	for id, n := range nDB.failedNodes {
		if n.reapTime > 0 {
			n.reapTime -= nodeReapPeriod
			continue
		}
		logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
		delete(nDB.failedNodes, id)
	}
}
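
// reconnectNode picks one random failed node and tries to rejoin it to the
// memberlist cluster. On success it advertises the join and initiates a
// full bulk sync with the reconnected node.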
func (nDB *NetworkDB) reconnectNode() {
	nDB.RLock()
	if len(nDB.failedNodes) == 0 {
		nDB.RUnlock()
		return
	}

	nodes := make([]*node, 0, len(nDB.failedNodes))
	for _, n := range nDB.failedNodes {
		nodes = append(nodes, n)
	}
	nDB.RUnlock()

	node := nodes[randomOffset(len(nodes))]
	addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)}

	if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil {
		return
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return
	}

	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
	nDB.bulkSync([]string{node.Name}, true)
}

// For timing the entry deletion, the reaper should avoid clock-based APIs
// (time.Now, Sub, etc.) that rely on a monotonic clock source. Hence every
// entry carries a reapTime, set initially to reapInterval and decremented by
// reapPeriod every time the reaper runs. NOTE: nDB.reapTableEntries updates
// the reapTime while holding only a read lock. This is safe as long as no
// other concurrent path touches the reapTime field.
func (nDB *NetworkDB) reapState() {
	// reapTableEntries relies on the network still being present, so garbage
	// collect the table entries first
	nDB.reapTableEntries()
	nDB.reapNetworks()
}
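
// reapNetworks deletes network attachments that are marked as leaving once
// their reapTime has elapsed; otherwise it decrements the reapTime by
// reapPeriod.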
func (nDB *NetworkDB) reapNetworks() {
	nDB.Lock()
	for _, nn := range nDB.networks {
		for id, n := range nn {
			if n.leaving {
				if n.reapTime <= 0 {
					delete(nn, id)
					continue
				}
				n.reapTime -= reapPeriod
			}
		}
	}
	nDB.Unlock()
}
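
// reapTableEntries walks the table entries of every network this node is
// attached to and permanently deletes entries marked for deletion whose
// grace period (reapTime) has expired.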
func (nDB *NetworkDB) reapTableEntries() {
	var nodeNetworks []string
	// This is best effort; if the list of networks changes it will be picked up in the next cycle
	nDB.RLock()
	for nid := range nDB.networks[nDB.config.NodeID] {
		nodeNetworks = append(nodeNetworks, nid)
	}
	nDB.RUnlock()

	cycleStart := time.Now()
	// In order to avoid blocking the database for a long time, apply the garbage collection logic by network.
	// The lock is taken at the beginning of the cycle and the deletion is inline.
	for _, nid := range nodeNetworks {
		nDB.Lock()
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			// timeCompensation compensates in case the lock took some time to be released
			timeCompensation := time.Since(cycleStart)
			entry, ok := v.(*entry)
			if !ok || !entry.deleting {
				return false
			}

			// In this check we add an extra second to guarantee that when the number is truncated to int32
			// to fit the packet for the tableEvent, the number is always strictly > 1 and never 0
			if entry.reapTime > reapPeriod+timeCompensation+time.Second {
				entry.reapTime -= reapPeriod + timeCompensation
				return false
			}

			params := strings.Split(path[1:], "/")
			nid := params[0]
			tname := params[1]
			key := params[2]

			okTable, okNetwork := nDB.deleteEntry(nid, tname, key)
			if !okTable {
				logrus.Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid)
			}
			if !okNetwork {
				logrus.Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname)
			}

			return false
		})
		nDB.Unlock()
	}
}
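
// gossip runs on every GossipInterval tick. For each network this node is
// attached to it retrieves up to one UDP packet worth of pending table-event
// broadcasts, packs them into a compound message and sends it best effort
// to up to three random peers of that network. It also periodically prints
// queue statistics and the memberlist health score.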
func (nDB *NetworkDB) gossip() {
	networkNodes := make(map[string][]string)
	nDB.RLock()
	thisNodeNetworks := nDB.networks[nDB.config.NodeID]
	for nid := range thisNodeNetworks {
		networkNodes[nid] = nDB.networkNodes[nid]
	}
	printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
	printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod
	nDB.RUnlock()

	if printHealth {
		healthScore := nDB.memberlist.GetHealthScore()
		if healthScore != 0 {
			logrus.Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore)
		}
		nDB.lastHealthTimestamp = time.Now()
	}

	for nid, nodes := range networkNodes {
		mNodes := nDB.mRandomNodes(3, nodes)
		bytesAvail := nDB.config.PacketBufferSize - compoundHeaderOverhead

		nDB.RLock()
		network, ok := thisNodeNetworks[nid]
		nDB.RUnlock()
		if !ok || network == nil {
			// It is normal for the network to be removed
			// between the time we collect the network
			// attachments of this node and processing
			// them here.
			continue
		}

		broadcastQ := network.tableBroadcasts

		if broadcastQ == nil {
			logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
			continue
		}

		msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail)
		// Collect stats and print the queue info; this code also runs when the queues are empty so they stay visible
		network.qMessagesSent += len(msgs)
		if printStats {
			logrus.Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d",
				nDB.config.Hostname, nDB.config.NodeID,
				nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(),
				network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
			network.qMessagesSent = 0
		}

		if len(msgs) == 0 {
			continue
		}

		// Create a compound message
		compound := makeCompoundMessage(msgs)

		for _, node := range mNodes {
			nDB.RLock()
			mnode := nDB.nodes[node]
			nDB.RUnlock()

			if mnode == nil {
				break
			}

			// Send the compound message
			if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil {
				logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
			}
		}
	}
	// Reset the stats
	if printStats {
		nDB.lastStatsTimestamp = time.Now()
	}
}
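
// bulkSyncTables runs on every PushPullInterval tick. For each network this
// node is attached to (and is not leaving), it performs a bulk sync with a
// random peer and then drops from the pending list any other network already
// covered by that sync.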
func (nDB *NetworkDB) bulkSyncTables() {
	var networks []string
	nDB.RLock()
	for nid, network := range nDB.networks[nDB.config.NodeID] {
		if network.leaving {
			continue
		}
		networks = append(networks, nid)
	}
	nDB.RUnlock()

	for {
		if len(networks) == 0 {
			break
		}

		nid := networks[0]
		networks = networks[1:]

		nDB.RLock()
		nodes := nDB.networkNodes[nid]
		nDB.RUnlock()

		// No peer nodes on this network. Move on.
		if len(nodes) == 0 {
			continue
		}

		completed, err := nDB.bulkSync(nodes, false)
		if err != nil {
			logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err)
			continue
		}

		// Remove all the networks for which we have
		// successfully completed bulk sync in this iteration.
		updatedNetworks := make([]string, 0, len(networks))
		for _, nid := range networks {
			var found bool
			for _, completedNid := range completed {
				if nid == completedNid {
					found = true
					break
				}
			}

			if !found {
				updatedNetworks = append(updatedNetworks, nid)
			}
		}

		networks = updatedNetworks
	}
}
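
// bulkSync syncs the table state with the given peer nodes. When all is
// false (periodic sync) only up to two random peers are selected and the
// sync stops after the first success; when all is true every node in the
// list is synced. It returns the networks that were synchronized.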
func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
	if !all {
		// Get 2 random nodes. 2nd node will be tried if the bulk sync to
		// 1st node fails.
		nodes = nDB.mRandomNodes(2, nodes)
	}

	if len(nodes) == 0 {
		return nil, nil
	}

	var err error
	var networks []string
	for _, node := range nodes {
		if node == nDB.config.NodeID {
			continue
		}
		logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node)
		networks = nDB.findCommonNetworks(node)
		err = nDB.bulkSyncNode(networks, node, true)
		// If this is a periodic bulk sync, stop after the first successful sync.
		if !all && err == nil {
			break
		}
		if err != nil {
			err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
			logrus.Warn(err.Error())
		}
	}

	if err != nil {
		return nil, err
	}

	return networks, nil
}

// Bulk sync all the table entries belonging to a set of networks to a
// single peer node. It can be unsolicited or can be in response to an
// unsolicited bulk sync
func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error {
	var msgs [][]byte

	var unsolMsg string
	if unsolicited {
		unsolMsg = "unsolicited"
	}

	logrus.Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s",
		nDB.config.Hostname, nDB.config.NodeID, unsolMsg, networks, node)

	nDB.RLock()
	mnode := nDB.nodes[node]
	if mnode == nil {
		nDB.RUnlock()
		return nil
	}

	for _, nid := range networks {
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			entry, ok := v.(*entry)
			if !ok {
				return false
			}

			eType := TableEventTypeCreate
			if entry.deleting {
				eType = TableEventTypeDelete
			}

			params := strings.Split(path[1:], "/")
			tEvent := TableEvent{
				Type:      eType,
				LTime:     entry.ltime,
				NodeName:  entry.node,
				NetworkID: nid,
				TableName: params[1],
				Key:       params[2],
				Value:     entry.value,
				// The duration in seconds is a float that is truncated to int32 below
				ResidualReapTime: int32(entry.reapTime.Seconds()),
			}

			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
			if err != nil {
				logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
				return false
			}

			msgs = append(msgs, msg)
			return false
		})
	}
	nDB.RUnlock()

	// Create a compound message
	compound := makeCompoundMessage(msgs)

	bsm := BulkSyncMessage{
		LTime:       nDB.tableClock.Time(),
		Unsolicited: unsolicited,
		NodeName:    nDB.config.NodeID,
		Networks:    networks,
		Payload:     compound,
	}

	buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
	if err != nil {
		return fmt.Errorf("failed to encode bulk sync message: %v", err)
	}

	nDB.Lock()
	ch := make(chan struct{})
	nDB.bulkSyncAckTbl[node] = ch
	nDB.Unlock()

	err = nDB.memberlist.SendReliable(&mnode.Node, buf)
	if err != nil {
		nDB.Lock()
		delete(nDB.bulkSyncAckTbl, node)
		nDB.Unlock()
		return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
	}

	// Wait on a response only if it is unsolicited.
	if unsolicited {
		startTime := time.Now()
		t := time.NewTimer(30 * time.Second)
		select {
		case <-t.C:
			logrus.Errorf("Bulk sync to node %s timed out", node)
		case <-ch:
			logrus.Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime))
		}
		t.Stop()
	}

	return nil
}

// Returns a random offset in the range [0, n)
func randomOffset(n int) int {
	if n == 0 {
		return 0
	}

	val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		logrus.Errorf("Failed to get a random offset: %v", err)
		return 0
	}

	return int(val.Int64())
}

// mRandomNodes is used to select up to m random nodes. It is possible
// that fewer than m nodes are returned.
func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string {
	n := len(nodes)
	mNodes := make([]string, 0, m)
OUTER:
	// Probe up to 3*n times; with large n this is not necessary
	// since m << n, but with small n we want the search to be
	// exhaustive
	for i := 0; i < 3*n && len(mNodes) < m; i++ {
		// Get random node
		idx := randomOffset(n)
		node := nodes[idx]

		if node == nDB.config.NodeID {
			continue
		}

		// Check if we have this node already
		for j := 0; j < len(mNodes); j++ {
			if node == mNodes[j] {
				continue OUTER
			}
		}

		// Append the node
		mNodes = append(mNodes, node)
	}

	return mNodes
}