package networkdb

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"math/big"
	rnd "math/rand"
	"net"
	"strings"
	"time"

	"github.com/hashicorp/memberlist"
	"github.com/sirupsen/logrus"
)
const (
	reapPeriod            = 5 * time.Second
	rejoinClusterDuration = 10 * time.Second
	rejoinInterval        = 60 * time.Second
	retryInterval         = 1 * time.Second
	nodeReapInterval      = 24 * time.Hour
	nodeReapPeriod        = 2 * time.Hour
	// considering a cluster with > 20 nodes and a drain speed of 100 msg/s,
	// the following is roughly 1 minute
	maxQueueLenBroadcastOnSync = 500
)
type logWriter struct{}

func (l *logWriter) Write(p []byte) (int, error) {
	str := string(p)
	str = strings.TrimSuffix(str, "\n")

	switch {
	case strings.HasPrefix(str, "[WARN] "):
		str = strings.TrimPrefix(str, "[WARN] ")
		logrus.Warn(str)
	case strings.HasPrefix(str, "[DEBUG] "):
		str = strings.TrimPrefix(str, "[DEBUG] ")
		logrus.Debug(str)
	case strings.HasPrefix(str, "[INFO] "):
		str = strings.TrimPrefix(str, "[INFO] ")
		logrus.Info(str)
	case strings.HasPrefix(str, "[ERR] "):
		str = strings.TrimPrefix(str, "[ERR] ")
		logrus.Warn(str)
	}

	return len(p), nil
}
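
// For illustration, a hypothetical memberlist line such as
// "[WARN] memberlist: Failed to send ping: ..." is forwarded as
// logrus.Warn("memberlist: Failed to send ping: ...") with the severity
// prefix stripped; note that "[ERR] " lines are also mapped to the
// warning level.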
// SetKey adds a new key to the key ring
func (nDB *NetworkDB) SetKey(key []byte) {
	logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			return
		}
	}
	nDB.config.Keys = append(nDB.config.Keys, key)
	if nDB.keyring != nil {
		nDB.keyring.AddKey(key)
	}
}
// SetPrimaryKey sets the given key as the primary key. The key should
// already have been added a priori through SetKey
func (nDB *NetworkDB) SetPrimaryKey(key []byte) {
	logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5])
	nDB.RLock()
	defer nDB.RUnlock()
	for _, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			if nDB.keyring != nil {
				nDB.keyring.UseKey(dbKey)
			}
			break
		}
	}
}
// RemoveKey removes a key from the key ring. The key being removed
// can't be the primary key
func (nDB *NetworkDB) RemoveKey(key []byte) {
	logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5])
	nDB.Lock()
	defer nDB.Unlock()
	for i, dbKey := range nDB.config.Keys {
		if bytes.Equal(key, dbKey) {
			nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...)
			if nDB.keyring != nil {
				nDB.keyring.RemoveKey(dbKey)
			}
			break
		}
	}
}
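
// A typical key rotation, sketched here as a hedged usage note rather than a
// guarantee of this package's API: the caller first distributes the new key
// with SetKey(newKey), then promotes it with SetPrimaryKey(newKey) once every
// node holds it, and finally drops the retired key with RemoveKey(oldKey).
// This mirrors the memberlist keyring semantics (AddKey/UseKey/RemoveKey)
// used above.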
func (nDB *NetworkDB) clusterInit() error {
	nDB.lastStatsTimestamp = time.Now()
	nDB.lastHealthTimestamp = nDB.lastStatsTimestamp

	config := memberlist.DefaultLANConfig()
	config.Name = nDB.config.NodeID
	config.BindAddr = nDB.config.BindAddr
	config.AdvertiseAddr = nDB.config.AdvertiseAddr
	config.UDPBufferSize = nDB.config.PacketBufferSize

	if nDB.config.BindPort != 0 {
		config.BindPort = nDB.config.BindPort
	}

	config.ProtocolVersion = memberlist.ProtocolVersion2Compatible
	config.Delegate = &delegate{nDB: nDB}
	config.Events = &eventDelegate{nDB: nDB}
	// custom logger that does not add time or date, so they are not
	// duplicated by logrus
	config.Logger = log.New(&logWriter{}, "", 0)

	var err error
	if len(nDB.config.Keys) > 0 {
		for i, key := range nDB.config.Keys {
			logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5])
		}
		nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0])
		if err != nil {
			return err
		}
		config.Keyring = nDB.keyring
	}

	nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{
		NumNodes: func() int {
			nDB.RLock()
			num := len(nDB.nodes)
			nDB.RUnlock()
			return num
		},
		RetransmitMult: config.RetransmitMult,
	}

	mlist, err := memberlist.Create(config)
	if err != nil {
		return fmt.Errorf("failed to create memberlist: %v", err)
	}

	nDB.ctx, nDB.cancelCtx = context.WithCancel(context.Background())
	nDB.memberlist = mlist

	for _, trigger := range []struct {
		interval time.Duration
		fn       func()
	}{
		{reapPeriod, nDB.reapState},
		{config.GossipInterval, nDB.gossip},
		{config.PushPullInterval, nDB.bulkSyncTables},
		{retryInterval, nDB.reconnectNode},
		{nodeReapPeriod, nDB.reapDeadNode},
		{rejoinInterval, nDB.rejoinClusterBootStrap},
	} {
		t := time.NewTicker(trigger.interval)
		go nDB.triggerFunc(trigger.interval, t.C, trigger.fn)
		nDB.tickers = append(nDB.tickers, t)
	}

	return nil
}
func (nDB *NetworkDB) retryJoin(ctx context.Context, members []string) {
	t := time.NewTicker(retryInterval)
	defer t.Stop()

	for {
		select {
		case <-t.C:
			if _, err := nDB.memberlist.Join(members); err != nil {
				logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err)
				continue
			}
			if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
				logrus.Errorf("failed to send node join on retry: %v", err)
				continue
			}
			return
		case <-ctx.Done():
			return
		}
	}
}
func (nDB *NetworkDB) clusterJoin(members []string) error {
	mlist := nDB.memberlist

	if _, err := mlist.Join(members); err != nil {
		// In case of failure, there is no need to explicitly call retryJoin:
		// rejoinClusterBootStrap, which runs every rejoinInterval (60s),
		// will retry the join for rejoinClusterDuration (10s)
		return fmt.Errorf("could not join node to memberlist: %v", err)
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return fmt.Errorf("failed to send node join: %v", err)
	}

	return nil
}
func (nDB *NetworkDB) clusterLeave() error {
	mlist := nDB.memberlist

	if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil {
		logrus.Errorf("failed to send node leave: %v", err)
	}

	if err := mlist.Leave(time.Second); err != nil {
		return err
	}

	// cancel the context
	nDB.cancelCtx()

	for _, t := range nDB.tickers {
		t.Stop()
	}

	return mlist.Shutdown()
}
func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, f func()) {
	// Use a random stagger to avoid synchronizing the timers across nodes
	randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger))
	select {
	case <-time.After(randStagger):
	case <-nDB.ctx.Done():
		return
	}
	for {
		select {
		case <-C:
			f()
		case <-nDB.ctx.Done():
			return
		}
	}
}
func (nDB *NetworkDB) reapDeadNode() {
	nDB.Lock()
	defer nDB.Unlock()
	for _, nodeMap := range []map[string]*node{
		nDB.failedNodes,
		nDB.leftNodes,
	} {
		for id, n := range nodeMap {
			if n.reapTime > nodeReapPeriod {
				n.reapTime -= nodeReapPeriod
				continue
			}
			logrus.Debugf("Garbage collect node %v", n.Name)
			delete(nodeMap, id)
		}
	}
}
// rejoinClusterBootStrap is called periodically to check whether any of the
// bootstrap nodes is active in the cluster. If none is, it calls the cluster
// join to merge the two separate clusters that form when all managers are
// stopped/started at the same time
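//
// The entries in bootStrapIP usually come from the addresses passed to Join;
// as hypothetical examples, both "192.0.2.10:7946" (host:port) and a bare
// "192.0.2.10" are accepted by the parsing below.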
func (nDB *NetworkDB) rejoinClusterBootStrap() {
	nDB.RLock()
	if len(nDB.bootStrapIP) == 0 {
		nDB.RUnlock()
		return
	}

	myself, ok := nDB.nodes[nDB.config.NodeID]
	if !ok {
		nDB.RUnlock()
		logrus.Warnf("rejoinClusterBootStrap unable to find local node info using ID:%v", nDB.config.NodeID)
		return
	}

	bootStrapIPs := make([]string, 0, len(nDB.bootStrapIP))
	for _, bootIP := range nDB.bootStrapIP {
		// bootstrap IPs are usually IP:port from the Join
		var bootstrapIP net.IP
		ipStr, _, err := net.SplitHostPort(bootIP)
		if err != nil {
			// no port present; try to parse the whole string as an IP.
			// Note this seems to be the case for swarm nodes that do not
			// specify any port
			ipStr = bootIP
		}
		bootstrapIP = net.ParseIP(ipStr)
		if bootstrapIP != nil {
			for _, node := range nDB.nodes {
				if node.Addr.Equal(bootstrapIP) && !node.Addr.Equal(myself.Addr) {
					// One of the bootstrap nodes (and not myself) is part of the cluster, return
					nDB.RUnlock()
					return
				}
			}
			bootStrapIPs = append(bootStrapIPs, bootIP)
		}
	}
	nDB.RUnlock()
	if len(bootStrapIPs) == 0 {
		// this also avoids calling Join with an empty list, which would
		// erase the current bootstrap IP list
		logrus.Debug("rejoinClusterBootStrap did not find any valid IP")
		return
	}
	// None of the bootStrap nodes are in the cluster; call memberlist join
	logrus.Debugf("rejoinClusterBootStrap, calling cluster join with bootStrap %v", bootStrapIPs)
	ctx, cancel := context.WithTimeout(nDB.ctx, rejoinClusterDuration)
	defer cancel()
	nDB.retryJoin(ctx, bootStrapIPs)
}
func (nDB *NetworkDB) reconnectNode() {
	nDB.RLock()
	if len(nDB.failedNodes) == 0 {
		nDB.RUnlock()
		return
	}

	nodes := make([]*node, 0, len(nDB.failedNodes))
	for _, n := range nDB.failedNodes {
		nodes = append(nodes, n)
	}
	nDB.RUnlock()

	node := nodes[randomOffset(len(nodes))]
	addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)}

	if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil {
		return
	}

	if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil {
		return
	}

	logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name)
	nDB.bulkSync([]string{node.Name}, true)
}
// For timing the entry deletion, the reaper APIs should avoid clock sources
// that use a monotonic reading (time.Now, Sub, etc.). Hence we store a
// reapTime in every entry, set initially to reapInterval and decremented by
// reapPeriod every time the reaper runs. NOTE: nDB.reapTableEntries updates
// the reapTime while holding the lock. This is safe as long as no other
// concurrent path touches the reapTime field.
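//
// As a sketch of the countdown with assumed values: if reapInterval were 30
// minutes (hypothetical value) and reapPeriod is 5 seconds, an entry marked
// for deletion would be visited and decremented roughly 30m/5s = 360 times
// before being removed.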
func (nDB *NetworkDB) reapState() {
	// reapTableEntries relies on the networks still being present, so
	// garbage collect the entries first
	nDB.reapTableEntries()
	nDB.reapNetworks()
}
func (nDB *NetworkDB) reapNetworks() {
	nDB.Lock()
	for _, nn := range nDB.networks {
		for id, n := range nn {
			if n.leaving {
				if n.reapTime <= 0 {
					delete(nn, id)
					continue
				}
				n.reapTime -= reapPeriod
			}
		}
	}
	nDB.Unlock()
}
func (nDB *NetworkDB) reapTableEntries() {
	var nodeNetworks []string
	// This is best effort; any change to the network list will be picked up
	// in the next cycle
	nDB.RLock()
	for nid := range nDB.networks[nDB.config.NodeID] {
		nodeNetworks = append(nodeNetworks, nid)
	}
	nDB.RUnlock()

	cycleStart := time.Now()
	// In order to avoid blocking the database for a long time, apply the
	// garbage collection logic network by network. The lock is taken at the
	// beginning of each cycle and the deletion is inline
	for _, nid := range nodeNetworks {
		nDB.Lock()
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			// timeCompensation compensates in case acquiring the lock above
			// took some time
			timeCompensation := time.Since(cycleStart)
			entry, ok := v.(*entry)
			if !ok || !entry.deleting {
				return false
			}

			// In this check we add an extra 1 second to guarantee that when
			// the number is truncated to int32 to fit the packet for the
			// tableEvent, the number is always strictly > 1 and never 0
			if entry.reapTime > reapPeriod+timeCompensation+time.Second {
				entry.reapTime -= reapPeriod + timeCompensation
				return false
			}

			params := strings.Split(path[1:], "/")
			nid := params[0]
			tname := params[1]
			key := params[2]

			okTable, okNetwork := nDB.deleteEntry(nid, tname, key)
			if !okTable {
				logrus.Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid)
			}
			if !okNetwork {
				logrus.Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname)
			}

			return false
		})
		nDB.Unlock()
	}
}
func (nDB *NetworkDB) gossip() {
	networkNodes := make(map[string][]string)
	nDB.RLock()
	thisNodeNetworks := nDB.networks[nDB.config.NodeID]
	for nid := range thisNodeNetworks {
		networkNodes[nid] = nDB.networkNodes[nid]
	}
	printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
	printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod
	nDB.RUnlock()

	if printHealth {
		healthScore := nDB.memberlist.GetHealthScore()
		if healthScore != 0 {
			logrus.Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore)
		}
		nDB.lastHealthTimestamp = time.Now()
	}

	for nid, nodes := range networkNodes {
		mNodes := nDB.mRandomNodes(3, nodes)
		bytesAvail := nDB.config.PacketBufferSize - compoundHeaderOverhead

		nDB.RLock()
		network, ok := thisNodeNetworks[nid]
		nDB.RUnlock()
		if !ok || network == nil {
			// It is normal for the network to be removed
			// between the time we collect the network
			// attachments of this node and processing
			// them here.
			continue
		}

		broadcastQ := network.tableBroadcasts
		if broadcastQ == nil {
			logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid)
			continue
		}

		msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail)
		// Collect stats and print the queue info; note this code also runs
		// when there are no messages, so the logs give a view of empty queues
		network.qMessagesSent += len(msgs)
		if printStats {
			logrus.Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d",
				nDB.config.Hostname, nDB.config.NodeID,
				nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(),
				network.qMessagesSent/int(nDB.config.StatsPrintPeriod/time.Second))
			network.qMessagesSent = 0
		}

		if len(msgs) == 0 {
			continue
		}

		// Create a compound message
		compound := makeCompoundMessage(msgs)

		for _, node := range mNodes {
			nDB.RLock()
			mnode := nDB.nodes[node]
			nDB.RUnlock()

			if mnode == nil {
				break
			}

			// Send the compound message
			if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil {
				logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err)
			}
		}
	}
	// Reset the stats
	if printStats {
		nDB.lastStatsTimestamp = time.Now()
	}
}
func (nDB *NetworkDB) bulkSyncTables() {
	var networks []string
	nDB.RLock()
	for nid, network := range nDB.networks[nDB.config.NodeID] {
		if network.leaving {
			continue
		}
		networks = append(networks, nid)
	}
	nDB.RUnlock()

	for {
		if len(networks) == 0 {
			break
		}

		nid := networks[0]
		networks = networks[1:]

		nDB.RLock()
		nodes := nDB.networkNodes[nid]
		nDB.RUnlock()

		// No peer nodes on this network. Move on.
		if len(nodes) == 0 {
			continue
		}

		completed, err := nDB.bulkSync(nodes, false)
		if err != nil {
			logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err)
			continue
		}

		// Remove all the networks for which we have
		// successfully completed bulk sync in this iteration.
		updatedNetworks := make([]string, 0, len(networks))
		for _, nid := range networks {
			var found bool
			for _, completedNid := range completed {
				if nid == completedNid {
					found = true
					break
				}
			}

			if !found {
				updatedNetworks = append(updatedNetworks, nid)
			}
		}

		networks = updatedNetworks
	}
}
func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) {
	if !all {
		// Get 2 random nodes. 2nd node will be tried if the bulk sync to
		// 1st node fails.
		nodes = nDB.mRandomNodes(2, nodes)
	}

	if len(nodes) == 0 {
		return nil, nil
	}

	var err error
	var networks []string
	var success bool
	for _, node := range nodes {
		if node == nDB.config.NodeID {
			continue
		}
		logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node)
		networks = nDB.findCommonNetworks(node)
		err = nDB.bulkSyncNode(networks, node, true)
		if err != nil {
			err = fmt.Errorf("bulk sync to node %s failed: %v", node, err)
			logrus.Warn(err.Error())
		} else {
			// bulk sync succeeded
			success = true
			// if it is a periodic bulk sync, stop after the first successful sync
			if !all {
				break
			}
		}
	}

	if success {
		// if at least one node sync succeeded
		return networks, nil
	}

	return nil, err
}
// Bulk sync all the table entries belonging to a set of networks to a
// single peer node. It can be unsolicited or can be in response to an
// unsolicited bulk sync
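//
// Sketch of the exchange as implemented below: an unsolicited sync sends a
// MessageTypeBulkSync over TCP and then waits up to 30 seconds on a channel
// registered in bulkSyncAckTbl, which is signaled when the peer answers with
// its own (solicited) bulk sync.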
func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error {
	var msgs [][]byte

	var unsolMsg string
	if unsolicited {
		unsolMsg = "unsolicited"
	}

	logrus.Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s",
		nDB.config.Hostname, nDB.config.NodeID, unsolMsg, networks, node)

	nDB.RLock()
	mnode := nDB.nodes[node]
	if mnode == nil {
		nDB.RUnlock()
		return nil
	}

	for _, nid := range networks {
		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
			entry, ok := v.(*entry)
			if !ok {
				return false
			}

			eType := TableEventTypeCreate
			if entry.deleting {
				eType = TableEventTypeDelete
			}

			params := strings.Split(path[1:], "/")
			tEvent := TableEvent{
				Type:      eType,
				LTime:     entry.ltime,
				NodeName:  entry.node,
				NetworkID: nid,
				TableName: params[1],
				Key:       params[2],
				Value:     entry.value,
				// The residual reap time is a float number of seconds that
				// is truncated here to int32 so it fits in the packet
				ResidualReapTime: int32(entry.reapTime.Seconds()),
			}

			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
			if err != nil {
				logrus.Errorf("Encode failure during bulk sync: %#v", tEvent)
				return false
			}

			msgs = append(msgs, msg)
			return false
		})
	}
	nDB.RUnlock()

	// Create a compound message
	compound := makeCompoundMessage(msgs)

	bsm := BulkSyncMessage{
		LTime:       nDB.tableClock.Time(),
		Unsolicited: unsolicited,
		NodeName:    nDB.config.NodeID,
		Networks:    networks,
		Payload:     compound,
	}

	buf, err := encodeMessage(MessageTypeBulkSync, &bsm)
	if err != nil {
		return fmt.Errorf("failed to encode bulk sync message: %v", err)
	}

	nDB.Lock()
	ch := make(chan struct{})
	nDB.bulkSyncAckTbl[node] = ch
	nDB.Unlock()

	err = nDB.memberlist.SendReliable(&mnode.Node, buf)
	if err != nil {
		nDB.Lock()
		delete(nDB.bulkSyncAckTbl, node)
		nDB.Unlock()

		return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err)
	}

	// Wait on a response only if it is unsolicited.
	if unsolicited {
		startTime := time.Now()
		t := time.NewTimer(30 * time.Second)
		select {
		case <-t.C:
			logrus.Errorf("Bulk sync to node %s timed out", node)
		case <-ch:
			logrus.Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime))
		}
		t.Stop()
	}

	return nil
}
// randomOffset returns a random offset in the range [0, n)
func randomOffset(n int) int {
	if n == 0 {
		return 0
	}

	val, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		logrus.Errorf("Failed to get a random offset: %v", err)
		return 0
	}

	return int(val.Int64())
}
// mRandomNodes is used to select up to m random nodes. It is possible
// that fewer than m nodes are returned.
func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string {
	n := len(nodes)
	mNodes := make([]string, 0, m)
OUTER:
	// Probe up to 3*n times; with large n this is not necessary
	// since m << n, but with small n we want the search to be
	// exhaustive
	for i := 0; i < 3*n && len(mNodes) < m; i++ {
		// Get random node
		idx := randomOffset(n)
		node := nodes[idx]

		if node == nDB.config.NodeID {
			continue
		}

		// Check if we have this node already
		for j := 0; j < len(mNodes); j++ {
			if node == mNodes[j] {
				continue OUTER
			}
		}

		// Append the node
		mNodes = append(mNodes, node)
	}

	return mNodes
}