func (db *NodeDB) updateStatistics(s *Statistics) func() error { return func() error { db.Main.Batch(func(tx *bolt.Tx) error { m := store.NewMeta(s) m.InvalidateIn(db.validTimeGluon) err := db.Main.UpdateMeta(tx, store.NewMeta(&Statistics{}), m) if err == nil { err = db.NewNodeID(tx, s.Statistics.Data.NodeID, s.Key()) } if err == nil && s.Statistics.Data.Gateway != nil { // put entry in Gateway table g := &Gateway{} gatewayid, _ := db.ResolveNodeID(tx, *s.Statistics.Data.Gateway) g.SetKey([]byte(gatewayid)) m := store.NewMeta(g) m.InvalidateIn(db.validTimeVisData) err = db.Main.Put(tx, m) } return err }) db.cacheExportStatistics.invalidate() //db.cacheExportNodes.invalidate() //db.cacheExportNodesOld.invalidate() return nil } }
// read nodes.json compatible data into database func (db *NodeDB) ImportNodes(r io.Reader, persistent bool) error { nodes, err := readNodesJSON(r) if err != nil { return err } if nodes.Version != 1 { return ErrUnknownVersion } for _, node := range nodes.Nodes { var addr alfred.HardwareAddr if err := addr.Parse(node.NodeInfo.NodeID); err != nil { log.Printf("Import: error parsing NodeID %s: %v, skipping", node.NodeInfo.NodeID, err) continue } n := &NodeInfo{NodeInfo: gluon.NodeInfo{Source: addr, Data: &node.NodeInfo}} m := store.NewMeta(n) m.Updated = time.Time(node.LastSeen).Local() m.Created = time.Time(node.FirstSeen).Local() if !persistent { m.InvalidateIn(db.validTimeGluon) } err := db.Main.Batch(func(tx *bolt.Tx) error { return db.Main.UpdateMeta(tx, store.NewMeta(&NodeInfo{}), m) }) if err != nil { log.Printf("Import: error on node %v", node.NodeInfo.NodeID) } } return err }
// Write a full nodes.json style document based on the current
// database contents.
//
// The result is served from cacheExportNodes; the closure below is only
// evaluated on a cache miss. Nodes whose data cannot be assembled are
// logged and omitted from the document rather than failing the export.
func (db *NodeDB) GenerateNodesJSON(w io.Writer, offlineDuration time.Duration) {
	data := db.cacheExportNodes.get(func() []byte {
		nodejs := NodesJSON{
			Nodes:     make(map[string]*NodesJSONData),
			Timestamp: NodesJSONTime(time.Now()),
			Version:   1,
		}
		db.Main.View(func(tx *bolt.Tx) error {
			nodeinfo := &NodeInfo{}
			nmeta := store.NewMeta(nodeinfo)
			// walk all stored NodeInfo items; nmeta is reused and holds
			// the current item after each iteration step
			return db.Main.ForEach(tx, nmeta, func(cursor *bolt.Cursor) (bool, error) {
				data, err := db.getNodesJSONData(tx, nmeta, offlineDuration)
				if err == nil {
					nodejs.Nodes[data.NodeInfo.NodeID] = data
				} else {
					// skip this node but keep exporting the rest
					log.Printf("NodeDB: can not generate node info JSON for %v: %v", alfred.HardwareAddr(nmeta.Key()), err)
				}
				return false, nil
			})
		})
		// encode into a buffer so the rendered document can be cached
		buf := new(bytes.Buffer)
		enc := json.NewEncoder(buf)
		if err := enc.Encode(&nodejs); err != nil {
			// cache an empty document on encoding failure
			return []byte{}
		}
		return buf.Bytes()
	})
	w.Write(data)
}
func (db *NodeDB) updateVisData(v *VisData) func() error { return func() error { db.Main.Batch(func(tx *bolt.Tx) error { m := store.NewMeta(v) m.InvalidateIn(db.validTimeVisData) err := db.Main.Put(tx, m) nodeid, _ := db.ResolveNodeID(tx, v.VisV1.Mac) err = db.NewNodeID(tx, nodeid, v.VisV1.Mac) if err == nil { for _, mac := range v.VisV1.Ifaces { err = db.NewNodeID(tx, nodeid, mac.Mac) if err != nil { break } } } return err }) db.cacheExportVisData.invalidate() db.cacheExportAliases.invalidate() db.cacheExportGraph.invalidate() return nil } }
func (db *NodeDB) NewNodeID(tx *bolt.Tx, nodeid string, alias []byte) error { id := &NodeID{} id.SetKey(alias) id.Set([]byte(nodeid)) m := store.NewMeta(id) m.InvalidateIn(db.validTimeVisData) return db.Main.Put(tx, m) }
func (db *NodeDB) updateNodeInfo(i *NodeInfo, persistent bool) func() error { return func() error { db.Main.Batch(func(tx *bolt.Tx) error { m := store.NewMeta(i) if !persistent { m.InvalidateIn(db.validTimeGluon) } err := db.Main.UpdateMeta(tx, store.NewMeta(&NodeInfo{}), m) if err == nil { err = db.NewNodeID(tx, i.NodeInfo.Data.NodeID, i.Key()) } return err }) db.cacheExportNodeInfo.invalidate() db.cacheExportNodes.invalidate() db.cacheExportNodesOld.invalidate() return nil } }
func (db *NodeDB) ResolveNodeID(tx *bolt.Tx, mac alfred.HardwareAddr) (string, bool) { id := &NodeID{} m := store.NewMeta(id) if db.Main.Get(tx, mac, m) == nil && m.GetItem(id) == nil { if bytes, err := id.Bytes(); err == nil { return string(bytes), true } } // when we have no nodeID, we return a synthetic one return mac.String(), false }
func (db *NodeDB) jsonexport(w io.Writer, i store.Item) func(tx *bolt.Tx) error { return func(tx *bolt.Tx) error { enc := json.NewEncoder(w) first := true m := store.NewMeta(i) db.Main.ForEach(tx, m, func(cursor *bolt.Cursor) (bool, error) { err := m.GetItem(i) if err == nil { t := m.GetTransfer() if first { first = false } else { w.Write([]byte{','}) } enc.Encode(t) } return false, nil }) return nil } }
// count data items
//
// count walks all stored Statistics records, logs a per-node client
// count sample and finally logs mesh-wide node and client totals.
// Nodes not updated within offlineAfter are logged as NODE_OFFLINE.
// A value is sent on done when the pass has finished.
func (db *NodeDB) count(offlineAfter time.Duration, done chan<- interface{}) error {
	s := &Statistics{}
	m := store.NewMeta(s)
	clients := 0
	nodes := 0
	now := time.Now()
	// nodes last updated before this instant count as offline
	deadline := now.Add(-offlineAfter)
	err := db.Main.View(func(tx *bolt.Tx) error {
		return db.Main.ForEach(tx, m, func(cursor *bolt.Cursor) (bool, error) {
			if m.GetItem(s) == nil {
				nodeid := s.Data.NodeID
				if m.Updated.Before(deadline) {
					// node is offline
					// NOTE(review): the early return also skips the
					// `nodes += 1` below, so offline nodes are excluded
					// from the mesh node total — confirm this is intended.
					l := NewCountNodeClients(nodeid, now, NODE_OFFLINE)
					db.logCount(l)
					return false, nil
				}
				if s.Data.Clients != nil {
					//TODO: log Total or just Wifi? For now: Wifi.
					l := NewCountNodeClients(nodeid, m.Updated, s.Data.Clients.Wifi)
					db.logCount(l)
					clients += s.Data.Clients.Wifi
				} else {
					// no client data available: log an explicit zero
					l := NewCountNodeClients(nodeid, now, 0)
					db.logCount(l)
				}
			}
			nodes += 1
			return false, nil
		})
	})
	// mesh-wide totals
	lc := &CountMeshClients{Count{Timestamp: now, Count: clients}}
	db.logCount(lc)
	ln := &CountMeshNodes{Count{Timestamp: now, Count: nodes}}
	db.logCount(ln)
	log.Printf("Log: %d nodes with %d clients", nodes, clients)
	done <- struct{}{}
	return err
}
// Assemble data elements for a mesh node from database. // This operation assumes the database is already locked by the caller. func (db *NodeDB) getNodesOldJSONData(tx *bolt.Tx, nmeta *store.Meta, nodeid string, offlineDuration time.Duration) (*NodesOldJSONData, error) { data := &NodesOldJSONData{} ninfo := &NodeInfo{} if err := nmeta.GetItem(ninfo); err != nil { return data, err } nodeinfo := *ninfo // make a copy data.Name = &nodeinfo.Data.Hostname data.Id = nodeid if nodeinfo.Data.Location != nil { data.Geo = []float64{nodeinfo.Data.Location.Latitude, nodeinfo.Data.Location.Longitude} } if nodeinfo.Data.Software != nil { if nodeinfo.Data.Software.Firmware != nil { data.Firmware = &nodeinfo.Data.Software.Firmware.Release data.GluonBase = nodeinfo.Data.Software.Firmware.Base } if nodeinfo.Data.Software.BatmanAdv != nil { data.BatmanVersion = &nodeinfo.Data.Software.BatmanAdv.Version } if nodeinfo.Data.Software.AutoUpdater != nil { data.AutoUpdaterState = nodeinfo.Data.Software.AutoUpdater.Enabled data.AutoUpdaterBranch = nodeinfo.Data.Software.AutoUpdater.Branch } } if nodeinfo.Data.Hardware != nil { data.Hardware = nodeinfo.Data.Hardware.Model } if nodeinfo.Data.Network != nil { data.Addresses = nodeinfo.Data.Network.Addresses } // latest datestamp is the "last seen" time lastseen := nmeta.Updated statistics := &Statistics{} smeta := store.NewMeta(statistics) if db.Main.Get(tx, nmeta.Key(), smeta) == nil { if lastseen.Before(smeta.Updated) { lastseen = smeta.Updated } if smeta.GetItem(statistics) == nil { statdata := statistics.Data data.Uptime = statdata.Uptime if statdata.Clients != nil { data.ClientCount = statdata.Clients.Total } if statdata.Gateway != nil { data.Gateway = statdata.Gateway } } } vis := &VisData{} vmeta := store.NewMeta(vis) if db.Main.Get(tx, nmeta.Key(), vmeta) == nil { if lastseen.Before(vmeta.Updated) { lastseen = vmeta.Updated } } data.LastSeen = lastseen.Unix() // set gateway flag when we have the node's address in // our list of 
gateways data.Flags.Gateway = db.Main.Exists(tx, nmeta.Key(), &Gateway{}) // online state is determined by the time we have last // seen a mesh node offline := time.Now().Sub(time.Time(lastseen)) if offline < offlineDuration { data.Flags.Online = true } else { data.Flags.Online = false } return data, nil }
// Write a full nodes.json style document based on the current // database contents. func (db *NodeDB) GenerateNodesOldJSON(w io.Writer, offlineDuration time.Duration) { data := db.cacheExportNodesOld.get(func() []byte { nodejs := NodesOldJSON{ Meta: NodesOldJSONMeta{ Timestamp: NodesJSONTime(time.Now()), GluonRelease: "0.6.3", }, Nodes: make([]*NodesOldJSONData, 0, 500), Links: make([]*NodesOldJSONLink, 0, 500), } nodes := make(map[string]int) db.Main.View(func(tx *bolt.Tx) error { nodeinfo := &NodeInfo{} nmeta := store.NewMeta(nodeinfo) err := db.Main.ForEach(tx, nmeta, func(cursor *bolt.Cursor) (bool, error) { data, err := db.getNodesOldJSONData(tx, nmeta, nodeinfo.Data.NodeID, offlineDuration) if err == nil { nodes[nodeinfo.Data.NodeID] = len(nodejs.Nodes) nodejs.Nodes = append(nodejs.Nodes, data) } else { log.Printf("NodeDB: can not generate node info JSON for %v: %v", nodeinfo.Data.NodeID, err) } return false, nil }) if err != nil { return err } links := make(map[int]map[int]*NodesOldJSONLink) d := &VisData{} m := store.NewMeta(d) err = db.Main.ForEach(tx, m, func(cursor *bolt.Cursor) (bool, error) { if m.GetItem(d) != nil { // skip unparseable items return false, nil } // main address is the first element in batadv.VisV1.Ifaces nodeid, _ := db.ResolveNodeID(tx, d.Ifaces[0].Mac) source, ok := nodes[nodeid] if !ok { return false, nil } _, exists := links[source] if !exists { links[source] = make(map[int]*NodesOldJSONLink) } for _, entry := range d.Entries { if entry.Qual == 0 { // TT entry, we do not cover these continue } enodeid, _ := db.ResolveNodeID(tx, []byte(entry.Mac)) target, ok := nodes[enodeid] if !ok { continue } _, exists = links[target] var node *NodesOldJSONLink if exists { node, exists = links[target][source] } if !exists { node, exists = links[source][target] } if exists { node.Quality = fmt.Sprintf("%s, %.03f", node.Quality, 255.0/float64(entry.Qual)) } else { links[source][target] = &NodesOldJSONLink{ Id: fmt.Sprintf("%s-%s", nodeid, 
enodeid), Source: source, Target: target, Quality: fmt.Sprintf("%.03f", 255.0/float64(entry.Qual)), Type: nil, } } } return false, nil }) if err != nil { return err } for _, s := range links { for _, l := range s { nodejs.Links = append(nodejs.Links, l) } } return nil }) buf := new(bytes.Buffer) enc := json.NewEncoder(w) if err := enc.Encode(&nodejs); err != nil { return []byte{} } return buf.Bytes() }) w.Write(data) }
// Assemble data elements for a mesh node from database.
// This operation assumes the database is already locked by the caller.
//
// nmeta must already hold the node's NodeInfo record. The "first seen"
// time is the earliest creation timestamp and the "last seen" time the
// latest update timestamp across the node's info, statistics and vis
// records.
func (db *NodeDB) getNodesJSONData(tx *bolt.Tx, nmeta *store.Meta, offlineDuration time.Duration) (*NodesJSONData, error) {
	data := &NodesJSONData{}
	nodeinfo := &NodeInfo{}
	if err := nmeta.GetItem(nodeinfo); err != nil {
		return data, err
	}
	data.NodeInfo = *nodeinfo.Data // make a copy
	// earliest datestamp is the "first seen" time,
	// latest datestamp is the "last seen" time
	firstseen := nmeta.Created
	lastseen := nmeta.Updated
	statistics := &Statistics{}
	smeta := store.NewMeta(statistics)
	if db.Main.Get(tx, nmeta.Key(), smeta) == nil {
		if smeta.Created.Before(firstseen) {
			firstseen = smeta.Created
		}
		if lastseen.Before(smeta.Updated) {
			lastseen = smeta.Updated
		}
		if smeta.GetItem(statistics) == nil {
			statdata := statistics.Data
			if statdata.Memory != nil {
				if statdata.Memory.Total != 0 {
					// this calculation is a bit stupid, but compatible with ffmap-backend:
					data.Statistics.MemoryUsage = 1.0 - (float64(statdata.Memory.Free) / float64(statdata.Memory.Total))
				} else {
					// no total reported: treat memory as fully used
					data.Statistics.MemoryUsage = 1
				}
			}
			data.Statistics.Uptime = statdata.Uptime
			if statdata.Clients != nil {
				data.Statistics.Clients = statdata.Clients.Total
			}
			if statdata.Gateway != nil {
				data.Statistics.Gateway = statdata.Gateway
			}
			data.Statistics.LoadAvg = statdata.LoadAvg
			data.Statistics.RootFSUsage = statdata.RootFSUsage
		}
	}
	// vis data only contributes to the first/last seen window
	vis := &VisData{}
	vmeta := store.NewMeta(vis)
	if db.Main.Get(tx, nmeta.Key(), vmeta) == nil {
		if vmeta.Created.Before(firstseen) {
			firstseen = vmeta.Created
		}
		if lastseen.Before(vmeta.Updated) {
			lastseen = vmeta.Updated
		}
	}
	data.FirstSeen = NodesJSONTime(firstseen)
	data.LastSeen = NodesJSONTime(lastseen)
	// set gateway flag when we have the node's address in
	// our list of gateways (the Gateway table is keyed by the
	// resolved node ID)
	nodeid, _ := db.ResolveNodeID(tx, alfred.HardwareAddr(nmeta.Key()))
	data.Flags.Gateway = db.Main.Exists(tx, []byte(nodeid), &Gateway{})
	// online state is determined by the time we have last
	// seen a mesh node
	offline := time.Now().Sub(time.Time(data.LastSeen))
	if offline < offlineDuration {
		data.Flags.Online = true
	} else {
		data.Flags.Online = false
	}
	return data, nil
}
// Write a full graph.json document based on the contents of // the database. func (db *NodeDB) GenerateGraphJSON(w io.Writer) { data := db.cacheExportGraph.get(func() []byte { // index for the nodes in the node list for later lookup nodes := make(map[string]int) // actual node list nodesjs := make([]GraphJSONNode, 0, 100) // index for node links, indexed by their IDs/MACs links := make(map[string]map[string]GraphJSONLink) // actual link list objects linksjs := make([]GraphJSONLink, 0, 100) d := &VisData{} m := store.NewMeta(d) db.Main.View(func(tx *bolt.Tx) error { return db.Main.ForEach(tx, m, func(cursor *bolt.Cursor) (bool, error) { if m.GetItem(d) != nil { // skip unparseable items return false, nil } // main address is the first element in batadv.VisV1.Ifaces nodeid, _ := db.ResolveNodeID(tx, d.Ifaces[0].Mac) isgateway := db.Main.Exists(tx, []byte(nodeid), &Gateway{}) if _, seen := nodes[nodeid]; !seen { // new node, put into lists nodes[nodeid] = len(nodesjs) nodesjs = append(nodesjs, NewGraphJSONNode(d.VisV1.Mac, nodeid, len(nodesjs))) } nodelinks := make(map[string]GraphJSONLink) for _, entry := range d.Entries { if entry.Qual == 0 { // TT entry, we do not cover these continue } enodeid, _ := db.ResolveNodeID(tx, []byte(entry.Mac)) if _, seen := nodes[enodeid]; !seen { // linked node is a new node, also put into lists since it has to exist nodes[enodeid] = len(nodesjs) nodesjs = append(nodesjs, NewGraphJSONNode(entry.Mac, enodeid, len(nodesjs))) } // do a cross check: did we already record an entry for the // reverse direction? If so, mark it as being birectional // and recalculate the link quality value if rev, exists := links[enodeid]; exists { if rrev, exists := rev[nodeid]; exists { if isgateway { rrev.Vpn = true } rrev.Bidirect = true // middle value for now - or should we chose bigger (worse) value? 
rrev.Tq = (rrev.Tq + 255.0/float64(entry.Qual)) / 2 links[enodeid][nodeid] = rrev continue } } // new link, record it nodelinks[enodeid] = GraphJSONLink{Tq: 255.0 / float64(entry.Qual), Vpn: isgateway} } links[nodeid] = nodelinks return false, nil }) }) // build link table with numerical references for node, nodelinks := range links { if iface1, ok := nodes[node]; ok { for node2, link := range nodelinks { if iface2, ok := nodes[node2]; ok { link.Source = iface1 link.Target = iface2 linksjs = append(linksjs, link) } } } } graphjs := GraphJSON{ BatAdv: GraphJSONBatAdv{ Directed: false, Nodes: nodesjs, Links: linksjs, Graph: make([]struct{}, 0), }, Version: 1, } buf := new(bytes.Buffer) enc := json.NewEncoder(w) if err := enc.Encode(&graphjs); err != nil { return []byte{} } return buf.Bytes() }) w.Write(data) }