Compare commits


41 Commits

Author SHA1 Message Date
Stefan Hoffmann
631a5e398f Merge branch 'master' of ssh://git.freifunk-rhein-sieg.net:2222/Freifunk-Troisdorf/ubnt-freifunk-map-api
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2024-09-25 18:34:32 +02:00
Stefan Hoffmann
e6583918c1 Fixing ghost devices in Unifi 2024-09-25 18:33:41 +02:00
6f4fc76812 Delete .drone.jsonnet
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2024-09-25 16:00:47 +00:00
Stefan Hoffmann
ff5cf755aa Bugfixing UISP 503 Errors.
Finetuning API Calls
2024-09-25 17:53:38 +02:00
f9fa5fe26a
Naming changes 2024-03-19 18:43:09 +01:00
73166fcedc
send influx datapoints only when enabled
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2024-03-18 19:34:06 +01:00
7549eaa5d0
bugfix
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2024-03-18 18:57:55 +01:00
b8087ff4d9
Changed go build
Some checks failed
ci/woodpecker/push/woodpecker Pipeline failed
2024-03-18 18:53:05 +01:00
bcc4195234
Add woodpecker CI
Some checks failed
ci/woodpecker/push/woodpecker Pipeline failed
2024-03-18 18:27:43 +01:00
5ba7f23776
Changed UISP API for Statistics 2024-03-18 18:13:22 +01:00
dc1bc7f135
Removed unneeded line
All checks were successful
continuous-integration/drone/push Build is passing
2023-05-14 17:40:31 +02:00
c8c7c9e938
Error Handling
Some checks failed
continuous-integration/drone/push Build is failing
2023-05-14 11:44:20 +02:00
21157e5fb4
Readme angepasst 2023-05-14 11:44:11 +02:00
c9b496d5eb
Fixed CPU on Map
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-05-12 19:07:16 +02:00
0411e59eed
Fixes Memory for the Map
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-05-12 18:59:38 +02:00
ae6d96a0ff
Get CPU in % from InfluxDB 2023-05-12 18:00:25 +02:00
064161584d Merge pull request 'Added InfluxDB for Gateways' (#23) from testing-statistics into master
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
Reviewed-on: #23
2023-05-12 13:16:42 +00:00
2313dc827e
Added InfluxDB for Gateways
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-05-12 15:14:06 +02:00
0ba254b9a2
Add Addresses to Gateways
All checks were successful
continuous-integration/drone/tag Build is passing
continuous-integration/drone/push Build is passing
2023-05-12 08:17:31 +02:00
c4a7bfec1f
Bugfixing
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-05-12 07:55:58 +02:00
20b368f6f7
Add Static Gateway Configuration
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-05-11 21:38:10 +02:00
452a550801
removed testing branch entry from README.md
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-04-28 23:00:16 +02:00
742a904f95
Merge branch 'testing'
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-28 22:53:35 +02:00
97097adf58
syntax guessing
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
continuous-integration/drone/pr Build is failing
2023-04-28 22:46:08 +02:00
9cf5834e53
syntax 2023-04-28 22:45:07 +02:00
812c44e2fe
added tag latest 2023-04-28 22:41:03 +02:00
968f3b3f57
more foo
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-04-28 22:37:31 +02:00
26bad6c9a8
removed wrong comma 2023-04-28 22:34:55 +02:00
3cf3cfc906
deleted unused file manifest 2023-04-28 22:31:02 +02:00
c1554cbbc9
removed unnecessary pipeline arguments, removed manifest 2023-04-28 22:30:19 +02:00
7225bf5146
changed repo name back
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is failing
2023-04-28 22:22:55 +02:00
61ace83ebd
testing more stuffz
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is failing
2023-04-28 22:09:25 +02:00
3f27d527e0
typo
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is failing
2023-04-28 21:57:24 +02:00
d9d35a1270
added testing branch 2023-04-28 21:53:05 +02:00
c20fccf6ce
typo
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-28 21:52:43 +02:00
5e5a6184f3
Changed registry 2023-04-28 21:51:32 +02:00
0ef8e0c51f
invalid reference format: repository name must be lowercase
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is failing
2023-04-28 21:34:22 +02:00
3aca4a52bf
Changed Docker Registry
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is failing
2023-04-28 21:30:46 +02:00
13529828ea Change Go to 1.20
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-04-28 18:06:57 +00:00
22c935ce2a
Check if device is online for statistics processing
Some checks failed
continuous-integration/drone/push Build is failing
2023-04-28 16:47:15 +02:00
16dd56e320
Tidy things up 2023-04-28 15:20:42 +02:00
15 changed files with 597 additions and 427 deletions

.drone.jsonnet (deleted file)

@ -1,113 +0,0 @@
local pipeline(os, arch) = {
kind: "pipeline",
name: os + "/" + arch,
platform: {
"os": os,
"arch": arch,
},
steps: [{
name: "compile " + os + "/" + arch,
image: "golang:1.15.6-alpine3.12",
environment: {
"GOOS": os,
"GOARCH": arch,
"CGO_ENABLED": "0",
},
commands: [
'go build -ldflags "-s -w -X main.version=${DRONE_TAG##v}" -trimpath -o release/' + os + "/" + arch + "/ubnt-freifunk-map-api .",
"tar -cvzf release/ubnt-freifunk-map-api_"+ os + "-" + arch + ".tar.gz -C release/" + os + "/" + arch + " ubnt-freifunk-map-api"
],
},
{
name: "gitea_release " + os + "/" + arch,
image: "plugins/gitea-release",
settings: {
api_key: { "from_secret": "gitea_api_key" },
base_url: "https://git.freifunk-rhein-sieg.net",
files: "release/*.tar.gz"
},
when: {
event: "tag"
},
},
{
name: "upload to docker hub " + os + "/" + arch,
image: "plugins/docker:" + os + "-" + arch,
settings: {
repo: "fftdf/ffmap-ubnt-api",
username: { "from_secret": "docker_username" },
password: { "from_secret": "docker_password" },
auto_tag: true,
auto_tag_suffix: os + "-" + arch
},
when: {
event: "tag"
},
},
],
};
local manifest() = {
kind: "pipeline",
type: "docker",
name: "manifest",
depends_on: ["linux/amd64"],
when: {
event: "tag"
},
steps: [
{
name: "publish",
image: "plugins/manifest",
settings: {
auto_tag: true,
ignore_missing: true,
spec: "manifest.yml",
username: { "from_secret": "docker_username" },
password: { "from_secret": "docker_password" },
},
when: {
event: "tag"
},
},
],
};
local validateJSON() = {
kind: "pipeline",
type: "docker",
name: "validate json",
when: {
event: "push"
},
steps: [
{
name: "validate ucDevices",
image: "fftdf/docker-json-validate",
commands: [
"jsonlint ucDevices.json"
],
when: {
event: "push"
},
},
{
name: "validate Devices",
image: "fftdf/docker-json-validate",
commands: [
"jsonlint devices.json",
],
when: {
event: "push"
},
},
],
};
[
pipeline("linux", "amd64"),
// pipeline("linux", "arm64"),
manifest()
]

.woodpecker.yml (new file)

@ -0,0 +1,25 @@
---
platform: linux/arm64
pipeline:
  build:
    image: golang
    environment:
      - GOOS=linux
      - GOARCH=amd64
    commands:
      - go build -ldflags "-s -w -X main.version=${CI_COMMIT_TAG}" -trimpath -o release/ubnt-freifunk-map-api .
  docker:
    image: woodpeckerci/plugin-docker-buildx
    settings:
      platforms: linux/amd64
      registry: git.freifunk-rhein-sieg.net
      repo: git.freifunk-rhein-sieg.net/freifunk-troisdorf/ubnt-freifunk-map-api
      username:
        from_secret: gitea_user
      password:
        from_secret: gitea_token
      tags: ${CI_COMMIT_TAG}
    when:
      - branch: master

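The `-X main.version=${CI_COMMIT_TAG}` linker flag in the build step is what makes the released binary report the Git tag instead of the default baked into main.go. A minimal standalone sketch of that mechanism (not the repository's main.go, just the pattern):

```go
package main

import "fmt"

// version is overridden at build time, e.g.
//
//	go build -ldflags "-s -w -X main.version=v1.2.3" -trimpath .
//
// (v1.2.3 is a made-up tag; the pipeline above passes ${CI_COMMIT_TAG} here.)
var version = "development"

func main() {
	fmt.Println("ubnt-freifunk-map-api version:", version)
}
```

Built without the flag the program prints `development`; built by the tag pipeline it prints the tag.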
Dockerfile

@ -1,7 +1,7 @@
 FROM alpine:3.12.3
 WORKDIR /opt/
-ADD ./release/*/*/ubnt-freifunk-map-api /opt/ubnt-freifunk-map-api
+ADD ./release/ubnt-freifunk-map-api /opt/ubnt-freifunk-map-api
 RUN chmod +x /opt/ubnt-freifunk-map-api
 EXPOSE 3000

README.md

@ -1,12 +1,17 @@
# Freifunk Meshviewer Unifi Access Points und Richtfunkstrecken import # Freifunk Meshviewer Unifi Access Points und Richtfunkstrecken import
Dieses tool Importiert Nodes für die Freifunk Map aus den APIs UNMS (Richtfunk) & Unifi (Access Points) Dieses tool Importiert Nodes für die Freifunk Map aus den APIs UISP (Richtfunk) & Unifi (Access Points).
Ebenfalls ist der Import statischer devices möglich. Da diese alle in unerem Proxmox cluster laufen, werden Statistikdaten aus der Proxmox InfluxDB geholt.
Alle Config dateien müssen per http erreichbar sein (z.B. in einem Git)
Für Troisdorf werden diese Dateien hier gepflegt: https://git.freifunk-rhein-sieg.net/Freifunk-Troisdorf/ubnt-api-devices
Für die Rhein-Sieg-Map hier: https://git.freifunk-rhein-sieg.net/Freifunk-Rhein-Sieg/ubnt-api-devices
## Config ## Config
### Unifi Access Points (ucDevices.json) ### Unifi Access Points (unifi_devices.json)
In der Datei ucDevices.json können die Access Points gepflegt werden, die auf der Freifunk Map erscheinen sollen. In der Datei unifi_devices.json können die Access Points gepflegt werden, die auf der Freifunk Map erscheinen sollen.
Hierzu muss die Datei im json Format erweitert werden. Hierzu muss die Datei im json Format erweitert werden.
@ -32,9 +37,9 @@ Erklärung:
* linked_to: (Optional) Die MAC Adresse des Routers an dem der AP angeschlossen ist. Normalerweise gateway_nexthop mit Doppelpunkten. Wenn nicht gesetzt wird kein Link auf der Map angezeigt. * linked_to: (Optional) Die MAC Adresse des Routers an dem der AP angeschlossen ist. Normalerweise gateway_nexthop mit Doppelpunkten. Wenn nicht gesetzt wird kein Link auf der Map angezeigt.
* domain: Die Domain in der sich der AP befindet. (tdf, inn, flu) * domain: Die Domain in der sich der AP befindet. (tdf, inn, flu)
### UNMS Richtfunkstrecken ### UISP Richtfunkstrecken
In der Datei devices.json können die Richtfunkstrecken gepflegt werden, die auf der Freifunk Map erscheinen sollen. In der Datei rifu_devices.json können die Richtfunkstrecken gepflegt werden, die auf der Freifunk Map erscheinen sollen.
```json ```json
{ {
@ -56,11 +61,44 @@ Erklärung:
* gateway: Im Normalfall die NodeID des Supernodes (zu finden in der MAP) * gateway: Im Normalfall die NodeID des Supernodes (zu finden in der MAP)
* domain: Die Domain in der sich der AP befindet. (tdf, inn, flu) * domain: Die Domain in der sich der AP befindet. (tdf, inn, flu)
### UISP Router
In dieser datei werden die Router (meist ER-X) gepflegt. Diese Daten werden dann ebenfalls aus der UISP API Importiert.
```json
{
"name": "Rathaus Uplink",
"mac": "18:e8:29:ad:9a:34",
"gateway_nexthop": "18e8292f7de6",
"gateway": "a28cae6ff604",
"domain": "tdf",
"location": {
"longitude":7.149406208,
"latitude":50.817093402
}
},
```
### Gateways.json
Hier werden Statische Geräte eingetragen die auf dem Proxmox Cluster laufen.
```json
{
"name": "VPN01",
"fqdn": "vpn01.fftdf.de",
"mac": "00:00:00:00:00:01",
"domain": "VPN1",
"adresses": ["5.9.220.114"]
},
```
### Config.json ### Config.json
Es gibt 3 Module die Ein/Ausgeschatet werden können: Es gibt 3 Module die Ein/Ausgeschatet werden können:
* UNMS * UNMS
* Unifi * Unifi
* Meshviewer * Meshviewer
* Gateways
Die Funktion Meshviewer importiert die vorhandenen meshviewer.json und manipuliert dort die Userzahlen. Sobald ein Access Point einen Node aus einer Meshviwer.json als "gateway_nexthop" eingetragen hat, werden die Clients an dem verbundenen Access Point und nicht mehr am Offloader angezeigt. Die Funktion Meshviewer importiert die vorhandenen meshviewer.json und manipuliert dort die Userzahlen. Sobald ein Access Point einen Node aus einer Meshviwer.json als "gateway_nexthop" eingetragen hat, werden die Clients an dem verbundenen Access Point und nicht mehr am Offloader angezeigt.


@ -1,18 +1,21 @@
 {
   "unms": {
     "enabled": false,
-    "unmsAPIUrl": "https://unifi.freifunk-troisdorf.de/v2.1",
+    "unmsAPIUrl": "https://uisp.freifunk-troisdorf.de/v2.1",
     "APItoken": "UNMS API TOKEN",
     "devicesURL": "https://git.freifunk-rhein-sieg.net/Freifunk-Troisdorf/ubnt-freifunk-map-api/raw/branch/master/example.devices.json"
   },
-  "unifi": {
+  "unifi": [
+    {
+      "name": "Unifi Freifunk Troisdorf",
       "enabled": false,
       "displayusers": true,
-      "APIUrl": "https://unifi.freifunk-troisdorf.de:8443",
+      "APIUrl": "https://unifi.freifunk-troisdorf.de",
       "user": "APIuser",
       "password": "PASSWORD",
       "ucDevicesURL": "https://git.freifunk-rhein-sieg.net/Freifunk-Troisdorf/ubnt-freifunk-map-api/raw/branch/master/example.ucDevices.json"
-  },
+    }
+  ],
   "meshviewer": {
     "enabled": false,
     "files": [

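The example config above reflects a breaking change: `unifi` is now an array of controller entries instead of a single object (see the `UnifiServer` struct further down). A rough, self-contained sketch of decoding such an array — the struct mirrors the JSON tags shown in the diff, and the two entries are invented:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// unifiServer mirrors the UnifiServer struct from the diff; it is redefined
// here only so the sketch compiles on its own.
type unifiServer struct {
	Name         string `json:"name"`
	Enabled      bool   `json:"enabled"`
	DisplayUsers bool   `json:"displayusers"`
	APIURL       string `json:"APIUrl"`
	User         string `json:"user"`
	Password     string `json:"password"`
	UCDevicesURL string `json:"ucDevicesURL"`
}

func main() {
	// Two controllers instead of the old single "unifi" object (invented values).
	raw := []byte(`[
		{"name": "Unifi Freifunk Troisdorf", "enabled": true, "displayusers": true, "APIUrl": "https://unifi.freifunk-troisdorf.de"},
		{"name": "Second controller", "enabled": false, "displayusers": true, "APIUrl": "https://unifi.example.org"}
	]`)

	var servers []unifiServer
	if err := json.Unmarshal(raw, &servers); err != nil {
		log.Fatalln(err)
	}
	for _, s := range servers {
		fmt.Printf("%s enabled=%v\n", s.Name, s.Enabled)
	}
}
```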
go.mod

@ -1,9 +1,17 @@
 module git.freifunk-rhein-sieg.net/Freifunk-Troisdorf/ubnt-freifunk-map-api
-go 1.16
+go 1.20
 require (
     git.nils.zone/nils/prettify v0.0.4
     github.com/fatih/structs v1.1.0
     github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c
 )
+require (
+    github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 // indirect
+    github.com/fatih/color v1.9.0 // indirect
+    github.com/mattn/go-colorable v0.1.4 // indirect
+    github.com/mattn/go-isatty v0.0.11 // indirect
+    golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 // indirect
+)

influx.go (new file)

@ -0,0 +1,63 @@
package main
import (
"encoding/json"
"log"
client "github.com/influxdata/influxdb1-client/v2"
)
// Create InfluxDB Client
func influxDBClient(port string) client.Client {
c, err := client.NewHTTPClient(client.HTTPConfig{
Addr: conf.General.InfluxURL + ":" + port,
})
if err != nil {
log.Fatalln("Error: ", err)
}
return c
}
// Get a single Datapoint from InfluxDB
func getInfluxDataPoint(dp string, h string, p string) float64 {
//Build the Query
query := "SELECT last(" + dp + ") FROM system WHERE host = '" + h + "'"
c := influxDBClient(p)
q := client.NewQuery(query, "udp", "s")
response, err := c.Query(q)
if err != nil {
log.Println("Influx query error!")
}
res := 0.0
if len(response.Results) > 0 {
res, err := response.Results[0].Series[0].Values[0][1].(json.Number).Float64()
if err != nil {
log.Println("Error in type conversion")
}
return res
}
return res
}
// Send Datapoints to InfluxDB, point map and InfluxDB Port needed
func sendInfluxBatchDataPoint(point *client.Point, influxPort string) {
// Open connection to InfluxDB
bp, err := client.NewBatchPoints(client.BatchPointsConfig{
Database: "freifunk",
Precision: "s",
})
if err != nil {
log.Fatalln("Error: ", err)
}
bp.AddPoint(point)
c := influxDBClient(influxPort)
err = c.Write(bp)
if err != nil {
log.Fatal(err)
}
//
}

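influx.go wraps the query and write paths of the influxdb1-client library for the rest of the code. A self-contained sketch of the write side — the endpoint is a placeholder, and the measurement, tag and field names follow what staticDevices.go and unifi.go send:

```go
package main

import (
	"log"
	"time"

	client "github.com/influxdata/influxdb1-client/v2"
)

func main() {
	// Placeholder endpoint; the repository builds this from config.json.
	c, err := client.NewHTTPClient(client.HTTPConfig{Addr: "http://localhost:8086"})
	if err != nil {
		log.Fatalln("Error: ", err)
	}
	defer c.Close()

	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  "freifunk",
		Precision: "s",
	})
	if err != nil {
		log.Fatalln("Error: ", err)
	}

	// One datapoint per node, keyed by hostname and nodeid, as in staticDevices.go.
	tags := map[string]string{"hostname": "VPN01", "nodeid": "000000000001"}
	fields := map[string]interface{}{"load": 0.12, "ram": 42, "time.up": 3600}

	point, err := client.NewPoint("node", tags, fields, time.Now())
	if err != nil {
		log.Fatalln("Error: ", err)
	}
	bp.AddPoint(point)

	if err := c.Write(bp); err != nil {
		log.Fatalln("Error: ", err)
	}
}
```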
main.go

@ -4,11 +4,12 @@ import (
"encoding/json" "encoding/json"
"flag" "flag"
"fmt" "fmt"
"io/ioutil" "io"
"log" "log"
"net/http" "net/http"
"net/http/cookiejar" "net/http/cookiejar"
"os" "os"
"sync"
"time" "time"
_ "git.nils.zone/nils/prettify" _ "git.nils.zone/nils/prettify"
@ -16,14 +17,20 @@ import (
const ( const (
iso8601 = "2006-01-02T15:04:05-0700" iso8601 = "2006-01-02T15:04:05-0700"
fetchInterval = 1 * time.Minute
) )
// flags // flags
var (
lastFetchTime time.Time
cacheMutex sync.Mutex
cacheNodes []node
cacheLinks []link
)
var configPath = flag.String("configPath", "config.json", "Path to config.json") var configPath = flag.String("configPath", "config.json", "Path to config.json")
var version = "development" var version = "development"
var delay time.Duration = 60 * time.Second var delay time.Duration = 60 * time.Second
var conf = loadconfig(*configPath) var conf = loadconfig(*configPath)
var ucDev = getDevices(conf.Unifi.UCDevicesURL)
func main() { func main() {
log.Printf("starting version %s...\n", version) log.Printf("starting version %s...\n", version)
@ -34,54 +41,56 @@ func main() {
if *configPath == "" { if *configPath == "" {
log.Fatalln("Please specify path to config.json flag '-configPath'") log.Fatalln("Please specify path to config.json flag '-configPath'")
} }
// start API processing (runs in a loop) // start API processing (runs in a loop)
go processAPIs() go func() {
//processUNMSAPIRouter() if err := processAPIs(); err != nil {
//createMetrics(influxDBClient()) log.Fatalln("API processing failed, error is: ", err)
}
tick := time.Tick(delay)
for range tick {
if err := processAPIs(); err != nil {
log.Fatalln("API processing failed, error is: ", err)
}
}
}()
// start webserver on Port 3000 // start webserver on Port 3000
serveJSON() serveJSON()
} }
func loadconfig(file string) config { func processAPIs() error {
var config config
configFile, err := os.Open(file)
if err != nil {
log.Fatalln(err)
}
jsonParse := json.NewDecoder(configFile)
jsonParse.Decode(&config)
return config
}
// int to bool converter
func itob(i int) bool {
if i == 1 {
return true
}
return false
}
func processAPIs() {
tick := time.Tick(delay)
for range tick {
var nodes []node var nodes []node
var links []link var links []link
if conf.Unms.Enabled { if conf.UISP.Enabled {
log.Println("Processing UNMS") log.Println("Processing UISP")
unmsNodes, unmsLinks := processUNMSAPI() //Process UISP RiFu Nodes
unmsRouters := processUNMSAPIRouter() uispNodes, uispLinks, err := processUISPRiFu()
nodes = append(nodes, unmsNodes...) if err != nil {
nodes = append(nodes, unmsRouters...) return err
links = append(links, unmsLinks...) }
//Process UISP Routers (like EDGE Router)
uispRouters, err := processUISPRouter()
if err != nil {
return err
}
nodes = append(nodes, uispNodes...)
nodes = append(nodes, uispRouters...)
links = append(links, uispLinks...)
}
if len(conf.Unifi) > 0 {
log.Println("Anazahl der Unifi Server:", len(conf.Unifi))
for i := range conf.Unifi {
if conf.Unifi[i].Enabled {
log.Println("Processing Unifi-Server: ", conf.Unifi[i].Name)
//Process Unifi Nodes
unifiNodes, _, err := processUnifiAPI(i)
if err != nil {
return err
}
nodes = append(nodes, unifiNodes...)
}
} }
if conf.Unifi.Enabled {
log.Println("Processing Unifi")
//ucNodes, ucLinks := processUcAPIs()
ucNodes, _ := processUcAPIs()
nodes = append(nodes, ucNodes...)
//links = append(links, ucLinks...)
//links = links
} }
if conf.Meshviewer.Enabled { if conf.Meshviewer.Enabled {
log.Println("Processing Meshviewer") log.Println("Processing Meshviewer")
@ -89,6 +98,12 @@ func processAPIs() {
nodes = append(nodes, mvNodes...) nodes = append(nodes, mvNodes...)
links = append(links, mvLinks...) links = append(links, mvLinks...)
} }
if conf.Gateways.Enabled {
log.Println("Processing Gateways")
//Process Static Gateways from Json
gwNodes := processGateways()
nodes = append(nodes, gwNodes...)
}
// assemble final struct // assemble final struct
o := output{ o := output{
Timestamp: time.Now().Format(iso8601), Timestamp: time.Now().Format(iso8601),
@ -102,24 +117,38 @@ func processAPIs() {
if err := o.writeToFile(); err != nil { if err := o.writeToFile(); err != nil {
log.Fatalln(err) log.Fatalln(err)
} }
// get outages to serve as .csv
l := getUNMSLogs()
err := writeOutagesToCSV(l)
if err != nil {
log.Println("Error writing outages.csv")
}
// we're done here // we're done here
log.Println("...done") log.Println("...done")
} return nil
} }
func loadconfig(file string) config {
var config config
configFile, err := os.Open(file)
if err != nil {
log.Fatalln("Failed loding Config file: ", err)
}
jsonParse := json.NewDecoder(configFile)
if err := jsonParse.Decode(&config); err != nil {
log.Fatalln(err)
}
return config
}
// int to bool converter
func itob(i int) bool {
return i == 1
}
// function to get file from meshviewer
func getFile(url string) []byte { func getFile(url string) []byte {
resp, err := http.Get(url) resp, err := http.Get(url)
if err != nil { if err != nil {
log.Println("Error getting file from:", url) log.Println("Error getting file from:", url)
} }
data := resp.Body data := resp.Body
byteValue, _ := ioutil.ReadAll(data) byteValue, _ := io.ReadAll(data)
return byteValue return byteValue
} }

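main() now runs the API processing once immediately and then once per tick inside a goroutine, treating any processing error as fatal, while serveJSON() keeps the process alive on port 3000. The same pattern reduced to a standalone sketch (the handler is a stand-in, only the control flow matches the diff):

```go
package main

import (
	"log"
	"net/http"
	"time"
)

// process stands in for processAPIs().
func process() error {
	log.Println("processing APIs...")
	return nil
}

func main() {
	delay := 60 * time.Second

	go func() {
		// Run once right away, then once per tick.
		if err := process(); err != nil {
			log.Fatalln("API processing failed, error is: ", err)
		}
		for range time.Tick(delay) {
			if err := process(); err != nil {
				log.Fatalln("API processing failed, error is: ", err)
			}
		}
	}()

	// Blocks forever, like serveJSON() in the repository.
	log.Fatal(http.ListenAndServe(":3000", http.FileServer(http.Dir("output"))))
}
```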
manifest.yml (deleted file)

@ -1,19 +0,0 @@
image: fftdf/ffmap-ubnt-api:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
- "latest"
{{#each build.tags}}
- {{this}}
{{/each}}
{{/if}}
manifests:
-
image: fftdf/ffmap-ubnt-api:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
platform:
architecture: amd64
os: linux
# -
# image: fftdf/ffmap-ubnt-api:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64
# platform:
# architecture: arm64
# os: linux


@ -2,8 +2,8 @@ package main
 import (
     "encoding/json"
-    "fmt"
     "log"
+    "time"
 )
 func getMeshviewerJSON(url string) (mvDevices, error) {
@ -17,7 +17,7 @@ func getMeshviewerJSON(url string) (mvDevices, error) {
     // unmarshal to struct
     err := json.Unmarshal(jsonFile, &n)
     if err != nil {
-        fmt.Println("can´t get Meshviewer Json file from " + url)
+        log.Println("can´t get Meshviewer Json file from " + url)
         log.Println(err)
     }
     return n, nil
@ -130,18 +130,31 @@ func addmvDevices(d mvDevices) ([]node, []link) {
 }
 func getMeshviewer() ([]node, []link) {
+    cacheMutex.Lock()
+    defer cacheMutex.Unlock()
+    // Überprüfen, ob die Daten kürzlich aktualisiert wurden
+    if time.Since(lastFetchTime) < fetchInterval {
+        return cacheNodes, cacheLinks
+    }
     var nodes []node
     var links []link
     for i := range conf.Meshviewer.Files {
+        log.Println("Hole Meshviewer JSON von: ", conf.Meshviewer.Files[i].URL)
         m, err := getMeshviewerJSON(conf.Meshviewer.Files[i].URL)
         if err != nil {
-            return nodes, links
+            return cacheNodes, cacheLinks
         }
         mvNodes, mvLinks := addmvDevices(m)
         nodes = append(nodes, mvNodes...)
         links = append(links, mvLinks...)
     }
-    return nodes, links
+    // Cache aktualisieren
+    cacheNodes = nodes
+    cacheLinks = links
+    lastFetchTime = time.Now()
+    return cacheNodes, cacheLinks
 }

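getMeshviewer() now caches the last successful result behind a mutex and refetches at most once per fetchInterval; when a download fails it returns the cached nodes and links instead of an empty slice. The guard on its own, as a self-contained sketch:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

const fetchInterval = 1 * time.Minute

var (
	cacheMutex    sync.Mutex
	lastFetchTime time.Time
	cached        []string
)

// fetch stands in for downloading and parsing the meshviewer.json files.
func fetch() ([]string, error) {
	return []string{"node-a", "node-b"}, nil
}

// getCached returns fresh data at most once per fetchInterval and falls
// back to the previous result when fetching fails.
func getCached() []string {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()

	if time.Since(lastFetchTime) < fetchInterval {
		return cached
	}
	fresh, err := fetch()
	if err != nil {
		return cached
	}
	cached = fresh
	lastFetchTime = time.Now()
	return cached
}

func main() {
	fmt.Println(getCached())
}
```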

@ -1,37 +0,0 @@
package main
import (
"encoding/csv"
"log"
"os"
)
func getUNMSLogs() UNMSLogResponse {
var l UNMSLogResponse
log.Println("Get Outages from UNMS")
err := UnmsCallAPI("/outages?count=100&page=1&type=outage", &l)
if err != nil {
log.Fatalln("Error calling Outages API")
}
return l
}
func writeOutagesToCSV(l UNMSLogResponse) error {
csvFile, err := os.Create("output/outages.csv")
if err != nil {
return err
}
writer := csv.NewWriter(csvFile)
for _, o := range l.Items {
var row []string
row = append(row, o.StartTime.Format("02.01.2006 15:04:05"))
row = append(row, o.EndTime.Format("02.01.2006 15:04:05"))
row = append(row, o.Site.Name)
row = append(row, o.Device.DisplayName)
writer.Write(row)
}
writer.Flush()
return nil
}

staticDevices.go (new file)

@ -0,0 +1,99 @@
package main
import (
"log"
"strings"
"time"
client "github.com/influxdata/influxdb1-client/v2"
)
func processGateways() []node {
d := getDevices(conf.Gateways.GatewaysURL)
var nodes []node
for i := range d.Devices {
log.Println("Processing Static Device: ", d.Devices[i].Name)
currentDevice := d.Devices[i]
//Collect data
//Calulate Memory (%)
mem := getInfluxDataPoint("mem", currentDevice.FQDN, conf.General.ProxmoxInfluxPort)
maxmem := getInfluxDataPoint("maxmem", currentDevice.FQDN, conf.General.ProxmoxInfluxPort)
memoryMap := mem / maxmem
memory := memoryMap * 100
// Get Network
rx := getInfluxDataPoint("netin", currentDevice.FQDN, conf.General.ProxmoxInfluxPort)
tx := getInfluxDataPoint("netout", currentDevice.FQDN, conf.General.ProxmoxInfluxPort)
// Get CPU (%)
cpuMap := getInfluxDataPoint("cpu", currentDevice.FQDN, conf.General.ProxmoxInfluxPort)
cpu := cpuMap * 100
//Uptime (seconds)
uptime := getInfluxDataPoint("uptime", currentDevice.FQDN, conf.General.ProxmoxInfluxPort)
t := time.Duration(uptime * float64(time.Second))
up := time.Now().Add(-t)
// fields := map[string]interface{}{}
fields := make(map[string]any)
tags := map[string]string{
"hostname": strings.ReplaceAll(d.Devices[i].Name, " ", "-"),
"nodeid": strings.ReplaceAll(d.Devices[i].MAC, ":", ""),
}
//Build fields for InfluxDB
fields["load"] = cpu
fields["ram"] = int(memory)
fields["time.up"] = int(uptime)
//Network
fields["traffic.rx.bytes"] = int(rx)
fields["traffic.tx.bytes"] = int(tx)
point, err := client.NewPoint(
"node",
tags,
fields,
time.Now(),
)
if err != nil {
log.Fatalln("Error: ", err)
}
if conf.General.InfluxEnabled {
sendInfluxBatchDataPoint(point, conf.General.FreifunkInfluxPort)
}
//Build Nodes
nodes = append(nodes, node{
Firstseen: up.Format(iso8601),
Lastseen: time.Now().Format(iso8601),
IsOnline: true,
IsGateway: true,
Clients: 0,
ClientsWifi24: 0,
ClientsWifi5: 0,
ClientsOther: 0,
RootFSUsage: 0,
LoadAVG: cpuMap,
MemoryUsage: memoryMap,
Uptime: up.Format(iso8601),
GatewayNexthop: "",
Gateway: "",
NodeID: strings.ReplaceAll(d.Devices[i].MAC, ":", ""),
MAC: d.Devices[i].MAC,
Adresses: d.Devices[i].Adresses,
Domain: d.Devices[i].Domain,
Hostname: "[Gateway] " + d.Devices[i].Name,
Owner: "Freifunk Troisdorf",
Firmware: firmware{
Base: "KVM",
Release: "Ubuntu 22.04",
},
Autoupdater: autoupdater{
Enabled: false,
Branch: "stable",
},
NProc: 1,
Model: "KVM",
})
}
return nodes
}

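Two conversions in processGateways() are easy to overlook: memory usage is the ratio of the Proxmox `mem` and `maxmem` values (the 0–1 ratio goes to the map, the percentage to InfluxDB), and the uptime in seconds becomes a "first seen" timestamp by subtracting it from the current time. A standalone sketch with made-up inputs:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Made-up values as they might come back from the Proxmox InfluxDB.
	mem, maxmem := 2.1e9, 8.0e9 // bytes used / bytes available
	uptime := 86400.0           // seconds

	memoryRatio := mem / maxmem        // 0..1, used for the node's MemoryUsage
	memoryPercent := memoryRatio * 100 // written to InfluxDB as "ram"

	// "First seen" is approximated as now minus the uptime.
	up := time.Now().Add(-time.Duration(uptime * float64(time.Second)))

	fmt.Printf("memory: %.1f%% (ratio %.3f), first seen: %s\n",
		memoryPercent, memoryRatio, up.Format(time.RFC3339))
}
```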

@ -10,21 +10,20 @@ import (
) )
type config struct { type config struct {
Unms struct { General struct {
InfluxEnabled bool `json:"influx_enabled"`
FreifunkInfluxPort string `json:"freifunk_influx_port"`
ProxmoxInfluxPort string `json:"proxmox_influx_port"`
InfluxURL string `json:"influx_url"`
}
UISP struct {
Enabled bool `json:"enabled"` Enabled bool `json:"enabled"`
UnmsAPIURL string `json:"unmsAPIUrl"` UnmsAPIURL string `json:"unmsAPIUrl"`
APItoken string `json:"APItoken"` APItoken string `json:"APItoken"`
DevicesURL string `json:"devicesURL"` DevicesURL string `json:"devicesURL"`
RouterURL string `json:"routerURL"` RouterURL string `json:"routerURL"`
} `json:"unms"` } `json:"unms"`
Unifi struct { Unifi []UnifiServer `json:"unifi"`
Enabled bool `json:"enabled"`
DisplayUsers bool `json:"displayusers"`
APIURL string `json:"APIUrl"`
User string `json:"user"`
Password string `json:"password"`
UCDevicesURL string `json:"ucDevicesURL"`
} `json:"unifi"`
Meshviewer struct { Meshviewer struct {
Enabled bool `json:"enabled"` Enabled bool `json:"enabled"`
Files []struct { Files []struct {
@ -32,10 +31,25 @@ type config struct {
URL string `json:"URL"` URL string `json:"URL"`
} `json:"files"` } `json:"files"`
} `json:"meshviewer"` } `json:"meshviewer"`
Gateways struct {
Enabled bool `json:"enabled"`
GatewaysURL string `json:"gatewaysurl"`
} `json:"gateways"`
}
type UnifiServer struct {
Name string `json:"name"`
Enabled bool `json:"enabled"`
DisplayUsers bool `json:"displayusers"`
APIURL string `json:"APIUrl"`
User string `json:"user"`
Password string `json:"password"`
UCDevicesURL string `json:"ucDevicesURL"`
} }
type device struct { type device struct {
Name string `json:"name"` Name string `json:"name"`
FQDN string `json:"fqdn"`
MAC string `json:"mac"` MAC string `json:"mac"`
GatewayNexthop string `json:"gateway_nexthop"` GatewayNexthop string `json:"gateway_nexthop"`
LinkedTo string `json:"linked_to"` LinkedTo string `json:"linked_to"`
@ -45,6 +59,7 @@ type device struct {
Longitude float64 `json:"longitude"` Longitude float64 `json:"longitude"`
Latitude float64 `json:"latitude"` Latitude float64 `json:"latitude"`
} `json:"location"` } `json:"location"`
Adresses []string `json:"adresses"`
} }
type devices struct { type devices struct {
Devices []device `json:"devices"` Devices []device `json:"devices"`
@ -314,40 +329,31 @@ type UNMSLogResponse struct {
} `json:"items"` } `json:"items"`
} }
type XY struct {
X int `json:"x"`
Y int `json:"y"`
}
type AvgMax struct {
AVG []XY `json:"avg"`
MAX []XY `json:"max"`
}
type UNMSstatistics struct { type UNMSstatistics struct {
Period int `json:"period"` Period int `json:"period"`
Interval struct { Interval struct {
Start int `json:"start"` Start int `json:"start"`
End int `json:"end"` End int `json:"end"`
} `json:"interval"` } `json:"interval"`
CPU []struct { CPU AvgMax `json:"cpu"`
X int `json:"x"` RAM AvgMax `json:"ram"`
Y int `json:"y"` Errors AvgMax `json:"errors"`
} `json:"cpu"`
RAM []struct {
X int `json:"x"`
Y int `json:"y"`
} `json:"ram"`
Ping []struct {
X int `json:"x"`
Y int `json:"y"`
} `json:"ping"`
Errors []struct {
X int `json:"x"`
Y int `json:"y"`
} `json:"errors"`
Interfaces []struct { Interfaces []struct {
ID string `json:"id"` ID string `json:"id"`
Priority int `json:"priority"` Priority int `json:"priority"`
Name string `json:"name"` Name string `json:"name"`
Receive []struct { Receive AvgMax `json:"receive"`
X int `json:"x"` Transmit AvgMax `json:"transmit"`
Y int `json:"y"`
} `json:"receive"`
Transmit []struct {
X int `json:"x"`
Y int `json:"y"`
} `json:"transmit"`
} `json:"interfaces"` } `json:"interfaces"`
} }

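The statistics types replace four inline `[]struct{ X, Y }` definitions with one shared AvgMax type, because the UISP statistics endpoint returns an `avg` and a `max` series per metric; unms.go then reads `statistics.CPU.AVG[0].Y`. A small decoding sketch with an invented payload of the same shape:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type XY struct {
	X int `json:"x"`
	Y int `json:"y"`
}

type AvgMax struct {
	AVG []XY `json:"avg"`
	MAX []XY `json:"max"`
}

type stats struct {
	CPU AvgMax `json:"cpu"`
	RAM AvgMax `json:"ram"`
}

func main() {
	// Invented payload with one sample per series.
	raw := []byte(`{
		"cpu": {"avg": [{"x": 1716000000, "y": 7}], "max": [{"x": 1716000000, "y": 23}]},
		"ram": {"avg": [{"x": 1716000000, "y": 41}], "max": [{"x": 1716000000, "y": 44}]}
	}`)

	var s stats
	if err := json.Unmarshal(raw, &s); err != nil {
		log.Fatalln(err)
	}
	// The same access pattern unms.go uses: first sample of the average series.
	fmt.Println("cpu avg:", s.CPU.AVG[0].Y, "ram avg:", s.RAM.AVG[0].Y)
}
```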
unifi.go

@ -4,34 +4,38 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io/ioutil" "io"
"log" "log"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
client "github.com/influxdata/influxdb1-client/v2"
) )
// Unifi Controller API processing // Unifi Controller API processing
func processUcAPIs() ([]node, []link) { func processUnifiAPI(s int) ([]node, []link, error) {
//get list of Unifi devices to display //get list of Unifi devices to display
var nodes []node var nodes []node
var links []link var links []link
d := getDevices(conf.Unifi.UCDevicesURL) d := getDevices(conf.Unifi[s].UCDevicesURL)
//call Unifi Controller //call Unifi Controller
ucAPI := UnifiNewAPI(conf.Unifi.User, conf.Unifi.Password, conf.Unifi.APIURL) ucAPI := UnifiNewAPI(conf.Unifi[s].User, conf.Unifi[s].Password, conf.Unifi[s].APIURL)
//login //login
ucAPI.ucLogin() if err := ucAPI.ucLogin(); err != nil {
return nil, nil, err
}
//get all Sites from Controller //get all Sites from Controller
sites, err := ucAPI.ucGetSites() sites, err := ucAPI.ucGetSites()
if err != nil { if err != nil {
log.Fatalln(err) return nil, nil, err
} }
//get all devices in all sites //get all devices in all sites
devices, err := ucAPI.ucGetDevices(sites) devices, err := ucAPI.ucGetDevices(sites)
if err != nil { if err != nil {
log.Fatalln(err) return nil, nil, err
} }
//build nodes struct //build nodes struct
@ -40,33 +44,96 @@ func processUcAPIs() ([]node, []link) {
var currentDevice ucDevice var currentDevice ucDevice
var currentJSONDevice device var currentJSONDevice device
for _, device := range devices { for _, device := range devices {
if strings.ToUpper(device.Mac) == strings.ToUpper(jsonDevice.MAC) { if strings.EqualFold(device.Mac, jsonDevice.MAC) {
currentDevice = device currentDevice = device
currentJSONDevice = jsonDevice currentJSONDevice = jsonDevice
} }
} }
if isRemoteMACpublished(jsonDevice.MAC, d.Devices) { if isRemoteMACpublished(jsonDevice.MAC, d.Devices) {
//hier muss gecheckt werden ob der link valide ist //hier muss gecheckt werden ob der link valide ist
if checkMeshviewerLink(jsonDevice.LinkedTo) { if checkMeshviewerLink(jsonDevice.LinkedTo) {
links = UnifiAddLink(jsonDevice, links) links = UnifiAddLink(jsonDevice, links)
} }
} }
load, err := strconv.ParseFloat(currentDevice.Sysstats.CPU, 64)
isOnline := currentDevice.State == 1
var load float64
var mem float64
var cpu float64
if isOnline {
load, err = strconv.ParseFloat(currentDevice.Sysstats.CPU, 64)
cpu = load * 100
if err != nil { if err != nil {
fmt.Println("Error: ", currentDevice.Name) log.Println("Error psrsing CPU of device ", currentDevice.Name)
//log.Fatalln(err) log.Println(err)
load = 0 load = 0
cpu = 0
} }
mem, err := strconv.ParseFloat(currentDevice.Sysstats.Memory, 64) mem, err = strconv.ParseFloat(currentDevice.Sysstats.Memory, 64)
if err != nil { if err != nil {
//log.Fatalln(err) log.Println("Error parsing Memory of device ", currentDevice.Name)
load = 0 log.Println(err)
mem = 0
} }
}
var model = lookupModels(currentDevice.Model) var model = lookupModels(currentDevice.Model)
var clients = currentDevice.Users var clients int
if conf.Unifi.DisplayUsers == false { if conf.Unifi[s].DisplayUsers {
clients = 0 clients = currentDevice.Users
} }
//// INFLUX START
// fields := map[string]interface{}{}
fields := make(map[string]any)
tags := map[string]string{
"hostname": strings.ReplaceAll(currentDevice.Name, " ", "-"),
"nodeid": strings.ReplaceAll(currentDevice.Mac, ":", ""),
}
// Generate fields for all network interfaces (not availible for Unifi Nodes)
//for eth := range details.Interfaces {
// interface_name_rx := ("rate.rx" + "_" + details.Interfaces[eth].Identification.Name)
// interface_name_tx := ("rate.tx" + "_" + details.Interfaces[eth].Identification.Name)
// fields[interface_name_rx] = details.Interfaces[eth].Statistics.Rxrate
// fields[interface_name_tx] = details.Interfaces[eth].Statistics.Txrate
//}
// set default values if we can't get statistics
fields["cpu"] = 0
fields["load"] = float64(0)
fields["ram"] = 0
if isOnline {
// Generate fields for all Statistics
//load := (float64(load) / float64(100))
fields["cpu"] = int(cpu)
fields["load"] = load
fields["ram"] = int(mem)
}
// Generate field for DHCP Leases
fields["clients.total"] = clients
fields["time.up"] = currentDevice.Uptime
// Generate Dataponts
point, err := client.NewPoint(
"node",
tags,
fields,
time.Now(),
)
if err != nil {
log.Fatalln("Error: ", err)
}
if conf.General.InfluxEnabled {
sendInfluxBatchDataPoint(point, conf.General.FreifunkInfluxPort)
}
// INFLUX STOP
//log.Println(currentDevice.Mac)
if currentDevice.Mac != "" {
nodes = append(nodes, node{ nodes = append(nodes, node{
Firstseen: "0", Firstseen: "0",
Lastseen: time.Unix(int64(currentDevice.LastSeen), 0).Format(iso8601), Lastseen: time.Unix(int64(currentDevice.LastSeen), 0).Format(iso8601),
@ -101,7 +168,8 @@ func processUcAPIs() ([]node, []link) {
Model: model, Model: model,
}) })
} }
return nodes, links }
return nodes, links, err
} }
func UnifiNewAPI(user string, pass string, baseURL string) UnifiAPIData { func UnifiNewAPI(user string, pass string, baseURL string) UnifiAPIData {
@ -126,10 +194,10 @@ func (u *UnifiAPIData) ucCallAPI(url string, method string, body *bytes.Buffer,
} }
defer response.Body.Close() defer response.Body.Close()
if response.StatusCode != 200 { if response.StatusCode != 200 {
return fmt.Errorf("Login failed %s", u.baseURL+url) return fmt.Errorf("login failed %s", u.baseURL+url)
} }
data, err := ioutil.ReadAll(response.Body) data, err := io.ReadAll(response.Body)
if err != nil { if err != nil {
return err return err
} }
@ -204,10 +272,13 @@ func UnifiAddLink(dev device, links []link) []link {
} }
func findNodeID(NodeID string) bool { func findNodeID(NodeID string) bool {
for s := range conf.Unifi {
ucDev := getDevices(conf.Unifi[s].UCDevicesURL)
for i := range ucDev.Devices { for i := range ucDev.Devices {
if ucDev.Devices[i].GatewayNexthop == NodeID { if ucDev.Devices[i].GatewayNexthop == NodeID {
return true return true
} }
} }
}
return false return false
} }

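A small change in unifi.go above: the MAC comparison switched from uppercasing both sides to strings.EqualFold, which compares case-insensitively without building temporary strings. A tiny illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	a, b := "18:E8:29:AD:9A:34", "18:e8:29:ad:9a:34"

	// Old approach: uppercase both sides, then compare.
	fmt.Println(strings.ToUpper(a) == strings.ToUpper(b)) // true

	// New approach: EqualFold does the case-insensitive comparison directly.
	fmt.Println(strings.EqualFold(a, b)) // true
}
```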
unms.go

@ -17,22 +17,23 @@ import (
) )
// UNMS API processing (Richtfunk) // UNMS API processing (Richtfunk)
func processUNMSAPI() ([]node, []link) { func processUISPRiFu() ([]node, []link, error) {
// Variables for runtime // Variables for runtime
var links []link var links []link
var nodes []node var nodes []node
d := getDevices(conf.Unms.DevicesURL) d := getDevices(conf.UISP.DevicesURL)
// API CALL 1 // API CALL 1 (get Device overview)
log.Println("calling API 1") log.Println("Starting UISP API Crawler for Rifu devices")
log.Println("Getting device overview from UNMS API")
var u []unifiAPIResponse var u []unifiAPIResponse
err := UnmsCallAPI("/devices", &u) if err := UnmsCallAPI("/devices", &u); err != nil {
if err != nil { return nil, nil, err
log.Fatalln(err)
} }
for i := range d.Devices { for i := range d.Devices {
time.Sleep(time.Second)
var dev unifiAPIResponse var dev unifiAPIResponse
var currentDevice device var currentDevice device
for j := range u { for j := range u {
@ -41,23 +42,22 @@ func processUNMSAPI() ([]node, []link) {
currentDevice = d.Devices[i] currentDevice = d.Devices[i]
} }
} }
isOnline := dev.Overview.Status == "active"
var isOnline bool = false
if dev.Overview.Status == "active" {
isOnline = true
}
// END OF API CALL 1 // END OF API CALL 1
// API CALL 2 // Getting details from UISP
log.Println("calling API 2 for device", d.Devices[i].Name) log.Println("Getting device details for: ", d.Devices[i].Name)
var details unifiAPIDetails var details unifiAPIDetails
UnmsCallAPI("/devices/erouters/"+dev.Identification.ID, &details) if err := UnmsCallAPI("/devices/erouters/"+dev.Identification.ID, &details); err != nil {
// END OF API CALL 2 return nil, nil, err
}
// API CALL 3 // Getting details for RiFu
log.Println("calling API 3 for device", d.Devices[i].Name) log.Println("Getting details for RiFu Link for: ", d.Devices[i].Name)
var airmaxes []unifiAPIAirmax var airmaxes []unifiAPIAirmax
UnmsCallAPI("/devices/airmaxes/"+dev.Identification.ID+"/stations", &airmaxes) if err := UnmsCallAPI("/devices/airmaxes/"+dev.Identification.ID+"/stations", &airmaxes); err != nil {
return nil, nil, err
}
// check if remote mac address is part of our published network // check if remote mac address is part of our published network
for i := range airmaxes { for i := range airmaxes {
if isRemoteMACpublished(airmaxes[i].DeviceIdentification.MAC, d.Devices) { if isRemoteMACpublished(airmaxes[i].DeviceIdentification.MAC, d.Devices) {
@ -101,23 +101,21 @@ func processUNMSAPI() ([]node, []link) {
Model: details.Identification.Model, Model: details.Identification.Model,
}) })
} }
return nodes, links return nodes, links, nil
} }
func processUNMSAPIRouter() []node { func processUISPRouter() ([]node, error) {
time.Sleep(time.Second)
// Variables for runtime // Variables for runtime
var nodes []node var nodes []node
d := getDevices(conf.UISP.RouterURL)
d := getDevices(conf.Unms.RouterURL)
// API CALL 1, get all devices list from UNMS // API CALL 1, get all devices list from UNMS
log.Println("Get all devices from UNMS") log.Println("Get all Routers from UISP")
var u []unifiAPIResponse var u []unifiAPIResponse
err := UnmsCallAPI("/devices", &u) if err := UnmsCallAPI("/devices", &u); err != nil {
if err != nil { return nil, err
log.Fatalln(err)
} }
// END OF API CALL 1
// Get Information for devices device // Get Information for devices device
for i := range d.Devices { for i := range d.Devices {
@ -130,41 +128,34 @@ func processUNMSAPIRouter() []node {
} }
} }
var isOnline bool = false isOnline := dev.Overview.Status == "active"
if dev.Overview.Status == "active" {
isOnline = true
}
// API CALL FOR ROUTER DETAILS (Interface RX/TX) // API CALL FOR ROUTER DETAILS (Interface RX/TX)
log.Println("Getting details of ", d.Devices[i].Name, "from UNMS API") log.Println("Getting details of ", d.Devices[i].Name, "from UISP API")
var details unifiAPIDetails var details unifiAPIDetails
UnmsCallAPI("/devices/erouters/"+dev.Identification.ID, &details) if err := UnmsCallAPI("/devices/erouters/"+dev.Identification.ID, &details); err != nil {
return nil, err
}
// API CALL FOR DEVICE STATISTICS (CPU, RAM) // API CALL FOR DEVICE STATISTICS (CPU, RAM)
log.Println("Getting statistics of ", d.Devices[i].Name, "from UNMS API") log.Println("Getting statistics of ", d.Devices[i].Name, "from UISP API")
var statistics UNMSstatistics var statistics UNMSstatistics
UnmsCallAPI("/devices/"+dev.Identification.ID+"/statistics?interval=hour", &statistics) if err := UnmsCallAPI("/devices/"+dev.Identification.ID+"/statistics?interval=hour", &statistics); err != nil {
return nil, err
}
// API CALL FOR DHCP LEASES // API CALL FOR DHCP LEASES
log.Println("Getting DHCP Leases of ", d.Devices[i].Name, "from UNMS API") log.Println("Getting DHCP Leases of ", d.Devices[i].Name, "from UNMS API")
var dhcpleases UNMSdhcp var dhcpleases UNMSdhcp
if isOnline { if isOnline {
UnmsCallAPI("/devices/erouters/"+dev.Identification.ID+"/dhcp/leases", &dhcpleases) if err := UnmsCallAPI("/devices/erouters/"+dev.Identification.ID+"/dhcp/leases", &dhcpleases); err != nil {
return nil, err
}
} else { } else {
log.Println("Router ist offline, skipping DHCP Leases") log.Println("Router ist offline, skipping DHCP Leases")
} }
// Open connection to InfluxDB // fields := map[string]interface{}{}
bp, err := client.NewBatchPoints(client.BatchPointsConfig{ fields := make(map[string]any)
Database: "freifunk",
Precision: "s",
})
if err != nil {
log.Fatalln("Error: ", err)
}
//
fields := map[string]interface{}{}
tags := map[string]string{ tags := map[string]string{
"hostname": strings.ReplaceAll(d.Devices[i].Name, " ", "-"), "hostname": strings.ReplaceAll(d.Devices[i].Name, " ", "-"),
"nodeid": strings.ReplaceAll(dev.Identification.MAC, ":", ""), "nodeid": strings.ReplaceAll(dev.Identification.MAC, ":", ""),
@ -177,15 +168,21 @@ func processUNMSAPIRouter() []node {
fields[interface_name_tx] = details.Interfaces[eth].Statistics.Txrate fields[interface_name_tx] = details.Interfaces[eth].Statistics.Txrate
} }
// set default values if we can't get statistics
fields["cpu"] = 0
fields["load"] = float64(0)
fields["ram"] = 0
if isOnline {
// Generate fields for all Statistics // Generate fields for all Statistics
load := (float64(statistics.CPU[0].Y) / float64(100)) load := (float64(statistics.CPU.AVG[0].Y) / float64(100))
fields["cpu"] = statistics.CPU[0].Y fields["cpu"] = statistics.CPU.AVG[0].Y
fields["load"] = load fields["load"] = load
fields["ram"] = statistics.RAM[0].Y fields["ram"] = statistics.RAM.AVG[0].Y
}
// Generate field for DHCP Leases // Generate field for DHCP Leases
leases := len(dhcpleases) fields["clients.total"] = len(dhcpleases)
fields["clients.total"] = leases
// Generate Dataponts // Generate Dataponts
point, err := client.NewPoint( point, err := client.NewPoint(
@ -197,14 +194,9 @@ func processUNMSAPIRouter() []node {
if err != nil { if err != nil {
log.Fatalln("Error: ", err) log.Fatalln("Error: ", err)
} }
// Add Datapoints in InfluxDB if conf.General.InfluxEnabled {
bp.AddPoint(point) sendInfluxBatchDataPoint(point, conf.General.FreifunkInfluxPort)
c := influxDBClient()
err = c.Write(bp)
if err != nil {
log.Fatal(err)
} }
// Get info from json file (static) // Get info from json file (static)
nodes = append(nodes, node{ nodes = append(nodes, node{
Firstseen: dev.Overview.CreatedAt.Format(iso8601), Firstseen: dev.Overview.CreatedAt.Format(iso8601),
@ -240,32 +232,25 @@ func processUNMSAPIRouter() []node {
Model: details.Identification.Model, Model: details.Identification.Model,
}) })
} }
return nodes return nodes, nil
} }
func influxDBClient() client.Client { func UnmsCallAPI(url string, i any) error {
c, err := client.NewHTTPClient(client.HTTPConfig{ time.Sleep(time.Second)
Addr: "http://statistik.freifunk-troisdorf.de:8886", request, err := http.NewRequest(http.MethodGet, conf.UISP.UnmsAPIURL+url, nil)
})
if err != nil { if err != nil {
log.Fatalln("Error: ", err) return errors.New(fmt.Sprint("can't set request", conf.UISP.UnmsAPIURL+url))
} }
return c //log.Println(conf.UISP.UnmsAPIURL + url)
} request.Header.Set("x-auth-token", conf.UISP.APItoken)
func UnmsCallAPI(url string, i interface{}) error {
request, err := http.NewRequest(http.MethodGet, conf.Unms.UnmsAPIURL+url, nil)
if err != nil {
return errors.New(fmt.Sprint("can't set request", conf.Unms.UnmsAPIURL+url))
}
request.Header.Set("x-auth-token", conf.Unms.APItoken)
client := &http.Client{} client := &http.Client{}
response, err := client.Do(request) response, err := client.Do(request)
if err != nil { if err != nil {
return fmt.Errorf("can't get request %s with x-auth-token %s", conf.Unms.UnmsAPIURL+url, conf.Unms.APItoken) return fmt.Errorf("can't get request %s with x-auth-token %s", conf.UISP.UnmsAPIURL+url, conf.UISP.APItoken)
} }
if response.StatusCode != 200 { if response.StatusCode != 200 {
log.Fatalln("Can´t call UNMS API, check token and URL. HTTP Status: ", response.StatusCode) log.Println("Can't call UNMS API, check token and URL. Skipping device. HTTP Status: ", response.StatusCode)
return nil
} }
data, err := ioutil.ReadAll(response.Body) data, err := ioutil.ReadAll(response.Body)
defer response.Body.Close() defer response.Body.Close()
@ -273,8 +258,7 @@ func UnmsCallAPI(url string, i interface{}) error {
return fmt.Errorf("can't read response body: %+v", response.Body) return fmt.Errorf("can't read response body: %+v", response.Body)
} }
// no error occurred, unmarshal to struct // no error occurred, unmarshal to struct
json.Unmarshal(data, &i) return json.Unmarshal(data, &i)
return nil
} }
func UnmsGetAddresses(ip string) []string { func UnmsGetAddresses(ip string) []string {