Compare commits

...

114 commits

Author SHA1 Message Date
saji 900ad6d495 release build correct package
All checks were successful
release / release-full (push) Successful in 8m25s
2024-03-09 09:15:01 -06:00
saji 16cc7019bc fix xgo version
All checks were successful
release / release-full (push) Successful in 7m40s
2024-03-09 08:37:07 -06:00
saji 4566ea369c make release crossbuild
Some checks failed
release / release-full (push) Failing after 1m55s
2024-03-09 08:33:21 -06:00
saji 1e72b93143 remove unused variable
All checks were successful
Node.js CI / build-openmct (push) Successful in 1m26s
2024-03-08 22:00:18 -06:00
saji 48b40ee30f make CI only run when affected 2024-03-08 21:59:22 -06:00
saji 9e9081fa4a fix unused dotenv plugin
Some checks failed
Go / build (1.21) (push) Successful in 1m7s
Go / build (1.22) (push) Successful in 1m5s
Node.js CI / build (push) Failing after 1m37s
2024-03-08 21:55:15 -06:00
saji 2dc5a0457b added node ci
Some checks failed
Node.js CI / build (push) Failing after 48s
Go / build (1.21) (push) Successful in 1m6s
Go / build (1.22) (push) Successful in 1m5s
2024-03-08 21:53:26 -06:00
saji 0f2af76156 add some types
All checks were successful
Go / build (1.21) (push) Successful in 1m8s
Go / build (1.22) (push) Successful in 1m7s
2024-03-08 19:25:08 -06:00
saji 2e36581665 fix: add missing files
All checks were successful
Go / build (1.21) (push) Successful in 1m7s
Go / build (1.22) (push) Successful in 1m5s
2024-03-08 17:09:08 -06:00
saji 4829dd50c7 move openmct plugin to typescript
All checks were successful
Go / build (1.21) (push) Successful in 1m5s
Go / build (1.22) (push) Successful in 1m4s
remove unused livestream test function
2024-03-08 17:08:43 -06:00
saji 13205c1668 got realtime working
Some checks failed
Go / build (1.21) (push) Failing after 1m6s
Go / build (1.22) (push) Failing after 1m5s
added demo livestream for testing
added openMCT realtime plugin
fixed websocket cross-origin fail
2024-03-08 11:51:59 -06:00
saji fe4cdfa0a4 revert order by change
All checks were successful
Go / build (1.21) (push) Successful in 1m8s
Go / build (1.22) (push) Successful in 1m7s
2024-03-08 09:40:54 -06:00
saji e9d40ce466 remove broker-db listener, db options
All checks were successful
Go / build (1.21) (push) Successful in 1m11s
Go / build (1.22) (push) Successful in 1m9s
2024-03-08 09:28:50 -06:00
saji d702395d5b cleanup comparison (thanks staticcheck)
All checks were successful
Go / build (1.21) (push) Successful in 1m8s
Go / build (1.22) (push) Successful in 1m7s
2024-03-07 23:52:41 -06:00
saji 90e8c3f101 more tests! added BusEvent.Equals
Some checks failed
Go / build (1.21) (push) Failing after 1m6s
Go / build (1.22) (push) Failing after 1m5s
2024-03-07 23:50:13 -06:00
saji a28393388b tests! 2024-03-07 23:02:46 -06:00
saji 54b7427428 misc cleanup
All checks were successful
Go / build (1.21) (push) Successful in 1m7s
Go / build (1.22) (push) Successful in 1m6s
2024-03-07 16:42:49 -06:00
saji e08ab050ef fix websocket endpoint 2024-03-07 16:42:27 -06:00
saji cf112ef561 fix socketcan bugs
All checks were successful
Go / build (1.21) (push) Successful in 1m7s
Go / build (1.22) (push) Successful in 1m5s
2024-03-07 15:06:16 -06:00
saji 641c35afbd add more logging to BusEvent Broker 2024-03-07 15:05:51 -06:00
saji 4a292aa009 change default database path to gotelem.db 2024-03-07 15:05:31 -06:00
saji d5381a3c33 add comments
All checks were successful
Go / build (1.21) (push) Successful in 1m6s
Go / build (1.22) (push) Successful in 1m4s
2024-03-07 13:30:32 -06:00
saji 0b5a917e40 add JSON line parser, rename logparsers 2024-03-07 13:30:15 -06:00
saji 1ff4adf5e4 add generator test for skylab 2024-03-07 13:29:50 -06:00
saji c8034066c9 add delete document test
All checks were successful
Go / build (1.21) (push) Successful in 1m3s
Go / build (1.22) (push) Successful in 1m1s
2024-03-07 07:34:55 -06:00
saji 3c1a96c8e0 format
All checks were successful
Go / build (1.21) (push) Successful in 1m18s
Go / build (1.22) (push) Successful in 1m16s
2024-03-07 06:18:49 -06:00
saji f380631b5e fix Close() before checking errors
All checks were successful
Go / build (1.21) (push) Successful in 1m14s
Go / build (1.22) (push) Successful in 1m12s
2024-03-06 18:45:32 -06:00
saji 456f84b5c7 remove QueryModifiers, replace with explicit
Some checks failed
Go / build (1.21) (push) Failing after 1m4s
Go / build (1.22) (push) Failing after 1m3s
2024-03-06 17:19:16 -06:00
saji daf4fe97dc added DocumentNotFound error 2024-03-06 17:09:02 -06:00
saji 5b38daf74f wip: Telem DB Document API and Tests 2024-03-06 16:42:39 -06:00
saji 7a98f52542 hack: remove faulty table from old migration 2024-03-06 15:16:13 -06:00
saji 0a6a6bb66d add openmct domain object table and skeleton 2024-03-06 15:15:56 -06:00
saji c9b73ee006 add repeated packet support using index parameter 2024-03-06 14:53:39 -06:00
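
The repeated-packet support referenced above follows the base_id = id - index convention noted in the bus_events schema later in this comparison. A minimal sketch of that arithmetic, with an illustrative id range rather than a real packet definition from the repository:

```go
package main

import "fmt"

// Sketch only: for a "repeated" packet family occupying a contiguous CAN id
// range, the index is the offset of a concrete id from the family's base id,
// so base_id = id - index.
func repeatedIndex(id, baseID uint32) uint32 {
	return id - baseID
}

func main() {
	// Hypothetical family based at 0x410: id 0x413 is the instance with index 3.
	fmt.Println(repeatedIndex(0x413, 0x410)) // 3
}
```
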
saji b266a84324 fix multiple name packet filter 2024-03-06 14:53:25 -06:00
saji d591fa21b6 fix db test
All checks were successful
Go / build (1.21) (push) Successful in 2m32s
Go / build (1.22) (push) Successful in 2m30s
2024-03-06 10:51:54 -06:00
saji 8e314e9303 add orderby clause
Some checks failed
Go / build (1.22) (push) Failing after 1m58s
Go / build (1.21) (push) Failing after 2m1s
2024-03-06 10:48:40 -06:00
saji d90d7a0af4 openmct historical plugin MVP
All checks were successful
Go / build (1.21) (push) Successful in 1m16s
Go / build (1.22) (push) Successful in 1m15s
2024-03-05 09:49:08 -06:00
saji 9ec01c39de fix skylab json formatting
All checks were successful
Go / build (1.22) (push) Successful in 1m16s
Go / build (1.21) (push) Successful in 1m18s
2024-03-04 20:41:15 -06:00
saji bcd61321e6 add CORS to api 2024-03-04 20:40:55 -06:00
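
Enabling CORS usually means answering preflight requests and setting the Access-Control headers on API responses. The sketch below shows one standard-library way to do that; it is an assumption about the approach (the handler, route, and header set are illustrative, not the repository's actual middleware):

```go
package main

import (
	"log"
	"net/http"
)

// corsMiddleware adds permissive CORS headers so a browser frontend served
// from another origin (e.g. the OpenMCT dev server) can call the API, and it
// short-circuits preflight OPTIONS requests.
func corsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusNoContent)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/packets", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("[]")) // illustrative endpoint
	})
	log.Fatal(http.ListenAndServe(":8080", corsMiddleware(mux)))
}
```
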
saji a015911e0e split openmct dev/prod 2024-03-04 20:40:32 -06:00
saji 648f2183c2 added node install/build
All checks were successful
Go / build (1.21) (push) Successful in 1m16s
Go / build (1.22) (push) Successful in 1m15s
release / release-openmct (push) Successful in 2m5s
2024-03-03 23:33:48 -06:00
saji 860d749c6b added openmct release
Some checks failed
release / release-openmct (push) Failing after 56s
Go / build (1.21) (push) Successful in 1m18s
Go / build (1.22) (push) Successful in 1m18s
2024-03-03 23:29:50 -06:00
saji 058e8d31b2 update packages
All checks were successful
Go / build (1.21) (push) Successful in 1m12s
Go / build (1.22) (push) Successful in 1m12s
2024-03-03 23:13:55 -06:00
saji 8b8619dd8a added openmct plugin and embedding
All checks were successful
Go / build (1.21) (push) Successful in 1m17s
Go / build (1.22) (push) Successful in 1m15s
2024-03-03 23:04:41 -06:00
saji 93be82f416 update gitignore for WAL mode sqlite db
All checks were successful
Go / build (1.21) (push) Successful in 1m14s
Go / build (1.22) (push) Successful in 1m12s
2024-03-02 21:56:31 -06:00
saji 4e6f8db7ed combine separate packages
All checks were successful
Go / build (1.21) (push) Successful in 1m16s
Go / build (1.22) (push) Successful in 1m15s
2024-03-02 21:49:18 -06:00
saji 00fa67a67d migrate websocket api to buseventfilter
All checks were successful
Go / build (1.22) (push) Successful in 1m13s
Go / build (1.21) (push) Successful in 2m31s
2024-03-02 21:23:35 -06:00
saji 7b48dd0d1c update action versions
All checks were successful
Go / build (1.21) (push) Successful in 1m12s
Go / build (1.22) (push) Successful in 1m10s
2024-03-02 01:00:08 -06:00
saji 56bff97fcf cleanup http api
All checks were successful
Go / build (1.21) (push) Successful in 1m17s
Go / build (1.22) (push) Successful in 1m15s
2024-03-02 00:46:24 -06:00
saji 70e7f0f15d refactor getValue for new filter/limit structs
All checks were successful
Go / build (1.22) (push) Successful in 1m16s
Go / build (1.21) (push) Successful in 1m13s
2024-03-01 21:15:51 -06:00
saji 3f1df06d1b added get bus even API request
All checks were successful
Go / build (1.21) (push) Successful in 1m15s
Go / build (1.22) (push) Successful in 1m14s
also provided framework for extracting filters
2024-03-01 19:12:35 -06:00
saji 7aaa47e142 fix missing error check, improve get test
All checks were successful
Go / build (1.21) (push) Successful in 1m18s
Go / build (1.22) (push) Successful in 1m17s
2024-03-01 16:33:30 -06:00
saji 68347e8b95 rework DB getters
Some checks failed
Go / build (1.21) (push) Failing after 1m6s
Go / build (1.22) (push) Failing after 1m6s
abandon generic query frag for common structures
Instead of using the QueryFrag struct, which was too generic to be
generally useful, we have moved to a BusEventFilter type, which
contains things we may filter on when we're searching for bus events.
At the moment it just contains names, and start/stop times.
Then in each function we can accept this filter struct and convert
it to fit the query.

We also support general modifiers, and currently have one implemented:
the LimitOffsetModifier. This adds a LIMIT and OFFSET clause to any
statement. these are all applied at the end and receive a stringbuilder
which may prevent certain operations from being structured.
We need to work on this one more, potentially abandoning.
2024-03-01 16:25:33 -06:00
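
To make the shape described above concrete, here is a hedged usage sketch based on the BusEventFilter, LimitOffsetModifier, and GetPackets definitions that appear in the db.go diff further down this page; the packet name and page size are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/kschamplin/gotelem"
)

func main() {
	tdb, err := gotelem.OpenTelemDb("gotelem.db")
	if err != nil {
		log.Fatal(err)
	}

	// Filter: one packet name over the last hour.
	filter := gotelem.BusEventFilter{
		Names:     []string{"bms_measurement"}, // illustrative packet name
		StartTime: time.Now().Add(-1 * time.Hour),
		EndTime:   time.Now(),
	}
	// Modifier: appends LIMIT/OFFSET to the generated statement for paging.
	page := &gotelem.LimitOffsetModifier{Limit: 100, Offset: 0}

	events, err := tdb.GetPackets(context.Background(), filter, page)
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range events {
		fmt.Println(ev.Timestamp, ev.Name)
	}
}
```
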
saji 4092fdba6f [skip ci] return errors in http api
All checks were successful
Go / build (1.21) (push) Successful in 1m17s
Go / build (1.22) (push) Successful in 1m18s
2024-02-29 13:13:25 -06:00
saji 19b337a84b remove recover, add telem log parser tests
All checks were successful
Go / build (1.21) (push) Successful in 1m5s
Go / build (1.22) (push) Successful in 1m12s
2024-02-28 18:44:48 -06:00
saji 41689eee10 skylab: add unmarshal bounds check
All checks were successful
Go / build (1.22) (push) Successful in 1m15s
Go / build (1.21) (push) Successful in 1m17s
remove recover from telem log parser
2024-02-28 18:41:01 -06:00
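
The bounds check mentioned here replaces a recover()-based safety net with an explicit length check before decoding a fixed-layout payload. A self-contained sketch of the pattern (not the generated skylab code):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// unmarshalVelocity is illustrative only: it validates the payload length up
// front and returns an error instead of relying on recover() to catch an
// out-of-range slice access.
func unmarshalVelocity(data []byte) (motor, vehicle float32, err error) {
	const wantLen = 8 // two little-endian float32 fields
	if len(data) < wantLen {
		return 0, 0, fmt.Errorf("short payload: got %d bytes, want %d", len(data), wantLen)
	}
	motor = math.Float32frombits(binary.LittleEndian.Uint32(data[0:4]))
	vehicle = math.Float32frombits(binary.LittleEndian.Uint32(data[4:8]))
	return motor, vehicle, nil
}

func main() {
	_, _, err := unmarshalVelocity([]byte{0x00, 0x01})
	fmt.Println(err) // short payload: got 2 bytes, want 8
}
```
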
saji aaec62fd3a support 1.21, add matrix for 1.21 and 1.22 in CI
All checks were successful
Go / build (1.22) (push) Successful in 1m16s
Go / build (1.21) (push) Successful in 2m30s
2024-02-28 14:17:47 -06:00
saji 3de6bfd7d3 fix version mismatch in tests
All checks were successful
Go / build (push) Successful in 2m31s
2024-02-28 14:12:17 -06:00
saji a8e7d407fe fix up static check issues
Some checks failed
Go / build (push) Failing after 35s
2024-02-28 14:10:40 -06:00
saji 9a98117384 clean up dead code 2024-02-28 14:01:44 -06:00
saji c95593bb86 migrate candump log parser to use regex instead
Some checks failed
Go / build (push) Failing after 1m10s
also add more test cases for error logic.
2024-02-28 13:56:57 -06:00
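
For reference, candump log lines look like "(1684538768.521889) can0 200#8D643546", the same shape that appears in the skylabify diff below. A hedged sketch of parsing such a line with a precompiled regular expression; the expression and helper are illustrative, not the repository's logparsers implementation:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"regexp"
	"strconv"
)

// candumpRE matches lines like "(1684538768.521889) can0 200#8D643546".
var candumpRE = regexp.MustCompile(`^\((\d+\.\d+)\)\s+(\w+)\s+([0-9A-Fa-f]+)#([0-9A-Fa-f]*)`)

// parseCandump extracts the timestamp, CAN id, and payload from one line.
func parseCandump(line string) (ts float64, id uint64, data []byte, err error) {
	m := candumpRE.FindStringSubmatch(line)
	if m == nil {
		return 0, 0, nil, fmt.Errorf("malformed candump line: %q", line)
	}
	if ts, err = strconv.ParseFloat(m[1], 64); err != nil {
		return
	}
	if id, err = strconv.ParseUint(m[3], 16, 32); err != nil {
		return
	}
	data, err = hex.DecodeString(m[4])
	return
}

func main() {
	ts, id, data, err := parseCandump("(1684538768.521889) can0 200#8D643546")
	fmt.Println(ts, id, data, err)
}
```
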
saji 481cac76c6 wip: rework log parsers
Some checks failed
Go / build (push) Failing after 57s
2024-02-28 13:04:59 -06:00
saji 9a6e1380d1 add vet and staticcheck to CI
Some checks failed
Go / build (push) Failing after 1m11s
2024-02-28 07:24:27 +00:00
saji a3f6e36cc4 didn't fix the actual error lol
All checks were successful
Go / build (push) Successful in 1m53s
2024-02-28 01:08:28 -06:00
saji d6cd4d3e26 fix tests, add packets stuff for db
Some checks failed
Go / build (push) Failing after 1m45s
2024-02-28 01:07:36 -06:00
saji 3f6db8e5c5 remove fixer 2024-02-28 01:07:36 -06:00
saji 05d9584fb5 Update .github/workflows/go.yml
Some checks failed
Go / build (push) Failing after 1m43s
2024-02-26 05:37:08 +00:00
saji 50456fd250 Update readme.md
Some checks reported warnings
Go / build (macos-latest) (push) Has been cancelled
Go / build (ubuntu-latest) (push) Has been cancelled
2024-02-26 05:32:20 +00:00
saji 0afb8d48bd wip: cleanup 2024-02-24 22:57:17 -06:00
saji 027bde3e68 test: change description 2024-02-24 16:58:14 -06:00
saji 34fca3d564 wip: work on http api 2024-02-24 16:48:19 -06:00
saji b5f5289b45 buncha fixes
use builtin slog instead of experimental
update all packages to fix a bug
2024-02-18 22:41:22 -06:00
saji e551390089 wip: more fixes, figure out main problem 2024-02-18 13:40:41 -06:00
saji 8af1b725d1 wip: debugging why parameters don't substitute right 2024-02-18 13:10:27 -06:00
saji d456e64656 rename dbloggingservice to dbwriterservice 2024-02-18 00:11:08 -06:00
saji 3f9df5c1eb wip: add openmct-type historical api 2024-02-17 19:26:13 -06:00
saji 245a654164 Performance optimizations for import
note that you need to limit the number of writing threads.
2024-02-14 10:31:41 -06:00
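
Bounding the number of concurrent writers is what keeps a bulk import from flooding SQLite with parallel transactions. A minimal sketch using errgroup.SetLimit, the same mechanism the import command uses later in this comparison; the limit and the batch loop are illustrative:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg := new(errgroup.Group)
	eg.SetLimit(5) // at most 5 in-flight write batches; value is illustrative

	for batch := 0; batch < 20; batch++ {
		b := batch
		// Go blocks once the limit is reached, so no more than 5 writers run at a time.
		eg.Go(func() error {
			fmt.Println("writing batch", b) // stand-in for db.AddEventsCtx(ctx, events...)
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("import failed:", err)
	}
}
```
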
saji a4ca71d0ad make regex precompiled
that made things fast lol
also fix error output for skylabify
2024-02-14 02:15:21 -06:00
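
The speedup comes from compiling the expression once instead of on every call. A minimal before/after sketch, assuming a hot loop that matches each log line:

```go
package main

import (
	"fmt"
	"regexp"
)

// Anti-pattern: compiling inside the hot path recompiles the expression on
// every call, which dominates runtime when parsing large log files.
func matchSlow(line string) bool {
	return regexp.MustCompile(`^\(\d+\.\d+\)`).MatchString(line)
}

// Preferred: compile once at package scope and reuse the compiled program.
var lineRE = regexp.MustCompile(`^\(\d+\.\d+\)`)

func matchFast(line string) bool {
	return lineRE.MatchString(line)
}

func main() {
	line := "(1684538768.521889) can0 200#8D643546"
	fmt.Println(matchSlow(line), matchFast(line)) // true true
}
```
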
saji efc3ad684a replace raw line with line num in import script 2024-02-13 13:43:26 -06:00
saji 2d9d32dbf4 remove streaming db insert 2024-02-13 13:41:32 -06:00
saji 33c2f3f023 fix bug preventing slice copy 2024-02-13 10:50:11 -06:00
saji 6c5162a8be wip: threaded import, doesn't work 2024-02-13 10:42:28 -06:00
saji 938b5b7000 add import log table (unused) 2024-02-13 10:04:01 -06:00
saji 3ae157a3de revert busevent pointer change 2024-02-13 10:03:39 -06:00
saji 0c8a25a2f4 move parsers to internal library
many fixes related to previous changes
added import command
2024-02-12 14:38:01 -06:00
saji c4bdf122a8 major cleanup/refactor
delete mprpc
move can frame to it's own library
create CANID type (better extended id support)
rework database format, only uses name + json now
busEvent and rawJsonEvent don't store the Id anymore
2024-02-12 09:45:23 -06:00
saji 1812807581 wip: idk 2024-01-07 23:01:22 -06:00
saji 675c42cfd5 fix marshaljson not being called 2023-09-29 07:53:29 +09:30
saji 2d634d863d pytelem major work 2023-09-19 14:17:22 -05:00
saji c772d6e95f some python crap 2023-07-10 18:10:21 -05:00
saji 1570497ddc add old log fixer. 2023-07-07 14:03:53 -05:00
saji 7b5cf8a107 fix inverted socketcan 2023-07-06 15:32:36 -05:00
saji 43b84dc0fb remove vcantest (use canplayer instead) 2023-07-06 15:22:00 -05:00
saji 9280067d87 db migration fixes
- fix offbyone for executing migrations
- fix user_version setter not working
- add one more migration for WIP weather table
2023-07-06 15:21:41 -05:00
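
SQLite keeps a schema version in the user_version pragma, which is how migration code decides what still needs to run. PRAGMA statements do not accept bound parameters, so the setter formats the integer directly into the statement, matching the GetVersion/SetVersion helpers in the db.go diff below. A small self-contained sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

// getVersion reads the current schema version from the user_version pragma.
func getVersion(db *sqlx.DB) (int, error) {
	var v int
	err := db.Get(&v, "PRAGMA user_version")
	return v, err
}

// setVersion records a new schema version. PRAGMAs cannot be parameterized,
// so the value (an int, so safe) is formatted into the statement text.
func setVersion(db *sqlx.DB, v int) error {
	_, err := db.Exec(fmt.Sprintf("PRAGMA user_version = %d", v))
	return err
}

func main() {
	db, err := sqlx.Connect("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	if err := setVersion(db, 2); err != nil {
		log.Fatal(err)
	}
	v, _ := getVersion(db)
	fmt.Println("schema version:", v) // 2
}
```
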
saji 62e162e939 set sqlite user version after migration 2023-07-06 13:46:02 -05:00
saji 969e17a169 get migrations working 2023-07-06 11:26:00 -05:00
saji d5b960ad8a wip: more db migration work 2023-07-06 00:24:05 -05:00
saji 23ca4ba9a2 qt logger 2023-07-05 21:17:25 -05:00
saji 63087deffb wip: db migration 2023-07-05 21:16:26 -05:00
saji ec02284657 more gui 2023-07-05 18:55:29 -05:00
saji 6eedae18c6 fix db test compile issue 2023-07-04 00:50:43 -05:00
saji d88313f742 make some python happen 2023-07-04 00:50:34 -05:00
saji fcfc605f77 fix db test compile issue 2023-07-03 13:56:42 -05:00
saji c68dff9d40 fix timestamps once and for all 2023-07-03 13:51:15 -05:00
saji a36e1478bc fix gitignore issues 2023-07-03 13:36:22 -05:00
saji e8ef22c0d0 wip: fix timestamps 2023-07-03 09:44:32 -05:00
saji 96796ad794 fix skylabify output 2023-06-30 22:08:06 -05:00
saji 699cfb5e3d big update 2023-06-30 11:51:06 -05:00
saji 88a170825c update gitignore for executables and databases 2023-06-30 07:41:41 -05:00
saji 48209e7738 add incomplete db test 2023-06-30 07:41:05 -05:00
saji c52bccb140 db fixes 2023-06-30 07:40:50 -05:00
saji c3d6c3b553 use custom error and panic on invariant violation 2023-06-30 07:18:15 -05:00
saji 5ceaa7bf9d name in json 2023-06-29 23:59:31 -05:00
saji 04308611ff make timestamp integer 2023-06-29 20:57:09 -05:00
93 changed files with 15149 additions and 5174 deletions

.github/workflows/build-openmct.yml (new file, 46 lines)

@@ -0,0 +1,46 @@
name: release
on:
push:
tags:
- '*'
jobs:
release-full:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.22
- name: Set up Node
uses: actions/setup-node@v4
- name: Install OpenMCT
run: npm ci
working-directory: web/
- name: Build OpenMCT bundle
run: npm run build
working-directory: web/
- name: Build
uses: crazy-max/ghaction-xgo@v3
with:
xgo_version: latest
go_version: 1.21
pkg: cmd/gotelem
dest: build
prefix: gotelem-full
targets: windows/amd64,linux/amd64,linux/arm64,linux/arm/v7,darwin/arm64,darwin/amd64
tags: openmct
v: true
x: false
race: false
ldflags: -s -w
buildmode: default
trimpath: true
- name: Release binaries
uses: https://gitea.com/actions/release-action@main
with:
files: |-
build/**
api_key: '${{secrets.RELEASE_TOKEN}}'

.github/workflows/go.yml

@@ -1,27 +1,33 @@
# This workflow will build a golang project
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
name: Go
on:
push:
branches: [ "master" ]
paths:
- "**.go"
jobs:
build:
runs-on: ${{ matrix.os }}
build-gotelem:
runs-on: ubuntu-latest
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
go-version: ['1.21', '1.22']
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Install Staticcheck
run: go install honnef.co/go/tools/cmd/staticcheck@latest
- name: Build
run: go build -v ./cmd/gotelem
- name: Test
run: go test -v ./...
- name: Vet
run: go vet ./...
- name: Staticcheck
run: staticcheck ./...

.github/workflows/nodejs.yml (new file, 25 lines)

@@ -0,0 +1,25 @@
name: Node.js CI
on:
push:
paths:
- "web/**"
jobs:
build-openmct:
runs-on: ubuntu-latest
defaults:
run:
working-directory: ./web/
steps:
- uses: actions/checkout@v4
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '20.x'
- run: npm ci
- run: npm run build --if-present
- run: npx eslint .

.gitignore (11 lines changed)

@@ -21,3 +21,14 @@
go.work
.vscode/
/gotelem
/skylabify
*.db
*.db-journal
/logs/
*.db-wal
*.db-shm
*.sqbpro


@@ -4,11 +4,14 @@ import (
"errors"
"sync"
"log/slog"
"github.com/kschamplin/gotelem/skylab"
"golang.org/x/exp/slog"
)
type JBroker struct {
// Broker is a Bus Event broadcast system. You can subscribe to events,
// and send events.
type Broker struct {
subs map[string]chan skylab.BusEvent // contains the channel for each subsciber
logger *slog.Logger
@@ -16,15 +19,17 @@ type JBroker struct {
bufsize int // size of chan buffer in elements.
}
func NewBroker(bufsize int, logger *slog.Logger) *JBroker {
return &JBroker{
// NewBroker creates a new broker with a given logger.
func NewBroker(bufsize int, logger *slog.Logger) *Broker {
return &Broker{
subs: make(map[string]chan skylab.BusEvent),
logger: logger,
bufsize: bufsize,
}
}
func (b *JBroker) Subscribe(name string) (ch chan skylab.BusEvent, err error) {
// Subscribe joins the broker with the given name. The name must be unique.
func (b *Broker) Subscribe(name string) (ch chan skylab.BusEvent, err error) {
// get rw lock.
b.lock.Lock()
defer b.lock.Unlock()
@@ -32,23 +37,33 @@ func (b *JBroker) Subscribe(name string) (ch chan skylab.BusEvent, err error) {
if ok {
return nil, errors.New("name already in use")
}
b.logger.Info("new subscriber", "name", name)
b.logger.Info("subscribe", "name", name)
ch = make(chan skylab.BusEvent, b.bufsize)
b.subs[name] = ch
return
}
func (b *JBroker) Unsubscribe(name string) {
// Unsubscribe removes a subscriber matching the name. It doesn't do anything
// if there's nobody subscribed with that name
func (b *Broker) Unsubscribe(name string) {
// remove the channel from the map. We don't need to close it.
b.lock.Lock()
defer b.lock.Unlock()
delete(b.subs, name)
b.logger.Debug("unsubscribe", "name", name)
if _, ok := b.subs[name]; ok {
close(b.subs[name])
delete(b.subs, name)
}
}
func (b *JBroker) Publish(sender string, message skylab.BusEvent) {
// Publish sends a bus event to all subscribers. It includes a sender
// string which prevents loopback.
func (b *Broker) Publish(sender string, message skylab.BusEvent) {
b.lock.RLock()
defer b.lock.RUnlock()
b.logger.Debug("publish", "sender", sender, "message", message)
for name, ch := range b.subs {
if name == sender {
continue
@@ -56,7 +71,6 @@ func (b *JBroker) Publish(sender string, message skylab.BusEvent) {
// non blocking send.
select {
case ch <- message:
b.logger.Debug("sent message", "dest", name, "src", sender)
default:
b.logger.Warn("recipient buffer full", "dest", name)
}

broker_test.go (new file, 122 lines)

@@ -0,0 +1,122 @@
package gotelem
import (
"log/slog"
"os"
"sync"
"testing"
"time"
"github.com/kschamplin/gotelem/skylab"
)
func makeEvent() skylab.BusEvent {
var pkt skylab.Packet = &skylab.BmsMeasurement{
BatteryVoltage: 12000,
AuxVoltage: 24000,
Current: 1.23,
}
return skylab.BusEvent{
Timestamp: time.Now(),
Name: pkt.String(),
Data: pkt,
}
}
func TestBroker(t *testing.T) {
t.Parallel()
t.Run("test send", func(t *testing.T) {
flog := slog.New(slog.NewTextHandler(os.Stderr, nil))
broker := NewBroker(10, flog)
sub, err := broker.Subscribe("testSub")
if err != nil {
t.Fatalf("error subscribing: %v", err)
}
testEvent := makeEvent()
go func() {
time.Sleep(time.Millisecond * 1)
broker.Publish("other", testEvent)
}()
var recvEvent skylab.BusEvent
select {
case recvEvent = <-sub:
if !testEvent.Equals(&recvEvent) {
t.Fatalf("events not equal, want %v got %v", testEvent, recvEvent)
}
case <-time.After(1 * time.Second):
t.Fatalf("timeout waiting for packet")
}
})
t.Run("multiple broadcast", func(t *testing.T) {
flog := slog.New(slog.NewTextHandler(os.Stderr, nil))
broker := NewBroker(10, flog)
testEvent := makeEvent()
wg := sync.WaitGroup{}
clientFn := func(name string) {
sub, err := broker.Subscribe(name)
if err != nil {
t.Log(err)
return
}
<-sub
wg.Done()
}
wg.Add(2)
go clientFn("client1")
go clientFn("client2")
// yes this is stupid. otherwise we race.
time.Sleep(10 * time.Millisecond)
broker.Publish("sender", testEvent)
done := make(chan bool)
go func() {
wg.Wait()
done <- true
}()
select {
case <-done:
case <-time.After(1 * time.Second):
t.Fatal("timeout waiting for clients")
}
})
t.Run("name collision", func(t *testing.T) {
flog := slog.New(slog.NewTextHandler(os.Stderr, nil))
broker := NewBroker(10, flog)
_, err := broker.Subscribe("collide")
if err != nil {
t.Fatal(err)
}
_, err = broker.Subscribe("collide")
if err == nil {
t.Fatal("expected error, got nil")
}
})
t.Run("unsubscribe", func(t *testing.T) {
flog := slog.New(slog.NewTextHandler(os.Stderr, nil))
broker := NewBroker(10, flog)
ch, err := broker.Subscribe("test")
if err != nil {
t.Fatal(err)
}
broker.Unsubscribe("test")
_, ok := <-ch
if ok {
t.Fatal("expected dead channel, but channel returned result")
}
})
}


@@ -1,22 +0,0 @@
package gotelem
import (
"github.com/jmoiron/sqlx"
_ "github.com/mattn/go-sqlite3"
)
// this file implements a CAN adapter for the sqlite db.
type CanDB struct {
Db *sqlx.DB
}
func (cdb *CanDB) Send(_ *Frame) error {
panic("not implemented") // TODO: Implement
}
func (cdb *CanDB) Recv() (*Frame, error) {
panic("not implemented") // TODO: Implement
}
func NewCanDB() {}


@@ -1,22 +1,171 @@
package cli
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"strings"
"sync/atomic"
"github.com/kschamplin/gotelem"
"github.com/kschamplin/gotelem/mprpc"
"github.com/kschamplin/gotelem/internal/logparsers"
"github.com/kschamplin/gotelem/skylab"
"github.com/urfave/cli/v2"
"golang.org/x/sync/errgroup"
)
var parsersString string
func init() {
subCmds = append(subCmds, clientCmd)
parsersString = func() string {
// create a string like "'telem', 'candump', 'anotherparser'"
keys := make([]string, len(logparsers.ParsersMap))
i := 0
for k := range logparsers.ParsersMap {
keys[i] = k
i++
}
s := strings.Join(keys, "', '")
return "'" + s + "'"
}()
}
var importCmd = &cli.Command{
Name: "import",
Aliases: []string{"i"},
Usage: "import a log file into a database",
ArgsUsage: "[log file]",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "format",
Aliases: []string{"f"},
Usage: "the format of the log file. One of " + parsersString,
Value: "telem",
},
&cli.PathFlag{
Name: "database",
Aliases: []string{"d", "db"},
Usage: "the path of the database",
Value: "gotelem.db",
},
&cli.UintFlag{
Name: "batch-size",
Usage: "the maximum size of each SQL transaction",
Value: 800,
},
},
Action: importAction,
}
// importAction peforms a file import to the database. It can use any of the parsers provided
// by logparsers. Adding new parsers there will work.
func importAction(ctx *cli.Context) error {
path := ctx.Args().Get(0)
if path == "" {
fmt.Println("missing log file!")
cli.ShowAppHelpAndExit(ctx, -1)
}
fstream, err := os.Open(path)
if err != nil {
return err
}
fReader := bufio.NewReader(fstream)
pfun, ok := logparsers.ParsersMap[ctx.String("format")]
if !ok {
fmt.Println("invalid format provided: must be one of " + parsersString)
cli.ShowAppHelpAndExit(ctx, -1)
}
dbPath := ctx.Path("database")
db, err := gotelem.OpenTelemDb(dbPath)
if err != nil {
return fmt.Errorf("error opening database: %w", err)
}
// we should batch data, avoiding individual transactions to the database.
bSize := ctx.Uint("batch-size")
eventsBatch := make([]skylab.BusEvent, bSize)
batchIdx := 0
// stats for imports
var n_pkt atomic.Int64
delegateInsert := func(events []skylab.BusEvent) {
n, err := db.AddEventsCtx(ctx.Context, events...)
if err != nil {
fmt.Printf("%v", err)
return
}
n_pkt.Add(n)
}
// we use this errorgroup to limit the number of
// running goroutines to a normal value. This way
// we don't thrash the system,
eg := new(errgroup.Group)
eg.SetLimit(5)
var linenum int64 = 0
n_unknown := 0
n_error := 0
for {
line, err := fReader.ReadString('\n')
if err != nil {
if errors.Is(err, io.EOF) {
break // end of file, go to the flush sequence
}
return err
}
f, err := pfun(line)
var idErr *skylab.UnknownIdError
if errors.As(err, &idErr) {
fmt.Printf("unknown id %v\n", idErr.Error())
n_unknown++
continue
} else if err != nil {
fmt.Printf("got an error processing line %d: %v\n", linenum, err)
n_error++
continue
}
eventsBatch[batchIdx] = f
linenum++
batchIdx++
if batchIdx >= int(bSize) {
e := make([]skylab.BusEvent, bSize)
copy(e, eventsBatch)
eg.Go(func() error {
delegateInsert(e)
return nil
})
batchIdx = 0 // reset the batch
}
}
// check if we have remaining packets and flush them
if batchIdx > 0 {
eg.Go(func() error {
// since we don't do any modification
// we can avoid the copy
delegateInsert(eventsBatch[:batchIdx])
return nil
})
}
// wait for any goroutines.
eg.Wait()
fmt.Printf("import status: %d successful, %d unknown, %d errors\n", n_pkt.Load(), n_unknown, n_error)
return nil
}
var clientCmd = &cli.Command{
Name: "client",
Aliases: []string{"c"},
Usage: "interact with a gotelem server",
ArgsUsage: "[server url]",
Name: "client",
Aliases: []string{"c"},
Subcommands: []*cli.Command{importCmd},
Usage: "Client utilities and tools",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "gui",
@@ -25,18 +174,11 @@ var clientCmd = &cli.Command{
},
},
Description: `
Connects to a gotelem server or relay. Can be used to
Connects to a gotelem server or relay. Also acts as a helper command line tool.
`,
Action: client,
}
func client(ctx *cli.Context) error {
return nil
}
// the client should connect to a TCP server and listen to packets.
func CANFrameHandler(f *gotelem.Frame) (*mprpc.RPCEmpty, error) {
fmt.Printf("got frame, %v\n", f)
return nil, nil
}


@@ -1,8 +1,11 @@
package cli
import (
"context"
"log"
"os"
"os/signal"
"runtime/pprof"
"github.com/urfave/cli/v2"
)
@@ -12,15 +15,40 @@ var subCmds = []*cli.Command{
xbeeCmd,
}
func Execute() {
app := &cli.App{
Name: "gotelem",
Usage: "see everything",
Usage: "The Ultimate Telemetry Tool!",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "profile",
Usage: "enable profiling",
},
},
Before: func(ctx *cli.Context) error {
if ctx.Bool("profile") {
f, err := os.Create("cpuprofile")
if err != nil {
return err
}
pprof.StartCPUProfile(f)
}
return nil
},
After: func(ctx *cli.Context) error {
if ctx.Bool("profile") {
pprof.StopCPUProfile()
}
return nil
},
Commands: subCmds,
}
if err := app.Run(os.Args); err != nil {
// setup context for cancellation.
ctx := context.Background()
ctx, _ = signal.NotifyContext(ctx, os.Interrupt)
if err := app.RunContext(ctx, os.Args); err != nil {
log.Fatal(err)
}
}


@@ -3,16 +3,19 @@ package cli
import (
"encoding/json"
"fmt"
"net"
"io"
"net/http"
"math"
"time"
"os"
"sync"
"time"
"log/slog"
"github.com/kschamplin/gotelem"
"github.com/kschamplin/gotelem/skylab"
"github.com/kschamplin/gotelem/xbee"
"github.com/urfave/cli/v2"
"golang.org/x/exp/slog"
)
var serveFlags = []cli.Flag{
@@ -22,11 +25,21 @@ var serveFlags = []cli.Flag{
Usage: "The XBee to connect to. Leave blank to not use XBee",
EnvVars: []string{"XBEE_DEVICE"},
},
&cli.StringFlag{
Name: "logfile",
Aliases: []string{"l"},
Value: "log.txt",
Usage: "file to store log to",
&cli.PathFlag{
Name: "logfile",
Aliases: []string{"l"},
DefaultText: "log.txt",
Usage: "file to store log to",
},
&cli.PathFlag{
Name: "db",
Aliases: []string{"d"},
DefaultText: "gotelem.db",
Usage: "database to serve, if not specified will use memory",
},
&cli.BoolFlag{
Name: "demo",
Usage: "enable the demo packet stream",
},
}
@@ -44,8 +57,13 @@ var serveCmd = &cli.Command{
type service interface {
fmt.Stringer
Start(cCtx *cli.Context, broker *gotelem.JBroker, logger *slog.Logger) (err error)
Status()
Start(cCtx *cli.Context, deps svcDeps) (err error)
}
type svcDeps struct {
Broker *gotelem.Broker
Db *gotelem.TelemDb
Logger *slog.Logger
}
// this variable stores all the hanlders. It has some basic ones, but also
@@ -53,28 +71,61 @@ type service interface {
// or if certain features are present (see cli/sqlite.go)
var serveThings = []service{
&xBeeService{},
&canLoggerService{},
&rpcService{},
&httpService{},
&DemoService{},
}
func serve(cCtx *cli.Context) error {
// TODO: output both to stderr and a file.
logger := slog.New(slog.NewTextHandler(os.Stderr))
var output io.Writer = os.Stderr
if cCtx.IsSet("logfile") {
// open the file.
p := cCtx.Path("logfile")
f, err := os.OpenFile(p, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}
output = io.MultiWriter(os.Stderr, f)
}
// create a new logger
logger := slog.New(slog.NewTextHandler(output, nil))
slog.SetDefault(logger)
broker := gotelem.NewBroker(3, logger.WithGroup("broker"))
broker := gotelem.NewBroker(20, logger.WithGroup("broker"))
// open database
dbPath := "gotelem.db"
if cCtx.IsSet("db") {
dbPath = cCtx.Path("db")
}
logger.Info("opening database", "path", dbPath)
db, err := gotelem.OpenTelemDb(dbPath)
if err != nil {
return err
}
wg := sync.WaitGroup{}
deps := svcDeps{
Logger: logger,
Broker: broker,
Db: db,
}
for _, svc := range serveThings {
logger.Info("starting service", "svc", svc.String())
logger.Info("starting service", "service", svc.String())
wg.Add(1)
go func(mySvc service, baseLogger *slog.Logger) {
svcLogger := logger.With("svc", mySvc.String())
svcLogger := logger.With("service", mySvc.String())
s := deps
s.Logger = svcLogger
defer wg.Done()
err := mySvc.Start(cCtx, broker, svcLogger)
// TODO: recover
err := mySvc.Start(cCtx, s)
if err != nil {
logger.Error("service stopped!", "err", err, "svc", mySvc.String())
logger.Error("service stopped!", "err", err, "service", mySvc.String())
}
}(svc, logger)
}
@@ -84,106 +135,6 @@ func serve(cCtx *cli.Context) error {
return nil
}
type rpcService struct {
}
func (r *rpcService) Status() {
}
func (r *rpcService) String() string {
return "rpcService"
}
func (r *rpcService) Start(ctx *cli.Context, broker *gotelem.JBroker, logger *slog.Logger) error {
// TODO: extract port/ip from cli context.
ln, err := net.Listen("tcp", "0.0.0.0:8082")
if err != nil {
logger.Warn("error listening", "err", err)
return err
}
for {
conn, err := ln.Accept()
if err != nil {
logger.Warn("error accepting connection", "err", err)
}
go handleCon(conn, broker, logger.With("addr", conn.RemoteAddr()), ctx.Done())
}
}
func handleCon(conn net.Conn, broker *gotelem.JBroker, l *slog.Logger, done <-chan struct{}) {
// reader := msgp.NewReader(conn)
subname := fmt.Sprint("tcp", conn.RemoteAddr().String())
l.Info("started handling", "name", subname)
defer conn.Close()
rxCh, err := broker.Subscribe(subname)
if err != nil {
l.Error("error subscribing to connection", "err", err)
return
}
defer broker.Unsubscribe(subname)
jEncode := json.NewEncoder(conn)
for {
select {
case msg := <-rxCh:
l.Info("got packet")
// FIXME: poorly optimized
err := jEncode.Encode(msg)
if err != nil {
l.Warn("error encoding json", "err", err)
}
case <-done:
return
}
}
}
// this spins up a new can socket on vcan0 and broadcasts a packet every second. for testing.
type canLoggerService struct {
}
func (c *canLoggerService) String() string {
return "CanLoggerService"
}
func (c *canLoggerService) Status() {
}
func (c *canLoggerService) Start(cCtx *cli.Context, broker *gotelem.JBroker, l *slog.Logger) (err error) {
rxCh, err := broker.Subscribe("canDump")
if err != nil {
return err
}
t := time.Now()
fname := fmt.Sprintf("candump_%d-%02d-%02dT%02d.%02d.%02d.txt",
t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
l.Info("logging to file", "filename", fname)
f, err := os.Create(fname)
if err != nil {
l.Error("error opening file", "filename", fname, "err", err)
return
}
enc := json.NewEncoder(f)
for {
select {
case msg := <-rxCh:
enc.Encode(msg)
case <-cCtx.Done():
f.Close()
return
}
}
}
// xBeeService provides data over an Xbee device, either by serial or TCP
// based on the url provided in the xbee flag. see the description for details.
type xBeeService struct {
@@ -196,7 +147,10 @@ func (x *xBeeService) String() string {
func (x *xBeeService) Status() {
}
func (x *xBeeService) Start(cCtx *cli.Context, broker *gotelem.JBroker, logger *slog.Logger) (err error) {
func (x *xBeeService) Start(cCtx *cli.Context, deps svcDeps) (err error) {
logger := deps.Logger
broker := deps.Broker
tdb := deps.Db
if cCtx.String("xbee") == "" {
logger.Info("not using xbee")
return
@@ -219,7 +173,22 @@ func (x *xBeeService) Start(cCtx *cli.Context, broker *gotelem.JBroker, logger *
}
logger.Info("connected to local xbee", "addr", x.session.LocalAddr())
encode := json.NewEncoder(x.session)
// these are the ways we send/recieve data. we could swap for binary format
// TODO: buffering and/or binary encoding instead of json which is horribly ineffective.
xbeeTxer := json.NewEncoder(x.session)
xbeeRxer := json.NewDecoder(x.session)
go func() {
for {
var p skylab.BusEvent
err := xbeeRxer.Decode(&p)
if err != nil {
logger.Error("failed to decode xbee packet")
}
broker.Publish("xbee", p)
tdb.AddEventsCtx(cCtx.Context, p)
}
}()
for {
select {
case <-cCtx.Done():
@@ -227,7 +196,7 @@ func (x *xBeeService) Start(cCtx *cli.Context, broker *gotelem.JBroker, logger *
return
case msg := <-rxCh:
logger.Info("got msg", "msg", msg)
encode.Encode(msg)
err := xbeeTxer.Encode(msg)
if err != nil {
logger.Warn("error writing to xbee", "err", err)
}
@@ -247,10 +216,84 @@ func (h *httpService) Status() {
}
func (h *httpService) Start(cCtx *cli.Context, broker *gotelem.JBroker, logger *slog.Logger) (err error) {
func (h *httpService) Start(cCtx *cli.Context, deps svcDeps) (err error) {
r := gotelem.TelemRouter(logger)
logger := deps.Logger
broker := deps.Broker
db := deps.Db
http.ListenAndServe(":8080", r)
r := gotelem.TelemRouter(logger, broker, db)
//
/// TODO: use custom port if specified
server := &http.Server{
Addr: ":8080",
Handler: r,
}
go func() {
<-cCtx.Done()
logger.Info("shutting down server")
server.Shutdown(cCtx.Context)
}()
if err := server.ListenAndServe(); err != http.ErrServerClosed {
logger.ErrorContext(cCtx.Context, "Error listening", "err", err)
}
return
}
type DemoService struct {
}
func (d *DemoService) String() string {
return "demo service"
}
func (d *DemoService) Start(cCtx *cli.Context, deps svcDeps) (err error) {
if !cCtx.Bool("demo") {
return
}
broker := deps.Broker
bmsPkt := &skylab.BmsMeasurement{
Current: 1.23,
BatteryVoltage: 11111,
AuxVoltage: 22222,
}
wslPkt := &skylab.WslVelocity{
MotorVelocity: 0,
VehicleVelocity: 100.0,
}
var next skylab.Packet = bmsPkt
for {
select {
case <-cCtx.Done():
return
case <-time.After(100 * time.Millisecond):
// send the next packet.
if next == bmsPkt {
bmsPkt.Current = float32(math.Sin(float64(time.Now().UnixMilli()) / 2000.0))
ev := skylab.BusEvent{
Timestamp: time.Now(),
Name: next.String(),
Data: next,
}
broker.Publish("livestream", ev)
next = wslPkt
} else {
// send the wsl
ev := skylab.BusEvent{
Timestamp: time.Now(),
Name: next.String(),
Data: next,
}
broker.Publish("livestream", ev)
next = bmsPkt
}
}
}
}


@@ -3,14 +3,14 @@
package cli
import (
"strings"
"errors"
"io"
"time"
"github.com/kschamplin/gotelem"
"github.com/kschamplin/gotelem/internal/can"
"github.com/kschamplin/gotelem/skylab"
"github.com/kschamplin/gotelem/socketcan"
"github.com/urfave/cli/v2"
"golang.org/x/exp/slog"
)
// this file adds socketCAN commands and functionality when building on linux.
@@ -43,7 +43,6 @@ type socketCANService struct {
}
func (s *socketCANService) Status() {
return
}
func (s *socketCANService) String() string {
@@ -53,18 +52,18 @@ func (s *socketCANService) String() string {
return s.name
}
func (s *socketCANService) Start(cCtx *cli.Context, broker *gotelem.JBroker, logger *slog.Logger) (err error) {
// vcan0 demo
// Start starts the socketCAN service - emitting packets sent from the broker.
func (s *socketCANService) Start(cCtx *cli.Context, deps svcDeps) (err error) {
if cCtx.String("can") == "" {
logger.Info("no can device provided")
logger := deps.Logger
broker := deps.Broker
tdb := deps.Db
if !cCtx.IsSet("can") {
logger.Debug("no can device provided, skip")
return
}
if strings.HasPrefix(cCtx.String("can"), "v") {
go vcanTest(cCtx.String("can"))
}
s.sock, err = socketcan.NewCanSocket(cCtx.String("can"))
if err != nil {
logger.Error("error opening socket", "err", err)
@@ -81,11 +80,14 @@ func (s *socketCANService) Start(cCtx *cli.Context, broker *gotelem.JBroker, log
defer broker.Unsubscribe("socketCAN")
// make a channel to receive socketCAN frames.
rxCan := make(chan gotelem.Frame)
rxCan := make(chan can.Frame)
go func() {
for {
pkt, err := s.sock.Recv()
if errors.Is(err, io.EOF) {
return
}
if err != nil {
logger.Warn("error receiving CAN packet", "err", err)
}
@@ -93,31 +95,35 @@ func (s *socketCANService) Start(cCtx *cli.Context, broker *gotelem.JBroker, log
}
}()
var frame gotelem.Frame
var frame can.Frame
for {
select {
case msg := <-rxCh:
id, d, _ := skylab.ToCanFrame(msg.Data)
frame.Id = id
frame.Data = d
frame, err = skylab.ToCanFrame(msg.Data)
if err != nil {
logger.Warn("error encoding can frame", "name", msg.Name, "err", err)
continue
}
s.sock.Send(&frame)
case msg := <-rxCan:
p, err := skylab.FromCanFrame(msg.Id, msg.Data)
p, err := skylab.FromCanFrame(msg)
if err != nil {
logger.Warn("error parsing can packet", "id", msg.Id)
logger.Warn("error parsing can packet", "id", msg.Id, "err", err)
continue
}
cde := skylab.BusEvent{
Timestamp: float64(time.Now().UnixNano()) / 1e9,
Id: uint64(msg.Id),
event := skylab.BusEvent{
Timestamp: time.Now(),
Name: p.String(),
Data: p,
}
broker.Publish("socketCAN", cde)
broker.Publish("socketCAN", event)
tdb.AddEventsCtx(cCtx.Context, event)
case <-cCtx.Done():
// close the socket.
s.sock.Close()
return
}
}
@@ -147,28 +153,3 @@ Various helper utilties for CAN bus on sockets.
},
},
}
func vcanTest(devname string) {
sock, err := socketcan.NewCanSocket(devname)
if err != nil {
slog.Error("error opening socket", "err", err)
return
}
testPkt := skylab.WslMotorCurrentVector{
Iq: 0.1,
Id: 0.2,
}
id, data, err := skylab.ToCanFrame(&testPkt)
testFrame := gotelem.Frame{
Id: id,
Data: data,
Kind: gotelem.CanSFFFrame,
}
for {
slog.Info("sending test packet")
sock.Send(&testFrame)
time.Sleep(1 * time.Second)
}
}


@@ -11,9 +11,10 @@ import (
"os"
"syscall"
"log/slog"
"github.com/kschamplin/gotelem/xbee"
"github.com/urfave/cli/v2"
"golang.org/x/exp/slog"
)
// context key stuff to prevent collisions
@@ -92,7 +93,7 @@ writtend to stdout.
}
func xbeeInfo(ctx *cli.Context) error {
logger := slog.New(slog.NewTextHandler(os.Stderr))
logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
transport := ctx.Context.Value(keyIODevice).(*xbee.Transport)
xb, err := xbee.NewSession(transport, logger.With("device", transport.Type()))
if err != nil {
@@ -115,7 +116,7 @@ func netcat(ctx *cli.Context) error {
return cli.Exit("missing [addr] argument", int(syscall.EINVAL))
}
logger := slog.New(slog.NewTextHandler(os.Stderr))
logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
transport := ctx.Context.Value(keyIODevice).(*xbee.Transport)
xb, _ := xbee.NewSession(transport, logger.With("devtype", transport.Type()))


@@ -2,19 +2,19 @@ package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"syscall"
"log/slog"
"github.com/kschamplin/gotelem/internal/logparsers"
"github.com/kschamplin/gotelem/skylab"
"github.com/urfave/cli/v2"
"golang.org/x/exp/slog"
)
// this command can be used to decode candump logs and dump json output.
@@ -43,16 +43,31 @@ required for piping candump into skylabify. Likewise, data should be stored with
-l.
`
parsersString := func() string {
// create a string like "'telem', 'candump', 'anotherparser'"
keys := make([]string, len(logparsers.ParsersMap))
i := 0
for k := range logparsers.ParsersMap {
keys[i] = k
i++
}
s := strings.Join(keys, "', '")
return "'" + s + "'"
}()
app.Flags = []cli.Flag{
&cli.BoolFlag{
Name: "verbose",
Aliases: []string{"v"},
},
&cli.StringFlag{
Name: "format",
Aliases: []string{"f"},
Usage: "the format of the incoming data. One of " + parsersString,
},
}
app.Action = run
app.HideHelp = true
if err := app.Run(os.Args); err != nil {
panic(err)
}
@@ -61,7 +76,7 @@ required for piping candump into skylabify. Likewise, data should be stored with
func run(ctx *cli.Context) (err error) {
path := ctx.Args().Get(0)
if path == "" {
fmt.Printf("missing input file\n")
fmt.Println("missing input file")
cli.ShowAppHelpAndExit(ctx, int(syscall.EINVAL))
}
@@ -75,54 +90,43 @@ func run(ctx *cli.Context) (err error) {
}
}
canDumpReader := bufio.NewReader(istream)
fileReader := bufio.NewReader(istream)
var pfun logparsers.BusEventParser
pfun, ok := logparsers.ParsersMap[ctx.String("format")]
if !ok {
fmt.Println("invalid format!")
cli.ShowAppHelpAndExit(ctx, int(syscall.EINVAL))
}
n_err := 0
unknown_packets := 0
for {
// dumpline looks like this:
// (1684538768.521889) can0 200#8D643546
dumpLine, err := canDumpReader.ReadString('\n')
line, err := fileReader.ReadString('\n')
if err != nil {
if errors.Is(err, io.EOF) {
return nil
}
return err
return err // i/o failures are fatal
}
// remove trailing newline
dumpLine = strings.TrimSpace(dumpLine)
segments := strings.Split(dumpLine, " ")
var cd skylab.BusEvent
// this is cursed but easiest way to get a float from a string.
fmt.Sscanf(segments[0], "(%g)", &cd.Timestamp)
// this is for the latter part, we need to split id/data
hexes := strings.Split(segments[2], "#")
// get the id
cd.Id, err = strconv.ParseUint(hexes[0], 16, 64)
if err != nil {
return err
}
// get the data to a []byte
rawData, err := hex.DecodeString(hexes[1])
if err != nil {
return err
}
// parse the data []byte to a skylab packet
cd.Data, err = skylab.FromCanFrame(uint32(cd.Id), rawData)
f, err := pfun(line)
var idErr *skylab.UnknownIdError
if errors.As(err, &idErr) {
// unknown id
slog.Info("unknown id", "err", err)
unknown_packets++
continue
} else if err != nil {
return err
// TODO: we should consider absorbing all errors.
slog.Error("got an error", "err", err)
n_err++
continue
}
// format and print out the JSON.
out, _ := json.Marshal(cd)
out, _ := json.Marshal(&f)
fmt.Println(string(out))
}

db.go (463 lines changed)

@@ -1,9 +1,12 @@
package gotelem
// this file implements the database functions to load/store/read from a sql database.
import (
"context"
"encoding/json"
"strconv"
"errors"
"fmt"
"strings"
"time"
@@ -16,168 +19,370 @@ type TelemDb struct {
db *sqlx.DB
}
type TelemDbOption func(*TelemDb) error
func OpenTelemDb(path string, options ...TelemDbOption) (tdb *TelemDb, err error) {
// this function is internal use. It actually opens the database, but uses
// a raw path string instead of formatting one like the exported functions.
func OpenRawDb(rawpath string) (tdb *TelemDb, err error) {
tdb = &TelemDb{}
tdb.db, err = sqlx.Connect("sqlite3", path)
tdb.db, err = sqlx.Connect("sqlite3", rawpath)
if err != nil {
return
}
// TODO: add options support.
for _, fn := range options {
err = fn(tdb)
if err != nil {
return
}
}
// execute database up statement (better hope it is idempotent!)
_, err = tdb.db.Exec(sqlDbUp)
// perform any database migrations
version, err := tdb.GetVersion()
if err != nil {
return
}
// TODO: use logging instead of printf
fmt.Printf("starting version %d\n", version)
return tdb, nil
version, err = RunMigrations(tdb)
fmt.Printf("ending version %d\n", version)
return tdb, err
}
// the sql commands to create the database.
const sqlDbUp = `
CREATE TABLE IF NOT EXISTS "bus_events" (
"ts" REAL NOT NULL, -- timestamp
"id" INTEGER NOT NULL, -- can ID
"name" TEXT NOT NULL, -- name of base packet
"index" INTEGER, -- index of the repeated packet (base_id = id - index)
"packet" TEXT NOT NULL CHECK(json_valid(packet)) -- JSON object describing the data
);
// this string is used to open the read-write db.
// the extra options improve performance significantly.
const ProductionDbURI = "file:%s?_journal_mode=wal&mode=rwc&_txlock=immediate&_timeout=10000"
CREATE INDEX IF NOT EXISTS "ids_timestamped" ON "bus_events" (
"id",
"ts" DESC
);
// OpenTelemDb opens a new telemetry database at the given path.
func OpenTelemDb(path string) (*TelemDb, error) {
dbStr := fmt.Sprintf(ProductionDbURI, path)
return OpenRawDb(dbStr)
}
CREATE INDEX IF NOT EXISTS "times" ON "bus_events" (
"ts" DESC
);
func (tdb *TelemDb) GetVersion() (int, error) {
var version int
err := tdb.db.Get(&version, "PRAGMA user_version")
return version, err
}
-- this table shows when we started/stopped logging.
CREATE TABLE "bus_records" (
"id" INTEGER NOT NULL UNIQUE,
"start_time" INTEGER NOT NULL,
"end_time" INTEGER,
"note" TEXT,
PRIMARY KEY("id" AUTOINCREMENT),
CONSTRAINT "duration_valid" CHECK(end_time is null or start_time < end_time)
);
`
// sql sequence to tear down the database.
// not used often, but good to keep track of what's going on.
// Up() then Down() should result in an empty database.
const sqlDbDown = `
DROP TABLE "bus_events";
DROP INDEX "ids_timestamped";
DROP INDEX "times";
DROP TABLE "bus_records";
`
func (tdb *TelemDb) SetVersion(version int) error {
stmt := fmt.Sprintf("PRAGMA user_version = %d", version)
_, err := tdb.db.Exec(stmt)
return err
}
// sql expression to insert a bus event into the packets database.1
const sqlInsertEvent = `
INSERT INTO "bus_events" (time, can_id, name, index, packet) VALUES ($1, $2, $3, json($4));
`
const sqlInsertEvent = `INSERT INTO "bus_events" (ts, name, data) VALUES `
// AddEvent adds the bus event to the database.
func (tdb *TelemDb) AddEvents(events ...skylab.BusEvent) {
//
tx, err := tdb.db.Begin()
func (tdb *TelemDb) AddEventsCtx(ctx context.Context, events ...skylab.BusEvent) (n int64, err error) {
// edge case - zero events.
if len(events) == 0 {
return 0, nil
}
n = 0
tx, err := tdb.db.BeginTx(ctx, nil)
defer tx.Rollback()
if err != nil {
tx.Rollback()
return
}
sqlStmt := sqlInsertEvent
const rowSql = "(?, ?, json(?))"
inserts := make([]string, len(events))
vals := []interface{}{}
idx := 0 // we have to manually increment, because sometimes we don't insert.
for _, b := range events {
j, err := json.Marshal(b.Data)
inserts[idx] = rowSql
var j []byte
j, err = json.Marshal(b.Data)
if err != nil {
tx.Rollback()
return
// we had some error turning the packet into json.
continue // we silently skip.
}
tx.Exec(sqlInsertEvent, b.Timestamp, b.Id, b.Name, j)
vals = append(vals, b.Timestamp.UnixMilli(), b.Data.String(), j)
idx++
}
// construct the full statement now
sqlStmt = sqlStmt + strings.Join(inserts[:idx], ",")
stmt, err := tx.PrepareContext(ctx, sqlStmt)
// defer stmt.Close()
if err != nil {
return
}
res, err := stmt.ExecContext(ctx, vals...)
if err != nil {
return
}
n, err = res.RowsAffected()
tx.Commit()
return
}
// QueryIdString is a string that filters ids from the set. use ID query functions to
// create them.
type QueryIdString string
func (tdb *TelemDb) AddEvents(events ...skylab.BusEvent) (int64, error) {
// QueryIds constructs a CAN Id filter for one or more distinct Ids.
// For a range of ids, use QueryIdRange(start, stop uint32)
func QueryIds(ids ...uint32) QueryIdString {
// FIXME: zero elements case?
var idsString []string
for _, id := range ids {
idsString = append(idsString, strconv.FormatUint(uint64(id), 10))
}
return QueryIdString("id IN (" + strings.Join(idsString, ",") + ")")
return tdb.AddEventsCtx(context.Background(), events...)
}
func QueryIdsInv(ids ...uint32) QueryIdString {
// LimitOffsetModifier is a modifier to support pagniation.
type LimitOffsetModifier struct {
Limit int
Offset int
}
// QueryIdRange selects all IDs between start and end, *inclusive*.
// This function is preferred over a generated list of IDs.
func QueryIdRange(start, end uint32) QueryIdString {
startString := strconv.FormatUint(uint64(start), 10)
endString := strconv.FormatUint(uint64(end), 10)
return QueryIdString("id BETWEEN " + startString + " AND " + endString)
}
// QueryIdRangeInv removes all IDs between start and end from the results.
// See QueryIdRange for more details.
func QueryIdRangeInv(start, end uint32) QueryIdString {
return QueryIdString("NOT ") + QueryIdRange(start, end)
}
type QueryTimestampString string
// QueryDuration takes a start and end time and filters where the packets are between that time range.
func QueryDuration(start, end time.Time) QueryTimestampString {
// the time in the database is a float, we have a time.Time so use unixNano() / 1e9 to float it.
startString := strconv.FormatFloat(float64(start.UnixNano())/1e9, 'f', -1, 64)
endString := strconv.FormatFloat(float64(start.UnixNano())/1e9, 'f', -1, 64)
return QueryTimestampString("ts BETWEEN " + startString + " AND " + endString)
}
type QueryNameString string
func QueryNames(names ...string) QueryNameString
func QueryNamesInv(names ...string) QueryNameString
// Describes the parameters for an event query
type EventsQuery struct {
Ids []QueryIdString // Ids contains a list of CAN ID filters that are OR'd together.
Times []QueryTimestampString
Names []QueryNameString
Limit uint // max number of results.
}
// GetEvents is the mechanism to request underlying event data.
// it takes functions (which are defined in db.go) that modify the query,
// and then return the results.
func (tdb *TelemDb) GetEvents(q *EventsQuery) []skylab.BusEvent {
// if function is inverse, AND and OR are switched.
// Demorgan's
// how to know if function is inverted???
func (l *LimitOffsetModifier) ModifyStatement(sb *strings.Builder) error {
clause := fmt.Sprintf(" LIMIT %d OFFSET %d", l.Limit, l.Offset)
sb.WriteString(clause)
return nil
}
// BusEventFilter is a filter for bus events.
type BusEventFilter struct {
Names []string // The name(s) of packets to filter for
StartTime time.Time // Starting time range. All packets >= StartTime
EndTime time.Time // Ending time range. All packets <= EndTime
Indexes []int // The specific index of the packets to index.
}
// now we can optionally add a limit.
func (tdb *TelemDb) GetPackets(ctx context.Context, filter BusEventFilter, lim *LimitOffsetModifier) ([]skylab.BusEvent, error) {
// construct a simple
var whereFrags = make([]string, 0)
// if we're filtering by names, add a where clause for it.
if len(filter.Names) > 0 {
// we have to quote our individual names
names := strings.Join(filter.Names, `", "`)
qString := fmt.Sprintf(`name IN ("%s")`, names)
whereFrags = append(whereFrags, qString)
}
// TODO: identify if we need a special case for both time ranges
// using BETWEEN since apparenlty that can be better?
// next, check if we have a start/end time, add constraints
if !filter.EndTime.IsZero() {
qString := fmt.Sprintf("ts <= %d", filter.EndTime.UnixMilli())
whereFrags = append(whereFrags, qString)
}
if !filter.StartTime.IsZero() {
// we have an end range
qString := fmt.Sprintf("ts >= %d", filter.StartTime.UnixMilli())
whereFrags = append(whereFrags, qString)
}
if len(filter.Indexes) > 0 {
s := make([]string, 0)
for _, idx := range filter.Indexes {
s = append(s, fmt.Sprint(idx))
}
idxs := strings.Join(s, ", ")
qString := fmt.Sprintf(`idx in (%s)`, idxs)
whereFrags = append(whereFrags, qString)
}
sb := strings.Builder{}
sb.WriteString(`SELECT ts, name, data from "bus_events"`)
// construct the full statement.
if len(whereFrags) > 0 {
// use the where clauses.
sb.WriteString(" WHERE ")
sb.WriteString(strings.Join(whereFrags, " AND "))
}
sb.WriteString(" ORDER BY ts DESC")
// Augment our data further if there's i.e a limit modifier.
// TODO: factor this out maybe?
if lim != nil {
lim.ModifyStatement(&sb)
}
rows, err := tdb.db.QueryxContext(ctx, sb.String())
if err != nil {
return nil, err
}
defer rows.Close()
var events = make([]skylab.BusEvent, 0, 10)
for rows.Next() {
var ev skylab.RawJsonEvent
err := rows.Scan(&ev.Timestamp, &ev.Name, (*[]byte)(&ev.Data))
if err != nil {
return nil, err
}
BusEv := skylab.BusEvent{
Timestamp: time.UnixMilli(int64(ev.Timestamp)),
Name: ev.Name,
}
BusEv.Data, err = skylab.FromJson(ev.Name, ev.Data)
if err != nil {
return events, nil
}
events = append(events, BusEv)
}
err = rows.Err()
return events, err
}
// We now need a different use-case: we would like to extract a value from
// a specific packet.
// Datum is a single measurement - it is more granular than a packet.
// the classic example is bms_measurement.current
type Datum struct {
Timestamp time.Time `db:"timestamp" json:"ts"`
Value any `db:"val" json:"val"`
}
// GetValues queries the database for values in a given time range.
// A value is a specific data point. For example, bms_measurement.current
// would be a value.
func (tdb *TelemDb) GetValues(ctx context.Context, filter BusEventFilter,
field string, lim *LimitOffsetModifier) ([]Datum, error) {
// this fragment uses json_extract from sqlite to get a single
// nested value.
sb := strings.Builder{}
sb.WriteString(`SELECT ts as timestamp, json_extract(data, '$.' || ?) as val FROM bus_events WHERE `)
if len(filter.Names) != 1 {
return nil, errors.New("invalid number of names")
}
whereFrags := []string{"name is ?"}
if !filter.StartTime.IsZero() {
qString := fmt.Sprintf("ts >= %d", filter.StartTime.UnixMilli())
whereFrags = append(whereFrags, qString)
}
if !filter.EndTime.IsZero() {
qString := fmt.Sprintf("ts <= %d", filter.EndTime.UnixMilli())
whereFrags = append(whereFrags, qString)
}
if len(filter.Indexes) > 0 {
s := make([]string, 0)
for _, idx := range filter.Indexes {
s = append(s, fmt.Sprint(idx))
}
idxs := strings.Join(s, ", ")
qString := fmt.Sprintf(`idx in (%s)`, idxs)
whereFrags = append(whereFrags, qString)
}
// join qstrings with AND
sb.WriteString(strings.Join(whereFrags, " AND "))
sb.WriteString(" ORDER BY ts DESC")
if lim != nil {
lim.ModifyStatement(&sb)
}
rows, err := tdb.db.QueryxContext(ctx, sb.String(), field, filter.Names[0])
if err != nil {
return nil, err
}
defer rows.Close()
data := make([]Datum, 0, 10)
for rows.Next() {
var d Datum = Datum{}
var ts int64
err = rows.Scan(&ts, &d.Value)
d.Timestamp = time.UnixMilli(ts)
if err != nil {
fmt.Print(err)
return data, err
}
data = append(data, d)
}
fmt.Print(rows.Err())
return data, nil
}
// AddDocument inserts a new document to the store if it is unique and valid.
func (tdb *TelemDb) AddDocument(ctx context.Context, obj json.RawMessage) error {
const insertStmt = `INSERT INTO openmct_objects (data) VALUES (json(?))`
_, err := tdb.db.ExecContext(ctx, insertStmt, obj)
return err
}
// DocumentNotFoundError is when the underlying document cannot be found.
type DocumentNotFoundError string
func (e DocumentNotFoundError) Error() string {
return fmt.Sprintf("document could not find key: %s", string(e))
}
// UpdateDocument replaces the entire contents of a document matching
// the given key. Note that the key is derived from the document,
// and no checks are done to ensure that the new key is the same.
func (tdb *TelemDb) UpdateDocument(ctx context.Context, key string,
obj json.RawMessage) error {
const upd = `UPDATE openmct_objects SET data = json(?) WHERE key IS ?`
r, err := tdb.db.ExecContext(ctx, upd, obj, key)
if err != nil {
return err
}
n, err := r.RowsAffected()
if err != nil {
return err
}
if n != 1 {
return DocumentNotFoundError(key)
}
return err
}
// GetDocument gets the document matching the corresponding key.
func (tdb *TelemDb) GetDocument(ctx context.Context, key string) (json.RawMessage, error) {
const get = `SELECT data FROM openmct_objects WHERE key IS ?`
row := tdb.db.QueryRowxContext(ctx, get, key)
var res []byte // VERY important, json.RawMessage won't work here
// since the scan function does not look at underlying types.
row.Scan(&res)
if len(res) == 0 {
return nil, DocumentNotFoundError(key)
}
return res, nil
}
// GetAllDocuments returns all documents in the database.
func (tdb *TelemDb) GetAllDocuments(ctx context.Context) ([]json.RawMessage, error) {
const getall = `SELECT data FROM openmct_objects`
rows, err := tdb.db.QueryxContext(ctx, getall)
if err != nil {
return nil, err
}
defer rows.Close()
docs := make([]json.RawMessage, 0)
for rows.Next() {
var j json.RawMessage
if err := rows.Scan(&j); err != nil {
return nil, err
}
docs = append(docs, j)
}
return docs, rows.Err()
}
// DeleteDocument removes a document from the store, or errors
// if it does not exist.
func (tdb *TelemDb) DeleteDocument(ctx context.Context, key string) error {
const del = `DELETE FROM openmct_objects WHERE key IS ?`
res, err := tdb.db.ExecContext(ctx, del, key)
if err != nil {
return err
}
n, err := res.RowsAffected()
if err != nil {
return err
}
if n != 1 {
return DocumentNotFoundError(key)
}
return err
}
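A rough end-to-end sketch of the document store, assuming package gotelem and an open *TelemDb; the document shape mirrors MockDocument in the tests below, where the key is read from identifier.key:

func exampleDocumentStore(ctx context.Context, tdb *TelemDb) error {
	doc := json.RawMessage(`{"identifier":{"key":"demo"},"name":"Demo Object"}`)
	if err := tdb.AddDocument(ctx, doc); err != nil {
		return err
	}
	// UpdateDocument replaces the whole document stored under the key.
	updated := json.RawMessage(`{"identifier":{"key":"demo"},"name":"Renamed"}`)
	if err := tdb.UpdateDocument(ctx, "demo", updated); err != nil {
		return err
	}
	if _, err := tdb.GetDocument(ctx, "demo"); err != nil {
		return err
	}
	return tdb.DeleteDocument(ctx, "demo")
}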

325
db_test.go Normal file
View file

@ -0,0 +1,325 @@
package gotelem
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"reflect"
"strings"
"testing"
"time"
"github.com/kschamplin/gotelem/internal/logparsers"
"github.com/kschamplin/gotelem/skylab"
)
// helper func to get a random bus event with random data.
func GetRandomBusEvent() skylab.BusEvent {
data := skylab.WsrVelocity{
MotorVelocity: 1.0,
VehicleVelocity: 4.0,
}
ev := skylab.BusEvent{
Timestamp: time.Now(),
Data: &data,
}
return ev
}
// exampleData is a telemetry log data snippet that
// we use to seed the database.
const exampleData = `1698013005.164 1455ED8FDBDFF4FC3BD
1698013005.168 1460000000000000000
1698013005.170 1470000000000000000
1698013005.172 1610000000000000000
1698013005.175 1210000000000000000
1698013005.177 157FFFFC74200000000
1698013005.181 1030000000000000000
1698013005.184 1430000000000000000
1698013005.187 04020D281405EA8FB41
1698013005.210 0413BDF81406AF70042
1698013005.212 042569F81408EF0FF41
1698013005.215 04358A8814041060242
1698013005.219 04481958140D2A40342
1698013005.221 0452DB2814042990442
1698013005.224 047AF948140C031FD41
1698013005.226 04B27A081401ACD0B42
1698013005.229 04DCEAA81403C8C0A42
1698013005.283 04E0378814024580142
1698013005.286 04F97908140BFBC0142
1698013005.289 050098A81402F0F0A42
1698013005.293 051E6AE81402AF20842
1698013005.297 0521AC081403A970742
1698013005.300 0535BB181403CEB0542
1698013005.304 054ECC0814088FE0142
1698013005.307 0554ED181401F44F341
1698013005.309 05726E48140D42BEB41
1698013005.312 059EFC98140EC400142
`
// MakeMockDatabase creates a new dummy database.
func MakeMockDatabase(name string) *TelemDb {
fstring := fmt.Sprintf("file:%s?mode=memory&cache=shared", name)
tdb, err := OpenRawDb(fstring)
if err != nil {
panic(err)
}
return tdb
}
func SeedMockDatabase(tdb *TelemDb) {
// seed the database now.
scanner := bufio.NewScanner(strings.NewReader(exampleData))
for scanner.Scan() {
str := scanner.Text()
bev, err := logparsers.ParsersMap["telem"](str)
if err != nil {
panic(err)
}
_, err = tdb.AddEvents(bev)
if err != nil {
panic(err)
}
}
}
func GetSeedEvents() []skylab.BusEvent {
evs := make([]skylab.BusEvent, 0)
scanner := bufio.NewScanner(strings.NewReader(exampleData))
for scanner.Scan() {
str := scanner.Text()
bev, err := logparsers.ParsersMap["telem"](str)
if err != nil {
panic(err)
}
evs = append(evs, bev)
}
return evs
}
func TestTelemDb(t *testing.T) {
t.Run("test opening database", func(t *testing.T) {
// create our mock
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
})
t.Run("test inserting bus event", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
type args struct {
events []skylab.BusEvent
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "add no packet",
args: args{
events: []skylab.BusEvent{},
},
wantErr: false,
},
{
name: "add single packet",
args: args{
events: []skylab.BusEvent{GetRandomBusEvent()},
},
wantErr: false,
},
{
name: "add multiple packet",
args: args{
events: []skylab.BusEvent{GetRandomBusEvent(), GetRandomBusEvent()},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if _, err := tdb.AddEvents(tt.args.events...); (err != nil) != tt.wantErr {
t.Errorf("TelemDb.AddEvents() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
})
t.Run("test getting packets", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
SeedMockDatabase(tdb)
ctx := context.Background()
f := BusEventFilter{}
limitMod := &LimitOffsetModifier{Limit: 1}
pkt, err := tdb.GetPackets(ctx, f, limitMod)
if err != nil {
t.Fatalf("error getting packets: %v", err)
}
if len(pkt) != 1 {
t.Fatalf("expected exactly one response, got %d", len(pkt))
}
// todo - validate what this should be.
})
t.Run("test read-write packet", func(t *testing.T) {
})
}
func MockDocument(key string) json.RawMessage {
var v = make(map[string]interface{})
v["identifier"] = map[string]string{"key": key}
v["randomdata"] = rand.Int()
res, err := json.Marshal(v)
if err != nil {
panic(err)
}
return res
}
func TestDbDocuments(t *testing.T) {
t.Run("test inserting a document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
err := tdb.AddDocument(ctx, MockDocument("hi"))
if err != nil {
t.Fatalf("AddDocument expected no error, got err=%v", err)
}
})
t.Run("test inserting duplicate documents", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
doc := MockDocument("hi")
err := tdb.AddDocument(ctx, doc)
if err != nil {
t.Fatalf("AddDocument expected no error, got err=%v", err)
}
err = tdb.AddDocument(ctx, doc)
if err == nil {
t.Fatalf("AddDocument expected duplicate key error, got nil")
}
})
t.Run("test inserting bad document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
var badDoc = map[string]string{"bad": "duh"}
msg, err := json.Marshal(badDoc)
if err != nil {
panic(err)
}
err = tdb.AddDocument(ctx, msg)
if err == nil {
t.Fatalf("AddDocument expected error, got nil")
}
})
t.Run("test getting document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
doc := MockDocument("hi")
err := tdb.AddDocument(ctx, doc)
if err != nil {
t.Fatalf("AddDocument expected no error, got err=%v", err)
}
res, err := tdb.GetDocument(ctx, "hi")
if err != nil {
t.Fatalf("GetDocument expected no error, got err=%v", err)
}
if !reflect.DeepEqual(res, doc) {
t.Fatalf("GetDocument did not return identical document")
}
})
t.Run("test getting nonexistent document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
res, err := tdb.GetDocument(ctx, "hi")
if err == nil || !errors.Is(err, DocumentNotFoundError("hi")) {
t.Fatalf("GetDocument expected DocumentNotFoundError, got %v", err)
}
if res != nil {
t.Fatalf("GetDocument expected nil result, got %v", res)
}
})
t.Run("test update document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
doc1 := MockDocument("hi")
doc2 := MockDocument("hi") // same key, we want to update.
tdb.AddDocument(ctx, doc1)
err := tdb.UpdateDocument(ctx, "hi", doc2)
if err != nil {
t.Fatalf("UpdateDocument expected no error, got err=%v", err)
}
// compare.
res, _ := tdb.GetDocument(ctx, "hi")
if !reflect.DeepEqual(res, doc2) {
t.Fatalf("UpdateDocument did not return new doc, got %s", res)
}
})
t.Run("test update nonexistent document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
doc := MockDocument("hi")
err := tdb.UpdateDocument(ctx, "badKey", doc)
if err == nil {
t.Fatalf("UpdateDocument expected error, got nil")
}
})
t.Run("test delete document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
doc := MockDocument("hi")
tdb.AddDocument(ctx, doc)
err := tdb.DeleteDocument(ctx, "hi")
if err != nil {
t.Fatalf("DeleteDocument expected no error, got err=%v", err)
}
})
t.Run("test delete nonexistent document", func(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
tdb.db.Ping()
ctx := context.Background()
err := tdb.DeleteDocument(ctx, "hi")
if !errors.Is(err, DocumentNotFoundError("hi")) {
t.Fatalf("DeleteDocument expected not found, got err=%v", err)
}
})
}

View file

@ -1,26 +0,0 @@
// Code generated by "stringer -output=frame_kind.go -type Kind"; DO NOT EDIT.
package gotelem
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[CanSFFFrame-0]
_ = x[CanEFFFrame-1]
_ = x[CanRTRFrame-2]
_ = x[CanErrFrame-3]
}
const _Kind_name = "CanSFFFrameCanEFFFrameCanRTRFrameCanErrFrame"
var _Kind_index = [...]uint8{0, 11, 22, 33, 44}
func (i Kind) String() string {
if i >= Kind(len(_Kind_index)-1) {
return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
}

15
go.mod
View file

@ -1,26 +1,23 @@
module github.com/kschamplin/gotelem
go 1.20
go 1.21
require (
github.com/go-chi/chi/v5 v5.0.8
github.com/google/uuid v1.3.0
github.com/go-chi/chi/v5 v5.0.12
github.com/google/uuid v1.6.0
github.com/jmoiron/sqlx v1.3.5
github.com/mattn/go-sqlite3 v1.14.16
github.com/tinylib/msgp v1.1.8
github.com/mattn/go-sqlite3 v1.14.22
github.com/urfave/cli/v2 v2.25.1
go.bug.st/serial v1.5.0
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
golang.org/x/sync v0.1.0
golang.org/x/sys v0.7.0
gopkg.in/yaml.v3 v3.0.1
nhooyr.io/websocket v1.8.7
nhooyr.io/websocket v1.8.10
)
require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/creack/goselect v0.1.2 // indirect
github.com/klauspost/compress v1.10.3 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.8.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect

102
go.sum
View file

@ -5,126 +5,42 @@ github.com/creack/goselect v0.1.2/go.mod h1:a/NhLweNvqIYMuxcMOuWY516Cimucms3DglD
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/urfave/cli/v2 v2.25.1 h1:zw8dSP7ghX0Gmm8vugrs6q9Ku0wzweqPyshy+syu9Gw=
github.com/urfave/cli/v2 v2.25.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.bug.st/serial v1.5.0 h1:ThuUkHpOEmCVXxGEfpoExjQCS2WBVV4ZcUKVYInM9T4=
go.bug.st/serial v1.5.0/go.mod h1:UABfsluHAiaNI+La2iESysd9Vetq7VRdpxvjx7CmmOE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q=
nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c=

284
http.go
View file

@ -6,33 +6,95 @@ import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
"log/slog"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/google/uuid"
"github.com/kschamplin/gotelem/skylab"
"golang.org/x/exp/slog"
"nhooyr.io/websocket"
"nhooyr.io/websocket/wsjson"
)
type slogHttpLogger struct {
slog.Logger
}
func extractBusEventFilter(r *http.Request) (*BusEventFilter, error) {
bef := &BusEventFilter{}
v := r.URL.Query()
if v.Has("name") {
bef.Names = v["name"]
}
if el := v.Get("start"); el != "" {
// parse the start time query.
t, err := time.Parse(time.RFC3339, el)
if err != nil {
return bef, err
}
bef.StartTime = t
}
if el := v.Get("end"); el != "" {
// parse the end time query.
t, err := time.Parse(time.RFC3339, el)
if err != nil {
return bef, err
}
bef.EndTime = t
}
if v.Has("idx") {
bef.Indexes = make([]int, 0)
for _, strIdx := range v["idx"] {
idx, err := strconv.ParseInt(strIdx, 10, 32)
if err != nil {
return nil, err
}
bef.Indexes = append(bef.Indexes, int(idx))
}
}
return bef, nil
}
func TelemRouter(log *slog.Logger, broker *JBroker, db *TelemDb) http.Handler {
func extractLimitModifier(r *http.Request) (*LimitOffsetModifier, error) {
lim := &LimitOffsetModifier{}
v := r.URL.Query()
if el := v.Get("limit"); el != "" {
val, err := strconv.ParseInt(el, 10, 64)
if err != nil {
return nil, err
}
lim.Limit = int(val)
// next, we check if we have an offset.
// we only check offset if we also have a limit.
// offset without limit isn't valid and is ignored.
if el := v.Get("offset"); el != "" {
val, err := strconv.ParseInt(el, 10, 64)
if err != nil {
return nil, err
}
lim.Offset = int(val)
}
return lim, nil
}
// we use the nil case to indicate that no limit was provided.
return nil, nil
}
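For reference, a small sketch of how the two extractors read a query string; it mirrors the table-driven tests in http_test.go below and additionally needs net/http/httptest:

func exampleExtract() {
	r := httptest.NewRequest(http.MethodGet,
		"http://localhost/api/v1/packets?name=bms_measurement&limit=10&offset=20", nil)
	bef, _ := extractBusEventFilter(r) // bef.Names == []string{"bms_measurement"}
	lim, _ := extractLimitModifier(r)  // lim.Limit == 10, lim.Offset == 20
	_, _ = bef, lim
}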
type RouterMod func(chi.Router)
var RouterMods = []RouterMod{}
func TelemRouter(log *slog.Logger, broker *Broker, db *TelemDb) http.Handler {
r := chi.NewRouter()
r.Use(middleware.RequestID)
r.Use(middleware.RealIP)
r.Use(middleware.Logger) // TODO: integrate with slog
r.Use(middleware.Logger) // TODO: integrate with slog instead of go default logger.
r.Use(middleware.Recoverer)
r.Get("/schema", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
// return the spicy json response.
w.WriteHeader(http.StatusOK)
w.Write([]byte(skylab.SkylabDefinitions))
})
r.Use(middleware.SetHeader("Access-Control-Allow-Origin", "*"))
// heartbeat request.
r.Get("/ping", func(w http.ResponseWriter, r *http.Request) {
@ -41,74 +103,75 @@ func TelemRouter(log *slog.Logger, broker *JBroker, db *TelemDb) http.Handler {
r.Mount("/api/v1", apiV1(broker, db))
for _, mod := range RouterMods {
mod(r)
}
// To future residents - you can add new API calls/systems in /api/v2
// Don't break anything in api v1! keep legacy code working!
// serve up a local status page.
return r
}
// define API version 1 routes.
func apiV1(broker *JBroker, db *TelemDb) chi.Router {
func apiV1(broker *Broker, tdb *TelemDb) chi.Router {
r := chi.NewRouter()
// this API only accepts JSON.
r.Use(middleware.AllowContentType("application/json"))
// no caching - always get the latest data.
// TODO: add a smart short expiry cache for queries that take a while.
r.Use(middleware.NoCache)
r.Get("/schema", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
// return the spicy json response.
w.WriteHeader(http.StatusOK)
// return the Skylab JSON definitions
w.Write([]byte(skylab.SkylabDefinitions))
})
r.Route("/packets", func(r chi.Router) {
r.Get("/subscribe", apiV1PacketSubscribe(broker, db))
r.Get("/subscribe", apiV1PacketSubscribe(broker))
r.Post("/", func(w http.ResponseWriter, r *http.Request) {
var pkgs []skylab.BusEvent
var pkts []skylab.BusEvent
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&pkgs); err != nil {
w.WriteHeader(http.StatusTeapot)
if err := decoder.Decode(&pkts); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// we have a list of packets now. let's commit them.
db.AddEvents(pkgs...)
return
conn_id := r.RemoteAddr + uuid.NewString()
for _, pkt := range pkts {
broker.Publish(conn_id, pkt)
}
tdb.AddEventsCtx(r.Context(), pkts...)
})
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
// this should use query params to return a list of packets.
// general packet history get.
r.Get("/", apiV1GetPackets(tdb))
})
// this is to get packets by a name.
r.Get("/{name:[a-z_]+}", func(w http.ResponseWriter, r *http.Request) {
})
// this is to get a single field from a packet.
r.Get("/{name:[a-z_]+}/{field:[a-z_]+}", apiV1GetValues(tdb))
})
// OpenMCT domain object storage. Basically an arbitrary JSON document store
r.Route("/openmct", apiV1OpenMCTStore(tdb))
// records are driving segments/runs.
r.Route("/records", func(r chi.Router) {
r.Get("/") // get all runs
r.Get("/active") // get current run (no end time)
r.Post("/") // create a new run (with note). Ends active run if any, and creates new active run (no end time)
r.Get("/{id}") // get details on a specific run
r.Put("/{id}") // update a specific run. Can only be used to add notes/metadata, and not to change time/id.
})
r.Get("/stats", func(w http.ResponseWriter, r *http.Request) {
}) // v1 api stats (calls, clients, xbee connected, meta health ok)
r.Get("/stats") // v1 api stats (calls, clients, xbee connected, meta health ok)
return r
}
// apiV1Subscriber is a websocket session for the v1 api.
type apiV1Subscriber struct {
idFilter []uint64 // list of Ids to subscribe to. If it's empty, subscribes to all.
}
func apiV1PacketSubscribe(broker *JBroker, db *TelemDb) http.HandlerFunc {
// this is a websocket stream.
func apiV1PacketSubscribe(broker *Broker) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
conn_id := r.RemoteAddr + uuid.New().String()
// pull filter from url query params.
bef, err := extractBusEventFilter(r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
// setup connection
conn_id := r.RemoteAddr + uuid.NewString()
sub, err := broker.Subscribe(conn_id)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
@ -116,38 +179,127 @@ func apiV1PacketSubscribe(broker *JBroker, db *TelemDb) http.HandlerFunc {
return
}
defer broker.Unsubscribe(conn_id)
// attempt to upgrade.
c, err := websocket.Accept(w, r, nil)
// setup websocket
c, err := websocket.Accept(w, r, &websocket.AcceptOptions{
InsecureSkipVerify: true,
})
if err != nil {
// TODO: is this the correct option?
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "error ws handshake: %s", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// TODO: use K/V with session token?
sess := &apiV1Subscriber{}
// closeread handles protocol/status messages,
// also handles clients closing the connection.
// we get a context to use from it.
ctx := c.CloseRead(r.Context())
for {
select {
case <-r.Context().Done():
case <-ctx.Done():
return
case msgIn := <-sub:
if len(sess.idFilter) == 0 {
// send it.
goto escapeFilter
// short circuit if there's no names - send everything
if len(bef.Names) == 0 {
wsjson.Write(r.Context(), c, msgIn)
}
for _, id := range sess.idFilter {
if id == msgIn.Id {
// otherwise, send it if it matches one of our names.
for _, name := range bef.Names {
if name == msgIn.Name {
// send it
wsjson.Write(ctx, c, msgIn)
break
}
}
escapeFilter:
return
}
}
}
}
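A client-side sketch of the subscribe stream, assuming the router is served at localhost:8080 (host and port are illustrative) and that each message decodes as a skylab.BusEvent:

func exampleSubscribe(ctx context.Context) error {
	c, _, err := websocket.Dial(ctx,
		"ws://localhost:8080/api/v1/packets/subscribe?name=bms_measurement", nil)
	if err != nil {
		return err
	}
	defer c.Close(websocket.StatusNormalClosure, "done")
	for {
		var ev skylab.BusEvent
		if err := wsjson.Read(ctx, c, &ev); err != nil {
			return err // also returned when either side closes the stream
		}
		fmt.Printf("%s %s\n", ev.Timestamp, ev.Name)
	}
}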
func apiV1GetPackets(tdb *TelemDb) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
// this should use http query params to return a list of packets.
bef, err := extractBusEventFilter(r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
lim, err := extractLimitModifier(r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var res []skylab.BusEvent
res, err = tdb.GetPackets(r.Context(), *bef, lim)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
b, err := json.Marshal(res)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Write(b)
}
}
// apiV1GetValues is a function that creates a handler for
// getting the specific value from a packet.
// this is useful for OpenMCT or other viewer APIs
func apiV1GetValues(db *TelemDb) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var err error
bef, err := extractBusEventFilter(r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
lim, err := extractLimitModifier(r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// get the URL parameters, these are guaranteed to exist.
name := chi.URLParam(r, "name")
field := chi.URLParam(r, "field")
// override the bus event filter name option
bef.Names = []string{name}
var res []Datum
// make the call, skip the limit modifier if it's nil.
res, err = db.GetValues(r.Context(), *bef, field, lim)
if err != nil {
// 500 server error:
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
b, err := json.Marshal(res)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Write(b)
}
}
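A matching HTTP-side sketch for the values endpoint; the host, packet name, and field name are illustrative:

func exampleFetchValues() ([]Datum, error) {
	resp, err := http.Get("http://localhost:8080/api/v1/packets/bms_measurement/current?limit=100")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var data []Datum
	if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
		return nil, err
	}
	return data, nil
}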
func apiV1OpenMCTStore(db *TelemDb) func(chi.Router) {
return func(r chi.Router) {
// key is a column on our json store, it's nested under identifier.key
r.Get("/{key}", func(w http.ResponseWriter, r *http.Request) {})
r.Put("/{key}", func(w http.ResponseWriter, r *http.Request) {})
r.Delete("/{key}", func(w http.ResponseWriter, r *http.Request) {})
// create a new object.
r.Post("/", func(w http.ResponseWriter, r *http.Request) {})
// subscribe to object updates.
r.Get("/subscribe", func(w http.ResponseWriter, r *http.Request) {})
}
}

217
http_test.go Normal file
View file

@ -0,0 +1,217 @@
package gotelem
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"testing"
"time"
"github.com/kschamplin/gotelem/skylab"
)
func Test_extractBusEventFilter(t *testing.T) {
makeReq := func(path string) *http.Request {
return httptest.NewRequest(http.MethodGet, path, nil)
}
tests := []struct {
name string
req *http.Request
want *BusEventFilter
wantErr bool
}{
{
name: "test no extractions",
req: makeReq("http://localhost/"),
want: &BusEventFilter{},
wantErr: false,
},
{
name: "test single name extract",
req: makeReq("http://localhost/?name=hi"),
want: &BusEventFilter{
Names: []string{"hi"},
},
wantErr: false,
},
{
name: "test multi name extract",
req: makeReq("http://localhost/?name=hi1&name=hi2"),
want: &BusEventFilter{
Names: []string{"hi1", "hi2"},
},
wantErr: false,
},
{
name: "test start time valid extract",
req: makeReq(fmt.Sprintf("http://localhost/?start=%s", url.QueryEscape(time.Unix(160000000, 0).Format(time.RFC3339)))),
want: &BusEventFilter{
StartTime: time.Unix(160000000, 0),
},
wantErr: false,
},
// {
// name: "test start time invalid extract",
// req: makeReq(fmt.Sprintf("http://localhost/?start=%s", url.QueryEscape("ajlaskdj"))),
// wantErr: true,
// },
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Logf("Testing URL %s", tt.req.URL.String())
got, err := extractBusEventFilter(tt.req)
if (err != nil) != tt.wantErr {
t.Errorf("extractBusEventFilter() error = %v, wantErr %v", err, tt.wantErr)
return
}
// we have to manually compare fields because timestamps can't be deeply compared.
if !reflect.DeepEqual(got.Names, tt.want.Names) {
t.Errorf("extractBusEventFilter() Names bad = %v, want %v", got.Names, tt.want.Names)
}
if !reflect.DeepEqual(got.Indexes, tt.want.Indexes) {
t.Errorf("extractBusEventFilter() Indexes bad = %v, want %v", got.Indexes, tt.want.Indexes)
}
if !got.StartTime.Equal(tt.want.StartTime) {
t.Errorf("extractBusEventFilter() StartTime mismatch = %v, want %v", got.StartTime, tt.want.StartTime)
}
if !got.EndTime.Equal(tt.want.EndTime) {
t.Errorf("extractBusEventFilter() EndTime mismatch = %v, want %v", got.EndTime, tt.want.EndTime)
}
})
}
}
func Test_extractLimitModifier(t *testing.T) {
makeReq := func(path string) *http.Request {
return httptest.NewRequest(http.MethodGet, path, nil)
}
tests := []struct {
name string
req *http.Request
want *LimitOffsetModifier
wantErr bool
}{
{
name: "test no limit/offset",
req: makeReq("http://localhost/"),
want: nil,
wantErr: false,
},
{
name: "test limit, no offset",
req: makeReq("http://localhost/?limit=10"),
want: &LimitOffsetModifier{Limit: 10},
wantErr: false,
},
{
name: "test limit and offset",
req: makeReq("http://localhost/?limit=100&offset=200"),
want: &LimitOffsetModifier{Limit: 100, Offset: 200},
wantErr: false,
},
{
name: "test only offset",
req: makeReq("http://localhost/?&offset=200"),
want: nil,
wantErr: false,
},
{
name: "test bad limit",
req: makeReq("http://localhost/?limit=aaaa"),
want: nil,
wantErr: true,
},
{
name: "test good limit, bad offset",
req: makeReq("http://localhost/?limit=10&offset=jjjj"),
want: nil,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := extractLimitModifier(tt.req)
if (err != nil) != tt.wantErr {
t.Errorf("extractLimitModifier() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("extractLimitModifier() = %v, want %v", got, tt.want)
}
})
}
}
func Test_ApiV1GetPackets(t *testing.T) {
tdb := MakeMockDatabase(t.Name())
SeedMockDatabase(tdb)
evs := GetSeedEvents()
handler := apiV1GetPackets(tdb)
tests := []struct{
name string
req *http.Request
statusCode int
expectedResults []skylab.BusEvent
}{
{
name: "get all packets test",
req: httptest.NewRequest(http.MethodGet, "http://localhost/", nil),
statusCode: http.StatusOK,
expectedResults: evs,
},
{
name: "filter name test",
req: httptest.NewRequest(http.MethodGet, "http://localhost/?name=bms_module", nil),
statusCode: http.StatusOK,
expectedResults: func() []skylab.BusEvent {
filtered := make([]skylab.BusEvent, 0)
for _, pkt := range evs {
if pkt.Name == "bms_module" {
filtered = append(filtered, pkt)
}
}
return filtered
}(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// construct the recorder
w := httptest.NewRecorder()
handler(w, tt.req)
resp := w.Result()
if tt.statusCode != resp.StatusCode {
t.Errorf("incorrect status code: expected %d got %d", tt.statusCode, resp.StatusCode)
}
decoder := json.NewDecoder(resp.Body)
var resultEvents []skylab.BusEvent
err := decoder.Decode(&resultEvents)
if err != nil {
t.Fatalf("could not parse JSON response: %v", err)
}
if len(resultEvents) != len(tt.expectedResults) {
t.Fatalf("response length did not match, want %d got %d", len(tt.expectedResults), len(resultEvents))
}
// Note: the API returns newest-first while the seed events are oldest-first, so we index the response from the end.
for idx := range tt.expectedResults {
expected := tt.expectedResults[idx]
actual := resultEvents[len(resultEvents) - 1 - idx]
if !expected.Equals(&actual) {
t.Errorf("packet did not match, want %v got %v", expected, actual)
}
}
})
}
}

View file

@ -1,3 +0,0 @@
package badger
// this file has a global internal K/V database used for sessions/stats/???

View file

@ -2,20 +2,26 @@
//
// It has a generic can Frame (packet), as well as a filter type.
// we also define standard interfaces for objects that can accept
// can frames. We can use this pattern to easily extend the capabiltiies of the program
// by writing "adapters" to various devices/formats (xbee, sqlite, network socket, socketcan)
package gotelem
// can frames. We can use this pattern to easily extend the capabilities of the program
// by writing "adapters" to various devices/formats (xbee, socketcan)
package can
type CanID struct {
Id uint32
Extended bool // since the id itself is not enough.
}
// Frame represents a protocol-agnostic CAN frame. The Id can be standard or extended,
// but if it is extended, the Kind should be EFF.
type Frame struct {
Id uint32
Id CanID
Data []byte
Kind Kind
}
// TODO: should this be replaced
type CANFrame interface {
Id() uint32
Id()
Data() []byte
Type() Kind
}
@ -26,15 +32,15 @@ type CANFrame interface {
type Kind uint8
const (
CanSFFFrame Kind = iota // Standard ID Frame
CanEFFFrame // Extended ID Frame
CanRTRFrame // Remote Transmission Request Frame
CanErrFrame // Error Frame
CanDataFrame Kind = iota // Standard ID Frame
CanRTRFrame // Remote Transmission Request Frame
CanErrFrame // Error Frame
)
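A quick construction sketch using the types above (the ID and payload are arbitrary; this mirrors how the log parsers build frames):

package main

import "github.com/kschamplin/gotelem/internal/can"

func main() {
	f := can.Frame{
		Id:   can.CanID{Id: 0x200, Extended: false}, // standard 11-bit ID
		Data: []byte{0x8D, 0x64, 0x35, 0x46},
		Kind: can.CanDataFrame,
	}
	_ = f
}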
// CanFilter is a basic filter for masking out data. It has an Inverted flag
// which indicates opposite behavior (reject all packets that match Id and Mask).
// The filter matches when (packet.Id & filter.Mask) == filter.Id
// TODO: is this needed anymore since we are using firmware based version instead?
type CanFilter struct {
Id uint32
Mask uint32

View file

@ -0,0 +1,194 @@
package logparsers
import (
"encoding/hex"
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/kschamplin/gotelem/internal/can"
"github.com/kschamplin/gotelem/skylab"
)
// A FormatError is an error when parsing a format. Typically we simply ignore
// these and move on, but they can optionally wrap another error that is fatal.
type FormatError struct {
msg string
err error
}
func (e *FormatError) Error() string {
if e.err != nil {
return fmt.Sprintf("%s:%s", e.msg, e.err.Error())
}
return e.msg
}
func (e *FormatError) Unwrap() error {
return e.err
}
// NewFormatError constructs a new format error.
func NewFormatError(msg string, err error) error {
return &FormatError{msg: msg, err: err}
}
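A short sketch of how a caller can treat a FormatError as "skip this line" while letting anything else bubble up; it additionally needs the errors package and uses one of the parsers defined below:

func parseOrSkip(line string) (skylab.BusEvent, bool) {
	ev, err := ParsersMap["telem"](line)
	if err == nil {
		return ev, true
	}
	var fe *FormatError
	if errors.As(err, &fe) {
		// malformed line: ignore it and move on, as the comment above suggests
		return skylab.BusEvent{}, false
	}
	// anything else is unexpected; surface it loudly
	panic(err)
}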
// CanFrameParser is a function that takes a string and returns
// a CAN frame and its timestamp. This is useful for common
// CAN dump formats.
type CanFrameParser func(string) (can.Frame, time.Time, error)
var candumpRegex = regexp.MustCompile(`^\((\d+)\.(\d{6})\) \w+ (\w+)#(\w+)$`)
func parseCanDumpLine(dumpLine string) (frame can.Frame, ts time.Time, err error) {
frame = can.Frame{}
ts = time.Unix(0, 0)
// dumpline looks like this:
// (1684538768.521889) can0 200#8D643546
// remove trailing newline/whitespaces
dumpLine = strings.TrimSpace(dumpLine)
m := candumpRegex.FindStringSubmatch(dumpLine)
if m == nil || len(m) != 5 {
err = NewFormatError("no regex match", nil)
return
}
var unixSeconds, unixMicros int64
unixSeconds, err = strconv.ParseInt(m[1], 10, 0)
if err != nil {
err = NewFormatError("failed to parse unix seconds", err)
return
}
unixMicros, err = strconv.ParseInt(m[2], 10, 0)
if err != nil {
err = NewFormatError("failed to parse unix micros", err)
return
}
id, err := strconv.ParseUint(m[3], 16, 64)
if err != nil {
err = NewFormatError("failed to parse id", err)
return
}
if (len(m[4]) % 2) != 0 {
err = NewFormatError("odd number of hex characters", nil)
return
}
rawData, err := hex.DecodeString(m[4])
if err != nil {
err = NewFormatError("failed to decode hex data", err)
return
}
// TODO: add extended id support, need an example log and a test.
frame.Id = can.CanID{Id: uint32(id), Extended: false}
frame.Data = rawData
frame.Kind = can.CanDataFrame
ts = time.Unix(unixSeconds, unixMicros*int64(time.Microsecond))
return
}
// A telem log line is of the form
// 1698180835.318 0619D80564080EBE241
// where the second field starts with 3 hex characters (12 bits) of CAN ID
// and the rest is the data payload.
// This regex does the parsing; we precompile it for speed.
var telemRegex = regexp.MustCompile(`^(\d+)\.(\d{3}) (\w{3})(\w+)$`)
func parseTelemLogLine(line string) (frame can.Frame, ts time.Time, err error) {
frame = can.Frame{}
ts = time.Unix(0, 0)
// strip trailing newline since we rely on it being gone
line = strings.TrimSpace(line)
a := telemRegex.FindStringSubmatch(line)
if a == nil || len(a) != 5 {
err = NewFormatError("no regex match", nil)
return
}
var unixSeconds, unixMillis int64
// note that a contains 5 elements, the first being the full match.
// so we start from the second element
unixSeconds, err = strconv.ParseInt(a[1], 10, 0)
if err != nil {
err = NewFormatError("failed to parse unix seconds", err)
return
}
unixMillis, err = strconv.ParseInt(a[2], 10, 0)
if err != nil {
err = NewFormatError("failed to parse unix millis", err)
return
}
ts = time.Unix(unixSeconds, unixMillis*int64(time.Millisecond))
// VALIDATION STEP: sometimes the data is corrupted but still parses,
// so we should check that the time is between 2017 and 2032
// (realistically we will not be using this software outside that window).
// TODO: add this check
id, err := strconv.ParseUint(a[3], 16, 16)
if err != nil {
err = NewFormatError("failed to parse id", err)
return
}
if len(a[4])%2 != 0 {
// odd hex chars, protect against a panic
err = NewFormatError("wrong amount of hex chars", nil)
return
}
rawData, err := hex.DecodeString(a[4])
if err != nil {
err = NewFormatError("failed to parse hex data", err)
return
}
frame = can.Frame{
Id: can.CanID{Id: uint32(id), Extended: false},
Data: rawData,
Kind: can.CanDataFrame,
}
return frame, ts, nil
}
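To make the format concrete, here is the example line from the comment above worked through by hand (the resulting packet name then depends on the Skylab definitions):

func exampleTelemLine() {
	// "1698180835.318 0619D80564080EBE241"
	//   1698180835        -> unix seconds
	//   318               -> milliseconds
	//   061               -> 12-bit CAN ID, 0x061
	//   9D80564080EBE241  -> 8 data bytes: 9D 80 56 40 80 EB E2 41
	frame, ts, err := parseTelemLogLine("1698180835.318 0619D80564080EBE241")
	_ = ts            // time.Unix(1698180835, 318*int64(time.Millisecond))
	_, _ = frame, err // frame.Id.Id == 0x61, len(frame.Data) == 8, err == nil
}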
// BusEventParser is a function that takes a string and returns a BusEvent.
type BusEventParser func(string) (skylab.BusEvent, error)
// parseSkylabifyLogLine parses a "skylabify" JSON log line into a BusEvent.
func parseSkylabifyLogLine(input string) (skylab.BusEvent, error) {
var b = skylab.BusEvent{}
err := json.Unmarshal([]byte(input), &b)
return b, err
}
// frameParseToBusEvent takes a line parser (that returns a can frame)
// and makes it return a busEvent instead.
func frameParseToBusEvent(fun CanFrameParser) BusEventParser {
return func(s string) (skylab.BusEvent, error) {
var b = skylab.BusEvent{}
frame, ts, err := fun(s)
if err != nil {
return b, err
}
b.Timestamp = ts
b.Data, err = skylab.FromCanFrame(frame)
if err != nil {
return b, err
}
b.Name = b.Data.String()
return b, nil
}
}
var ParsersMap = map[string]BusEventParser{
"telem": frameParseToBusEvent(parseTelemLogLine),
"candump": frameParseToBusEvent(parseCanDumpLine),
"json": parseSkylabifyLogLine,
}
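A sketch of picking a parser by format name and batch-parsing lines, much like the database seeding helper in db_test.go does; the "skip malformed lines" policy here is a choice, not a requirement:

func parseLines(format string, lines []string) ([]skylab.BusEvent, error) {
	parse, ok := ParsersMap[format] // "telem", "candump", or "json"
	if !ok {
		return nil, fmt.Errorf("unknown log format %q", format)
	}
	evs := make([]skylab.BusEvent, 0, len(lines))
	for _, line := range lines {
		ev, err := parse(line)
		if err != nil {
			continue // skip lines that fail to parse
		}
		evs = append(evs, ev)
	}
	return evs, nil
}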

View file

@ -0,0 +1,212 @@
package logparsers
import (
"reflect"
"testing"
"time"
"github.com/kschamplin/gotelem/internal/can"
"github.com/kschamplin/gotelem/skylab"
)
func Test_parseCanDumpLine(t *testing.T) {
type args struct {
dumpLine string
}
tests := []struct {
name string
args args
wantFrame can.Frame
wantTs time.Time
wantErr bool
}{
{
name: "test normal data",
args: args{dumpLine: "(1684538768.521889) can0 200#8D643546"},
wantFrame: can.Frame{
Id: can.CanID{Id: 0x200, Extended: false},
Data: []byte{0x8d, 0x64, 0x35, 0x46},
Kind: can.CanDataFrame,
},
wantTs: time.Unix(1684538768, 521889*int64(time.Microsecond)),
wantErr: false,
},
// TODO: add extended id test case
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotFrame, gotTs, err := parseCanDumpLine(tt.args.dumpLine)
if (err == nil) == tt.wantErr {
t.Errorf("parseCanDumpLine() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotFrame, tt.wantFrame) {
t.Errorf("parseCanDumpLine() gotFrame = %v, want %v", gotFrame, tt.wantFrame)
}
if !reflect.DeepEqual(gotTs, tt.wantTs) {
t.Errorf("parseCanDumpLine() gotTs = %v, want %v", gotTs, tt.wantTs)
}
})
}
}
func Test_parseCanDumpLine_errors(t *testing.T) {
// this test tries a bunch of failure cases to ensure that they are caught and not panicking.
tests := []struct {
name string
input string
}{
{
name: "garbage input",
input: "hoiseorhijkl",
},
{
name: "bad data length",
// odd number of hex data nibbles
input: "(1684538768.521889) can0 200#8D64354",
},
{
name: "invalid hex",
// J is not valid hex.
input: "(1684538768.521889) can0 200#8D64354J",
},
{
name: "bad time",
// we destroy the time structure.
input: "(badtime.521889) can0 200#8D643546",
},
{
name: "utf8 corruption",
// we attempt to mess up the data with broken utf8
input: "(1684538768.521889) can0 200#8D6\xed\xa0\x8043546",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
f, ts, err := parseCanDumpLine(tt.input)
if err == nil {
t.Fatalf("parseCanDumpLine() expected error but instead got f = %v, ts = %v", f, ts)
}
})
}
}
func Test_parseTelemLogLine(t *testing.T) {
type args struct {
line string
}
tests := []struct {
name string
args args
wantFrame can.Frame
wantTs time.Time
wantErr bool
}{
{
name: "basic test",
args: args{line: "1698180835.318 0619D80564080EBE241"},
wantFrame: can.Frame{
Id: can.CanID{Id: 0x61, Extended: false},
Data: []byte{0x9D, 0x80, 0x56, 0x40, 0x80, 0xEB, 0xE2, 0x41},
Kind: can.CanDataFrame,
},
wantTs: time.Unix(1698180835, 318*int64(time.Millisecond)),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotFrame, gotTs, err := parseTelemLogLine(tt.args.line)
if (err != nil) != tt.wantErr {
t.Errorf("parseTelemLogLine() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotFrame, tt.wantFrame) {
t.Errorf("parseTelemLogLine() gotFrame = %v, want %v", gotFrame, tt.wantFrame)
}
if !reflect.DeepEqual(gotTs, tt.wantTs) {
t.Errorf("parseTelemLogLine() gotTs = %v, want %v", gotTs, tt.wantTs)
}
})
}
}
func Test_parseTelemLogLine_errors(t *testing.T) {
tests := []struct {
name string
input string
}{
{
name: "garbage input",
input: "ajl;ksdoifhge\xEB",
},
{
name: "bad data length",
input: "1698180835.318 0619D80564080EBE24",
},
{
name: "bad timestamp",
input: "99999999999999999999999999999999999999999999999.318 0619D80564080EBE24",
},
{
name: "invalid hex characters",
input: "1698180835.318 0619D805640X0EBE24",
},
{
name: "utf8 corruption",
input: "1698180835.318 0619\xed\xa0\x80fsadfD805640X0EBE24",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
f, ts, err := parseTelemLogLine(tt.input)
if err == nil {
t.Fatalf("parseTelemLogLine() expected error but instead got f = %v, ts = %v", f, ts)
}
})
}
}
func Test_parseSkylabifyLogLine(t *testing.T) {
type args struct {
input string
}
tests := []struct {
name string
args args
want skylab.BusEvent
wantErr bool
}{
{
name: "basic test",
args: args{
input: `{"ts":1685141873612,"id":259,"name":"wsl_velocity","data":{"motor_velocity":89.97547,"vehicle_velocity":2.38853}}`},
want: skylab.BusEvent{
Timestamp: time.UnixMilli(1685141873612),
Name: "wsl_velocity",
Data: &skylab.WslVelocity{
MotorVelocity: 89.97547,
VehicleVelocity: 2.38853,
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := parseSkylabifyLogLine(tt.args.input)
if (err != nil) != tt.wantErr {
t.Errorf("parseSkylabifyLogLine() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("parseSkylabifyLogLine() = %v, want %v", got, tt.want)
}
})
}
}

View file

@ -1,66 +0,0 @@
package middleware
import (
"net/http"
"time"
chi_middleware "github.com/go-chi/chi/v5/middleware"
"golang.org/x/exp/slog"
)
// Slogger is a slog-enabled logging middleware.
// It logs the start and end of the request, and logs info
// about the request itself, response status, and response time.
// Slogger returns a log handler that uses the given slog logger as the base.
func Slogger(sl *slog.Logger) func(next http.Handler) http.Handler {
logger := sl.WithGroup("http")
return func(next http.Handler) http.Handler {
// this triple-nested function is strange, but basically the Slogger() call makes a new middleware function (above)
// the middleware function returns a handler that calls the next handler in the chain(wrapping it)
fn := func(w http.ResponseWriter, r *http.Request) {
// wrap writer allows us to get info on the response from further handlers.
ww := chi_middleware.NewWrapResponseWriter(w, r.ProtoMajor)
t1 := time.Now()
// attrs is stored to allow for the helpers to add additional elements to the main record.
attrs := make([]slog.Attr, 0)
// This function runs at the end and adds all the response details to the attrs before logging them.
defer func() {
attrs = append(attrs, slog.Int("status_code", ww.Status()))
attrs = append(attrs, slog.Int("resp_size", ww.BytesWritten()))
attrs = append(attrs, slog.Duration("duration", time.Since(t1)))
attrs = append(attrs, slog.String("method", r.Method))
logger.LogAttrs(r.Context(), slog.LevelInfo, r.RequestURI, attrs...)
}()
// embed the logger and the attrs for later items in the chain.
next.ServeHTTP(ww, r)
}
return http.HandlerFunc(fn)
}
}
type slogKeyType int
const (
SloggerLogKey slogKeyType = iota
SloggerAttrsKey
)
func addSlogAttr(r *http.Request, attr slog.Attr) {
ctx := r.Context()
attrs, ok := ctx.Value(SloggerAttrsKey).([]slog.Attr)
if !ok {
return
}
attrs = append(attrs, attr)
}

138
migration.go Normal file
View file

@ -0,0 +1,138 @@
package gotelem
import (
"embed"
"errors"
"io"
"io/fs"
"path"
"regexp"
"sort"
"strconv"
)
// embed the migrations into applications so they can update databases.
//go:embed migrations/*
var migrationsFs embed.FS
var migrationRegex = regexp.MustCompile(`^([0-9]+)_(.*)_(down|up)\.sql$`)
type Migration struct {
Name string
Version uint
FileName string
}
type MigrationError struct {
}
// getMigrations returns a map of the available migrations, indexed by version number and direction. Version zero is unused.
func getMigrations(files fs.FS) map[int]map[string]Migration {
res := make(map[int]map[string]Migration) // version number -> direction -> migration.
fs.WalkDir(files, ".", func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return nil
}
m := migrationRegex.FindStringSubmatch(d.Name())
if len(m) != 4 {
panic("error parsing migration name")
}
migrationVer, _ := strconv.ParseInt(m[1], 10, 64)
mig := Migration{
Name: m[2],
Version: uint(migrationVer),
FileName: d.Name(),
}
var mMap map[string]Migration
mMap, ok := res[int(migrationVer)]
if !ok {
mMap = make(map[string]Migration)
}
mMap[m[3]] = mig
res[int(migrationVer)] = mMap
return nil
})
return res
}
func RunMigrations(tdb *TelemDb) (finalVer int, err error) {
currentVer, err := tdb.GetVersion()
if err != nil {
return
}
migrations := getMigrations(migrationsFs)
// get a sorted list of versions.
vers := make([]int, len(migrations))
i := 0
for k := range migrations {
vers[i] = k
i++
}
sort.Ints(vers)
expectedVer := 1
// check to make sure that there are no gaps (increasing by one each time)
for _, v := range vers {
if v != expectedVer {
err = errors.New("missing migration version in sequence")
return 0, err
// invalid
}
expectedVer = v + 1
}
finalVer = vers[len(vers)-1]
// now apply the mappings based on current ver.
tx, err := tdb.db.Begin()
if err != nil {
return 0, err
}
defer tx.Rollback()
for v := currentVer + 1; v <= finalVer; v++ {
// attempt to get the "up" migration.
mMap, ok := migrations[v]
if !ok {
err = errors.New("could not find migration for version")
return 0, err
}
upMigration, ok := mMap["up"]
if !ok {
err = errors.New("could not get up migration")
return 0, err
}
upFile, err := migrationsFs.Open(path.Join("migrations", upMigration.FileName))
if err != nil {
return 0, err
}
upStmt, err := io.ReadAll(upFile)
if err != nil {
return 0, err
}
// open the file name
// execute the file.
_, err = tx.Exec(string(upStmt))
if err != nil {
return 0, err
}
}
// if all the versions applied correctly, commit and update the PRAGMA user_version in the database.
if err = tx.Commit(); err != nil {
return 0, err
}
err = tdb.SetVersion(finalVer)
return
}

84
migration_test.go Normal file
View file

@ -0,0 +1,84 @@
package gotelem
import (
"embed"
"reflect"
"testing"
)
// import just the first and second migrations to ensure stability.
//
//go:embed migrations/1_*.sql
//go:embed migrations/2_*.sql
var testFs embed.FS
func Test_getMigrations(t *testing.T) {
tests := []struct {
name string
want map[int]map[string]Migration
}{
{
name: "main test",
want: map[int]map[string]Migration{
1: {
"up": Migration{
Name: "initial",
Version: 1,
FileName: "1_initial_up.sql",
},
"down": Migration{
Name: "initial",
Version: 1,
FileName: "1_initial_down.sql",
},
},
2: {
"up": Migration{
Name: "addl_tables",
Version: 2,
FileName: "2_addl_tables_up.sql",
},
"down": Migration{
Name: "addl_tables",
Version: 2,
FileName: "2_addl_tables_down.sql",
},
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := getMigrations(testFs); !reflect.DeepEqual(got, tt.want) {
t.Errorf("getMigrations() = %v, want %v", got, tt.want)
}
})
}
}
func TestRunMigrations(t *testing.T) {
type args struct {
tdb *TelemDb
}
tests := []struct {
name string
args args
wantFinalVer int
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotFinalVer, err := RunMigrations(tt.args.tdb)
if (err != nil) != tt.wantErr {
t.Errorf("RunMigrations() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotFinalVer != tt.wantFinalVer {
t.Errorf("RunMigrations() = %v, want %v", gotFinalVer, tt.wantFinalVer)
}
})
}
}

View file

@ -0,0 +1,3 @@
DROP TABLE "bus_events";
DROP INDEX "ids_timestamped";
DROP INDEX "times";

View file

@ -0,0 +1,14 @@
CREATE TABLE "bus_events" (
"ts" INTEGER NOT NULL, -- timestamp, unix milliseconds
"name" TEXT NOT NULL, -- name of base packet
"data" JSON NOT NULL CHECK(json_valid(data)) -- JSON object describing the data, including index if any
);
CREATE INDEX "ids_timestamped" ON "bus_events" (
"name",
"ts" DESC
);
CREATE INDEX "times" ON "bus_events" (
"ts" DESC
);

View file

@ -0,0 +1 @@
DROP TABLE "drive_records";

View file

@ -0,0 +1,9 @@
-- this table shows when we started/stopped logging.
CREATE TABLE "drive_records" (
"id" INTEGER NOT NULL UNIQUE, -- unique ID of the drive.
"start_time" INTEGER NOT NULL, -- when the drive started
"end_time" INTEGER, -- when it ended, or NULL if it's ongoing.
"note" TEXT, -- optional description of the segment/experiment/drive
PRIMARY KEY("id" AUTOINCREMENT),
CONSTRAINT "duration_valid" CHECK(end_time is null or start_time < end_time)
);

View file

@ -0,0 +1 @@
DROP TABLE "weather_station_logs";

View file

@ -0,0 +1,6 @@
CREATE TABLE "weather_station_logs" (
"ts" INTEGER NOT NULL,
"wind_speed" REAL,
"ground speed" REAL,
"heading" REAL
);

View file

@ -0,0 +1 @@
DROP TABLE "import_log";

View file

@ -0,0 +1,7 @@
CREATE TABLE "import_log" (
"filename" TEXT NOT NULL,
"date" TIMESTAMP NOT NULL,
"count" INTEGER NOT NULL,
"start_time" INTEGER NOT NULL,
"end_time" INTEGER NOT NULL
);

View file

@ -0,0 +1,2 @@
DROP TABLE "packet_definitions";
DROP TABLE "field_definitions";

View file

@ -0,0 +1,13 @@
CREATE TABLE "packet_definitions" (
"name" TEXT NOT NULL,
"description" TEXT,
"id" INTEGER NOT NULL
);
CREATE TABLE "field_definitions" (
"name" TEXT NOT NULL,
"subname" TEXT, -- if the data type is a bitfield, we can use subname to identify the bit.
"packet_name" TEXT NOT NULL,
"type" TEXT NOT NULL,
FOREIGN KEY("packet_name") REFERENCES packet_definitions(name)
);

View file

@ -0,0 +1 @@
ALTER TABLE "bus_events" DROP COLUMN idx;

View file

@ -0,0 +1 @@
ALTER TABLE "bus_events" ADD COLUMN idx GENERATED ALWAYS AS (json_extract(data, '$.idx')) VIRTUAL;

View file

@ -0,0 +1,2 @@
DROP TABLE openmct_objects;
DROP INDEX openmct_key;

View file

@ -0,0 +1,6 @@
CREATE TABLE openmct_objects (
data TEXT,
key TEXT GENERATED ALWAYS AS (json_extract(data, '$.identifier.key')) VIRTUAL UNIQUE NOT NULL
);
-- fast key-lookup
CREATE INDEX openmct_key on openmct_objects(key);

View file

@ -1,344 +0,0 @@
/*
mprpc is a simple bidirectional RPC library using the MessagePack-RPC spec.
It fully implements the spec and additionally provides Go error handling by
converting the error to a standard format for other clients.
mprpc does not have a typical server/client designation - both use "handlers",
which expose methods to be called over the network. A "client" would be an
RPCConn which doesn't expose any services, and a "server" would be an RPCConn
that doesn't make any `Call`s to the other side.
This lack of discrete server and client enables mprpc to implement a basic
"streaming" architecture on top of the MessagePack-RPC spec, which does not
include streaming primitives. Instead, we can provide simple "service handlers"
as a callback/destination for streaming data.
For example, a "client" could subscribe to events from the "server", by
providing a callback service to point events to. Then, the "server" would
Notify() the callback service with the new event as an argument every time it
occurred. While this may be less optimal than protocol-level streaming, it is
far simpler.
# Generic Helper Functions
The idiomatic way to use mprpc is to use the generic functions that are provided
as helpers. They allow the programmer to easily wrap existing functions in a
closure that automatically encodes and decodes the parameters and results to
their MessagePack representations. See the Make* generic functions for more
information.
// Assume myParam and myResult are MessagePack-enabled structs.
// Use `msgp` to generate the required functions for them.
// this is our plain function - we can call it locally to test.
func myPlainFunction(p myParam) (r myResult, err error)
// wrapped is a ServiceFunc that can be passed to rpcConn.RegisterHandler
wrapped := MakeService(myPlainFunction)
The generic functions allow for flexibility and elegant code while still keeping
the underlying implementation reflect-free. For more complex functions (i.e.
multiple parameters or return types), a second layer of indirection can be used.
There is also a `MakeCaller` function that can make a stub function that handles
encoding the arguments and decoding the response for a remote procedure.
*/
package mprpc
import (
"errors"
"io"
"github.com/tinylib/msgp/msgp"
"golang.org/x/exp/slog"
)
// ServiceFunc is a RPC service handler.
// It can be created manually, or by using the generic MakeService function on a
//
//	func(T) (R, error)
//
// function, where T and R are msgp-generated types.
type ServiceFunc func(params msgp.Raw) (res msgp.Raw, err error)
// RPCConn is a single RPC communication pair.
// It is used by both the
// "server" aka listener, and client.
type RPCConn struct {
// TODO: use io.readwritecloser?
rwc io.ReadWriteCloser
handlers map[string]ServiceFunc
ct rpcConnTrack
logger slog.Logger
}
// NewRPC creates a new RPC connection on top of an io.ReadWriteCloser. It can be
// pre-seeded with handlers.
func NewRPC(rwc io.ReadWriteCloser, logger *slog.Logger, initialHandlers map[string]ServiceFunc) (rpc *RPCConn, err error) {
rpc = &RPCConn{
rwc: rwc,
handlers: make(map[string]ServiceFunc),
ct: NewRPCConnTrack(),
}
if initialHandlers != nil {
for k, v := range initialHandlers {
rpc.handlers[k] = v
}
}
return
}
// Call initiates an RPC call to a remote method and returns the
// response, or the error, if any. To make calling easier, you can
// construct a "Caller" with MakeCaller.
func (rpc *RPCConn) Call(method string, params msgp.Raw) (msgp.Raw, error) {
// TODO: error handling.
id, cb := rpc.ct.Claim()
req := NewRequest(id, method, params)
w := msgp.NewWriter(rpc.rwc)
req.EncodeMsg(w)
// block and wait for response.
resp := <-cb
return resp.Result, &resp.Error
}
// Notify initiates a notification to a remote method. It does not
// return any information. There is no response from the server.
// This method will not block nor will it inform the caller if any errors occur.
func (rpc *RPCConn) Notify(method string, params msgp.Raw) {
// TODO: return an error if there's a local problem?
req := NewNotification(method, params)
w := msgp.NewWriter(rpc.rwc)
req.EncodeMsg(w)
}
// Register a new handler to be called by the remote side. An error
// is returned if the handler name is already in use.
func (rpc *RPCConn) RegisterHandler(name string, fn ServiceFunc) error {
// TODO: check if name in use.
// TODO: mutex lock for sync (or use sync.map?
rpc.handlers[name] = fn
rpc.logger.Info("registered a new handler", "name", name, "fn", fn)
return nil
}
// Removes a handler, if it exists. Never errors. No-op if the name
// is not a registered handler.
func (rpc *RPCConn) RemoveHandler(name string) error {
delete(rpc.handlers, name)
return nil
}
// Serve runs the server. It will dispatch goroutines to handle each method
// call. This can (and should in most cases) be run in the background to allow
// for sending and receiving on the same connection.
func (rpc *RPCConn) Serve() {
// construct a stream reader.
msgReader := msgp.NewReader(rpc.rwc)
// read a request/notification from the connection.
var rawmsg msgp.Raw = make(msgp.Raw, 0, 4)
for {
err := rawmsg.DecodeMsg(msgReader)
if err != nil {
if errors.Is(err, io.EOF) {
rpc.logger.Info("reached EOF, stopping server")
return
}
rpc.logger.Warn("error decoding message", "err", err)
continue
}
rpcIntf, err := parseRPC(rawmsg)
if err != nil {
rpc.logger.Warn("Could not parse RPC message", "err", err)
continue
}
switch rpcObject := rpcIntf.(type) {
case Request:
// the object is a request - we must dispatch a goroutine
// that will call the handler and also send a return value.
go rpc.dispatch(rpcObject)
case Notification:
go rpc.dispatchNotif(rpcObject)
case Response:
cbCh, err := rpc.ct.Clear(rpcObject.MsgId)
if err != nil {
rpc.logger.Warn("could not get rpc callback", "msgid", rpcObject.MsgId, "err", err)
continue
}
cbCh <- rpcObject
default:
panic("invalid rpcObject!")
}
}
}
// INTERNAL functions for rpcConn
// dispatch is an internal method used to execute a Request sent by the remote.
func (rpc *RPCConn) dispatch(req Request) {
result, err := rpc.handlers[req.Method](req.Params)
if err != nil {
rpc.logger.Warn("error dispatching rpc function", "method", req.Method, "err", err)
}
// construct the response frame.
var rpcE *RPCError = MakeRPCError(err)
w := msgp.NewWriter(rpc.rwc)
response := NewResponse(req.MsgId, *rpcE, result)
response.EncodeMsg(w)
}
// dispatchNotif is like dispatch, but for Notifications. This means that it never replies,
// even if there is an error.
func (rpc *RPCConn) dispatchNotif(req Notification) {
_, err := rpc.handlers[req.Method](req.Params)
if err != nil {
// log the error, but don't do anything about it.
rpc.logger.Warn("error dispatching rpc function", "method", req.Method, "err", err)
}
}
// Next, we define some helper generic functions that can be used to make
// implementing a msg wrapper easier.
// msgpackObject is anything that has implemented all the msgpack interfaces.
type msgpackObject interface {
msgp.Decodable
msgp.Encodable
msgp.MarshalSizer
msgp.Unmarshaler
}
// MakeService is a generic wrapper function. It takes a function with the signature
// of func(T msgpackObject) (R msgpackObject, error), where T and R are *concrete* types,
// and returns a new function that handles conversion to/from msgp.Raw.
// The function returned can be used by the RPCConn as a handler function.
// This function can typically have its type parameters inferred.
func MakeService[T, R msgpackObject](fn func(T) (R, error)) ServiceFunc {
return func(p msgp.Raw) (msgp.Raw, error) {
// decode the raw data into a new underlying type.
var params T
_, err := params.UnmarshalMsg(p)
if err != nil {
return nil, err
}
// now, call the function fn with the given params, and record the value.
resp, err := fn(params)
if err != nil {
return nil, err
}
return resp.MarshalMsg([]byte{})
}
}
// should the RPCConn/method name be baked into the function or should they be
// part of the returned function parameters?
// MakeCaller creates a simple wrapper around a remote call. The method name
// and RPC connection are given to the returned function to make an RPC call
// with the given type parameters.
//
// This function is slightly obtuse compared to MakeBoundCaller but is more flexible
// since you can reuse the same function across multiple connections and method names.
//
// This generic function must always have its type parameters declared explicitly.
// They cannot be inferred from the given parameters.
func MakeCaller[T, R msgpackObject]() func(string, T, *RPCConn) (R, error) {
return func(method string, param T, rpc *RPCConn) (R, error) {
rawParam, err := param.MarshalMsg([]byte{})
if err != nil {
var emtpyR R
return emtpyR, err
}
rawResponse, err := rpc.Call(method, rawParam)
if err != nil {
var emtpyR R
return emtpyR, err
}
var resp R
_, err = resp.UnmarshalMsg(rawResponse)
return resp, err
}
}
// MakeBoundCaller is like MakeCaller, except the RPC connection and method name are
// fixed and cannot be adjusted later. This function is more elegant but less flexible
// than MakeCaller and should be used when performance is not critical.
//
// This generic function must always have its type parameters declared explicitly.
// They cannot be inferred from the given parameters.
func MakeBoundCaller[T, R msgpackObject](rpc *RPCConn, method string) func(T) (R, error) {
return func(param T) (R, error) {
// encode parameters
// invoke rpc.Call
// await response
// unpack values.
rawParam, _ := param.MarshalMsg([]byte{})
rawResponse, err := rpc.Call(method, rawParam)
if err != nil {
var emtpyR R
return emtpyR, err
}
var resp R
_, err = resp.UnmarshalMsg(rawResponse)
return resp, err
}
}
// MakeNotifier creates a new notification function that notifies the remote method.
func MakeNotifier[T msgpackObject](method string) func(T, *RPCConn) error {
return func(param T, rpc *RPCConn) error {
rawParam, err := param.MarshalMsg([]byte{})
rpc.Notify(method, rawParam)
return err
}
}
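Tying the pieces of the package comment together, a sketch of one method wired end-to-end. myParam, myResult and myPlainFunction are the placeholder names from the doc comment above (assumed to be msgp-generated types), and the connections and logger are stand-ins for whatever transport the caller already has:

// Sketch only: not part of the package.
func exampleWiring(serverConn, clientConn io.ReadWriteCloser, logger *slog.Logger) {
	// "server" side: expose the plain function under a method name and serve.
	srv, _ := NewRPC(serverConn, logger, map[string]ServiceFunc{
		"myMethod": MakeService(myPlainFunction),
	})
	go srv.Serve()

	// "client" side: no handlers, just a typed stub built with MakeCaller.
	cli, _ := NewRPC(clientConn, logger, nil)
	go cli.Serve()
	call := MakeCaller[myParam, myResult]()
	res, err := call("myMethod", myParam{}, cli)
	_ = res
	_ = err
}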

View file

@ -1,170 +0,0 @@
package mprpc
import (
"errors"
"github.com/tinylib/msgp/msgp"
)
// this file is a simple implementation of the msgpack-rpc data formats.
// RPCType is the message type that is being sent.
type RPCType int
const (
RequestType RPCType = 0
ResponseType RPCType = 1
NotificationType RPCType = 2
)
// the MessagePack-RPC spec requires that the RPC wire formats are ordered arrays,
// aka tuples. We can use msgp options to make them tuples automatically,
// based on the order they are declared. This makes the order of these
// structs *critical*! Do not touch!
//go:generate msgp
//msgp:tuple Request
//msgp:tuple Response
//msgp:tuple Notification
// Request represents a function call that expects a Response.
type Request struct {
// should always be zero.
msgtype RPCType `msg:"type"`
// MsgId is used to match a Response with a Request
MsgId uint32 `msg:"msgid"`
// Method is the name of the method/service to execute on the remote
Method string `msg:"method"`
// Params is the arguments of the method/service. It can be any
// MessagePack-serializable type.
Params msgp.Raw `msg:"params,allownil"`
}
func NewRequest(msgid uint32, method string, params msgp.Raw) *Request {
return &Request{
msgtype: 0,
MsgId: msgid,
Method: method,
Params: params,
}
}
// A Response is the result and error given from calling a service.
type Response struct {
// should always be one.
msgtype RPCType `msg:"type"`
// MsgId is an identifier used to match this Response with the Request that created it.
MsgId uint32 `msg:"msgid"`
// Error is the error encountered while attempting to execute the method, if any.
Error RPCError `msg:"error,allownil"`
// Result is the raw object that was returned by the calling method. It
// can be any MessagePack-serializable object.
Result msgp.Raw `msg:"result,allownil"`
}
func NewResponse(msgid uint32, respErr RPCError, res msgp.Raw) *Response {
return &Response{
msgtype: 1,
MsgId: msgid,
Error: respErr,
Result: res,
}
}
// A notification is a function call that does not care if the call
// succeeds and ignores responses.
type Notification struct {
// should always be *2*
msgtype RPCType `msg:"type"`
Method string `msg:"method"`
Params msgp.Raw `msg:"params,allownil"`
}
func NewNotification(method string, params msgp.Raw) *Notification {
return &Notification{
msgtype: 2,
Method: method,
Params: params,
}
}
// getMsgType inspects the raw MessagePack-RPC array given by b and returns
// the underlying message type.
func getMsgType(b msgp.Raw) RPCType {
size, next, err := msgp.ReadArrayHeaderBytes(b)
if err != nil {
panic(err)
}
if size == 3 { // hot path for notifications.
return NotificationType
}
vtype, _, err := msgp.ReadIntBytes(next)
if err != nil {
panic(err)
}
// todo: use readIntf instead? returns a []interface{} and we can map it ourselves...
return RPCType(vtype)
}
// parseRPC takes a raw message and decodes it based on the first value
// of the array (the type). It returns the decoded object. Callers
// can use a type-switch to determine the type of the data.
func parseRPC(raw msgp.Raw) (interface{}, error) {
t := getMsgType(raw)
switch RPCType(t) {
case RequestType:
// create and return a request struct.
req := &Request{}
_, err := req.UnmarshalMsg(raw)
return req, err
case ResponseType:
res := &Response{}
_, err := res.UnmarshalMsg(raw)
return res, err
case NotificationType:
notif := &Notification{}
_, err := notif.UnmarshalMsg(raw)
return notif, err
default:
// uh oh.
return nil, errors.New("unmatched RPC type")
}
}
//msgp:tuple RPCError
// RPCError is a common RPC error format. It is basically a clone of the
// JSON-RPC error format. We use it so we know what to expect there.
type RPCError struct {
Code int
Desc string
}
// MakeRPCError converts a Go error into an RPC error.
func MakeRPCError(err error) *RPCError {
if err == nil {
return nil
}
return &RPCError{
Code: -1,
Desc: err.Error(),
}
}
// Error implements the error interface for RPCError.
func (r *RPCError) Error() string {
return r.Desc
}
// RPCEmpty describes an empty value that is used by the msgp encoding
// for functions without an argument or return value.
type RPCEmpty struct {
}

View file

@ -1,577 +0,0 @@
package mprpc
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *Notification) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 2 {
err = msgp.ArrayError{Wanted: 2, Got: zb0001}
return
}
z.Method, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Method")
return
}
err = z.Params.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Notification) EncodeMsg(en *msgp.Writer) (err error) {
// array header, size 2
err = en.Append(0x92)
if err != nil {
return
}
err = en.WriteString(z.Method)
if err != nil {
err = msgp.WrapError(err, "Method")
return
}
err = z.Params.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Notification) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// array header, size 2
o = append(o, 0x92)
o = msgp.AppendString(o, z.Method)
o, err = z.Params.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Notification) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0001 uint32
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 2 {
err = msgp.ArrayError{Wanted: 2, Got: zb0001}
return
}
z.Method, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Method")
return
}
bts, err = z.Params.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Notification) Msgsize() (s int) {
s = 1 + msgp.StringPrefixSize + len(z.Method) + z.Params.Msgsize()
return
}
// DecodeMsg implements msgp.Decodable
func (z *RPCEmpty) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z RPCEmpty) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 0
err = en.Append(0x80)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z RPCEmpty) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 0
o = append(o, 0x80)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *RPCEmpty) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z RPCEmpty) Msgsize() (s int) {
s = 1
return
}
// DecodeMsg implements msgp.Decodable
func (z *RPCError) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 2 {
err = msgp.ArrayError{Wanted: 2, Got: zb0001}
return
}
z.Code, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Code")
return
}
z.Desc, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Desc")
return
}
return
}
// EncodeMsg implements msgp.Encodable
func (z RPCError) EncodeMsg(en *msgp.Writer) (err error) {
// array header, size 2
err = en.Append(0x92)
if err != nil {
return
}
err = en.WriteInt(z.Code)
if err != nil {
err = msgp.WrapError(err, "Code")
return
}
err = en.WriteString(z.Desc)
if err != nil {
err = msgp.WrapError(err, "Desc")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z RPCError) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// array header, size 2
o = append(o, 0x92)
o = msgp.AppendInt(o, z.Code)
o = msgp.AppendString(o, z.Desc)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *RPCError) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0001 uint32
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 2 {
err = msgp.ArrayError{Wanted: 2, Got: zb0001}
return
}
z.Code, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Code")
return
}
z.Desc, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Desc")
return
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z RPCError) Msgsize() (s int) {
s = 1 + msgp.IntSize + msgp.StringPrefixSize + len(z.Desc)
return
}
// DecodeMsg implements msgp.Decodable
func (z *RPCType) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 int
zb0001, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = RPCType(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z RPCType) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteInt(int(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z RPCType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendInt(o, int(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *RPCType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 int
zb0001, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = RPCType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z RPCType) Msgsize() (s int) {
s = msgp.IntSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *Request) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 3 {
err = msgp.ArrayError{Wanted: 3, Got: zb0001}
return
}
z.MsgId, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "MsgId")
return
}
z.Method, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Method")
return
}
err = z.Params.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Request) EncodeMsg(en *msgp.Writer) (err error) {
// array header, size 3
err = en.Append(0x93)
if err != nil {
return
}
err = en.WriteUint32(z.MsgId)
if err != nil {
err = msgp.WrapError(err, "MsgId")
return
}
err = en.WriteString(z.Method)
if err != nil {
err = msgp.WrapError(err, "Method")
return
}
err = z.Params.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Request) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// array header, size 3
o = append(o, 0x93)
o = msgp.AppendUint32(o, z.MsgId)
o = msgp.AppendString(o, z.Method)
o, err = z.Params.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Request) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0001 uint32
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 3 {
err = msgp.ArrayError{Wanted: 3, Got: zb0001}
return
}
z.MsgId, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "MsgId")
return
}
z.Method, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Method")
return
}
bts, err = z.Params.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Params")
return
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Request) Msgsize() (s int) {
s = 1 + msgp.Uint32Size + msgp.StringPrefixSize + len(z.Method) + z.Params.Msgsize()
return
}
// DecodeMsg implements msgp.Decodable
func (z *Response) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0001 uint32
zb0001, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 3 {
err = msgp.ArrayError{Wanted: 3, Got: zb0001}
return
}
z.MsgId, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "MsgId")
return
}
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
if zb0002 != 2 {
err = msgp.ArrayError{Wanted: 2, Got: zb0002}
return
}
z.Error.Code, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Error", "Code")
return
}
z.Error.Desc, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Error", "Desc")
return
}
err = z.Result.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Result")
return
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *Response) EncodeMsg(en *msgp.Writer) (err error) {
// array header, size 3
err = en.Append(0x93)
if err != nil {
return
}
err = en.WriteUint32(z.MsgId)
if err != nil {
err = msgp.WrapError(err, "MsgId")
return
}
// array header, size 2
err = en.Append(0x92)
if err != nil {
return
}
err = en.WriteInt(z.Error.Code)
if err != nil {
err = msgp.WrapError(err, "Error", "Code")
return
}
err = en.WriteString(z.Error.Desc)
if err != nil {
err = msgp.WrapError(err, "Error", "Desc")
return
}
err = z.Result.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Result")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *Response) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// array header, size 3
o = append(o, 0x93)
o = msgp.AppendUint32(o, z.MsgId)
// array header, size 2
o = append(o, 0x92)
o = msgp.AppendInt(o, z.Error.Code)
o = msgp.AppendString(o, z.Error.Desc)
o, err = z.Result.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Result")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Response) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0001 uint32
zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if zb0001 != 3 {
err = msgp.ArrayError{Wanted: 3, Got: zb0001}
return
}
z.MsgId, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "MsgId")
return
}
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error")
return
}
if zb0002 != 2 {
err = msgp.ArrayError{Wanted: 2, Got: zb0002}
return
}
z.Error.Code, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error", "Code")
return
}
z.Error.Desc, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Error", "Desc")
return
}
bts, err = z.Result.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Result")
return
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Response) Msgsize() (s int) {
s = 1 + msgp.Uint32Size + 1 + msgp.IntSize + msgp.StringPrefixSize + len(z.Error.Desc) + z.Result.Msgsize()
return
}

View file

@ -1,575 +0,0 @@
package mprpc
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalNotification(t *testing.T) {
v := Notification{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgNotification(b *testing.B) {
v := Notification{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgNotification(b *testing.B) {
v := Notification{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalNotification(b *testing.B) {
v := Notification{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeNotification(t *testing.T) {
v := Notification{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeNotification Msgsize() is inaccurate")
}
vn := Notification{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeNotification(b *testing.B) {
v := Notification{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeNotification(b *testing.B) {
v := Notification{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalRPCEmpty(t *testing.T) {
v := RPCEmpty{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgRPCEmpty(b *testing.B) {
v := RPCEmpty{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgRPCEmpty(b *testing.B) {
v := RPCEmpty{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalRPCEmpty(b *testing.B) {
v := RPCEmpty{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeRPCEmpty(t *testing.T) {
v := RPCEmpty{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeRPCEmpty Msgsize() is inaccurate")
}
vn := RPCEmpty{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeRPCEmpty(b *testing.B) {
v := RPCEmpty{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeRPCEmpty(b *testing.B) {
v := RPCEmpty{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalRPCError(t *testing.T) {
v := RPCError{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgRPCError(b *testing.B) {
v := RPCError{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgRPCError(b *testing.B) {
v := RPCError{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalRPCError(b *testing.B) {
v := RPCError{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeRPCError(t *testing.T) {
v := RPCError{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeRPCError Msgsize() is inaccurate")
}
vn := RPCError{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeRPCError(b *testing.B) {
v := RPCError{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeRPCError(b *testing.B) {
v := RPCError{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalRequest(t *testing.T) {
v := Request{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgRequest(b *testing.B) {
v := Request{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgRequest(b *testing.B) {
v := Request{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalRequest(b *testing.B) {
v := Request{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeRequest(t *testing.T) {
v := Request{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeRequest Msgsize() is inaccurate")
}
vn := Request{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeRequest(b *testing.B) {
v := Request{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeRequest(b *testing.B) {
v := Request{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalResponse(t *testing.T) {
v := Response{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgResponse(b *testing.B) {
v := Response{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgResponse(b *testing.B) {
v := Response{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalResponse(b *testing.B) {
v := Response{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeResponse(t *testing.T) {
v := Response{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeResponse Msgsize() is inaccurate")
}
vn := Response{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeResponse(b *testing.B) {
v := Response{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeResponse(b *testing.B) {
v := Response{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

View file

@ -1 +0,0 @@
package mprpc_test

View file

@ -1,69 +0,0 @@
package mprpc
import (
"errors"
"math/rand"
"sync"
)
// rpcConnTrack is a request-response tracker that is used to connect
// the response to the appropriate caller.
type rpcConnTrack struct {
ct map[uint32]chan Response
mu sync.RWMutex
}
func NewRPCConnTrack() rpcConnTrack {
return rpcConnTrack{
ct: make(map[uint32]chan Response),
}
}
// Claim reserves a random, unused message id and returns it along with the
// channel that will receive the matching Response.
func (c *rpcConnTrack) Claim() (uint32, chan Response) {
var val uint32
for {
//
newVal := rand.Uint32()
// BUG(saji): rpcConnTrack collisions are inefficient.
// collision is *rare* - so we just try again.
// I hope to god you don't saturate this tracker.
c.mu.RLock()
if _, exist := c.ct[newVal]; !exist {
val = newVal
c.mu.RUnlock()
break
}
c.mu.RUnlock()
}
// claim it
// the channel should be buffered. We only expect one value to go through.
// so the size is fixed to 1.
ch := make(chan Response, 1)
c.mu.Lock()
c.ct[val] = ch
c.mu.Unlock()
return val, ch
}
// Clear deletes the connection from the tracker and returns the channel
// associated with it. The caller can use the channel afterwards
// to send the response. It is the caller's responsibility to close the channel.
func (c *rpcConnTrack) Clear(val uint32) (chan Response, error) {
c.mu.RLock()
ch, ok := c.ct[val]
c.mu.RUnlock()
if !ok {
return nil, errors.New("invalid msg id")
}
c.mu.Lock()
delete(c.ct, val)
c.mu.Unlock()
return ch, nil
}
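For reference, Claim and Clear are the two halves of the request/response hand-off that Call and Serve perform above: the caller claims an id and blocks on the returned channel, while the read loop clears that id and pushes the decoded Response into the channel. A compressed sketch of the caller's half (illustrative only; Call does the same thing inline, and a real sender must also flush the buffered writer):

// callSketch is not part of the package; it mirrors what Call does.
func callSketch(rpc *RPCConn, method string, params msgp.Raw) Response {
	id, ch := rpc.ct.Claim() // reserve an id and a 1-buffered response channel
	req := NewRequest(id, method, params)
	w := msgp.NewWriter(rpc.rwc)
	req.EncodeMsg(w)
	w.Flush()   // make sure the buffered frame actually reaches the transport
	return <-ch // Serve calls Clear(id) and sends the Response here
}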

30
openmct.go Normal file
View file

@ -0,0 +1,30 @@
//go:build openmct
package gotelem
import (
"embed"
"io/fs"
"net/http"
"github.com/go-chi/chi/v5"
)
// this package provides a web router for the static openmct build.
// it should only be included if the frontend has been built;
// to do so, run npm install and then npm run build.
//go:embed web/dist
var public embed.FS
func OpenMCTRouter(r chi.Router) {
// strip the subdirectory
pfs, _ := fs.Sub(public, "web/dist")
// default route.
r.Handle("/*", http.FileServerFS(pfs))
}
func init() {
RouterMods = append(RouterMods, OpenMCTRouter)
}
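Because of the //go:build openmct constraint, the embedded frontend is only compiled in when the tag is supplied at build time, e.g. go build -tags openmct ./... (assuming web/dist exists from the npm build); without the tag, the binary ships without the static OpenMCT assets and the router modification is never registered.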

View file

@ -1,20 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="JupyterPersistentConnectionParameters">
<option name="knownRemoteServers">
<list>
<JupyterConnectionParameters>
<option name="authType" value="notebook" />
<option name="token" value="5a7fb936e2f1eafcdefbb7fa3ea339000213214ae7e35195" />
<option name="urlString" value="http://127.0.0.1:8888" />
<authParams2>
<map>
<entry key="token" value="5a7fb936e2f1eafcdefbb7fa3ea339000213214ae7e35195" />
</map>
</authParams2>
</JupyterConnectionParameters>
</list>
</option>
<option name="moduleParameters">
<map>
<entry key="$PROJECT_DIR$/.idea/py.iml">

View file

@ -1,4 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Black">
<option name="executionMode" value="BINARY" />
<option name="pathToExecutable" value="/usr/bin/black" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Poetry (py)" project-jdk-type="Python SDK" />
<component name="PyPackaging">
<option name="earlyReleasesAsUpgrades" value="true" />
</component>
</project>

View file

@ -2,7 +2,7 @@
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="jdk" jdkName="Poetry (py)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TemplatesService">

83
py/notebooks/hacking.py Normal file
View file

@ -0,0 +1,83 @@
import orjson
import numpy as np
import pyqtgraph as pg
from pathlib import Path
from dataclasses import dataclass
import glom
# define a structure that can be used to describe what data to graph
print("hi")
@dataclass
class PlotFeature:
"""Class that represents a feature extraction"""
pkt_name: str
info_path: list[str]
# now make a function that takes a bunch of these and then matches the pkt_name.
# if there is a match, we must push the data.
# data format : dict[dict[list[timestamp, value]]]
# first dict is pkt_name, second dict is each variable we care about, and the
# list is a timestamp-value plot.
def rip_and_tear(fname: Path, features: list[PlotFeature]):
data = {}
for feat in features:
v = {}
for path in feat.info_path:
v[path] = []
data[feat.pkt_name] = v
# now we have initialized the data structure, start parsing the file.
with open(fname) as f:
while line := f.readline():
if len(line) < 3:
continue # kludge to skip empty lines
j = orjson.loads(line)
if not j['name'] in data:
continue
# use the glom, harry
for path in data[j['name']].keys():
d = glom.glom(j['data'], path)
ts = j['ts'] - 1688756556040
data[j['name']][path].append([ts, d])
# TODO: numpy the last list???
return data
if __name__ == "__main__":
features = [
PlotFeature("bms_measurement", ["current"]),
PlotFeature("wsr_phase_current", ["phase_b_current"]),
PlotFeature("wsr_motor_current_vector", ["iq"]),
PlotFeature("wsr_motor_voltage_vector", ["vq"]),
PlotFeature("wsr_velocity", ["motor_velocity"])
]
logs_path = Path("../../logs/")
logfile = logs_path / "RETIME_7-2-hillstart.txt"
res = rip_and_tear(logfile, features)
# now render the graphs
app = pg.mkQApp("i see no god up here\n OTHER THAN ME")
win = pg.GraphicsLayoutWidget(show=True, title="boy howdy")
prev_plot = None
for packet_name, fields in res.items():
win.addLabel(f"{packet_name}")
win.nextRow()
for field_name, field_data in fields.items():
d = np.array(field_data)
p = win.addPlot(title=f"{field_name}")
if prev_plot is not None:
p.setXLink(prev_plot)
p.plot(d)
prev_plot = p
win.nextRow()
pg.exec()

View file

@ -2,15 +2,11 @@
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2023-06-21T00:28:49.748311944Z",
"start_time": "2023-06-21T00:28:49.744946948Z"
},
"collapsed": true,
"jupyter": {
"outputs_hidden": true
}
},
"outputs": [
@ -18,7 +14,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
"No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
"/home/saji/Documents/Code/buildroot/gotelem/py/pytelem/optimus.py:50: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n",
" @jit\n",
"/home/saji/Documents/Code/buildroot/gotelem/py/pytelem/optimus.py:65: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n",
" @jit\n",
"/home/saji/Documents/Code/buildroot/gotelem/py/pytelem/optimus.py:79: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n",
" @jit\n",
"/home/saji/Documents/Code/buildroot/gotelem/py/pytelem/optimus.py:479: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n",
" @jit\n"
]
}
],
@ -58,6 +61,13 @@
"ffast = jit(optim.solar_position)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 6,
@ -292,9 +302,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "telemstrategy",
"language": "python",
"name": "python3"
"name": "telemstrategy"
},
"language_info": {
"codemirror_mode": {

2738
py/poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -12,14 +12,18 @@ orjson = "^3.8.14"
imgui-bundle = "^0.8.5"
numpy = "^1.24.3"
aiohttp = "^3.8.4"
pyside6 = "^6.5.1"
pydantic = "^1.10.9"
pyside6 = "^6.5.0"
pydantic = "^2"
pyyaml = "^6.0"
jinja2 = "^3.1.2"
pyqtgraph = "^0.13.3"
scipy = "^1.10.1"
numba = "^0.57.0"
jax = {extras = ["cpu"], version = "^0.4.12"}
matplotlib = "^3.7.2"
glom = "^23.3.0"
cattrs = "^23.2.3"
attrs = "^23.1.0"
[tool.poetry.group.dev.dependencies]

View file

@ -1,3 +1,39 @@
from functools import cached_property
import aiohttp
import orjson
import threading
from typing import Dict
from PySide6.QtCore import QObject, Signal, Slot
from pytelem.skylab import SkylabFile
# connect to websocket - create thread that handles JSON events
class TelemetryServer(QObject):
"""Connection to upstream database"""
conn_url: str
"Something like http://<some_ip>:8082"
def __init__(self, url: str, parent=None):
super().__init__(parent)
self.conn_url = url
NewPacket = Signal(object)
"""Signal that is emitted when a new packet is received in realtime. Contains the packet itself"""
@cached_property
def schema(self) -> SkylabFile:
"""Gets the Packet Schema from the server"""
pass
@Slot()
def connect(self):
"""Attempt to connect to server"""
def query(self, queryparams):
"""Query the historical data and store the result in the datastore"""

142
py/pytelem/bms.py Normal file
View file

@ -0,0 +1,142 @@
from typing import Dict
from typing_extensions import TypedDict
from PySide6.QtCore import QObject, Qt, Slot
from PySide6.QtGui import QFontDatabase
from PySide6.QtWidgets import QButtonGroup, QDockWidget, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QRadioButton, QVBoxLayout, QWidget
ContactorStates = TypedDict("ContactorStates", {})
class BMSState(QObject):
"""Represents the BMS state, including history."""
main_voltage: float
aux_voltage: float
current: float
def __init__(self, parent=None, upstream=None):
super().__init__(parent)
# uhh, take a connection to the upstream?
class BMSModuleViewer(QWidget):
"""BMS module status viewer (temp and voltage)"""
# use graphics view for rendering.
temps: list[float] = []
volts: list[float] = []
def __init__(self, parent = None) -> None:
super().__init__(parent)
layout = QGridLayout()
bg = QButtonGroup(self)
self.volts_btn = QRadioButton("Voltage", self)
self.temps_btn = QRadioButton("Temperatures", self)
bg.addButton(self.volts_btn)
bg.addButton(self.temps_btn)
layout.addWidget(self.volts_btn, 0, 0)
layout.addWidget(self.temps_btn, 0, 1)
class BMSOverview(QWidget):
current: QLabel
main_voltage: QLabel
aux_voltage: QLabel
def __init__(self, parent=None) -> None:
super().__init__(parent)
# self.setMaximumWidth()
layout = QGridLayout()
layout.setRowStretch(0, 80)
layout.setRowStretch(1, 20)
number_font = QFontDatabase.systemFont(QFontDatabase.SystemFont.FixedFont)
number_font.setPointSize(18)
hcenter = Qt.AlignmentFlag.AlignHCenter
self.main_voltage = QLabel("0.000", self)
self.main_voltage.setAlignment(hcenter)
self.main_voltage.setFont(number_font)
layout.addWidget(self.main_voltage, 0, 0)
main_v_label = QLabel("Main Voltage", self)
main_v_label.setAlignment(hcenter)
layout.addWidget(main_v_label, 1, 0)
self.aux_voltage = QLabel("0.000", self)
self.aux_voltage.setAlignment(hcenter)
self.aux_voltage.setFont(number_font)
layout.addWidget(self.aux_voltage, 0, 1)
aux_v_label = QLabel("Aux Voltage", self)
aux_v_label.setAlignment(hcenter)
layout.addWidget(aux_v_label, 1, 1)
self.current = QLabel("0.000", self)
self.current.setAlignment(hcenter)
self.current.setFont(number_font)
layout.addWidget(self.current, 0, 2)
current_label = QLabel("Battery Current", self)
current_label.setAlignment(hcenter)
layout.addWidget(current_label, 1, 2)
# now add widgets that display the numeric values.
# then make slots that take floats and display them.
self.setLayout(layout)
@Slot(float)
def update_main_v(self, value: float):
self.main_voltage.setText(f"{value:.2f}")
@Slot(float)
def set_aux_v(self, value:float):
self.aux_voltage.setText(f"{value:.3f}")
@Slot(float)
def set_current(self, value: float):
self.current.setText(f"{value:.3f}")
class BMSStatus(QWidget):
contactor_items: Dict[str, QLabel] = dict()
"A mapping of string names to the label, used to set open/closed"
def __init__(self, parent: QWidget | None = None, contactors: list[str] = []):
super().__init__(parent)
layout = QVBoxLayout(self)
self.contactors_grp = QGroupBox("Contactor State", self)
contactor_layout = QGridLayout()
self.contactors_grp.setLayout(contactor_layout)
layout.addWidget(self.contactors_grp)
for c in contactors:
label = QLabel(c, self)
self.contactor_items[c] = label
contactor_layout.addWidget(label)
class BMSPlotsWidget(QWidget):
pass

View file

@ -1,27 +1,83 @@
import random
import sys
import logging
import pyqtgraph.parametertree
from PySide6 import QtWidgets, QtCore
from PySide6.QtCore import QDir, Qt
from PySide6.QtCore import QDir, Qt, QObject, Slot, Signal, QTimer
from PySide6.QtGui import QAction
from PySide6.QtWidgets import (
QApplication,
QWidget,
QMainWindow,
QTreeView,
QDockWidget,
QDockWidget, QToolBar, QPlainTextEdit,
)
from gui_log import QLogHandler
from pytelem.widgets.smart_display import SmartDisplay
from bms import BMSOverview
class DataStore(QObject):
"""Stores all packets and timestamps for display and logging.
Queries the upstreams for the packets as they come in as well as historical"""
def __init__(self, remote):
super().__init__()
class MainApp(QMainWindow):
new_data = Signal(float)
def __init__(self):
super().__init__()
self.setWindowTitle("Hey there")
self.setWindowTitle("pyview")
layout = QtWidgets.QVBoxLayout()
ptree = PacketTree(self)
self.setCentralWidget(ptree)
mb = self.menuBar()
self.WindowMenu = mb.addMenu("Windows")
bms = BMSOverview()
packet_tree = QDockWidget('Packet Tree', self)
self.addDockWidget(Qt.DockWidgetArea.LeftDockWidgetArea, packet_tree)
packet_tree.setWidget(PacketTreeView())
packet_tree.hide()
self.ShowPacketTree = packet_tree.toggleViewAction()
self.WindowMenu.addAction(self.ShowPacketTree)
log_dock = QDockWidget('Application Log', self)
self.qlogger = QLogHandler()
self.log_box = QPlainTextEdit()
self.log_box.setReadOnly(True)
log_dock.setWidget(self.log_box)
self.qlogger.bridge.log.connect(self.log_box.appendPlainText)
self.addDockWidget(Qt.DockWidgetArea.BottomDockWidgetArea, log_dock)
self.logger = logging.Logger("Main")
self.logger.addHandler(self.qlogger)
self.logger.info("hi there!")
self.ShowLog = log_dock.toggleViewAction()
self.ShowLog.setShortcut("CTRL+L")
self.WindowMenu.addAction(self.ShowLog)
self.display = SmartDisplay(self, "test")
self.new_data.connect(self.display.update_value)
# start a qtimer to generate random data.
self.timer = QTimer(parent=self)
self.timer.timeout.connect(self.__random_data)
# self.__random_data.connect(self.timer.timeout)
self.timer.start(100)
self.setCentralWidget(self.display)
@Slot()
def __random_data(self):
# emit random data to the new_data
yay = random.normalvariate(10, 1)
self.logger.info(yay)
self.new_data.emit(yay)
class PacketTree(QWidget):
class PacketTreeView(QWidget):
"""PacketView is a widget that shows a tree of packets as well as properties on them when selected."""
def __init__(self, parent: QtWidgets.QWidget | None = None):
@ -30,7 +86,7 @@ class PacketTree(QWidget):
splitter = QtWidgets.QSplitter(self)
layout = QtWidgets.QVBoxLayout()
splitter.setOrientation(Qt.Vertical)
# splitter.setOrientation(Qt.Vertical)
self.tree = QTreeView()
self.prop_table = pyqtgraph.parametertree.ParameterTree()
splitter.addWidget(self.tree)
@ -40,6 +96,10 @@ class PacketTree(QWidget):
self.setLayout(layout)
class SolverView(QWidget):
"""Main Solver Widget/Window"""
if __name__ == "__main__":
app = QApplication(sys.argv)
main_window = MainApp()

20
py/pytelem/gui_log.py Normal file
View file

@ -0,0 +1,20 @@
import sys
import logging
from PySide6.QtCore import QObject, Slot, Signal
from PySide6.QtWidgets import QPlainTextEdit
class Bridge(QObject):
log = Signal(str)
class QLogHandler(logging.Handler):
bridge = Bridge()
def __init__(self):
super().__init__()
def emit(self, record):
msg = self.format(record)
self.bridge.log.emit(msg)

View file

@ -24,14 +24,14 @@ from numba import jit
def fsolve_discrete():
...
"""Forward compute a route segment."""
def dist_to_pos(dist: float):
"convert a distance along the race path to a position in 3d space"
"""convert a distance along the race path to a position in 3d space"""
### All units are BASE SI (no prefix except for kilogram)
# All units are BASE SI (no prefix except for kilogram)
ATM_MOLAR_MASS = 0.0289644 # kg/mol
STANDARD_TEMP = 288.15 # K
STANDARD_PRES = 101325.0 # Pa
@ -77,7 +77,6 @@ def make_cubic(a, b, c, d):
@jit
@vmap
def get_radiation_direct(yday, altitude_deg):
"""Calculate the direct radiation at a given day of the year given the angle of the sun
from the horizon."""
@ -584,18 +583,18 @@ def solar_position(timestamp, latitude, longitude, elevation):
v = v_0 + d_psi * np.cos(np.deg2rad(epsilon))
alpha = np.arctan2(np.sin(np.radians(sun_longitude)) *
np.cos(np.radians(epsilon)) -
np.tan(np.radians(beta)) *
np.sin(np.radians(epsilon)),
np.cos(np.radians(sun_longitude)))
alpha = np.arctan2(np.sin(np.deg2rad(sun_longitude)) *
np.cos(np.deg2rad(epsilon)) -
np.tan(np.deg2rad(beta)) *
np.sin(np.deg2rad(epsilon)),
np.cos(np.deg2rad(sun_longitude)))
alpha_deg = np.rad2deg(alpha) % 360
delta = np.arcsin(
np.sin(np.radians(beta)) *
np.cos(np.radians(epsilon)) +
np.cos(np.radians(beta)) *
np.sin(np.radians(epsilon)) *
np.cos(np.radians(sun_longitude))
np.sin(np.deg2rad(beta)) *
np.cos(np.deg2rad(epsilon)) +
np.cos(np.deg2rad(beta)) *
np.sin(np.deg2rad(epsilon)) *
np.cos(np.deg2rad(sun_longitude))
)
delta_deg = np.rad2deg(delta) % 360
@ -613,4 +612,5 @@ def solar_position(timestamp, latitude, longitude, elevation):
alpha_prime = alpha_deg + d_alpha
delta_prime = np.arctan2((np.sin(delta) - y * np.sin(np.radians(xi_deg))) * np.cos(np.radians(d_alpha)),
np.cos(delta) - x * np.sin(np.radians(xi_deg)) * np.cos(np.radians(h)))
topo_local_hour_angle_deg = h - d_alpha
h_prime = h - d_alpha
e_0 = np.arcsin(np.sin(latitude) * np.sin(delta) + np.cos(latitude) * np.cos(delta_prime))

View file

@ -1,114 +0,0 @@
import time
import numpy as np
from imgui_bundle import implot, imgui_knobs, imgui, immapp, hello_imgui
import aiohttp
import orjson
# Fill x and y whose plot is a heart
vals = np.arange(0, np.pi * 2, 0.01)
x = np.power(np.sin(vals), 3) * 16
y = 13 * np.cos(vals) - 5 * np.cos(2 * vals) - 2 * np.cos(3 * vals) - np.cos(4 * vals)
# Heart pulse rate and time tracking
phase = 0
t0 = time.time() + 0.2
heart_pulse_rate = 80
class PacketState:
"""PacketState is the state representation for a packet. It contains metadata about the packet
as well as a description of the packet fields. Also contains a buffer.
"""
def render_tree(self):
"""Render the Tree view entry for the packet. Only called if the packet is shown."""
pass
def render_graphs(self):
pass
def __init__(self, name: str, description: str | None = None):
self.name = name
self.description = description
# take the data fragment and create internal data representing it.
boards = {
"bms": {
"bms_measurement": {
"description": "Voltages for main battery and aux pack",
"id": 0x10,
"data": {
"battery_voltage": 127.34,
"aux_voltage": 23.456,
"current": 1.23,
},
},
"battery_status": {
"description": "Status bits for the battery",
"id": 0x11,
"data": {
"battery_state": {
"startup": True,
"precharge": False,
"discharging": False,
"lv_only": False,
"charging": False,
"wall_charging": False,
"killed": False,
}, # repeat for rest of fields
},
},
}
}
def gui():
global heart_pulse_rate, phase, t0, x, y
# Make sure that the animation is smooth
hello_imgui.get_runner_params().fps_idling.enable_idling = False
t = time.time()
phase += (t - t0) * heart_pulse_rate / (np.pi * 2)
k = 0.8 + 0.1 * np.cos(phase)
t0 = t
imgui.show_demo_window()
main_window_flags: imgui.WindowFlags = imgui.WindowFlags_.no_collapse.value
imgui.begin("my application", p_open=None, flags=main_window_flags)
imgui.text("Bloat free code")
if implot.begin_plot("Heart", immapp.em_to_vec2(21, 21)):
implot.plot_line("", x * k, y * k)
implot.end_plot()
for board_name, board_packets in boards.items():
if imgui.tree_node(board_name):
for packet_name in board_packets:
if imgui.tree_node(packet_name):
# display description if hovered
pkt = board_packets[packet_name]
if imgui.is_item_hovered():
imgui.set_tooltip(pkt["description"])
imgui.text(f"0x{pkt['id']:03X}")
imgui.tree_pop()
imgui.tree_pop()
imgui.end() # my application
_, heart_pulse_rate = imgui_knobs.knob("Pulse", heart_pulse_rate, 30, 180)
# class State:
# def __init__(self):
#
# def gui(self):
if __name__ == "__main__":
immapp.run(
gui,
window_size=(300, 450),
window_title="Hello!",
with_implot=True,
fps_idle=0,
) # type: ignore

View file

@ -3,13 +3,31 @@
from abc import ABC, abstractmethod
import re
from pathlib import Path
from typing import Callable, Iterable, NewType, TypedDict, List, Protocol, Union, Set
from typing import Annotated, Callable, Iterable, Literal, NewType, TypedDict, List, Protocol, Union, Set, Optional
from pydantic import BaseModel, validator
from pydantic import field_validator, BaseModel, validator, model_validator
from pydantic.functional_validators import AfterValidator
from enum import Enum
import yaml
import jinja2
def name_valid(s: str) -> str:
if len(s) == 0:
raise ValueError("name cannot be empty string")
if not re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", s):
raise ValueError(f"invalid name: {s}")
return s
# ObjectName is a string that is a valid name: it can only contain alphanumerics and underscores,
# and it must start with a letter or underscore.
ObjectName = Annotated[str, AfterValidator(name_valid)]
def is_valid_can_id(i: int) -> int:
if i < 0:
raise ValueError("CAN ID cannot be negative")
return i
CanID = Annotated[int, AfterValidator(is_valid_can_id)]
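# Illustrative examples (not part of the schema itself): "bms_measurement" is a valid
# ObjectName, while "" or a name starting with a digit raises a ValueError;
# CanID accepts 0x10 but rejects a negative id.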
# This part of the file is dedicated to parsing the skylab yaml files. We define
# classes that represent objects in the yaml files, and perform basic validation on
@ -31,7 +49,6 @@ class FieldType(str, Enum):
I64 = "int64_t"
F32 = "float"
Bitfield = "bitfield"
def size(self) -> int:
"""Returns the size, in bytes, of the type."""
@ -54,8 +71,6 @@ class FieldType(str, Enum):
return 8
case FieldType.F32:
return 4
case FieldType.Bitfield:
return 1
return -1
@ -69,100 +84,61 @@ class _Bits(TypedDict):
name: str
class BitField(BaseModel):
name: ObjectName
type: Literal["bitfield"]
bits: List[_Bits]
class SkylabField(BaseModel):
class EnumField(BaseModel):
name: ObjectName
type: Literal["enum"]
enum_reference: str
"The name of the custom enum to use"
class BasicField(BaseModel):
"""Represents a field (data element) inside a Skylab Packet."""
name: str
"the name of the field. must be alphanumeric and underscores"
name: ObjectName
type: FieldType
"the type of the field"
units: str | None
units: Optional[str]
"optional descriptor of the unit representation"
conversion: float | None
conversion: Optional[float]
"optional conversion factor to be applied when parsing"
bits: List[_Bits] | None
"if the type if a bitfield, "
@validator("bits")
def bits_must_exist_if_bitfield(cls, v, values):
if v is None and "type" in values and values["type"] is FieldType.Bitfield:
raise ValueError("bits are not present on bitfield type")
if (
v is not None
and "type" in values
and values["type"] is not FieldType.Bitfield
):
raise ValueError("bits are present on non-bitfield type")
return v
@validator("name")
def name_valid_string(cls, v: str):
if not re.match(r"^[A-Za-z0-9_]+$", v):
return ValueError("invalid name")
return v
@validator("name")
def name_nonzero_length(cls, v: str):
if len(v) == 0:
return ValueError("name cannot be empty string")
return v
class Endian(str, Enum):
"""Symbol representing the endianness of the packet"""
Big = "big"
Little = "little"
SkylabField = Union[BasicField, EnumField, BitField]
class SkylabPacket(BaseModel):
"""Represents a CAN packet. Contains SkylabFields with information on the structure of the data."""
name: str
description: str | None
id: int
endian: Endian
repeat: int | None
offset: int | None
name: ObjectName
description: str | None = None
id: CanID
endian: Literal["big", "little"]
repeat: int | None = None
offset: int | None = None
data: List[SkylabField]
# @validator("data")
# def packet_size_limit(cls, v: List[SkylabField]):
# tot = sum([f.type.size() for f in v])
# if tot > 8:
# return ValueError("Total packet size cannot exceed 8 bytes")
# return v
@validator("id")
@field_validator("id")
@classmethod
def id_non_negative(cls, v: int) -> int:
if v < 0:
raise ValueError("id must be above zero")
return v
@validator("name")
def name_valid_string(cls, v: str) -> str:
if not re.match(r"^[A-Za-z0-9_]+$", v):
raise ValueError("invalid name", v)
return v
@validator("name")
def name_nonzero_length(cls, v: str) -> str:
if len(v) == 0:
raise ValueError("name cannot be empty string")
return v
class RepeatedPacket(BaseModel):
name: ObjectName
description: str | None = None
id: CanID
endian: Literal["big", "little"]
repeat: int
offset: int
data: List[SkylabField]
@validator("offset")
def offset_must_have_repeat(cls, v: int | None, values) -> int | None:
if v is not None and values.get("repeat") is None:
raise ValueError("field with offset must have repeat defined")
return v
@validator("repeat")
def repeat_gt_one(cls, v: int | None):
if v is not None and v <= 1:
raise ValueError("repeat must be strictly greater than one")
return v
class SkylabBoard(BaseModel):
@ -173,41 +149,25 @@ class SkylabBoard(BaseModel):
- every name in the transmit/receive list must have a corresponding packet.
"""
name: str
name: ObjectName
"The name of the board"
transmit: List[str]
"The packets sent by this board"
receive: List[str]
"The packets received by this board."
@validator("name")
def name_valid_string(cls, v: str):
if not re.match(r"^[A-Za-z0-9_]+$", v):
return ValueError("invalid name", v)
return v
@validator("name")
def name_nonzero_length(cls, v: str):
if len(v) == 0:
return ValueError("name cannot be empty string")
return v
class SkylabBus(BaseModel):
name: str
name: ObjectName
"The name of the bus"
baud_rate: int
"Baud rate setting for the bus"
extended_id: bool
"If the bus uses extended ids"
@validator("name")
def name_valid_string(cls, v: str):
if not re.match(r"^[A-Za-z0-9_]+$", v):
return ValueError("invalid name", v)
return v
@validator("baud_rate")
@field_validator("baud_rate")
@classmethod
def baud_rate_supported(cls, v: int):
if v not in [125000, 250000, 500000, 750000, 1000000]:
raise ValueError("unsupported baud rate", v)

115
py/pytelem/skylab_attr.py Normal file
View file

@ -0,0 +1,115 @@
from typing import Dict, Optional, List, Union
from enum import Enum
from attrs import define, field, validators
# we define a validator for our names - alphanumeric and underscores
# most things can't support numbers as the first character, so we don't either.
name_validator = validators.matches_re(r"^[A-Za-z_][A-Za-z0-9_]*$")
@define
class Bus():
name: str
baud_rate: int
extended_id: bool = False
class FieldType(str, Enum):
"""FieldType indicates the type of the field - the enum represents the C type,
but you can use a map to convert the type to another language."""
# used to ensure types are valid, and act as representations for other languages/mappings.
U8 = "uint8_t"
U16 = "uint16_t"
U32 = "uint32_t"
U64 = "uint64_t"
I8 = "int8_t"
I16 = "int16_t"
I32 = "int32_t"
I64 = "int64_t"
F32 = "float"
Bitfield = "bitfield"
def size(self) -> int:
"""Returns the size, in bytes, of the type."""
match self:
case FieldType.U8:
return 1
case FieldType.U16:
return 2
case FieldType.U32:
return 4
case FieldType.U64:
return 8
case FieldType.I8:
return 1
case FieldType.I16:
return 2
case FieldType.I32:
return 4
case FieldType.I64:
return 8
case FieldType.F32:
return 4
case FieldType.Bitfield:
return 1
return -1
@define
class CustomTypeDef():
name: str = field(validator=[name_validator])
base_type: FieldType # should be a strict size
values: Union[List[str], Dict[str, int]]
@define
class BitfieldBit():
"micro class to represent one bit in bitfields"
name: str = field(validator=[name_validator])
@define
class Field():
name: str = field(validator=[name_validator])
type: FieldType
#metadata
units: Optional[str]
conversion: Optional[float]
@define
class BitField():
name: str = field(validator=[name_validator])
bits: List[BitfieldBit]
type: str = field(default="bitfield", init=False)  # constant value; defaulted fields must come last
class Endian(str, Enum):
BIG = "big"
LITTLE = "little"
@define
class Packet():
name: str
description: str
id: int
endian: Endian
frequency: Optional[int]
data: List[Field]
@define
class RepeatedPacket():
name: str
description: str
id: int
endian: Endian
frequency: Optional[int]
data: List[Field]
repeat: int
offset: int

View file

@ -0,0 +1,145 @@
# A simple display for numbers with optional trend line, histogram, min/max, and rolling average.
from PySide6.QtCore import Qt, Slot, QSize
from PySide6.QtGui import QAction, QFontDatabase
from PySide6.QtWidgets import (
QWidget, QVBoxLayout, QLabel, QSizePolicy, QGridLayout
)
import numpy as np
import pyqtgraph as pg
from typing import Optional, List
class _StatsDisplay(QWidget):
"""Helper Widget for the stats display."""
def __init__(self, parent=None):
super().__init__(parent)
# create grid array, minimum size vertically.
layout = QGridLayout(self)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
@Slot(float, float, float)
def update_values(self, new_min: float, new_avg: float, new_max: float):
    pass  # stub: the stats grid is not yet populated
class SmartDisplay(QWidget):
"""A simple numeric display with optional statistics, trends, and histogram"""
value: float = 0.0
min: float = float("inf")
max: float = -float("inf")
avg: float = 0.0
trend_data: List[float] = []
histogram_data: List[float] = []
# TODO: settable sample count for histogram/trend in right click menu
def __init__(self, parent=None, title: str = None, initial_value: float = None, unit_suffix=None,
show_histogram=False, show_trendline: bool = False, show_stats=False,
histogram_samples=100, trend_samples=30):
super().__init__(parent)
self.trend_samples = trend_samples
self.histogram_samples = histogram_samples
layout = QVBoxLayout(self)
if title is not None:
self.title = title
# create the title label
self.title_widget = QLabel(title, self)
self.title_widget.setAlignment(Qt.AlignmentFlag.AlignHCenter)
self.title_widget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
layout.addWidget(self.title_widget)
number_font = QFontDatabase.systemFont(QFontDatabase.SystemFont.FixedFont)
number_font.setPointSize(18)
self.value = initial_value
self.suffix = unit_suffix or ""
self.value_widget = QLabel(f"{self.value}{self.suffix}", self)
self.value_widget.setAlignment(Qt.AlignmentFlag.AlignHCenter)
self.value_widget.setFont(number_font)
layout.addWidget(self.value_widget)
# histogram widget
self.histogram_widget = pg.PlotWidget(self, title="Histogram")
self.histogram_widget.enableAutoRange()
self.histogram_widget.setVisible(False)
self.histogram_graph = pg.PlotDataItem()
self.histogram_widget.addItem(self.histogram_graph)
layout.addWidget(self.histogram_widget)
# stats display
# trendline display
self.trendline_widget = pg.PlotWidget(self, title="Trend")
self.trendline_widget.enableAutoRange()
self.trendline_widget.setVisible(False)
self.trendline_data = pg.PlotDataItem()
self.trendline_widget.addItem(self.trendline_data)
layout.addWidget(self.trendline_widget)
toggle_histogram = QAction("Show Histogram", self, checkable=True)
toggle_histogram.toggled.connect(self._toggle_histogram)
self.addAction(toggle_histogram)
toggle_trendline = QAction("Show Trendline", self, checkable=True)
toggle_trendline.toggled.connect(self._toggle_trendline)
self.addAction(toggle_trendline)
reset_stats = QAction("Reset Data", self)
reset_stats.triggered.connect(self.reset_data)
self.addAction(reset_stats)
# use the QWidget Actions list as the right click context menu. This is inherited by children.
self.setContextMenuPolicy(Qt.ActionsContextMenu)
def _toggle_histogram(self):
self.histogram_widget.setVisible(not self.histogram_widget.isVisible())
def _toggle_trendline(self):
self.trendline_widget.setVisible(not self.trendline_widget.isVisible())
def _update_view(self):
self.trendline_data.setData(self.trend_data)
self.value_widget.setText(f"{self.value:4g}{self.suffix}")
if self.histogram_widget.isVisible():
hist, bins = np.histogram(self.histogram_data)
self.histogram_graph.setData(bins, hist, stepMode="center")
@Slot(float)
def update_value(self, value: float):
"""Update the value displayed and associated stats."""
self.value = value
# update stats.
if self.value > self.max:
self.max = self.value
if self.value < self.min:
self.min = self.value
# update trend_data data.
self.trend_data.append(value)
if len(self.trend_data) > self.trend_samples:
self.trend_data.pop(0)
# update histogram
self.histogram_data.append(value)
if len(self.histogram_data) > self.histogram_samples:
self.histogram_data.pop(0)
# update running average
self.avg = float(np.mean(self.trend_data))
# re-render data.
self._update_view()
@Slot()
def reset_data(self):
"""Resets the existing data (trendline, stats, histogram)"""
self.max = -float("inf")
self.min = float("inf")
self.trend_data = []
self.histogram_data = []

View file

@ -9,6 +9,7 @@ Features:
- TCP streaming system based around MessagePack-RPC for LAN control/inspection.
- XBee integration and control for long-range communication.
- HTTP API for easy external tool integration.
- SQLite database format for storing telemetry, and tools to work with it.
`GoTelem` provides a flexible system for ingesting, storing, analyzing, and distributing
@ -20,20 +21,23 @@ telemetry information.
There are probably two questions:
1. Why a telemetry library that runs on an OS?
1. What's this for?
2. Why is it written in Go?
To answer the first question, the needs of the telemetry board are ill-suited for a microcontroller
since it requires doing multiple non-trivial tasks in parallel. The on-car system must ingest
all can packets, write them to disk, and then transmit them over XBee if they match a filter.
Doing fast disk I/O is difficult.
Telemetry is an interesting system since it not only involves a microcontroller on the car acting as a transmitter,
it also requires software running on a laptop that can receive the data and do useful things with it.
Previous iterations of this PC software usually involved Python scripts that were thrown together quickly
due to time constraints. This has a few problems, namely that performance is usually limited,
APIs are not type-safe, and environments are not portable and require setup.
There are also significant advantages to moving to using a Linux system for telemetry. We gain
Wi-Fi/Bluetooth/network support easily, we can integrate USB devices like a USB GPS receiver,
and we can share common tooling between the car code and the receiver code.
So we ought to invest in better tooling - schemas and programs that make working with
the data we collect easier and more consistent, as well as being [the standard](https://xkcd.com/927/).
This tool/repo aims to package several ideas and utilities into a single, all-in-one binary.
While that's a noble goal, design decisions are being made to support long-term evolution
of the software; we have versioned SQLite databases that are entirely standalone.
I chose to write this in Go because Go has good concurrency support, good cross-compilation,
and relatively good performance.
and relatively good performance, especially when compared to interpreted languages.
C/C++ was eliminated due to being too close to the metal and having bad tooling/cross compilation.
@ -44,7 +48,7 @@ robustness of the code.
Rust was eliminated due to being too different from more common programming languages. Likewise
for F#, C#, D, Zig, Nim, Julia, Racket, Elixir, and Common Lisp. Yes, I did seriously consider each
of these.
of these. C# was a viable competitor but had issues with the cross-platform story.
Go has some quirks and -isms, like lacking "true" Object-Orientation, but the language is designed
around being normal to look at, easy to write, and straightforward to understand.
@ -58,4 +62,45 @@ own system, and it's a single executable to share to others with the same OS/arc
## Building
There are build tags to enable/disable certain features, like the graphical GUI.
`gotelem` was designed to be all-inclusive while being easy to build and have good cross-platform support.
Binaries are a single, statically linked file that can be shared to other users of the same OS.
Certain features, like socketCAN support, are only enabled on platforms that support them (Linux).
This is handled automatically; builds will exclude the socketCAN files and
the additional commands and features will not be present in the CLI.
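As a rough sketch of how this works (the file and function below are illustrative, not actual sources in this repo), a Linux-only source file just carries a build constraint, so other platforms compile without it and never register the related commands:
```
//go:build linux

package socketcan

// openInterface is an illustrative stub; a real implementation would open a
// raw CAN socket here. Because of the build constraint above, non-Linux
// builds drop this file entirely, along with anything it registers.
func openInterface(name string) error {
	_ = name
	return nil
}
```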
### Lightweight Build
This doesn't include the OpenMCT files, but is simpler to build, and doesn't require Node setup.
You must install Go.
```
$ go build ./cmd/gotelem
```
### Full Build
This includes an integrated OpenMCT build, which automatically connects to the Telemetry server
for historical and live data. You must have both Go and Node.js installed.
```
$ cd web/
$ npm install
$ npm run build
$ cd ..
$ go build -tags openmct ./cmd/gotelem
```
## Development
During development, it can be useful to have the OpenMCT sources served separately from Gotelem,
so you don't need to rebuild everything. This case is supported:
```
$ go run ./cmd/gotelem server --db gotelem.db # in one terminal
$ npm run serve # in a separate terminal
```
When using the dev server, webpack will set the Gotelem URL to `localhost:8080`. If you're running
Gotelem using the default settings, this should work out of the box. Making changes to the OpenMCT
plugins will trigger a refresh automatically.

View file

@ -18,36 +18,37 @@ import (
// SkylabFile is a yaml file from skylab.
type SkylabFile struct {
Packets []PacketDef `json:"packets"`
Boards []BoardDef `json:"boards"`
Packets []PacketDef `yaml:"packets,omitempty" json:"packets,omitempty"`
Boards []BoardDef `yaml:"boards,omitempty" json:"boards,omitempty"`
}
type BoardDef struct {
Name string `json:"name"`
Transmit []string `json:"transmit"`
Receive []string `json:"receive"`
Name string `yaml:"name,omitempty" json:"name,omitempty"`
Transmit []string `yaml:"transmit,omitempty" json:"transmit,omitempty"`
Receive []string `yaml:"receive,omitempty" json:"receive,omitempty"`
}
// data field.
type FieldDef struct {
Name string `json:"name"`
Type string `json:"type"`
Units string `json:"units"`
Conversion float32 `json:"conversion"`
Name string `yaml:"name,omitempty" json:"name,omitempty"`
Type string `yaml:"type,omitempty" json:"type,omitempty"`
Units string `yaml:"units,omitempty" json:"units,omitempty"`
Conversion float32 `yaml:"conversion,omitempty" json:"conversion,omitempty"`
Bits []struct {
Name string `json:"name"`
} `json:"bits"`
Name string `yaml:"name,omitempty" json:"name,omitempty"`
} `yaml:"bits,omitempty" json:"bits,omitempty"`
}
// A PacketDef is a full CAN packet.
type PacketDef struct {
Name string `json:"name"`
Description string `json:"description"`
Id uint32 `json:"id"`
Endian string `json:"endian"`
Repeat int `json:"repeat"`
Offset int `json:"offset"`
Data []FieldDef `json:"data"`
Name string `yaml:"name,omitempty" json:"name,omitempty"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
Id uint32 `yaml:"id,omitempty" json:"id,omitempty"`
Endian string `yaml:"endian,omitempty" json:"endian,omitempty"`
IsExtended bool `yaml:"is_extended,omitempty" json:"is_extended,omitempty"`
Repeat int `yaml:"repeat,omitempty" json:"repeat,omitempty"`
Offset int `yaml:"offset,omitempty" json:"offset,omitempty"`
Data []FieldDef `yaml:"data,omitempty" json:"data,omitempty"`
}
// we need to generate bitfield types.
@ -273,6 +274,20 @@ func mapf(format string, els []int) []string {
return resp
}
func idToString(p PacketDef) string {
if p.Repeat > 0 {
resp := make([]string, p.Repeat)
for idx := 0; idx < p.Repeat; idx++ {
resp[idx] = fmt.Sprintf("can.CanID{ Id: 0x%X, Extended: %t }", int(p.Id)+idx*p.Offset, p.IsExtended)
}
return strings.Join(resp, ",")
} else {
return fmt.Sprintf("can.CanID{ Id: 0x%X, Extended: %t }", p.Id, p.IsExtended)
}
}
func main() {
// read path as the first arg, glob it for yamls, read each yaml into a skylabFile.
// then take each skylab file, put all the packets into one big array.
@ -309,15 +324,16 @@ func main() {
// we add any functions mapping we need here.
fnMap := template.FuncMap{
"camelCase": toCamelInitCase,
"Time": time.Now,
"N": N,
"Nx": Nx,
"int": uint32ToInt,
"strJoin": strJoin,
"mapf": mapf,
"maptype": MapType,
"json": json.Marshal,
"camelCase": toCamelInitCase,
"Time": time.Now,
"N": N,
"Nx": Nx,
"int": uint32ToInt,
"strJoin": strJoin,
"mapf": mapf,
"maptype": MapType,
"json": json.Marshal,
"idToString": idToString,
}
tmpl, err := template.New("golang.go.tmpl").Funcs(fnMap).ParseGlob("templates/*.go.tmpl")

View file

@ -1,14 +1,19 @@
// Package skylab provides CAN packet encoding and decoding information based off
// of skylab.yaml. It can convert packets to/from CAN raw bytes and JSON objects.
package skylab
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"math"
"time"
// this is needed so that we can run make_skylab.go
// without this, the yaml library will be removed
// when we run `go mod tidy`
"github.com/kschamplin/gotelem/internal/can"
_ "gopkg.in/yaml.v3"
)
@ -40,11 +45,11 @@ func float32FromBytes(b []byte, bigEndian bool) (f float32) {
// Packet is any Skylab-generated packet.
type Packet interface {
MarshalPacket() ([]byte, error)
UnmarshalPacket(p []byte) error
CANId() (uint32, error)
Size() uint
String() string
Marshaler
Unmarshaler
Ider
Sizer
fmt.Stringer // to get the name
}
// Marshaler is a packet that can be marshalled into bytes.
@ -59,7 +64,7 @@ type Unmarshaler interface {
// Ider is a packet that can get its ID, based on the index of the packet, if any.
type Ider interface {
CANId() (uint32, error)
CanId() (can.CanID, error)
}
// Sizer allows for fast allocation.
@ -68,42 +73,41 @@ type Sizer interface {
}
// ToCanFrame takes a packet and makes CAN framing data.
func ToCanFrame(p Packet) (id uint32, data []byte, err error) {
func ToCanFrame(p Packet) (f can.Frame, err error) {
id, err = p.CANId()
f.Id, err = p.CanId()
if err != nil {
return
}
data, err = p.MarshalPacket()
f.Data, err = p.MarshalPacket()
f.Kind = can.CanDataFrame
return
}
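// Illustrative round trip (error handling elided; pkt stands for any
// generated Packet value):
//
//	f, _ := ToCanFrame(pkt)
//	// ...send f over the bus; the receiving side can then decode it:
//	decoded, _ := FromCanFrame(f)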
// ---- other wire encoding business ----
// internal structure for partially decoding json object.
type jsonRawEvent struct {
Timestamp float64
Id uint32
Name string
Data json.RawMessage
type RawJsonEvent struct {
Timestamp int64 `json:"ts" db:"ts"`
Name string `json:"name"`
Data json.RawMessage `json:"data"`
}
// BusEvent is a timestamped Skylab packet
// BusEvent is a timestamped Skylab packet - it contains the packet name and the decoded data.
type BusEvent struct {
Timestamp float64 `json:"ts"`
Id uint64 `json:"id"`
Name string `json:"name"`
Data Packet `json:"data"`
Timestamp time.Time
Name string
Data Packet
}
func (e *BusEvent) MarshalJSON() (b []byte, err error) {
func (e BusEvent) MarshalJSON() (b []byte, err error) {
// create the underlying raw event
j := &jsonRawEvent{
Timestamp: e.Timestamp,
Id: uint32(e.Id),
Name: e.Data.String(),
j := &RawJsonEvent{
Timestamp: e.Timestamp.UnixMilli(),
Name: e.Name,
}
// now we use the magic Packet -> map[string]interface{} function
// FIXME: this uses reflection and isn't good for the economy
j.Data, err = json.Marshal(e.Data)
if err != nil {
return nil, err
@ -113,62 +117,54 @@ func (e *BusEvent) MarshalJSON() (b []byte, err error) {
}
// UnmarshalJSON implements JSON unmarshalling. Note that this
// uses RawJsonEvent, which is formatted differently.
// It also uses int64 milliseconds instead of time.Time values.
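// For example (values illustrative), an event for a packet named
// "bms_measurement" would look like:
//
//	{"ts": 1709876543210, "name": "bms_measurement", "data": {"battery_voltage": 127.34}}
//
// where "ts" is Unix milliseconds and "data" is the packet's own JSON form.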
func (e *BusEvent) UnmarshalJSON(b []byte) error {
var jRaw *jsonRawEvent
j := &RawJsonEvent{}
err := json.Unmarshal(b, jRaw)
err := json.Unmarshal(b, j)
if err != nil {
return err
}
e.Timestamp = jRaw.Timestamp
e.Id = uint64(jRaw.Id)
e.Data, err = FromJson(jRaw.Id, jRaw.Data)
e.Name = e.Data.String()
e.Timestamp = time.UnixMilli(j.Timestamp)
e.Name = j.Name
e.Data, err = FromJson(j.Name, j.Data)
return err
}
func (e *BusEvent) MarshalMsg(b []byte) ([]byte, error) {
// we need to send the bytes as a []byte instead of
// an object like the JSON one (lose self-documenting)
data, err := e.Data.MarshalPacket()
if err != nil {
return nil, err
// Equals compares two bus events deeply.
func (e *BusEvent) Equals(other *BusEvent) bool {
if e.Name != other.Name {
return false
}
rawEv := &msgpRawEvent{
Timestamp: e.Timestamp,
Id: uint32(e.Id),
Data: data,
if !e.Timestamp.Equal(other.Timestamp) {
return false
}
return rawEv.MarshalMsg(b)
}
func (e *BusEvent) UnmarshalMsg(b []byte) ([]byte, error) {
rawEv := &msgpRawEvent{}
remain, err := rawEv.UnmarshalMsg(b)
if err != nil {
return remain, err
}
e.Timestamp = rawEv.Timestamp
e.Id = uint64(rawEv.Id)
e.Data, err = FromCanFrame(rawEv.Id, rawEv.Data)
e.Name = e.Data.String()
return remain, err
pkt1, _ := e.Data.MarshalPacket()
pkt2, _ := other.Data.MarshalPacket()
return bytes.Equal(pkt1, pkt2)
}
// We need to be able to parse the JSON as well. This is done in the
// generator, since a switch/case over packet names is the fastest approach.
type UnknownIdError struct {
id uint64
id uint32
}
func (e *UnknownIdError) Error() string {
return fmt.Sprintf("unknown id: %x", e.id)
}
type BadLengthError struct {
expected uint32
actual uint32
}
func (e *BadLengthError) Error() string {
return fmt.Sprintf("bad data length, expected %d, got %d", e.expected, e.actual)
}

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View file

@ -1,18 +0,0 @@
package skylab
//go:generate msgp -unexported
// internal structure for handling
type msgpRawEvent struct {
Timestamp float64 `msg:"ts"`
Id uint32 `msg:"id"`
Data []byte `msg:"data"`
}
// internal structure to represent a raw can packet over the network.
// this is what's sent over the solar car to lead xbee connection
// for brevity while still having some robustness.
type msgpRawPacket struct {
Id uint32 `msg:"id"`
Data []byte `msg:"data"`
}

View file

@ -1,288 +0,0 @@
package skylab
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *msgpRawEvent) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ts":
z.Timestamp, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
case "id":
z.Id, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "Id")
return
}
case "data":
z.Data, err = dc.ReadBytes(z.Data)
if err != nil {
err = msgp.WrapError(err, "Data")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *msgpRawEvent) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "ts"
err = en.Append(0x83, 0xa2, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteFloat64(z.Timestamp)
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
// write "id"
err = en.Append(0xa2, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteUint32(z.Id)
if err != nil {
err = msgp.WrapError(err, "Id")
return
}
// write "data"
err = en.Append(0xa4, 0x64, 0x61, 0x74, 0x61)
if err != nil {
return
}
err = en.WriteBytes(z.Data)
if err != nil {
err = msgp.WrapError(err, "Data")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *msgpRawEvent) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "ts"
o = append(o, 0x83, 0xa2, 0x74, 0x73)
o = msgp.AppendFloat64(o, z.Timestamp)
// string "id"
o = append(o, 0xa2, 0x69, 0x64)
o = msgp.AppendUint32(o, z.Id)
// string "data"
o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61)
o = msgp.AppendBytes(o, z.Data)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *msgpRawEvent) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ts":
z.Timestamp, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
case "id":
z.Id, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Id")
return
}
case "data":
z.Data, bts, err = msgp.ReadBytesBytes(bts, z.Data)
if err != nil {
err = msgp.WrapError(err, "Data")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *msgpRawEvent) Msgsize() (s int) {
s = 1 + 3 + msgp.Float64Size + 3 + msgp.Uint32Size + 5 + msgp.BytesPrefixSize + len(z.Data)
return
}
// DecodeMsg implements msgp.Decodable
func (z *msgpRawPacket) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.Id, err = dc.ReadUint32()
if err != nil {
err = msgp.WrapError(err, "Id")
return
}
case "data":
z.Data, err = dc.ReadBytes(z.Data)
if err != nil {
err = msgp.WrapError(err, "Data")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *msgpRawPacket) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "id"
err = en.Append(0x82, 0xa2, 0x69, 0x64)
if err != nil {
return
}
err = en.WriteUint32(z.Id)
if err != nil {
err = msgp.WrapError(err, "Id")
return
}
// write "data"
err = en.Append(0xa4, 0x64, 0x61, 0x74, 0x61)
if err != nil {
return
}
err = en.WriteBytes(z.Data)
if err != nil {
err = msgp.WrapError(err, "Data")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *msgpRawPacket) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "id"
o = append(o, 0x82, 0xa2, 0x69, 0x64)
o = msgp.AppendUint32(o, z.Id)
// string "data"
o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61)
o = msgp.AppendBytes(o, z.Data)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *msgpRawPacket) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "id":
z.Id, bts, err = msgp.ReadUint32Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Id")
return
}
case "data":
z.Data, bts, err = msgp.ReadBytesBytes(bts, z.Data)
if err != nil {
err = msgp.WrapError(err, "Data")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *msgpRawPacket) Msgsize() (s int) {
s = 1 + 3 + msgp.Uint32Size + 5 + msgp.BytesPrefixSize + len(z.Data)
return
}

View file

@ -1,236 +0,0 @@
package skylab
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalmsgpRawEvent(t *testing.T) {
v := msgpRawEvent{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgmsgpRawEvent(b *testing.B) {
v := msgpRawEvent{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgmsgpRawEvent(b *testing.B) {
v := msgpRawEvent{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalmsgpRawEvent(b *testing.B) {
v := msgpRawEvent{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodemsgpRawEvent(t *testing.T) {
v := msgpRawEvent{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodemsgpRawEvent Msgsize() is inaccurate")
}
vn := msgpRawEvent{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodemsgpRawEvent(b *testing.B) {
v := msgpRawEvent{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodemsgpRawEvent(b *testing.B) {
v := msgpRawEvent{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalmsgpRawPacket(t *testing.T) {
v := msgpRawPacket{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgmsgpRawPacket(b *testing.B) {
v := msgpRawPacket{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgmsgpRawPacket(b *testing.B) {
v := msgpRawPacket{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalmsgpRawPacket(b *testing.B) {
v := msgpRawPacket{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodemsgpRawPacket(t *testing.T) {
v := msgpRawPacket{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodemsgpRawPacket Msgsize() is inaccurate")
}
vn := msgpRawPacket{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodemsgpRawPacket(b *testing.B) {
v := msgpRawPacket{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodemsgpRawPacket(b *testing.B) {
v := msgpRawPacket{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

View file

@ -45,15 +45,17 @@ type {{$structName}} struct {
{{- end }}
}
func (p *{{$structName}}) CANId() (uint32, error) {
func (p *{{$structName}}) CanId() (can.CanID, error) {
c := can.CanID{Extended: {{.IsExtended}}}
{{- if .Repeat }}
if p.Idx >= {{.Repeat}} {
return 0, &UnknownIdError{ {{ printf "0x%X" .Id }} }
return c, &UnknownIdError{ {{ printf "0x%X" .Id }} }
}
return {{ printf "0x%X" .Id }} + p.Idx, nil
c.Id = {{ printf "0x%X" .Id }} + p.Idx
{{- else }}
return {{ printf "0x%X" .Id }}, nil
c.Id = {{ printf "0x%X" .Id }}
{{- end }}
return c, nil
}
func (p *{{$structName}}) Size() uint {
@ -67,6 +69,9 @@ func (p *{{$structName}}) MarshalPacket() ([]byte, error) {
}
func (p *{{$structName}}) UnmarshalPacket(b []byte) error {
if len(b) != {{.CalcSize}} {
return &BadLengthError{expected: {{.CalcSize}}, actual: uint32(len(b))}
}
{{.MakeUnmarshal}}
return nil
}
@ -86,6 +91,7 @@ package skylab
import (
"errors"
"encoding/binary"
"github.com/kschamplin/gotelem/internal/can"
"encoding/json"
)
@ -97,68 +103,59 @@ const (
{{- end}}
)
// list of every packet ID. can be used for O(1) checks.
var idMap = map[uint32]bool{
// list of every packet ID. Can be used for O(1) checks.
var idMap = map[can.CanID]bool{
{{ range $p := .Packets -}}
{{ if $p.Repeat }}
{{ range $idx := Nx (int $p.Id) $p.Repeat $p.Offset -}}
{{ $idx | printf "0x%X"}}: true,
{ Id: {{ $idx | printf "0x%X"}}, Extended: {{$p.IsExtended}} }: true,
{{ end }}
{{- else }}
{{ $p.Id | printf "0x%X" }}: true,
{ Id: {{ $p.Id | printf "0x%X" }}, Extended: {{$p.IsExtended}} }: true,
{{- end}}
{{- end}}
}
// FromCanFrame creates a Packet from a given CAN frame (ID and data payload).
// If the CAN ID is unknown, it will return an error.
func FromCanFrame(id uint32, data []byte) (Packet, error) {
func FromCanFrame(f can.Frame) (Packet, error) {
id := f.Id
if !idMap[id] {
return nil, &UnknownIdError{ uint64(id) }
return nil, &UnknownIdError{ id.Id }
}
switch id {
{{- range $p := .Packets }}
{{- if $p.Repeat }}
case {{ Nx (int $p.Id) $p.Repeat $p.Offset | mapf "0x%X" | strJoin ", " -}}:
case {{ $p | idToString -}}:
var res = &{{camelCase $p.Name true}}{}
res.UnmarshalPacket(data)
res.Idx = id - {{$p.Id | printf "0x%X" }}
res.UnmarshalPacket(f.Data)
res.Idx = id.Id - {{$p.Id | printf "0x%X" }}
return res, nil
{{- else }}
case {{ $p.Id | printf "0x%X" }}:
case {{ $p | idToString }}:
var res = &{{camelCase $p.Name true}}{}
res.UnmarshalPacket(data)
res.UnmarshalPacket(f.Data)
return res, nil
{{- end}}
{{- end}}
}
return nil, errors.New("failed to match Id, something is really wrong")
panic("This should never happen. CAN ID didn't match but was in ID map")
}
func FromJson (id uint32, raw []byte) (Packet, error) {
if !idMap[id] {
return nil, errors.New("unknown id")
}
switch id {
func FromJson (name string, raw []byte) (Packet, error) {
switch name {
{{- range $p := .Packets }}
{{- if $p.Repeat }}
case {{ Nx (int $p.Id) $p.Repeat $p.Offset | mapf "0x%X" | strJoin ", " -}}:
var res = &{{camelCase $p.Name true}}{}
err := json.Unmarshal(raw, res)
res.Idx = id - {{ $p.Id | printf "0x%X" }}
return res, err
{{- else }}
case {{ $p.Id | printf "0x%X" }}:
case "{{ $p.Name }}":
var res = &{{camelCase $p.Name true}}{}
err := json.Unmarshal(raw, res)
return res, err
{{- end }}
{{- end }}
}
return nil, errors.New("failed to match id")
return nil, errors.New("unknown packet name")
}
{{range .Packets -}}

View file

@ -3,6 +3,7 @@ package skylab
import (
"testing"
"reflect"
"encoding/json"
)
@ -29,8 +30,7 @@ func TestJSON{{$structName}}(t *testing.T) {
t.Fatal(err)
}
id, _ := v.CANId()
p, err := FromJson(id, rawData)
p, err := FromJson(v.String(), rawData)
if err != nil {
t.Fatal(err)
}
@ -45,4 +45,20 @@ func TestJSON{{$structName}}(t *testing.T) {
}
func TestCanFrame{{$structName}}(t *testing.T) {
v := &{{$structName}}{}
frame, err := ToCanFrame(v)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
retpkt, err := FromCanFrame(frame)
if err != nil {
t.Fatalf("expected no error, got %v", err)
}
if !reflect.DeepEqual(v, retpkt) {
t.Fatalf("decoded packet did not match sent %v got %v", v, retpkt)
}
}
{{- end }}

View file

@ -1,5 +1,14 @@
//go:build linux
//go:build ignore
// +build ignore
package socketcan
package main
// TODO: implement netlink support to set baud rate and other parameters.
import (
"github.com/mdlayher/netlink"
"golang.org/x/sys/unix"
)
// this program demonstrates basic CAN stuff.
// i give up this shit is so hard

View file

@ -11,7 +11,7 @@ import (
"fmt"
"net"
"github.com/kschamplin/gotelem"
"github.com/kschamplin/gotelem/internal/can"
"golang.org/x/sys/unix"
)
@ -23,6 +23,7 @@ type CanSocket struct {
fd int
}
// CanFilter is a filter for an interface.
type CanFilter interface {
Inverted() bool
Mask() uint32
@ -127,22 +128,24 @@ func (sck *CanSocket) SetFilters(filters []CanFilter) error {
}
// Send sends a CAN frame
func (sck *CanSocket) Send(msg *gotelem.Frame) error {
func (sck *CanSocket) Send(msg *can.Frame) error {
buf := make([]byte, fdFrameSize)
idToWrite := msg.Id
idToWrite := msg.Id.Id
switch msg.Kind {
case gotelem.CanSFFFrame:
idToWrite &= unix.CAN_SFF_MASK
case gotelem.CanEFFFrame:
if msg.Id.Extended {
idToWrite &= unix.CAN_EFF_MASK
idToWrite |= unix.CAN_EFF_FLAG
case gotelem.CanRTRFrame:
}
switch msg.Kind {
case can.CanRTRFrame:
idToWrite |= unix.CAN_RTR_FLAG
case gotelem.CanErrFrame:
case can.CanErrFrame:
return errors.New("you can't send error frames")
case can.CanDataFrame:
default:
return errors.New("unknown frame type")
}
@ -174,7 +177,7 @@ func (sck *CanSocket) Send(msg *gotelem.Frame) error {
return nil
}
func (sck *CanSocket) Recv() (*gotelem.Frame, error) {
func (sck *CanSocket) Recv() (*can.Frame, error) {
// todo: support extended frames.
buf := make([]byte, fdFrameSize)
@ -183,21 +186,33 @@ func (sck *CanSocket) Recv() (*gotelem.Frame, error) {
return nil, err
}
id := binary.LittleEndian.Uint32(buf[0:4])
raw_id := binary.LittleEndian.Uint32(buf[0:4])
var k gotelem.Kind
if id&unix.CAN_EFF_FLAG != 0 {
var id can.CanID
id.Id = raw_id
if raw_id&unix.CAN_EFF_FLAG != 0 {
// extended id frame
k = gotelem.CanEFFFrame
id.Extended = true
} else {
// it's a normal can frame
k = gotelem.CanSFFFrame
id.Extended = false
}
var k can.Kind = can.CanDataFrame
if raw_id&unix.CAN_ERR_FLAG != 0 {
// we got an error...
k = can.CanErrFrame
}
if raw_id&unix.CAN_RTR_FLAG != 0 {
k = can.CanRTRFrame
}
dataLength := uint8(buf[4])
result := &gotelem.Frame{
Id: id & unix.CAN_EFF_MASK,
result := &can.Frame{
Id: id,
Kind: k,
Data: buf[8 : dataLength+8],
}

View file

@ -7,7 +7,7 @@ import (
"net"
"testing"
"github.com/kschamplin/gotelem"
"github.com/kschamplin/gotelem/internal/can"
)
func TestCanSocket(t *testing.T) {
@ -29,7 +29,7 @@ func TestCanSocket(t *testing.T) {
}
})
t.Run("test name", func(t *testing.T) {
t.Run("test interface name", func(t *testing.T) {
sock, _ := NewCanSocket("vcan0")
defer sock.Close()
@ -43,15 +43,15 @@ func TestCanSocket(t *testing.T) {
defer sock.Close()
// make a packet.
testFrame := &gotelem.Frame{
Id: 0x123,
Kind: gotelem.CanSFFFrame,
testFrame := &can.Frame{
Id: can.CanID{Id: 0x123, Extended: false},
Kind: can.CanDataFrame,
Data: []byte{0, 1, 2, 3, 4, 5, 6, 7},
}
err := sock.Send(testFrame)
if err != nil {
t.Error(err)
t.Fatal(err)
}
})
@ -61,9 +61,9 @@ func TestCanSocket(t *testing.T) {
defer sock.Close()
defer rsock.Close()
testFrame := &gotelem.Frame{
Id: 0x234,
Kind: gotelem.CanSFFFrame,
testFrame := &can.Frame{
Id: can.CanID{Id: 0x234, Extended: false},
Kind: can.CanDataFrame,
Data: []byte{0, 1, 2, 3, 4, 5, 6, 7},
}
_ = sock.Send(testFrame)

131
web/.gitignore vendored Normal file
View file

@ -0,0 +1,131 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# Snowpack dependency directory (https://snowpack.dev/)
web_modules/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional stylelint cache
.stylelintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
out
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# vuepress v2.x temp and cache directory
.temp
.cache
# Docusaurus cache and generated files
.docusaurus
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*

10
web/eslint.config.js Normal file
View file

@ -0,0 +1,10 @@
import eslint from '@eslint/js';
import tseslint from 'typescript-eslint';
export default tseslint.config(
eslint.configs.recommended,
...tseslint.configs.recommended,
{
"ignores": ["dist/*"]
}
);

5920
web/package-lock.json generated Normal file

File diff suppressed because it is too large

30
web/package.json Normal file
View file

@ -0,0 +1,30 @@
{
"name": "g1_openmct",
"version": "1.0.0",
"description": "dev environment for openmct plugins for g1 strategy tool",
"type": "module",
"scripts": {
"build": "webpack --config webpack.prod.js",
"serve": "webpack serve --config webpack.dev.js"
},
"author": "",
"license": "ISC",
"dependencies": {
"openmct": "^3.2.1"
},
"devDependencies": {
"@types/node": "^20.11.25",
"@types/webpack": "^5.28.5",
"copy-webpack-plugin": "^12.0.2",
"eslint": "^8.57.0",
"html-webpack-plugin": "^5.6.0",
"ts-loader": "^9.5.1",
"ts-node": "^10.9.2",
"typescript": "^5.4.2",
"typescript-eslint": "^7.1.1",
"webpack": "^5.90.3",
"webpack-cli": "^5.1.4",
"webpack-dev-server": "^5.0.2",
"webpack-merge": "^5.10.0"
}
}

332
web/src/app.ts Normal file
View file

@ -0,0 +1,332 @@
import openmct from "openmct";
//@ts-expect-error openmct
openmct.setAssetPath('openmct');
//@ts-expect-error openmct
openmct.install(openmct.plugins.LocalStorage());
//@ts-expect-error openmct
openmct.install(openmct.plugins.MyItems());
//@ts-expect-error openmct
openmct.install(openmct.plugins.Timeline());
//@ts-expect-error openmct
openmct.install(openmct.plugins.UTCTimeSystem());
//@ts-expect-error openmct
openmct.install(openmct.plugins.Clock({ enableClockIndicator: true }));
//@ts-expect-error openmct
openmct.install(openmct.plugins.Timer());
//@ts-expect-error openmct
openmct.install(openmct.plugins.Timelist());
//@ts-expect-error openmct
openmct.install(openmct.plugins.Hyperlink())
//@ts-expect-error openmct
openmct.install(openmct.plugins.Notebook())
//@ts-expect-error openmct
openmct.install(openmct.plugins.BarChart())
//@ts-expect-error openmct
openmct.install(openmct.plugins.ScatterPlot())
//@ts-expect-error openmct
openmct.install(openmct.plugins.SummaryWidget())
//@ts-expect-error openmct
openmct.install(openmct.plugins.LADTable());
openmct.time.clock('local', { start: -5 * 60 * 1000, end: 0 });
//@ts-expect-error openmct
openmct.time.timeSystem('utc');
//@ts-expect-error openmct
openmct.install(openmct.plugins.Espresso());
openmct.install(
//@ts-expect-error openmct
openmct.plugins.Conductor({
menuOptions: [
{
name: 'Fixed',
timeSystem: 'utc',
bounds: {
start: Date.now() - 30000000,
end: Date.now()
},
},
{
name: 'Realtime',
timeSystem: 'utc',
clock: 'local',
clockOffsets: {
start: -30000000,
end: 30000
},
}
]
})
);
if (process.env.BASE_URL) {
console.log("got a thing")
console.log(process.env.BASE_URL)
}
interface SkylabField {
name: string
type: string
units?: string
conversion?: number
bits?: {
name: string
}
}
interface SkylabPacket {
name: string
description?: string
id: number
endian?: string
is_extended: boolean
repeat?: number
offset?: number
data: [SkylabField]
}
interface SkylabBoard {
name: string
transmit: [string]
receive: [string]
}
interface SkylabSchema {
packets: [SkylabPacket]
boards: [SkylabBoard]
}
let schemaCached = null;
function getSchema(): Promise<SkylabSchema> {
if (schemaCached === null) {
return fetch(`${process.env.BASE_URL}/api/v1/schema`).then((resp) => resp.json()).then((res) => {
console.log("got schema, caching", res);
// cache the parsed schema object (not the pending promise) for later calls
schemaCached = res
return res
})
}
return Promise.resolve(schemaCached)
}
const objectProvider = {
get: function (id) {
return getSchema().then((schema) => {
if (id.key === "car") {
const comp = schema.packets.map((x) => {
return {
key: x.name,
namespace: "umnsvp"
}
})
return {
identifier: id,
name: "the solar car",
type: 'folder',
location: 'ROOT',
composition: comp
}
}
const pkt = schema.packets.find((x) => x.name === id.key)
if (pkt) {
// if the key matches one of the packet names,
// we know it's a packet.
// construct a list of fields for this packet.
const comp = pkt.data.map((field) => {
if (field.type === "bitfield") {
//
}
return {
// we have to do this since
// we can't get the packet name otherwise.
key: `${pkt.name}.${field.name}`,
namespace: "umnsvp"
}
})
return {
identifier: id,
name: pkt.name,
type: 'folder',
composition: comp
}
}
// at this point it's definitely a field aka umnsvp-datum
const [pktName, fieldName] = id.key.split('.')
return {
identifier: id,
name: fieldName,
type: 'umnsvp-datum',
conversion: schema.packets.find((x) => x.name === pktName).data.find((f) => f.name === fieldName).conversion,
telemetry: {
values: [
{
key: "value",
source: "val",
name: "Value",
"format": "float",
hints: {
range: 1
}
},
{
key: "utc",
source: "ts",
name: "Timestamp",
format: "utc",
hints: {
domain: 1
}
}
]
}
}
})
}
}
interface Datum {
ts: number
val: number
}
const TelemHistoryProvider = {
supportsRequest: function (dObj) {
return dObj.type === 'umnsvp-datum'
},
request: function (dObj, opt) {
const [pktName, fieldName] = dObj.identifier.key.split('.')
const url = `${process.env.BASE_URL}/api/v1/packets/${pktName}/${fieldName}?`
const params = new URLSearchParams({
start: new Date(opt.start).toISOString(),
end: new Date(opt.end).toISOString(),
})
console.log((opt.end - opt.start) / opt.size)
return fetch(url + params).then((resp) => resp.json()).then((result: [Datum]) => {
if (dObj.conversion && dObj.conversion != 0) {
// apply the conversion factor in place
result.forEach((dat) => {
dat.val = dat.val * dObj.conversion
})
}
return result
})
}
}
interface PacketData {
ts: number
name: string
data: object
}
function TelemRealtimeProvider() {
return function (openmct: openmct.OpenMCT) {
const simpleIndicator = openmct.indicators.simpleIndicator();
openmct.indicators.add(simpleIndicator);
simpleIndicator.text("0 Listeners")
const url = `${process.env.BASE_URL.replace(/^http/, 'ws')}/api/v1/packets/subscribe?`
// we put our websocket connection here.
let connection = new WebSocket(url)
// connections contains name: callback mapping
const callbacks = {}
const conversions: Map<string, number> = new Map()
// names contains a set of *packet names*
const names = new Set()
function handleMessage(event) {
const data: PacketData = JSON.parse(event.data)
for (const [key, value] of Object.entries(data.data)) { // for each of the fields in the data
const id = `${data.name}.${key}` // if we have a matching callback for that field.
if (id in callbacks) {
// we should construct a telem point and make a callback.
// compute if we need to scale the value.
callbacks[id]({
"ts": data.ts,
"val": value * conversions.get(id)
})
}
}
}
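// Illustrative example (values made up): a message
//   {"ts": 1709876543210, "name": "bms_measurement", "data": {"battery_voltage": 127.34}}
// invokes the callback registered under "bms_measurement.battery_voltage" with
// {ts, val}, after scaling val by the stored conversion factor.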
function updateWebsocket() {
const params = new URLSearchParams()
for (const name of names) {
params.append("name", name)
}
// close the stale socket before opening one with the updated name list
connection.close()
connection = new WebSocket(url + params)
connection.onmessage = handleMessage
simpleIndicator.text(`${names.size} Listeners`)
}
const provider = {
supportsSubscribe: function (dObj) {
return dObj.type === "umnsvp-datum"
},
subscribe: function (dObj, callback) {
// identifier is packetname.fieldname. we add the packet name to the set.
const key = dObj.identifier.key
const pktName = key.split('.')[0]
// add our callback to the dictionary,
// add the packet name to the set
callbacks[key] = callback
conversions.set(key, dObj.conversion || 1)
names.add(pktName)
// update the websocket URL with the new name.
updateWebsocket()
return function unsubscribe() {
console.log("unsubscribe called %s", JSON.stringify(dObj))
// remove this field's callback first, then drop the packet
// subscription if no other fields on that packet remain.
delete callbacks[key]
conversions.delete(key)
if (!Object.keys(callbacks).some((k) => k.startsWith(pktName))) {
names.delete(pktName)
updateWebsocket()
}
}
}
}
openmct.telemetry.addProvider(provider)
}
}
function GotelemPlugin() {
return function install(openmct) {
openmct.types.addType('umnsvp-datum', {
name: "UMN SVP Data Field",
description: "A data field of a packet from the car",
creatable: false,
cssClass: "icon-telemetry"
})
openmct.objects.addRoot({
namespace: "umnsvp",
key: 'car'
}, openmct.priority.HIGH)
openmct.objects.addProvider('umnsvp', objectProvider);
openmct.telemetry.addProvider(TelemHistoryProvider)
openmct.install(TelemRealtimeProvider())
}
}
openmct.install(GotelemPlugin())
//@ts-expect-error openmct
openmct.start()

10
web/src/index.html Normal file
View file

@ -0,0 +1,10 @@
<!DOCTYPE html>
<html>
<head>
<title>Open MCT Tutorials</title>
<script src="openmct/openmct.js"></script>
</head>
<body>
<div id="app"></div>
</body>
</html>

19
web/tsconfig.json Normal file
View file

@ -0,0 +1,19 @@
{
"compilerOptions": {
// "baseUrl": "./src",
"target": "es6",
"checkJs": true,
"allowJs": true,
// "moduleResolution": "NodeNext",
"allowSyntheticDefaultImports": true,
"paths": {
"openmct": ["./node_modules/openmct/dist/openmct.d.ts"]
},
"esModuleInterop": true,
},
"exclude": [
"./dist/**/*",
"webpack.*.js",
"eslint.config.js"
]
}

40
web/webpack.common.js Normal file
View file

@ -0,0 +1,40 @@
import path from 'path';
import {fileURLToPath} from 'url';
import HtmlWebpackPlugin from 'html-webpack-plugin';
import CopyPlugin from 'copy-webpack-plugin';
const config = {
entry: './src/app.ts',
module: {
rules: [
{
test: /\.tsx?$/,
use: 'ts-loader',
exclude: /node_modules/,
},
],
},
plugins: [
new HtmlWebpackPlugin({
template: 'src/index.html',
filename: 'index.html',
}),
new CopyPlugin({
patterns: [
{ from: "**/*", to: "openmct/", context: "node_modules/openmct/dist"},
]
})
],
resolve: {
extensions: ['.tsx', '.ts', '.js'],
},
externals: {
openmct: "openmct",
},
output: {
filename: 'main.js',
path: path.resolve(path.dirname(fileURLToPath(import.meta.url)), 'dist'),
},
};
export default config

24
web/webpack.dev.js Normal file
View file

@ -0,0 +1,24 @@
import {merge} from 'webpack-merge';
import common from "./webpack.common.js"
import webpack from 'webpack'
const config = merge(common, {
mode: "development",
devtool: 'inline-source-map',
plugins: [
new webpack.EnvironmentPlugin({
NODE_ENV: "development",
BASE_URL: "http://localhost:8080"
}),
],
devServer: {
static: "./dist",
headers: {
"Access-Control-Allow-Origin": "*",
'Access-Control-Allow-Headers': '*',
'Access-Control-Allow-Methods': '*',
},
},
})
export default config

16
web/webpack.prod.js Normal file
View file

@ -0,0 +1,16 @@
import { merge } from 'webpack-merge'
import common from './webpack.common.js'
import webpack from 'webpack'
const config = merge(common, {
mode: "production",
plugins: [
new webpack.EnvironmentPlugin({
NODE_ENV: "production",
BASE_URL: "",
})
],
devtool: 'source-map',
})
export default config

View file

@ -5,7 +5,6 @@ import (
"encoding/binary"
"errors"
"io"
)
// Frameable is an object that can be sent in an XBee Frame. An XBee Frame
@ -104,7 +103,6 @@ func xbeeFrameSplit(data []byte, atEOF bool) (advance int, token []byte, err err
}
// FIXME: add bounds checking! this can panic.
var frameLen = int(binary.BigEndian.Uint16(data[startIdx+1:startIdx+3])) + 4
// if the value of frameLen is > 0x100, we know that it's screwed up.
// this helps keep error duration lowered.

View file

@ -103,14 +103,13 @@ func Test_xbeeFrameSplit(t *testing.T) {
{
name: "start delimiter inside partial packet",
args: args{
data: advTest,
data: advTest,
atEOF: false,
},
wantAdvance: 2,
wantToken: nil,
wantErr: false,
wantToken: nil,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@ -129,8 +128,6 @@ func Test_xbeeFrameSplit(t *testing.T) {
}
}
func Test_parseFrame(t *testing.T) {
type args struct {
frame []byte

View file

@ -20,8 +20,9 @@ import (
"sync"
"time"
"log/slog"
"go.bug.st/serial"
"golang.org/x/exp/slog"
)
// TODO: implement net.Conn for Session/Conn. We are missing LocalAddr, RemoteAddr,
@ -79,7 +80,6 @@ func NewSession(dev io.ReadWriteCloser, baseLog *slog.Logger) (*Session, error)
go sess.rxHandler()
// now we should get the local address cached so LocalAddr is fast.
sh, err := sess.ATCommand([2]byte{'S', 'H'}, nil, false)
if err != nil {
@ -211,7 +211,6 @@ func (sess *Session) writeAddr(p []byte, dest uint64) (n int, err error) {
return 0, errors.New("timeout waiting for response")
}
// this is a tx status frame bytes, so lets parse it out.
if err != nil {
return
@ -250,7 +249,6 @@ func (sess *Session) ATCommand(cmd [2]byte, data []byte, queued bool) (payload [
return nil, fmt.Errorf("error writing xbee frame: %w", err)
}
var resp *ATCmdResponse
select {
case b := <-ch:
@ -259,7 +257,6 @@ func (sess *Session) ATCommand(cmd [2]byte, data []byte, queued bool) (payload [
return nil, errors.New("timeout waiting for response frame")
}
if err != nil {
return nil, err
}
@ -290,7 +287,6 @@ func (sess *Session) RemoteAddr() XBeeAddr {
return 0xFFFF
}
func (sess *Session) Dial(addr uint64) (conn *Conn, err error) {
if _, exist := sess.conns[addr]; exist {
return nil, errors.New("address already in use")

View file

@ -13,7 +13,7 @@ import (
"reflect"
"testing"
"golang.org/x/exp/slog"
"log/slog"
)
func TestXBeeHardware(t *testing.T) {
@ -69,14 +69,12 @@ func TestXBeeHardware(t *testing.T) {
}
})
t.Run("check source address", func(t *testing.T) {
a := sess.LocalAddr()
t.Logf("local device address is %v", a)
})
})
t.Run("Check device name", func(t *testing.T) {
a, err := sess.ATCommand([2]byte{'N', 'I'}, nil, false)
@ -88,7 +86,6 @@ func TestXBeeHardware(t *testing.T) {
name := string(a)
t.Logf("Device Name: %s", name)
})
}
@ -107,7 +104,7 @@ func TestParseDeviceString(t *testing.T) {
args: args{
dev: "blah",
},
want: nil,
want: nil,
wantErr: true,
},
// TODO: moar tests!