diff --git a/board/common/rootfs/etc/default/webui b/board/common/rootfs/etc/default/webui new file mode 100644 index 000000000..765ba175b --- /dev/null +++ b/board/common/rootfs/etc/default/webui @@ -0,0 +1,2 @@ +RESTCONF_URL=https://127.0.0.1/restconf +INSECURE_TLS=1 diff --git a/board/common/rootfs/etc/nginx/app/restconf.conf b/board/common/rootfs/etc/nginx/app/restconf.conf new file mode 120000 index 000000000..01182dc2a --- /dev/null +++ b/board/common/rootfs/etc/nginx/app/restconf.conf @@ -0,0 +1 @@ +../restconf.app \ No newline at end of file diff --git a/board/common/rootfs/etc/nginx/available/default.conf b/board/common/rootfs/etc/nginx/available/default.conf index 76a2552e8..b3835fdc8 100644 --- a/board/common/rootfs/etc/nginx/available/default.conf +++ b/board/common/rootfs/etc/nginx/available/default.conf @@ -20,10 +20,5 @@ server { root html; } - location / { - root html; - index index.html index.htm; - } - include /etc/nginx/app/*.conf; } diff --git a/board/common/rootfs/etc/nginx/restconf-access.conf b/board/common/rootfs/etc/nginx/restconf-access.conf new file mode 100644 index 000000000..ec361e7d9 --- /dev/null +++ b/board/common/rootfs/etc/nginx/restconf-access.conf @@ -0,0 +1,3 @@ +allow 127.0.0.1; +allow ::1; +deny all; diff --git a/board/common/rootfs/etc/nginx/restconf.app b/board/common/rootfs/etc/nginx/restconf.app index 7c8caddae..1d2617412 100644 --- a/board/common/rootfs/etc/nginx/restconf.app +++ b/board/common/rootfs/etc/nginx/restconf.app @@ -1,5 +1,6 @@ # /telemetry/optics is for streaming (not used atm) location ~ ^/(restconf|yang|.well-known)/ { + include /etc/nginx/restconf-access.conf; grpc_pass grpc://[::1]:10080; grpc_set_header Host $host; grpc_set_header X-Real-IP $remote_addr; diff --git a/package/Config.in b/package/Config.in index 110d247b2..6998f5804 100644 --- a/package/Config.in +++ b/package/Config.in @@ -42,6 +42,7 @@ source "$BR2_EXTERNAL_INFIX_PATH/package/tetris/Config.in" source 
"$BR2_EXTERNAL_INFIX_PATH/package/libyang-cpp/Config.in" source "$BR2_EXTERNAL_INFIX_PATH/package/sysrepo-cpp/Config.in" source "$BR2_EXTERNAL_INFIX_PATH/package/rousette/Config.in" +source "$BR2_EXTERNAL_INFIX_PATH/package/webui/Config.in" source "$BR2_EXTERNAL_INFIX_PATH/package/nghttp2-asio/Config.in" source "$BR2_EXTERNAL_INFIX_PATH/package/date-cpp/Config.in" source "$BR2_EXTERNAL_INFIX_PATH/package/rauc-installation-status/Config.in" diff --git a/package/webui/Config.in b/package/webui/Config.in new file mode 100644 index 000000000..55cfabc73 --- /dev/null +++ b/package/webui/Config.in @@ -0,0 +1,8 @@ +config BR2_PACKAGE_WEBUI + bool "webui" + depends on BR2_PACKAGE_HOST_GO_TARGET_ARCH_SUPPORTS + depends on BR2_PACKAGE_ROUSETTE + help + Web management interface for Infix, a Go+HTMX application + that provides browser-based configuration and monitoring + via RESTCONF. diff --git a/package/webui/webui.conf b/package/webui/webui.conf new file mode 100644 index 000000000..bcedda91d --- /dev/null +++ b/package/webui/webui.conf @@ -0,0 +1,9 @@ +location / { + proxy_pass http://127.0.0.1:8080; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_redirect off; +} diff --git a/package/webui/webui.mk b/package/webui/webui.mk new file mode 100644 index 000000000..d04577ce9 --- /dev/null +++ b/package/webui/webui.mk @@ -0,0 +1,23 @@ +################################################################################ +# +# webui +# +################################################################################ + +WEBUI_VERSION = 1.0 +WEBUI_SITE_METHOD = local +WEBUI_SITE = $(BR2_EXTERNAL_INFIX_PATH)/src/webui +WEBUI_GOMOD = github.com/kernelkit/webui +WEBUI_LICENSE = MIT +WEBUI_LICENSE_FILES = LICENSE +WEBUI_REDISTRIBUTE = NO + +define WEBUI_INSTALL_EXTRA + $(INSTALL) -D -m 0644 $(WEBUI_PKGDIR)/webui.svc \ + 
$(FINIT_D)/available/webui.conf + $(INSTALL) -D -m 0644 $(WEBUI_PKGDIR)/webui.conf \ + $(TARGET_DIR)/etc/nginx/app/webui.conf +endef +WEBUI_POST_INSTALL_TARGET_HOOKS += WEBUI_INSTALL_EXTRA + +$(eval $(golang-package)) diff --git a/package/webui/webui.svc b/package/webui/webui.svc new file mode 100644 index 000000000..731bbbba3 --- /dev/null +++ b/package/webui/webui.svc @@ -0,0 +1,3 @@ +service name:webui log:prio:daemon.info,tag:webui \ + [2345] env:-/etc/default/webui webui -listen 127.0.0.1:8080 \ + -- Web management interface diff --git a/src/confd/src/services.c b/src/confd/src/services.c index 0981f80df..8ebb99fe2 100644 --- a/src/confd/src/services.c +++ b/src/confd/src/services.c @@ -557,7 +557,20 @@ static int restconf_change(sr_session_ctx_t *session, struct lyd_node *config, s ena = lydx_is_enabled(srv, "enabled") && lydx_is_enabled(lydx_get_xpathf(config, WEB_XPATH), "enabled"); - svc_enable(ena, restconf, "restconf"); + + /* + * restconf.app is permanently installed in nginx/app/ so rousette is + * always reachable from loopback (required by the WebUI). When external + * RESTCONF access is disabled we tighten the location to loopback-only + * by writing the appropriate allow/deny rules into the include file. + */ + FILE *fp = fopen("/etc/nginx/restconf-access.conf", "w"); + if (fp) { + if (!ena) + fputs("allow 127.0.0.1;\nallow ::1;\ndeny all;\n", fp); + fclose(fp); + } + mdns_records(ena ? 
MDNS_ADD : MDNS_DELETE, restconf); finit_reload("nginx"); return put(cfg); @@ -703,13 +716,16 @@ static int web_change(sr_session_ctx_t *session, struct lyd_node *config, struct /* Web master on/off: propagate to nginx and all sub-services */ if (lydx_get_xpathf(diff, WEB_XPATH "/enabled")) { + int rc_ena = ena && lydx_is_enabled(lydx_get_xpathf(config, WEB_RESTCONF_XPATH), "enabled"); int nb_ena = ena && lydx_is_enabled(lydx_get_xpathf(config, WEB_NETBROWSE_XPATH), "enabled"); svc_enable(ena && lydx_is_enabled(lydx_get_xpathf(config, WEB_CONSOLE_XPATH), "enabled"), ttyd, "ttyd"); svc_enable(nb_ena, netbrowse, "netbrowse"); - svc_enable(ena && lydx_is_enabled(lydx_get_xpathf(config, WEB_RESTCONF_XPATH), "enabled"), - restconf, "restconf"); + /* Rousette follows web/enabled; external access is gated separately via restconf/enabled */ + ena ? finit_enable("restconf") : finit_disable("restconf"); + ena ? finit_enable("webui") : finit_disable("webui"); + mdns_records(rc_ena ? MDNS_ADD : MDNS_DELETE, restconf); svc_enable(ena, web, "nginx"); mdns_alias_conf(nb_ena); finit_reload("mdns-alias"); diff --git a/src/statd/python/yanger/ietf_system.py b/src/statd/python/yanger/ietf_system.py index 60c7c6c01..0ef7a7b7d 100644 --- a/src/statd/python/yanger/ietf_system.py +++ b/src/statd/python/yanger/ietf_system.py @@ -257,9 +257,16 @@ def add_software(out): insert(out, "infix-system:software", software) def add_hostname(out): - hostname = HOST.run(tuple(["hostname"])) + hostname = HOST.run(tuple(["hostname"])) out["hostname"] = hostname.strip() +def add_contact_location(out): + for name in ("contact", "location"): + data = HOST.run_json(("copy", "running", "-x", f"/system/{name}"), {}) + val = data.get("ietf-system:system", {}).get(name) + if val: + out[name] = val + def add_timezone(out): path = HOST.run(tuple("realpath /etc/localtime".split()), "") timezone = None @@ -448,6 +455,7 @@ def operational(): out_state = out["ietf-system:system-state"] out_system = 
out["ietf-system:system"] add_hostname(out_system) + add_contact_location(out_system) add_users(out_system) add_timezone(out_system) add_software(out_state) diff --git a/src/webui/.gitignore b/src/webui/.gitignore new file mode 100644 index 000000000..859dc4235 --- /dev/null +++ b/src/webui/.gitignore @@ -0,0 +1 @@ +webui diff --git a/src/webui/LICENSE b/src/webui/LICENSE new file mode 100644 index 000000000..364389fb6 --- /dev/null +++ b/src/webui/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2026 The KernelKit Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/webui/Makefile b/src/webui/Makefile new file mode 100644 index 000000000..664896455 --- /dev/null +++ b/src/webui/Makefile @@ -0,0 +1,15 @@ +BINARY = webui +GOARCH ?= $(shell go env GOARCH) +GOOS ?= $(shell go env GOOS) + +build: + CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) \ + go build -ldflags="-s -w" -o $(BINARY) . + +dev: build + go run . 
--listen :8080 --session-key /tmp/webui-session.key --insecure-tls $(ARGS) + +clean: + rm -f $(BINARY) + +.PHONY: build dev clean diff --git a/src/webui/README.md b/src/webui/README.md new file mode 100644 index 000000000..8e3e76651 --- /dev/null +++ b/src/webui/README.md @@ -0,0 +1,120 @@ +# Infix WebUI + +A lightweight web management interface for [Infix][1] network devices, +built with Go and [htmx][2]. + +The WebUI communicates with the device over [RESTCONF][3] (RFC 8040), +presenting the same operational data available through the Infix CLI in +a browser-friendly format. + +## Features + +- **Dashboard** -- system info, hardware, sensors, and interface summary + with bridge member grouping +- **Interfaces** -- list with status, addresses, and per-type detail; + click through to a detail page with live-updating counters, WiFi + station table, scan results, WireGuard peers, and ethernet frame + statistics +- **Firewall** -- zone-to-zone policy matrix +- **Keystore** -- symmetric and asymmetric key display +- **Firmware** -- slot overview, install from URL with live progress +- **Reboot** -- two-phase status polling (wait down, wait up) +- **Config download** -- startup datastore as JSON + + +## Building + +Requires Go 1.22 or later. + +```sh +make build +``` + +Produces a statically linked `webui` binary with all templates, +CSS, and JS embedded. 
+ +Cross-compile for the target: + +```sh +GOOS=linux GOARCH=arm64 make build +``` + + +## Running + +```sh +./webui --restconf https://192.168.0.1/restconf --listen :8080 +``` + +| **Flag** | **Default** | **Description** | +|-------------------|-----------------------------------|-------------------------------------------| +| `--listen` | `:8080` | Address to listen on | +| `--restconf` | `http://localhost:8080/restconf` | RESTCONF base URL of the device | +| `--session-key` | `/var/lib/misc/webui-session.key` | Path to persistent session encryption key | +| `--insecure-tls` | `false` | Disable TLS certificate verification | + +The RESTCONF URL can also be set via the `RESTCONF_URL` environment +variable. + + +## Development + +Point `RESTCONF_URL` at a running Infix device and start the dev +server: + +```sh +make dev ARGS="--restconf https://192.168.0.1/restconf" +``` + +This runs `go run .` on port 8080 with `--insecure-tls` already set. + + +## Architecture + +``` +Browser ──htmx──▶ Go server ──RESTCONF──▶ Infix device (rousette/sysrepo) +``` + +- **Single binary** -- templates, CSS, JS, and images are embedded via + `go:embed` +- **Server-side rendering** -- Go `html/template` with per-page parsing + to avoid `{{define "content"}}` collisions +- **htmx SPA navigation** -- sidebar links use `hx-get` / `hx-target` + for partial page updates with `hx-push-url` for browser history +- **Stateless sessions** -- AES-256-GCM encrypted cookies carry + credentials (needed for every RESTCONF call); no server-side session + store +- **Live polling** -- counters update every 5s, firmware progress every + 3s, all via htmx triggers + +``` +main.go Entry point, flags, embedded FS +internal/ + auth/ Login, logout, session (AES-GCM cookies) + restconf/ HTTP client (Get, GetRaw, Post, PostJSON) + handlers/ Page handlers + dashboard.go Dashboard, hardware, sensors + interfaces.go Interface list, detail, counters + firewall.go Zone matrix + keystore.go Key display + system.go 
Firmware, reboot, config download + server/ + server.go Route registration, template wiring, middleware +templates/ + layouts/ base.html (shell), sidebar.html + pages/ Per-page templates (one per route) + fragments/ htmx partial fragments +static/ + css/style.css All styles + js/htmx.min.js htmx library + img/ Logo, favicon +``` + + +## License + +See [LICENSE](LICENSE). + +[1]: https://github.com/kernelkit/infix +[2]: https://htmx.org +[3]: https://datatracker.ietf.org/doc/html/rfc8040 diff --git a/src/webui/TODO.md b/src/webui/TODO.md new file mode 100644 index 000000000..269dbda22 --- /dev/null +++ b/src/webui/TODO.md @@ -0,0 +1,89 @@ +# Issues and Features TODO + +## Reminder: Working Locally + +``` +~/src/infix/src/webui(web)$ make clean; sudo make dev ARGS="--restconf https://192.168.0.1/restconf --insecure-tls" +``` + +### Graphics & Design + +Use smaller logo, without the three pillars, on top-bar and raise top-bar. +Similar to the WebUI v1 design. The left side-bar should not cover the +top-bar. + +Review all the icons. DHCP, Keystore, Interfaces, Routing, and Advanced look +weird. Advanced + Routing look like a swastika ... Tobias says the macOS +network/interface icon could be a good fit. + +### Refactor Save/Save all/Apply/Apply & Save/Abort + +It's a bit of a mess currently on auto-generated pages that collapse any +sub-containers or lists, they show multiple Save and Save all buttons. + +That in combination with the Apply, Apply & Save, and Abort buttons at the +bottom of the screen. One user told me; "What do they do, what if I do +something wrong?" + +So we had a discussion in the team and we all agreed we want to mimic the +semantics of the CLI. A user building up new candidate config should be able +to see the diff between running and candidate before applying (CLI `leave`) or +aborting (CLI `abort`). 
When applied, regardless of context, the WebUI should +display a permanent "status" of sorts, reminding the user they've got unsaved +activated changes. From that status the user should be able to, again, view +the diff (this time running vs startup) and/or save to startup-config. This +status should also show if a CLI user makes a change in the background to the +running-config. + +We have very different opinions on how this should be implemented, so we are +very open to design ideas and discussions around this topic before we go ahead +and make a change. + +### Ideas for auto-generated pages + +Some pages, like IPv4 addresses could be shown similar to how the curated +Users configuration page looks. I.e., when a container has a list, the +complexity of the list items decide if it deserves a separate new page or can +be shown on the current page. Q: how should this "complexity score" be +calculated? + +## Important + +### Fork goyang in kernelkit org + +The webui currently carries a local copy of goyang in `internal/goyang/` with +three patches that fix genuine upstream bugs (unresolved since 2015–2024): + +1. `Uses.Augment *Augment` → `[]*Augment` — multiple `augment` inside `uses` + (upstream Issue #75, PR #272, open since Aug 2024) +2. `Value` struct: add `Reference *Value` field — `when { reference "..."; }` + (not reported upstream) +3. `Input`/`Output` structs: add `Must []*Must` — `must` in rpc input/output + (upstream PR #270, open since Aug 2024) + +The right long-term fix is to fork openconfig/goyang into the kernelkit org, +create a `v1.6.3-kkit` branch, apply the patches there, `git format-patch +v1.6.3` and add them to the Infix `patches/` directory. Then point go.mod at +the kernelkit fork instead of the local `internal/goyang` copy. 
+ +## YANG tree pruning (Phase 5) + +- Sysrepo-internal modules: `sysrepo`, `sysrepo-*`, `sysrepo-factory-default` +- NETCONF/RESTCONF protocol modules: `ietf-netconf*`, `notifications`, + `nc-notifications`, `ietf-restconf*`, `ietf-yang-patch`, etc. +- YANG library/type utility modules: `ietf-yang-library`, `ietf-yang-types`, + `ietf-yang-metadata`, `yang`, `default`, etc. +- Nodes with an active `deviate not-supported` deviation + +Approach: maintain a module deny-list (or better, an allow-list seeded from the +modules that actually appear in the running datastore), combined with an +Entry.Config check and a deviation walk in `topLevelNodes`/`dirToNodes`. + +## Later, investigate statd/copy behavior + +- Why does /ietf-routing:routing/interfaces XPath not return anything, but /ietf-routing:routing does? +- Command `copy operational` fails hard (segfault?) for invalid XPath: + + admin@rpi-42-a6-03:~$ copy operational -x /system-data + Error: (null) (5) + Error: failed retrieving operational-state data diff --git a/src/webui/go.mod b/src/webui/go.mod new file mode 100644 index 000000000..8f181dbf8 --- /dev/null +++ b/src/webui/go.mod @@ -0,0 +1,17 @@ +module github.com/kernelkit/webui + +go 1.22.0 + +toolchain go1.22.2 + +require ( + github.com/google/go-cmp v0.7.0 // indirect + github.com/openconfig/goyang v1.6.3 // indirect + github.com/pborman/getopt v1.1.0 // indirect +) + +// Local fork of goyang with YANG 1.1 fixes: +// - Uses.Augment: *Augment → []*Augment (multiple augments per uses) +// - Value: add Reference field (when { reference "..."; }) +// - Input/Output: add Must field (must statements in rpc input/output) +replace github.com/openconfig/goyang => ./internal/goyang diff --git a/src/webui/go.sum b/src/webui/go.sum new file mode 100644 index 000000000..e836eaa6d --- /dev/null +++ b/src/webui/go.sum @@ -0,0 +1,6 @@ +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod 
h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/openconfig/goyang v1.6.3 h1:9nWXBwd6b4+nZr8ni7O4zUXVhrVMXCLFz8os5YWFuo4= +github.com/openconfig/goyang v1.6.3/go.mod h1:5WolITjek1NF8yrNERyVZ7jqjOClJTpO8p/+OwmETM4= +github.com/pborman/getopt v1.1.0 h1:eJ3aFZroQqq0bWmraivjQNt6Dmm5M0h2JcDW38/Azb0= +github.com/pborman/getopt v1.1.0/go.mod h1:FxXoW1Re00sQG/+KIkuSqRL/LwQgSkv7uyac+STFsbk= diff --git a/src/webui/internal/auth/login.go b/src/webui/internal/auth/login.go new file mode 100644 index 000000000..38564655a --- /dev/null +++ b/src/webui/internal/auth/login.go @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: MIT + +package auth + +import ( + "context" + "errors" + "html/template" + "log" + "net/http" + + "github.com/kernelkit/webui/internal/handlers" + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +const cookieName = "session" + +// LoginHandler serves the login page and processes login/logout requests. +type LoginHandler struct { + Store *SessionStore + RC *restconf.Client + Template *template.Template + // OnLogin is called after every successful login with a context that + // carries the authenticated user's credentials. It is invoked in the + // foreground, so implementations should start their own goroutines for + // slow work. May be nil. + OnLogin func(ctx context.Context) +} + +type loginData struct { + Error string + CsrfToken string +} + +// ShowLogin renders the login page (GET /login). +func (h *LoginHandler) ShowLogin(w http.ResponseWriter, r *http.Request) { + h.renderLogin(w, r, "") +} + +// DoLogin validates credentials against RESTCONF and creates a session (POST /login). 
+func (h *LoginHandler) DoLogin(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + h.renderLogin(w, r, "Invalid request.") + return + } + + username := r.FormValue("username") + password := r.FormValue("password") + + if username == "" || password == "" { + h.renderLogin(w, r, "Username and password are required.") + return + } + + // Verify credentials by making a RESTCONF call with Basic Auth. + err := h.RC.CheckAuth(username, password) + if err != nil { + log.Printf("login failed for %q: %v", username, err) + var authErr *restconf.AuthError + if errors.As(err, &authErr) { + h.renderLogin(w, r, "Invalid username or password.") + } else { + h.renderLogin(w, r, "Unable to reach the device. Please try again later.") + } + return + } + + // Build an authenticated context for post-login work. + ctx := restconf.ContextWithCredentials(r.Context(), restconf.Credentials{ + Username: username, + Password: password, + }) + + // Probe optional features once at login and bake into the session. + caps := handlers.DetectCapabilities(ctx, h.RC) + + // Trigger any post-login hooks (e.g. schema sync) with full credentials. + if h.OnLogin != nil { + h.OnLogin(ctx) + } + + token, csrfToken, err := h.Store.Create(username, password, caps.Features()) + if err != nil { + log.Printf("session create error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + return + } + + http.SetCookie(w, &http.Cookie{ + Name: cookieName, + Value: token, + Path: "/", + HttpOnly: true, + Secure: security.IsSecureRequest(r), + SameSite: http.SameSiteLaxMode, + }) + security.EnsureToken(w, r, csrfToken) + + fullRedirect(w, r, "/") +} + +// DoLogout destroys the session and redirects to the login page (POST /logout). 
+func (h *LoginHandler) DoLogout(w http.ResponseWriter, r *http.Request) { + if c, err := r.Cookie(cookieName); err == nil { + h.Store.Delete(c.Value) + } + + http.SetCookie(w, &http.Cookie{ + Name: cookieName, + Value: "", + Path: "/", + MaxAge: -1, + HttpOnly: true, + Secure: security.IsSecureRequest(r), + SameSite: http.SameSiteLaxMode, + }) + security.ClearToken(w, r) + + fullRedirect(w, r, "/login") +} + +// fullRedirect forces a full page navigation. When the request comes +// from htmx (boosted form) we use HX-Redirect so the browser does a +// real page load instead of an AJAX swap — this is essential for the +// login/logout transition where the page layout changes completely. +func fullRedirect(w http.ResponseWriter, r *http.Request, url string) { + if r.Header.Get("HX-Request") == "true" { + w.Header().Set("HX-Redirect", url) + return + } + http.Redirect(w, r, url, http.StatusSeeOther) +} + +func (h *LoginHandler) renderLogin(w http.ResponseWriter, r *http.Request, errMsg string) { + data := loginData{ + Error: errMsg, + CsrfToken: security.TokenFromContext(r.Context()), + } + if err := h.Template.ExecuteTemplate(w, "login.html", data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} diff --git a/src/webui/internal/auth/session.go b/src/webui/internal/auth/session.go new file mode 100644 index 000000000..f8d87aabe --- /dev/null +++ b/src/webui/internal/auth/session.go @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: MIT + +package auth diff --git a/src/webui/internal/auth/store.go b/src/webui/internal/auth/store.go new file mode 100644 index 000000000..25051ce92 --- /dev/null +++ b/src/webui/internal/auth/store.go @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: MIT + +package auth + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +const sessionTimeout = 1 * time.Hour + +type 
tokenPayload struct { + Username string `json:"u"` + Password string `json:"p"` + CsrfToken string `json:"c"` + CreatedAt int64 `json:"t"` + Features map[string]bool `json:"f,omitempty"` +} + +// SessionStore issues and validates stateless encrypted tokens. +// The cookie value is a base64url-encoded AES-256-GCM sealed blob +// containing the user's credentials and a creation timestamp. +// No server-side session map is needed — only the AES key must +// persist across restarts. +type SessionStore struct { + aead cipher.AEAD +} + +// NewSessionStore creates a store. If keyFile is non-empty, the AES +// key is read from that path (or generated and written there on first +// run). If keyFile is empty, a random ephemeral key is used. +func NewSessionStore(keyFile string) (*SessionStore, error) { + key, err := loadOrCreateKey(keyFile) + if err != nil { + return nil, err + } + + block, err := aes.NewCipher(key[:]) + if err != nil { + return nil, err + } + aead, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + return &SessionStore{aead: aead}, nil +} + +// Create returns an encrypted token carrying the user's credentials and capabilities. +func (s *SessionStore) Create(username, password string, features map[string]bool) (string, string, error) { + csrf := randomToken() + token, err := s.CreateWithCSRF(username, password, csrf, features) + return token, csrf, err +} + +// CreateWithCSRF returns an encrypted token carrying the user's credentials, +// capabilities, and a bound CSRF token. 
+func (s *SessionStore) CreateWithCSRF(username, password, csrf string, features map[string]bool) (string, error) { + payload, err := json.Marshal(tokenPayload{ + Username: username, + Password: password, + CsrfToken: csrf, + CreatedAt: time.Now().Unix(), + Features: features, + }) + if err != nil { + return "", err + } + + nonce := make([]byte, s.aead.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", err + } + + sealed := s.aead.Seal(nonce, nonce, payload, nil) + return base64.RawURLEncoding.EncodeToString(sealed), nil +} + +// Lookup decrypts a token and returns the credentials and capabilities if valid. +func (s *SessionStore) Lookup(token string) (username, password, csrf string, features map[string]bool, ok bool) { + raw, err := base64.RawURLEncoding.DecodeString(token) + if err != nil { + return "", "", "", nil, false + } + + ns := s.aead.NonceSize() + if len(raw) < ns { + return "", "", "", nil, false + } + + plaintext, err := s.aead.Open(nil, raw[:ns], raw[ns:], nil) + if err != nil { + return "", "", "", nil, false + } + + var p tokenPayload + if err := json.Unmarshal(plaintext, &p); err != nil { + return "", "", "", nil, false + } + + if time.Since(time.Unix(p.CreatedAt, 0)) > sessionTimeout { + return "", "", "", nil, false + } + + return p.Username, p.Password, p.CsrfToken, p.Features, true +} + +// Delete is a no-op for stateless tokens (the cookie is cleared by +// the caller), but kept to satisfy the existing logout flow. +func (s *SessionStore) Delete(token string) {} + +// loadOrCreateKey returns a 32-byte AES key. When path is non-empty +// the key is persisted so sessions survive restarts. 
+func loadOrCreateKey(path string) ([32]byte, error) { + var key [32]byte + + if path != "" { + data, err := os.ReadFile(path) + if err == nil && len(data) == 32 { + copy(key[:], data) + return key, nil + } + } + + if _, err := io.ReadFull(rand.Reader, key[:]); err != nil { + return key, fmt.Errorf("generate session key: %w", err) + } + + if path != "" { + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + return key, fmt.Errorf("create key directory: %w", err) + } + if err := os.WriteFile(path, key[:], 0600); err != nil { + return key, fmt.Errorf("write session key: %w", err) + } + } + + return key, nil +} + +func randomToken() string { + var b [32]byte + if _, err := io.ReadFull(rand.Reader, b[:]); err != nil { + return "" + } + return base64.RawURLEncoding.EncodeToString(b[:]) +} diff --git a/src/webui/internal/goyang/.github/dependabot.yml b/src/webui/internal/goyang/.github/dependabot.yml new file mode 100644 index 000000000..a2a66d097 --- /dev/null +++ b/src/webui/internal/goyang/.github/dependabot.yml @@ -0,0 +1,15 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "gomod" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" + - package-ecosystem: "github-actions" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/src/webui/internal/goyang/.github/linters/.golangci.yml b/src/webui/internal/goyang/.github/linters/.golangci.yml new file mode 100644 index 000000000..dca2af2e7 --- /dev/null +++ b/src/webui/internal/goyang/.github/linters/.golangci.yml @@ -0,0 +1,53 @@ +--- +######################### +######################### +## Golang Linter rules ## +######################### +######################### + +# configure golangci-lint +# see https://github.com/golangci/golangci-lint/blob/master/.golangci.example.yml +run: + timeout: 10m +issues: + exclude-rules: + - path: _test\.go + linters: + - dupl + - gosec + - goconst + new: true +linters: + enable: + - gosec + - unconvert + - goconst + - goimports + - gofmt + - gocritic + - govet + - revive + - staticcheck + - unconvert + - unparam + - unused + - wastedassign + - whitespace +linters-settings: + errcheck: + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. 
+ check-blank: true + govet: + # report about shadowed variables + check-shadowing: false + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + gocritic: + disabled-checks: + - singleCaseSwitch + - appendAssign + revive: + ignore-generated-header: true + severity: warning diff --git a/src/webui/internal/goyang/.github/linters/.yaml-lint.yml b/src/webui/internal/goyang/.github/linters/.yaml-lint.yml new file mode 100644 index 000000000..e9ec8bef4 --- /dev/null +++ b/src/webui/internal/goyang/.github/linters/.yaml-lint.yml @@ -0,0 +1,59 @@ +--- +########################################### +# These are the rules used for # +# linting all the yaml files in the stack # +# NOTE: # +# You can disable line with: # +# # yamllint disable-line # +########################################### +rules: + braces: + level: warning + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: 1 + max-spaces-inside-empty: 5 + brackets: + level: warning + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: 1 + max-spaces-inside-empty: 5 + colons: + level: warning + max-spaces-before: 0 + max-spaces-after: 1 + commas: + level: warning + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + comments: disable + comments-indentation: disable + document-end: disable + document-start: + level: warning + present: true + empty-lines: + level: warning + max: 2 + max-start: 0 + max-end: 0 + hyphens: + level: warning + max-spaces-after: 1 + indentation: + level: warning + spaces: consistent + indent-sequences: true + check-multi-line-strings: false + key-duplicates: enable + line-length: + level: warning + max: 120 + allow-non-breakable-words: true + allow-non-breakable-inline-mappings: true + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable diff --git a/src/webui/internal/goyang/.github/workflows/go.yml b/src/webui/internal/goyang/.github/workflows/go.yml new 
file mode 100644 index 000000000..241920888 --- /dev/null +++ b/src/webui/internal/goyang/.github/workflows/go.yml @@ -0,0 +1,15 @@ +name: Go + +on: + push: + branches: [ master ] + pull_request: + schedule: + - cron: "0 0 * * *" + +jobs: + go: + uses: openconfig/common-ci/.github/workflows/go.yml@125b6b58286d116b216e45c33cb859f547965d61 + + linter: + uses: openconfig/common-ci/.github/workflows/linter.yml@125b6b58286d116b216e45c33cb859f547965d61 diff --git a/src/webui/internal/goyang/.gitignore b/src/webui/internal/goyang/.gitignore new file mode 100644 index 000000000..6e92f57d4 --- /dev/null +++ b/src/webui/internal/goyang/.gitignore @@ -0,0 +1 @@ +tags diff --git a/src/webui/internal/goyang/AUTHORS b/src/webui/internal/goyang/AUTHORS new file mode 100644 index 000000000..121ba4efe --- /dev/null +++ b/src/webui/internal/goyang/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of goyang authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Google Inc. \ No newline at end of file diff --git a/src/webui/internal/goyang/CONTRIBUTING b/src/webui/internal/goyang/CONTRIBUTING new file mode 100644 index 000000000..574d217e7 --- /dev/null +++ b/src/webui/internal/goyang/CONTRIBUTING @@ -0,0 +1,25 @@ +Want to contribute? Great! First, read this page (including the small print at the end). + +### Before you contribute +Before we can use your code, you must sign the +[Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1) +(CLA), which you can do online. The CLA is necessary mainly because you own the +copyright to your changes, even after your contribution becomes part of our +codebase, so we need your permission to use and distribute your code. 
We also +need to be sure of various other things—for instance that you'll tell us if you +know that your code infringes on other people's patents. You don't have to sign +the CLA until after you've submitted your code for review and a member has +approved it, but you must do it before we can put your code into our codebase. +Before you start working on a larger contribution, you should get in touch with +us first through the issue tracker with your idea so that we can help out and +possibly guide you. Coordinating up front makes it much easier to avoid +frustration later on. + +### Code reviews +All submissions, including submissions by project members, require review. We +use Github pull requests for this purpose. + +### The small print +Contributions made by corporations are covered by a different agreement than +the one above, the +[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate). \ No newline at end of file diff --git a/src/webui/internal/goyang/CONTRIBUTORS b/src/webui/internal/goyang/CONTRIBUTORS new file mode 100644 index 000000000..b2ac0e81d --- /dev/null +++ b/src/webui/internal/goyang/CONTRIBUTORS @@ -0,0 +1,15 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +Paul Borman +Andrew Fort +Rob Shakir +Sean Condon diff --git a/src/webui/internal/goyang/Copyright b/src/webui/internal/goyang/Copyright new file mode 100644 index 000000000..663fa3a0b --- /dev/null +++ b/src/webui/internal/goyang/Copyright @@ -0,0 +1,14 @@ +// Copyright 2015 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + diff --git a/src/webui/internal/goyang/LICENSE b/src/webui/internal/goyang/LICENSE new file mode 100644 index 000000000..8f71f43fe --- /dev/null +++ b/src/webui/internal/goyang/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/src/webui/internal/goyang/README.md b/src/webui/internal/goyang/README.md new file mode 100644 index 000000000..df07ad8f0 --- /dev/null +++ b/src/webui/internal/goyang/README.md @@ -0,0 +1,54 @@ +![Go](https://github.com/openconfig/goyang/workflows/Go/badge.svg?branch=master) +[![Coverage Status](https://coveralls.io/repos/github/openconfig/goyang/badge.svg?branch=master)](https://coveralls.io/github/openconfig/goyang?branch=master) + +Current support for `goyang` is for the [latest 3 Go releases](https://golang.org/project/#release). + +# goyang +YANG parser and compiler for Go programs. + +The yang package (pkg/yang) is used to convert a YANG schema into either an +in memory abstract syntax trees (ast) or more fully resolved, in memory, "Entry" +trees. An Entry tree consists only of Entry structures and has had +augmentation, imports, and includes all applied. + +goyang is a sample program that uses the yang (pkg/yang) package. + +goyang uses the yang package to create an in-memory tree representation of +schemas defined in YANG and then dumps out the contents in several forms. +The forms include: + +* tree - a simple tree representation +* types - list understood types extracted from the schema + +The yang package, and the goyang program, are not complete and are a work in +progress. 
+ +For more complex output types, such as Go structs, and protobuf messages +please use the [openconfig/ygot](https://github.com/openconfig/ygot) package, +which uses this package as its backend. + +### Getting started + +To build goyang, ensure you have go language tools installed +(available at [golang.org](https://golang.org/dl)) and that the `GOPATH` +environment variable is set to your Go workspace. + +1. `go get github.com/openconfig/goyang` + * This will download goyang code and dependencies into the src +subdirectory in your workspace. + +2. `cd /src/github.com/openconfig/goyang` + +3. `go build` + + * This will build the goyang binary and place it in the bin +subdirectory in your workspace. + +### Contributing to goyang + +goyang is still a work-in-progress and we welcome contributions. Please see +the `CONTRIBUTING` file for information about how to contribute to the codebase. + +### Disclaimer + +This is not an official Google product. diff --git a/src/webui/internal/goyang/go.mod b/src/webui/internal/goyang/go.mod new file mode 100644 index 000000000..0e7fc6ce5 --- /dev/null +++ b/src/webui/internal/goyang/go.mod @@ -0,0 +1,12 @@ +module github.com/openconfig/goyang + +go 1.22.0 + +toolchain go1.24.1 + +require ( + github.com/google/go-cmp v0.7.0 + github.com/kylelemons/godebug v1.1.0 + github.com/openconfig/gnmi v0.14.1 + github.com/pborman/getopt v1.1.0 +) diff --git a/src/webui/internal/goyang/go.sum b/src/webui/internal/goyang/go.sum new file mode 100644 index 000000000..8e9dc6ede --- /dev/null +++ b/src/webui/internal/goyang/go.sum @@ -0,0 +1,8 @@ +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/openconfig/gnmi v0.14.1 
h1:qKMuFvhIRR2/xxCOsStPQ25aKpbMDdWr3kI+nP9bhMs= +github.com/openconfig/gnmi v0.14.1/go.mod h1:whr6zVq9PCU8mV1D0K9v7Ajd3+swoN6Yam9n8OH3eT0= +github.com/pborman/getopt v1.1.0 h1:eJ3aFZroQqq0bWmraivjQNt6Dmm5M0h2JcDW38/Azb0= +github.com/pborman/getopt v1.1.0/go.mod h1:FxXoW1Re00sQG/+KIkuSqRL/LwQgSkv7uyac+STFsbk= diff --git a/src/webui/internal/goyang/pkg/indent/indent.go b/src/webui/internal/goyang/pkg/indent/indent.go new file mode 100644 index 000000000..a67b88856 --- /dev/null +++ b/src/webui/internal/goyang/pkg/indent/indent.go @@ -0,0 +1,112 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package indent indents lines of text. +package indent + +import ( + "bytes" + "io" + "strings" +) + +// String returns s with each line in s prefixed by indent. +func String(indent, s string) string { + if indent == "" || s == "" { + return s + } + lines := strings.SplitAfter(s, "\n") + if len(lines[len(lines)-1]) == 0 { + lines = lines[:len(lines)-1] + } + return strings.Join(append([]string{""}, lines...), indent) +} + +// Bytes returns b with each line in b prefixed by indent. 
+func Bytes(indent, b []byte) []byte { + if len(indent) == 0 || len(b) == 0 { + return b + } + lines := bytes.SplitAfter(b, []byte{'\n'}) + if len(lines[len(lines)-1]) == 0 { + lines = lines[:len(lines)-1] + } + return bytes.Join(append([][]byte{{}}, lines...), indent) +} + +// NewWriter returns an io.Writer that prefixes the lines written to it with +// indent and then writes them to w. The writer returns the number of bytes +// written to the underlying Writer. +func NewWriter(w io.Writer, indent string) io.Writer { + if indent == "" { + return w + } + return &iw{ + w: w, + prefix: []byte(indent), + } +} + +type iw struct { + w io.Writer + prefix []byte + partial bool // true if next line's indent already written +} + +// Write implements io.Writer. +func (w *iw) Write(buf []byte) (int, error) { + if len(buf) == 0 { + return 0, nil + } + lines := bytes.SplitAfter(buf, []byte{'\n'}) + if len(lines[len(lines)-1]) == 0 { + lines = lines[:len(lines)-1] + } + if !w.partial { + lines = append([][]byte{{}}, lines...) + } + joined := bytes.Join(lines, w.prefix) + w.partial = joined[len(joined)-1] != '\n' + + n, err := w.w.Write(joined) + if err != nil { + return actualWrittenSize(n, len(w.prefix), lines), err + } + + return len(buf), nil +} + +func actualWrittenSize(underlay, prefix int, lines [][]byte) int { + actual := 0 + remain := underlay + for _, line := range lines { + if len(line) == 0 { + continue + } + + addition := remain - prefix + if addition <= 0 { + return actual + } + + if addition <= len(line) { + return actual + addition + } + + actual += len(line) + remain -= prefix + len(line) + } + + return actual +} diff --git a/src/webui/internal/goyang/pkg/indent/indent_test.go b/src/webui/internal/goyang/pkg/indent/indent_test.go new file mode 100644 index 000000000..3fad0cc91 --- /dev/null +++ b/src/webui/internal/goyang/pkg/indent/indent_test.go @@ -0,0 +1,146 @@ +// Copyright 2015 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package indent + +import ( + "bytes" + "errors" + "testing" +) + +var tests = []struct { + prefix, in, out string +}{ + { + "", "", "", + }, { + "--", "", "", + }, { + "", "x\nx", "x\nx", + }, { + "--", "x", "--x", + }, { + "--", "\n", "--\n", + }, { + "--", "\n\n", "--\n--\n", + }, { + "--", "x\n", "--x\n", + }, { + "--", "\nx", "--\n--x", + }, { + "--", "two\nlines\n", "--two\n--lines\n", + }, { + "--", "\nempty\nfirst\n", "--\n--empty\n--first\n", + }, { + "--", "empty\nlast\n\n", "--empty\n--last\n--\n", + }, { + "--", "empty\n\nmiddle\n", "--empty\n--\n--middle\n", + }, +} + +func TestIndent(t *testing.T) { + for x, tt := range tests { + out := String(tt.prefix, tt.in) + if out != tt.out { + t.Errorf("#%d: got %q, want %q", x, out, tt.out) + } + bout := string(Bytes([]byte(tt.prefix), []byte(tt.in))) + if bout != out { + t.Errorf("#%d: Bytes got %q\n String got %q", x, bout, out) + } + } +} + +func TestWriter(t *testing.T) { +Test: + for x, tt := range tests { + for size := 1; size < 64; size <<= 1 { + var b bytes.Buffer + w := NewWriter(&b, tt.prefix) + data := []byte(tt.in) + for len(data) > size { + if _, err := w.Write(data[:size]); err != nil { + t.Errorf("#%d: %v", x, err) + continue Test + } + data = data[size:] + } + if _, err := w.Write(data); err != nil { + t.Errorf("#%d/%d: %v", x, size, err) + continue Test + } + + out := b.String() + if out != tt.out { + t.Errorf("#%d/%d: got %q, want %q", 
x, size, out, tt.out) + } + } + } +} + +func TestWrittenSize(t *testing.T) { + for x, tt := range tests { + var b bytes.Buffer + w := NewWriter(&b, tt.prefix) + data := []byte(tt.in) + if n, _ := w.Write(data); n != len(data) { + t.Errorf("#%d: got %d, want %d", x, n, len(data)) + } + } +} + +func TestWrittenSizeWithError(t *testing.T) { + table := []struct { + prefix string + input string + underlay int + expected int + }{ + {"--", "two\nlines\n", 0, 0}, + {"--", "two\nlines\n", 1, 0}, // - + {"--", "two\nlines\n", 2, 0}, // - + {"--", "two\nlines\n", 3, 1}, // t + {"--", "two\nlines\n", 4, 2}, // w + {"--", "two\nlines\n", 5, 3}, // o + {"--", "two\nlines\n", 6, 4}, // \n + {"--", "two\nlines\n", 7, 4}, // - + {"--", "two\nlines\n", 8, 4}, // - + {"--", "two\nlines\n", 9, 5}, // l + {"--", "two\nlines\n", 10, 6}, // i + {"--", "two\nlines\n", 11, 7}, // n + {"--", "two\nlines\n", 12, 8}, // e + {"--", "two\nlines\n", 13, 9}, // s + {"--", "two\nlines\n", 14, 10}, // \n + {"--", "two\nlines\n", 15, 10}, // - + {"--", "two\nlines\n", 16, 10}, // - + } + + for _, d := range table { + uw := errorWriter{d.underlay} + w := NewWriter(uw, d.prefix) + data := []byte(d.input) + if n, _ := w.Write(data); n != d.expected { + t.Errorf("underlay: %d, got %d, want %d, err: ", d.underlay, n, d.expected) + } + } +} + +type errorWriter struct { + ret int +} + +func (w errorWriter) Write(buf []byte) (int, error) { + return w.ret, errors.New("error") +} diff --git a/src/webui/internal/goyang/pkg/yang/ast.go b/src/webui/internal/goyang/pkg/yang/ast.go new file mode 100644 index 000000000..3c7edb6a4 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/ast.go @@ -0,0 +1,461 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// This file implements BuildAST() and its associated helper structs and +// functions for constructing an AST of Nodes from a Statement tree. This +// function also populates all typedefs into a type cache. +// +// The initTypes function generates the helper struct and functions that +// recursively fill in the various Node structures defined in yang.go. +// BuildAST() then uses those functions to convert raw parsed Statements into +// an AST. + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +func init() { + // Initialize the global variables `typeMap` and `nameMap`. + // By doing this, we are making the assumption that all modules will be + // parsed according to the type hierarchy rooted at `meta`, and thus + // all input YANG modules will be parsed in this manner. + initTypes(reflect.TypeOf(&meta{})) +} + +// A yangStatement contains all information needed to build a particular +// type of statement into an AST node. +type yangStatement struct { + // funcs is the map of YANG field names to the function that populates + // the statement into the AST node. + funcs map[string]func(*Statement, reflect.Value, reflect.Value, *typeDictionary) error + // required is a list of fields that must be present in the statement. + required []string + // sRequired maps a statement name to a list of required sub-field + // names. The statement name can be an alias of the primary field type. + // e.g. If a field is required by statement type foo, then only foo + // should have the field. 
If bar is an alias of foo, it must not + // have this field. + sRequired map[string][]string + // addext is the function to handle possible extensions. + addext func(*Statement, reflect.Value, reflect.Value) error +} + +// newYangStatement creates a new yangStatement. +func newYangStatement() *yangStatement { + return &yangStatement{ + funcs: make(map[string]func(*Statement, reflect.Value, reflect.Value, *typeDictionary) error), + sRequired: make(map[string][]string), + } +} + +var ( + // The following maps are built up at init time. + // typeMap provides a lookup from a Node type to the corresponding + // yangStatement. + typeMap = map[reflect.Type]*yangStatement{} + // nameMap provides a lookup from a keyword string to the corresponding + // concrete type implementing the Node interface (see yang.go). + nameMap = map[string]reflect.Type{} + + // The following are helper types used by the implementation. + statementType = reflect.TypeOf(&Statement{}) + nilValue = reflect.ValueOf(nil) + // nodeType is the reflect.Type of the Node interface. + nodeType = reflect.TypeOf((*Node)(nil)).Elem() +) + +// meta is a collection of top-level statements. There is no actual +// statement named "meta". All other statements are a sub-statement of one +// of the meta statements. +type meta struct { + Module []*Module `yang:"module"` +} + +// aliases is a map of "aliased" names, that is, two types of statements +// that parse (nearly) the same. +// NOTE: This only works for root-level aliasing for now, which is good enough +// for module/submodule. This is because yangStatement.funcs doesn't store the +// handler function for aliased fields, and sRequired also may only store the +// correct values when processing a root-level statement due to aliasing. These +// issues would need to be fixed in order to support aliasing for non-top-level +// statements. 
+var aliases = map[string]string{ + "submodule": "module", +} + +// buildASTWithTypeDict creates an AST for the input statement, and returns its +// root node. It also takes as input a type dictionary into which any +// encountered typedefs within the statement are cached. +func buildASTWithTypeDict(stmt *Statement, types *typeDictionary) (Node, error) { + v, err := build(stmt, nilValue, types) + if err != nil { + return nil, err + } + return v.Interface().(Node), nil +} + +// build builds and returns an AST from the statement stmt and with parent node +// parent. It also takes as input a type dictionary types into which any +// encountered typedefs within the statement are cached. The type of value +// returned depends on the keyword in stmt (see yang.go). It returns an error +// if it cannot build the statement into its corresponding Node type. +func build(stmt *Statement, parent reflect.Value, types *typeDictionary) (v reflect.Value, err error) { + defer func() { + // If we are returning a real Node then call addTypedefs + // if the node possibly contains typedefs. + // Cache these in the typedef cache for look-ups. + if err != nil || v == nilValue { + return + } + if t, ok := v.Interface().(Typedefer); ok { + types.addTypedefs(t) + } + }() + keyword := stmt.Keyword + if k, ok := aliases[stmt.Keyword]; ok { + keyword = k + } + t := nameMap[keyword] + y := typeMap[t] + // Keep track of which substatements are present in the statement. + found := map[string]bool{} + + // Get the struct type we are pointing to. + t = t.Elem() + // v is a pointer to the instantiated structure we are building. + v = reflect.New(t) + + // Handle special cases that are not actually substatements: + + if fn := y.funcs["Name"]; fn != nil { + // Name uses stmt directly. + if err := fn(stmt, v, parent, types); err != nil { + return nilValue, err + } + } + if fn := y.funcs["Statement"]; fn != nil { + // Statement uses stmt directly. 
+ if err := fn(stmt, v, parent, types); err != nil { + return nilValue, err + } + } + if fn := y.funcs["Parent"]; fn != nil { + // parent is the parent node, which is nilValue (reflect.ValueOf(nil)) if there is none. + // parent.IsValid will return false when parent is a nil interface + // parent.IsValid will true if parent references a concrete type + // (even if it is nil). + if parent.IsValid() { + if err := fn(stmt, v, parent, types); err != nil { + return nilValue, err + } + } + } + + // Now handle the substatements + + for _, ss := range stmt.statements { + found[ss.Keyword] = true + fn := y.funcs[ss.Keyword] + switch { + case fn != nil: + // Normal case, the keyword is known. + if err := fn(ss, v, parent, types); err != nil { + return nilValue, err + } + case len(strings.Split(ss.Keyword, ":")) == 2: + // Keyword is not known but it has a prefix so it might + // be an extension. + if y.addext == nil { + return nilValue, fmt.Errorf("%s: no extension function", ss.Location()) + } + y.addext(ss, v, parent) + default: + return nilValue, fmt.Errorf("%s: unknown %s field: %s", ss.Location(), stmt.Keyword, ss.Keyword) + } + } + + // Make sure all of our required field are there. + for _, r := range y.required { + if !found[r] { + return nilValue, fmt.Errorf("%s: missing required %s field: %s", stmt.Location(), stmt.Keyword, r) + } + } + + // Make sure required fields based on our keyword are there (module vs submodule) + for _, r := range y.sRequired[stmt.Keyword] { + if !found[r] { + return nilValue, fmt.Errorf("%s: missing required %s field: %s", stmt.Location(), stmt.Keyword, r) + } + } + + // Make sure we don't have any field set that is required by a different keyword. 
+ for n, or := range y.sRequired { + if n == stmt.Keyword { + continue + } + for _, r := range or { + if found[r] { + return nilValue, fmt.Errorf("%s: unknown %s field: %s", stmt.Location(), stmt.Keyword, r) + } + } + } + return v, nil +} + +// initTypes creates the functions necessary to build a Statement into the +// given the type "at" based on its possible substatements. at must implement +// Node, with its concrete type being a pointer to a struct defined in yang.go. +// +// This function also builds up the functions to populate the input type +// dictionary types with any encountered typedefs within the statement. +// +// For each field of the struct with a yang tag (e.g., `yang:"command"`), a +// function is created with "command" as its unique ID. The complete map of +// builder functions for at is then added to the typeMap map with at as the +// key. The idea is to call these builder functions for each substatement +// encountered. +// +// The functions have the form: +// +// func fn(ss *Statement, v, p reflect.Value, types *typeDictionary) error +// +// Given stmt as a Statement of type at, ss is a substatement of stmt (in a few +// exceptional cases, ss is the Statement itself). v must have the same type +// as at and is the structure being filled in. p is the parent Node, or nil. +// types is the type dictionary cache of the current set of modules being parsed, +// which is used for looking up typedefs. p is only used to set the Parent +// field of a Node. For example, given the following structure and variables: +// +// type Include struct { +// Name string `yang:"Name"` +// Source *Statement `yang:"Statement"` +// Parent Node `yang:"Parent"` +// Extensions []*Statement `yang:"Ext"` +// RevisionDate *Value `yang:"revision-date"` +// } +// +// var inc = &Include{} +// var vInc = reflect.ValueOf(inc) +// var tInc = reflect.TypeOf(inc) +// +// Functions are created for each fields and named Name, Statement, Parent, Ext, +// and revision-date. 
+// +// The function built for RevisionDate will be called for any substatement, +// ds, of stmt that has the keyword "revision-date" along with the value of +// vInc and its parent: +// +// typeMap[tInc]["revision-date"](ss, vInc, parent, types) +// +// Normal fields are all processed this same way. +// +// The other 4 fields are special. In the case of Name, Statement, and Parent, +// the function is passed stmt, rather than ss, as these fields are not filled in +// by substatements. +// +// The Name command must set its field to the Statement's argument. The +// Statement command must set its field to the Statement itself. The +// Parent command must set its field with the Node of its parent (the +// parent parameter). +// +// The Ext command is unique and must decode into a []*Statement. This is a +// slice of all statements that use unknown keywords with a prefix (in a valid +// .yang file these should be the extensions). +// +// The Field can have attributes delimited by a ','. The only +// supported attributes are: +// +// nomerge: Do not merge this field +// required: This field must be populated +// required=KIND: This field must be populated if the keyword is KIND +// otherwise this field must not be present. +// (This is to support merging Module and SubModule). +// +// If at contains substructures, initTypes recurses on the substructures. 
+func initTypes(at reflect.Type) { + if at.Kind() != reflect.Ptr || at.Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("interface not a struct pointer, is %v", at)) + } + if typeMap[at] != nil { + return // we already defined this type + } + + y := newYangStatement() + typeMap[at] = y + t := at.Elem() + for i := 0; i != t.NumField(); i++ { + i := i + f := t.Field(i) + yang := f.Tag.Get("yang") + if yang == "" { + continue + } + parts := strings.Split(yang, ",") + name := parts[0] + if a, ok := aliases[name]; ok { + name = a + } + + const reqe = "required=" + for _, p := range parts[1:] { + switch { + case p == "nomerge": + case p == "required": + y.required = append(y.required, name) + case strings.HasPrefix(p, reqe): + p = p[len(reqe):] + y.sRequired[p] = append(y.sRequired[p], name) + default: + panic(f.Name + ": unknown tag: " + p) + } + } + + // Ext means this is where we squirrel away extensions + if name == "Ext" { + // stmt is the extension to put into v at for field f. + y.addext = func(stmt *Statement, v, _ reflect.Value) error { + if v.Type() != at { + panic(fmt.Sprintf("given type %s, need type %s", v.Type(), at)) + } + fv := v.Elem().Field(i) + fv.Set(reflect.Append(fv, reflect.ValueOf(stmt))) + return nil + } + continue + } + + // descend runs initType on dt if it has not already done so. + descend := func(name string, dt reflect.Type) { + switch nameMap[name] { + case nil: + nameMap[name] = dt + initTypes(dt) // Make sure that structure type is included + case dt: + default: + panic("redeclared type " + name) + } + } + + // Create a function, fn, that will build the field from a + // Statement. These functions are used when actually making + // an AST from a Statement Tree. 
+ var fn func(*Statement, reflect.Value, reflect.Value, *typeDictionary) error + + // The field can be a pointer, a slice or a string + switch f.Type.Kind() { + default: + panic(fmt.Sprintf("invalid type: %v", f.Type.Kind())) + + case reflect.Interface: + // The only case of this should be the "Parent" field. + if name != "Parent" { + panic(fmt.Sprintf("interface field is %s, not Parent", name)) + } + fn = func(stmt *Statement, v, p reflect.Value, types *typeDictionary) error { + if !p.Type().Implements(nodeType) { + panic(fmt.Sprintf("invalid interface: %v", f.Type.Kind())) + } + v.Elem().Field(i).Set(p) + return nil + } + case reflect.String: + // The only case of this should be the "Name" field + if name != "Name" { + panic(fmt.Sprintf("string field is %s, not Name", name)) + } + fn = func(stmt *Statement, v, _ reflect.Value, types *typeDictionary) error { + if v.Type() != at { + panic(fmt.Sprintf("got type %v, want %v", v.Type(), at)) + } + fv := v.Elem().Field(i) + if fv.String() != "" { + return errors.New(stmt.Keyword + ": already set") + } + + v.Elem().Field(i).SetString(stmt.Argument) + return nil + } + + case reflect.Ptr: + if f.Type == statementType { + // The only case of this should be the + // "Statement" field + if name != "Statement" { + panic(fmt.Sprintf("string field is %s, not Statement", name)) + } + fn = func(stmt *Statement, v, _ reflect.Value, types *typeDictionary) error { + if v.Type() != at { + panic(fmt.Sprintf("got type %v, want %v", v.Type(), at)) + } + v.Elem().Field(i).Set(reflect.ValueOf(stmt)) + return nil + } + break + } + + // Make sure our field type is also setup. + descend(name, f.Type) + + fn = func(stmt *Statement, v, p reflect.Value, types *typeDictionary) error { + if v.Type() != at { + panic(fmt.Sprintf("given type %s, need type %s", v.Type(), at)) + } + fv := v.Elem().Field(i) + if !fv.IsNil() { + return errors.New(stmt.Keyword + ": already set") + } + + // Use build to build the value for this field. 
+ sv, err := build(stmt, v, types) + if err != nil { + return err + } + v.Elem().Field(i).Set(sv) + return nil + } + + case reflect.Slice: + // A slice at this point is always a slice of + // substructures. We may see the same keyword multiple + // times, each time we see it we just append to the + // slice. + st := f.Type.Elem() + switch st.Kind() { + default: + panic(fmt.Sprintf("invalid type: %v", st.Kind())) + case reflect.Ptr: + descend(name, st) + fn = func(stmt *Statement, v, p reflect.Value, types *typeDictionary) error { + if v.Type() != at { + panic(fmt.Sprintf("given type %s, need type %s", v.Type(), at)) + } + sv, err := build(stmt, v, types) + if err != nil { + return err + } + + fv := v.Elem().Field(i) + fv.Set(reflect.Append(fv, sv)) + return nil + } + } + } + y.funcs[name] = fn + } +} diff --git a/src/webui/internal/goyang/pkg/yang/ast_test.go b/src/webui/internal/goyang/pkg/yang/ast_test.go new file mode 100644 index 000000000..9010e5be0 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/ast_test.go @@ -0,0 +1,538 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yang + +import ( + "bytes" + "fmt" + "reflect" + "testing" +) + +type MainNode struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Field *Value `yang:"field"` + Slice []*Value `yang:"slice"` + ChildNode *SubNode `yang:"child_node"` + ChildSlice []*SubNode `yang:"child_slice"` + ReqNode *ReqNode `yang:"req_node"` + MainField *Value `yang:"main_field,required=main_node"` + AltField *Value `yang:"alt_field,required=alt_node"` +} + +func (m *MainNode) Kind() string { + if m.AltField != nil { + return "alt_node" + } + return "main_node" +} + +func (m *MainNode) ParentNode() Node { return m.Parent } +func (m *MainNode) NName() string { return m.Name } +func (m *MainNode) Statement() *Statement { return m.Source } +func (m *MainNode) Exts() []*Statement { return m.Extensions } + +func (m *MainNode) checkEqual(n Node) string { + o, ok := n.(*MainNode) + if !ok { + return fmt.Sprintf("expected *MainNode, got %T", n) + } + if m.Name != o.Name { + return fmt.Sprintf("got name %s, want %s", o.Name, m.Name) + } + if s := m.Source.checkEqual(o.Source); s != "" { + return s + } + if (m.Field == nil) != (o.Field == nil) { + if m.Field == nil { + return "unexpected field entry" + } + return "missing expected field entry" + } + if m.Field != nil { + if m.Field.Name != o.Field.Name { + return fmt.Sprintf("got field of %s, want %s", o.Field.Name, m.Field.Name) + } + } + if len(m.Slice) != len(o.Slice) { + return fmt.Sprintf("got slice of %d, want slice of %d", len(o.Slice), len(m.Slice)) + } + for x, s1 := range m.Slice { + s2 := o.Slice[x] + if s1.Name != s2.Name { + return fmt.Sprintf("slice[%d] got %s, want %s", x, s2.Name, s1.Name) + } + } + if (m.ChildNode == nil) != (o.ChildNode == nil) { + if m.ChildNode == nil { + return "unexpected child_node entry" + } + return "missing expected child_node entry" + } + if m.ChildNode != nil { + if s := 
m.ChildNode.checkEqual(o.ChildNode); s != "" { + return fmt.Sprintf("child_node: %s", s) + } + } + if len(m.ChildSlice) != len(o.ChildSlice) { + return fmt.Sprintf("got child_slice of %d, want slice of %d", len(o.ChildSlice), len(m.ChildSlice)) + } + for x, s1 := range m.ChildSlice { + s2 := o.ChildSlice[x] + if s := s1.checkEqual(s2); s != "" { + return fmt.Sprintf("child_slice[%d]: %s", x, s) + } + } + if (m.ReqNode == nil) != (o.ReqNode == nil) { + if m.ReqNode == nil { + return "unexpected req_node entry" + } + return "missing expected req_node entry" + } + if m.ReqNode != nil { + if s := m.ReqNode.checkEqual(o.ReqNode); s != "" { + return fmt.Sprintf("req_node: %s", s) + } + } + if (m.AltField == nil) != (o.AltField == nil) { + if m.AltField == nil { + return "unexpected alt_field entry" + } + return "missing expected alt_field entry" + } + if m.AltField != nil { + if m.AltField.Name != o.AltField.Name { + return fmt.Sprintf("got alt_field of %s, want %s", o.AltField.Name, m.AltField.Name) + } + } + return "" +} + +type SubNode struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + SubField *Value `yang:"sub_field"` +} + +func (SubNode) Kind() string { return "sub_node" } +func (s *SubNode) ParentNode() Node { return s.Parent } +func (s *SubNode) NName() string { return s.Name } +func (s *SubNode) Statement() *Statement { return s.Source } +func (s *SubNode) Exts() []*Statement { return s.Extensions } + +func (s *SubNode) checkEqual(o *SubNode) string { + if s.Name != o.Name { + return fmt.Sprintf("got name %s, want %s", o.Name, s.Name) + } + if s := s.Source.checkEqual(o.Source); s != "" { + return s + } + if (s.SubField == nil) != (o.SubField == nil) { + if s.SubField == nil { + return "unexpected sub_field entry" + } + return "missing expected sub_field entry" + } + if s.SubField != nil { + if s.SubField.Name != o.SubField.Name { + return 
fmt.Sprintf("got sub_field of %s, want %s", o.SubField.Name, s.SubField.Name) + } + } + return "" +} + +type ReqNode struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + + ReqField *Value `yang:"req_field,required"` + AltReqField *Value `yang:"alt_req_field,required=alt_req_node"` + Field *Value `yang:"field"` +} + +func (s *ReqNode) Kind() string { + return "req_node" +} +func (s *ReqNode) ParentNode() Node { return s.Parent } +func (s *ReqNode) NName() string { return s.Name } +func (s *ReqNode) Statement() *Statement { return s.Source } +func (m *ReqNode) Exts() []*Statement { return nil } + +func (s *ReqNode) checkEqual(o *ReqNode) string { + if s.Name != o.Name { + return fmt.Sprintf("got name %s, want %s", o.Name, s.Name) + } + if s := s.Source.checkEqual(o.Source); s != "" { + return s + } + if (s.ReqField == nil) != (o.ReqField == nil) { + if s.ReqField == nil { + return "unexpected req_field entry" + } + return "missing expected req_field entry" + } + if s.ReqField != nil { + if s.ReqField.Name != o.ReqField.Name { + return fmt.Sprintf("got req_field of %s, want %s", o.ReqField.Name, s.ReqField.Name) + } + } + if (s.AltReqField == nil) != (o.AltReqField == nil) { + if s.AltReqField == nil { + return "unexpected alt_req_field entry" + } + return "missing expected alt_req_field entry" + } + if s.AltReqField != nil { + if s.AltReqField.Name != o.AltReqField.Name { + return fmt.Sprintf("got alt_req_field of %s, want %s", o.AltReqField.Name, s.AltReqField.Name) + } + } + return "" +} + +func (s *Statement) checkEqual(o *Statement) string { + if (s == nil) != (o == nil) { + var b bytes.Buffer + if s == nil { + o.Write(&b, "") + return fmt.Sprintf("unexpected Statement entry\n%s", &b) + } + s.Write(&b, "") + return fmt.Sprintf("missing expected Statement entry\n%s", &b) + } + if s == nil { + return "" + } + var b1, b2 bytes.Buffer + s.Write(&b1, "") + o.Write(&b2, "") + ss := 
b1.String() + os := b2.String() + if ss != os { + return fmt.Sprintf("got statement:\n%swant:\n%s", os, ss) + } + return "" +} + +func TestAST(t *testing.T) { + // Teach the AST parser about our testing nodes + type meta struct { + MainNode []*MainNode `yang:"main_node"` + } + + old_aliases := aliases + aliases = map[string]string{ + "alt_node": "main_node", + } + + for _, tt := range []struct { + line int + in string + out *MainNode + err string + }{ + { + line: line(), + in: ` +main_node the_node { + // This test is testing to make sure unknown statements, that + // might be extensions, are properly put in the Extensions slice. + // When an extension is used, it must be of the form "prefix:name". + // See https://tools.ietf.org/html/rfc6020#section-7.17 + ex:ext1 value1; + ex:ext2 value2; + main_field foo; +} +`, + out: &MainNode{ + Source: SA("main_node", "the_node", + SA("ex:ext1", "value1"), + SA("ex:ext2", "value2"), + SA("main_field", "foo")), + Name: "the_node", + Extensions: []*Statement{ + SA("ex:ext1", "value1"), + SA("ex:ext2", "value2"), + }, + MainField: &Value{ + Name: "foo", + }, + }, + }, + { + line: line(), + in: ` +main_node the_node { + // This test tests fields, slices, and sub-statements. 
+ field field_value; + slice sl1; + slice sl2; + child_node the_child { + sub_field val1; + } + child_slice element1 { + sub_field el1; + } + child_slice element2 { + sub_field el2; + } + main_field foo; +}`, + out: &MainNode{ + Source: SA("main_node", "the_node", + SA("field", "field_value"), + SA("slice", "sl1"), + SA("slice", "sl2"), + SA("child_node", "the_child", + SA("sub_field", "val1")), + SA("child_slice", "element1", + SA("sub_field", "el1")), + SA("child_slice", "element2", + SA("sub_field", "el2")), + SA("main_field", "foo"), + ), + Name: "the_node", + Field: &Value{ + Name: "field_value", + }, + Slice: []*Value{ + { + Name: "sl1", + }, + { + Name: "sl2", + }, + }, + ChildNode: &SubNode{ + Source: SA("child_node", "the_child", + SA("sub_field", "val1")), + Name: "the_child", + SubField: &Value{ + Name: "val1", + }, + }, + ChildSlice: []*SubNode{ + { + Source: SA("child_slice", "element1", + SA("sub_field", "el1")), + Name: "element1", + SubField: &Value{ + Name: "el1", + }, + }, + { + Source: SA("child_slice", "element2", + SA("sub_field", "el2")), + Name: "element2", + SubField: &Value{ + Name: "el2", + }, + }, + }, + MainField: &Value{ + Name: "foo", + }, + }, + }, + { + line: line(), + in: ` +// This test tests for the presence of a required field. +// main_node requires the field named "main_field". +main_node the_node { + main_field value1 { + } +} +`, + out: &MainNode{ + Source: SA("main_node", "the_node", + SA("main_field", "value1"), + ), + Name: "the_node", + MainField: &Value{ + Name: "value1", + }, + }, + }, + { + line: line(), + in: ` +// This test tests for the presence of a required= field. +// alt_node requires the field named "alt_field". 
+alt_node the_node { + alt_field value2 { + } +} +`, + out: &MainNode{ + Source: SA("alt_node", "the_node", + SA("alt_field", "value2"), + ), + Name: "the_node", + AltField: &Value{ + Name: "value2", + }, + }, + }, + { + line: line(), + in: ` +main_node the_node { + // This test tests that extensions are rejected when the node is not + // supposed to contain them. + req_node value1 { + req_field foo { + } + ex:ext1 value1; + ex:ext2 value2; + } +} +`, + err: `ast.yang:8:3: no extension function`, + }, + { + line: line(), + in: ` +main_node the_node { + // This test tests for the presence of a required field. + // req_node requires the field named "req_field". + req_node value1 { + req_field foo { + } + } + main_field foo; +} +`, + out: &MainNode{ + Source: SA("main_node", "the_node", + SA("req_node", "value1", + SA("req_field", "foo")), + SA("main_field", "foo"), + ), + Name: "the_node", + ReqNode: &ReqNode{ + Source: SA("req_node", "value1", + SA("req_field", "foo")), + Name: "value1", + ReqField: &Value{ + Name: "foo", + }, + }, + MainField: &Value{ + Name: "foo", + }, + }, + }, + { + line: line(), + in: ` +main_node the_node { + // This test tests that the absence of a required field fails. + // req_node requires the field named "req_field". + req_node value1 { + } + main_field foo; +} +`, + err: `ast.yang:5:2: missing required req_node field: req_field`, + }, + { + line: line(), + in: ` +main_node the_node { + // This test tests that the absence of a required field. + // main_node requires the field named "main_field". + req_node value1 { + req_field foo { + } + } +} +`, + err: `ast.yang:2:1: missing required main_node field: main_field`, + }, + { + line: line(), + in: ` +// This test tests that the alt_field, specified with +// required=alt_node, causes the AST construction to error when a +// main_node contains it. 
+main_node the_node { + main_field foo; + alt_field foo; +} +`, + err: `ast.yang:5:1: unknown main_node field: alt_field`, + }, + { + line: line(), + in: ` +// This test tests that required=alt_node enforces that +// alt_node must contain it. +alt_node the_node { + main_field foo; + alt_field foo; +} +`, + err: `ast.yang:4:1: unknown alt_node field: main_field`, + }, + { + line: line(), + in: ` +// This test tests that required=alt_node enforces that +// alt_node must contain it. +alt_node the_node { +} +`, + err: `ast.yang:4:1: missing required alt_node field: alt_field`, + }, + } { + ss, err := Parse(tt.in, "ast.yang") + if err != nil { + t.Errorf("%d: %v", tt.line, err) + continue + } + if len(ss) != 1 { + t.Errorf("%d: got %d results, want 1", tt.line, len(ss)) + continue + } + + typeDict := newTypeDictionary() + initTypes(reflect.TypeOf(&meta{})) + + ast, err := buildASTWithTypeDict(ss[0], typeDict) + switch { + case err == nil && tt.err == "": + if s := tt.out.checkEqual(ast); s != "" { + t.Errorf("%d: %s", tt.line, s) + } + case err == nil: + t.Errorf("%d: did not get expected error %s", tt.line, tt.err) + case tt.err == "": + t.Errorf("%d: %v", tt.line, err) + case err.Error() != tt.err: + t.Errorf("%d: got error %v, want %s", tt.line, err, tt.err) + } + } + + aliases = old_aliases +} diff --git a/src/webui/internal/goyang/pkg/yang/bgp_test.go b/src/webui/internal/goyang/pkg/yang/bgp_test.go new file mode 100644 index 000000000..1d96a6d5c --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/bgp_test.go @@ -0,0 +1,571 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "reflect" + "testing" +) + +// TestBGP simply makes sure we are able to parse a version of Anees's +// BGP model. We don't actually attempt to validate we got the right +// AST. ast_test.go will test smaller peices to make sure the basics +// of BuildAST produce expected results. +func TestBGP(t *testing.T) { + ss, err := Parse(bgp, "bgp.yang") + if err != nil { + t.Fatal(err) + } + if len(ss) != 1 { + t.Fatalf("got %d results, want 1", len(ss)) + } + typeDict := newTypeDictionary() + initTypes(reflect.TypeOf(&meta{})) + if _, err := buildASTWithTypeDict(ss[0], typeDict); err != nil { + t.Fatal(err) + } +} + +var bgp = ` +module google-bgp { + + yang-version "1"; + + // namespace + namespace "http://google.com/yang/google-bgp-protocol-cfg"; + + prefix "gbgp"; + + // import some basic types -- no other dependency on + // in-progress models in draft status + import ietf-inet-types { prefix inet; } + + + // meta + organization "Google, Inc."; + + contact + "Google, Inc. 
+ 1600 Amphitheatre Way + Mountain View, CA 94043"; + + description + "This module describes a YANG model for BGP protocol + configuration.It is a limited subset of all of the configuration + parameters available in the variety of vendor implementations, + hence it is expected that it would be augmented with vendor- + specific configuration data as needed.Additional modules or + submodules to handle other aspects of BGP configuration, + including policy, VRFs, and additional address families are also + expected."; + + revision "2014-07-07" { + description + "Initial revision"; + reference "TBD"; + } + + + identity afi-type { + description + "base identity type for BGP address family identifiers (AFI)"; + reference "IETF RFC 4760"; + } + + identity safi-type { + description + "base identity type for BGP subsequent address family + identifiers (SAFI)"; + reference "IETF RFC 4760"; + } + + identity ipv4-afi { + base afi-type; + description + "IPv4 AF identifier"; + } + + identity ipv6-afi { + base afi-type; + description + "IPv6 AF identifier"; + } + + identity unicast-safi { + base safi-type; + description + "unicast SAFI identifier"; + } + + identity labeled-unicast-safi { + base safi-type; + description + "labeled unicast SAFI identifier"; + reference "RFC 3107 - Carrying Label Information in BGP-4"; + } + + + typedef peer-group-type { + type enumeration { + enum INTERNAL { + description "internal (iBGP) peer"; + } + enum EXTERNAL { + description "external (eBGP) peer"; + } + } + description + "labels a peer as explicitly internal or external"; + } + + + typedef remove-private-as-option { + type enumeration { + enum ALL { + description "remove all private ASes in the path"; + } + enum REPLACE { + description "replace private ASes with local AS"; + } + } + description + "set of options for configuring how private AS path numbers + are removed from advertisements"; + } + + typedef percentage { + type uint8 { + range "0..100"; + } + description + "Integer indicating a 
percentage value"; + } + + typedef rr-cluster-id-type { + type union { + type uint32; + type inet:ipv4-address; + } + description + "union type for route reflector cluster ids: + option 1: 4-byte number + option 2: IP address"; + } + + grouping bgp-common-configuration { + description "Common configuration across neighbors, groups, + etc."; + + leaf description { + type string; + description + "A textual description of the peer or group"; + } + container use-multiple-paths { + description + "Configuration of BGP multipath to enable load sharing across + multiple paths to peers."; + leaf allow-multiple-as { + type boolean; + default "false"; + description + "Allow multipath to use paths from different neighboring + ASes. The default is to only consider multiple paths from + the same neighboring AS."; + } + leaf maximum-paths { + type uint32; + default 1; + description + "Maximum number of parallel paths to consider when using + BGP multipath. The default is to use a single path."; + reference "draft-ietf-idr-add-paths-09.txt"; + } + } + + } + + grouping bgp-group-common-configuration { + description "Configuration items that are applied at the peer + group level"; + } + + grouping bgp-group-neighbor-common-configuration { + description "Configuration options for peer and group context"; + + leaf auth-password { + type string; + description + "Configures an authentication password for use with + neighboring devices."; + } + + container timers { + description "Configuration of various BGP timers"; + + leaf hold-time { + type decimal64 { + fraction-digits 2; + } + default 90; + // hold-time should typically be set to 3x the + // keepalive-interval -- create a constraint for this? 
+ description + "Time interval in seconds that a BGP session will be + considered active in the absence of keepalive or other + messages from the peer"; + reference + "RFC 1771 - A Border Gateway Protocol 4"; + } + + leaf keepalive-interval { + type decimal64 { + fraction-digits 2; + } + default 30; + description + "Time interval in seconds between transmission of keepalive + messages to the neighbor. Typically set to 1/3 the + hold-time."; + } + + leaf advertisement-interval { + type decimal64 { + fraction-digits 2; + } + default 30; + description + "Mininum time interval in seconds between transmission + of BGP updates to neighbors"; + reference + "RFC 1771 - A Border Gateway Protocol 4"; + } + + leaf connect-retry { + type decimal64 { + fraction-digits 2; + } + default 30; + description + "Time interval in seconds between attempts to establish a + session with the peer."; + } + } + + container ebgp-multihop { + description + "Configure multihop BGP for peers that are not directly + connected"; + + leaf multihop-ttl { + type uint8; + default 1; + description + "Time-to-live for multihop BGP sessions. The default + value of 1 is for directly connected peers (i.e., + multihop disabled"; + + } + + } + + container route-reflector { + description + "Configure the local router as a route-reflector + server"; + leaf route-reflector-clusterid { + type rr-cluster-id-type; + description + "route-reflector cluster id to use when local router is + configured as a route reflector. Commonly set at the group + level, but allows a different cluster + id to be set for each neighbor."; + } + + leaf route-reflector-client { + type boolean; + default "false"; + description + "configure the neighbor as a route reflector client"; + } + } + + leaf remove-private-as { + // could also make this a container with a flag to enable + // remove-private and separate option. here, option implies + // remove-private is enabled. 
+ type remove-private-as-option; + description + "Remove private AS numbers from updates sent to peers"; + } + + + container bgp-logging-options { + description + "Configure various tracing/logging options for BGP peers + or groups. Expected that additional vendor-specific log + options would augment this container"; + + leaf log-neighbor-state-changes { + type boolean; + default "true"; + description + "Configure logging of peer state changes. Default is + to enable logging of peer state changes."; + } + } + + container transport-options { + description + "Transport protocol options for BGP sessions"; + + leaf tcp-mss { + type uint16; + description + "Sets the max segment size for BGP TCP sessions"; + } + + leaf passive-mode { + type boolean; + description + "Wait for peers to issue requests to open a BGP session, + rather than initiating sessions from the local router"; + } + } + + leaf local-address { + type inet:ip-address; + description + "Set the local IP (either IPv4 or IPv6) address to use for + the session when sending BGP update messages"; + } + + leaf route-flap-damping { + type boolean; + description + "Enable route flap damping"; + } + } + + grouping bgp-address-family-common-configuration { + description "Configuration options per address family context"; + + list address-family { + + key "afi-name"; + description + "Per address-family configuration, uniquely identified by AF + name"; + leaf afi-name { + type identityref { + base "afi-type"; + } + description + "Address family names are drawn from the afi-type base + identity, which has specific address family types as + derived identities"; + } + + list subsequent-address-family { + + key "safi-name"; + description + "Per subsequent address family configuration, under a + specific address family"; + + leaf safi-name { + // do we need to specify which SAFIs are possible within + // each AF? 
with the current set of AF/SAFI, all are + /// applicable + type identityref { + base "safi-type"; + } + description + "Within each address family, subsequent address family + names are drawn from the subsequent-address-family base + identity"; + } + + + container prefix-limit { + description + "Configure the maximum number of prefixes that will be + accepted from a peer"; + + leaf max-prefixes { + type uint32; + description + "Maximum number of prefixes that will be accepted from + the neighbor"; + } + + leaf shutdown-threshold-pct { + type percentage; + description + "Threshold on number of prefixes that can be received + from a neighbor before generation of warning messages + or log entries. Expressed as a percentage of + max-prefixes."; + } + + leaf restart-timer { + type decimal64 { + fraction-digits 2; + } + units "seconds"; + description + "Time interval in seconds after which the BGP session + is reestablished after being torn down due to exceeding + the max-prefixes limit."; + } + } + } + } + } + + + + container bgp { + description "Top-level configuration data for the BGP router"; + + container global { + description + "Top-level bgp protocol options applied across peer-groups, + neighbors, and address families"; + + leaf as { + type inet:as-number; + mandatory "true"; + description + "Local autonomous system number of the router. Uses + the as-number type defined in RFC 6991"; + } + leaf router-id { + type inet:ipv4-address; + description + "Router id of the router, expressed as an + IPv4 address"; + // there is a typedef for this in draft module ietf-routing + // but it does not use an appropriate type + } + container route-selection-options { + description + "Set of configuration options that govern best + path selection"; + leaf always-compare-med { + type boolean; + default "false"; + description + "Compare multi-exit discriminator (MED) value from + different ASes when selecting the best route. 
The + default behavior is to only compare MEDs for paths + received from the same AS."; + } + leaf ignore-as-path { + type boolean; + default "false"; + description + "Ignore the AS path length when selecting the best path. + The default is to use the AS path length and prefer paths + with shorter length."; + } + leaf external-compare-router-id { + type boolean; + default "true"; + description + "When comparing similar routes received from external + BGP peers, use the router-id as a criterion to select + the active path. The default is to use the router-id to + select among similar routes."; + } + leaf advertise-inactive-routes { + type boolean; + default "false"; + description + "Advertise inactive routes to external peers. The + default is to only advertise active routes."; + } + } + container default-route-distance { + description + "Administrative distance (or preference) assigned to + routes received from different sources + (external, internal, and local.)"; + leaf external-route-distance { + type uint8 { + range "1..255"; + } + description + "Administrative distance for routes learned from external + BGP (eBGP)"; + } + leaf internal-route-distance { + type uint8 { + range "1..255"; + } + description + "Administrative distance for routes learned from internal + BGP (iBGP)"; + + } + } + } + + uses bgp-address-family-common-configuration; + + list peer-group { + key "group-name"; + description + "List of peer-groups, uniquely identified by the peer group + names"; + leaf group-name { + type string; + description "Name of the peer group"; + } + leaf group-type { + type peer-group-type; + description + "Explicitly designate the peer group as internal (iBGP) + or external (eBGP)"; + } + uses bgp-common-configuration; + uses bgp-address-family-common-configuration; + uses bgp-group-neighbor-common-configuration; + } + + list neighbor { + key "neighbor-address"; + description + "List of BGP peers, uniquely identified by neighbor address"; + leaf neighbor-address { 
+ type inet:ip-address; + description + "Address of the BGP peer, either IPv4 or IPv6"; + } + + leaf peer-as { + type inet:as-number; + mandatory "true"; + description + "AS number of the peer"; + + } + uses bgp-common-configuration; + uses bgp-address-family-common-configuration; + uses bgp-group-neighbor-common-configuration; + } + + } +}` diff --git a/src/webui/internal/goyang/pkg/yang/camelcase.go b/src/webui/internal/goyang/pkg/yang/camelcase.go new file mode 100644 index 000000000..2704ddcd5 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/camelcase.go @@ -0,0 +1,94 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +var knownWords = map[string]string{ + "Ietf": "IETF", +} + +// Is c an ASCII lower-case letter? +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns a CamelCased name for a YANG identifier. +// Currently this supports the output being used for a Go or proto identifier. +// Dash and dot are first converted to underscore, and then any underscores +// before a lower-case letter are removed, and the letter converted to +// upper-case. Any input characters not part of the YANG identifier +// specification (https://tools.ietf.org/html/rfc7950#section-6.2) are treated +// as lower-case characters. 
+// The first letter is always upper-case in order to be an exported name in Go. +// There is a remote possibility of this rewrite causing a name collision, but +// it's so remote we're prepared to pretend it's nonexistent - since the C++ +// generator lowercases names, it's extremely unlikely to have two fields with +// different capitalizations. In short, _my_field-name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + + fix := func(c byte) byte { + if c == '-' || c == '.' { + return '_' + } + return c + } + + t := make([]byte, 0, 32) + i := 0 + if fix(s[0]) == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := fix(s[i]) + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + start := len(t) + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + // If the word turns out to be a special word, then use that instead. + if kn := knownWords[string(t[start:])]; kn != "" { + t = append(t[:start], []byte(kn)...) + } + } + return string(t) +} diff --git a/src/webui/internal/goyang/pkg/yang/camelcase_test.go b/src/webui/internal/goyang/pkg/yang/camelcase_test.go new file mode 100644 index 000000000..0539bfa8e --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/camelcase_test.go @@ -0,0 +1,53 @@ +// Copyright 2015 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "testing" +) + +func TestCamelCase(t *testing.T) { + tests := []struct { + in, want string + }{ + {"one", "One"}, + {"one_two", "OneTwo"}, + {"__one__two__three__four", "XOne_Two_Three_Four"}, + {"one.two.three", "OneTwoThree"}, + {"one.two.three.", "OneTwoThree_"}, + {"_my_field_name_2", "XMyFieldName_2"}, + {"Something_Capped", "Something_Capped"}, + {"_Foo-bar", "XFooBar"}, + {"my_Name", "My_Name"}, + {"OneTwo", "OneTwo"}, + {"_", "X"}, + {"_a_", "XA_"}, + {"ietf-interface", "IETFInterface"}, + {"ietf-interface-1", "IETFInterface_1"}, + {"out-unicast.pkts", "OutUnicastPkts"}, + // Invalid input conversion behaviours: + {"one/two", "One/two"}, + {"/one/two", "/one/two"}, + {"one:two", "One:two"}, + {"::one::two", "::one::two"}, + {"one|two", "One|two"}, + {"one||two", "One||two"}, + } + for _, tc := range tests { + if got := CamelCase(tc.in); got != tc.want { + t.Errorf("CamelCase(%q) = %q, want %q", tc.in, got, tc.want) + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/doc.go b/src/webui/internal/goyang/pkg/yang/doc.go new file mode 100644 index 000000000..3dab829c4 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/doc.go @@ -0,0 +1,48 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yang is used to parse .yang files (see RFC 6020). +// +// A generic yang statements takes one of the forms: +// +// keyword [argument] ; +// keyword [argument] { [statement [...]] } +// +// At the lowest level, package yang returns a simple tree of statements via the +// Parse function. The Parse function makes no attempt to determine the +// validity of the source, other than checking for generic syntax errors. +// +// At it's simplest, the GetModule function is used. The GetModule function +// searches the current directory, and any directory added to the Path variable, +// for a matching .yang source file by appending .yang to the name of the +// module: +// +// // Get the tree for the module module-name by looking for the source +// // file named module-name.yang. +// e, errs := yang.GetModule("module-name" [, optional sources...]) +// if len(errs) > 0 { +// for _, err := range errs { +// fmt.Fprintln(os.Stderr, err) +// } +// os.Exit(1) +// } +// +// // e is the Entry tree for "module-name" +// +// More complicated uses cases should use NewModules and then some combination +// of Modules.GetModule, Modules.Read, Modules.Parse, and Modules.GetErrors. +// +// The GetErrors method is mandatory, however, both yang.GetModule and +// Modules.GetModule automatically call Modules.GetErrors. 
+package yang diff --git a/src/webui/internal/goyang/pkg/yang/entry.go b/src/webui/internal/goyang/pkg/yang/entry.go new file mode 100644 index 000000000..cbba30b7d --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/entry.go @@ -0,0 +1,1664 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// The file contains the code to convert an AST (Node) tree into an Entry tree +// via the ToEntry function. The entry tree, once fully resolved, is the +// product of this package. The tree should have all types and references +// resolved. +// +// TODO(borman): handle types, leafrefs, and extensions + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/openconfig/goyang/pkg/indent" +) + +// A TriState may be true, false, or unset +type TriState int + +// The possible states of a TriState. +const ( + TSUnset = TriState(iota) + TSTrue + TSFalse +) + +// Value returns the value of t as a boolean. Unset is returned as false. +func (t TriState) Value() bool { + return t == TSTrue +} + +// String displays t as a string. +func (t TriState) String() string { + switch t { + case TSUnset: + return "unset" + case TSTrue: + return "true" + case TSFalse: + return "false" + default: + return fmt.Sprintf("ts-%d", t) + } +} + +// deviationPresence stores whether certain attributes for a DeviateEntry-type +// Entry have been given deviation values. 
This is useful when the attribute +// doesn't have a presence indicator (e.g. non-pointers). +type deviationPresence struct { + hasMinElements bool + hasMaxElements bool +} + +// Entry represents a single schema tree node, which can be a directory +// (containing a subtree) or a leaf node (which contains YANG types that have +// no children, e.g., leaf, leaf-list). They can be distinguished by whether +// their "Dir" field is nil. This object is created from a corresponding AST +// node after applying modifications (i.e. uses, augments, deviations). If +// Errors is not nil then it means semantic errors existed while converting the +// AST, in which case the only other valid field other than Errors is Node. +type Entry struct { + Parent *Entry `json:"-"` + Node Node `json:"-"` // the base node this Entry was derived from. + Name string // our name, same as the key in our parent Dirs + Description string `json:",omitempty"` // description from node, if any + // Default value for the node, if any. Note that only leaf-lists may + // have more than one value. For all other types, use the + // SingleDefaultValue() method to access the default value. 
+ Default []string `json:",omitempty"` + Units string `json:",omitempty"` // units associated with the type, if any + Errors []error `json:"-"` // list of errors encountered on this node + Kind EntryKind // kind of Entry + Config TriState // config state of this entry, if known + Prefix *Value `json:",omitempty"` // prefix to use from this point down + Mandatory TriState `json:",omitempty"` // whether this entry is mandatory in the tree + + // Fields associated with directory nodes + Dir map[string]*Entry `json:",omitempty"` + Key string `json:",omitempty"` // Optional key name for lists (i.e., maps) + + // Fields associated with leaf nodes + Type *YangType `json:",omitempty"` + + // Extensions found + Exts []*Statement `json:",omitempty"` + + // Fields associated with list nodes (both lists and leaf-lists) + ListAttr *ListAttr `json:",omitempty"` + + RPC *RPCEntry `json:",omitempty"` // set if we are an RPC + + // Identities that are defined in this context, this is set if the Entry + // is a module only. + Identities []*Identity `json:",omitempty"` + + Augments []*Entry `json:",omitempty"` // Augments defined in this entry. + Augmented []*Entry `json:",omitempty"` // Augments merged into this entry. + Deviations []*DeviatedEntry `json:"-"` // Deviations associated with this entry. + Deviate map[deviationType][]*Entry `json:"-"` + // deviationPresence tracks whether certain attributes for a DeviateEntry-type + // Entry have been given deviation values. + deviatePresence deviationPresence + Uses []*UsesStmt `json:",omitempty"` // Uses merged into this entry. + + // Extra maps all the unsupported fields to their values + Extra map[string][]interface{} `json:"extra-unstable,omitempty"` + + // Annotation stores annotated values, and is not populated by this + // library but rather can be used by calling code where additional + // information should be stored alongside the Entry. 
+ Annotation map[string]interface{} `json:",omitempty"` + + // namespace stores the namespace of the Entry if it overrides the + // root namespace within the schema tree. This is the case where an + // entry is augmented into the tree, and it retains the namespace of + // the augmenting entity per RFC6020 Section 7.15.2. The namespace + // of the Entry should be accessed using the Namespace function. + namespace *Value +} + +// An RPCEntry contains information related to an RPC Node. +type RPCEntry struct { + Input *Entry + Output *Entry +} + +// A ListAttr is associated with an Entry that represents a List node +type ListAttr struct { + MinElements uint64 // leaf-list or list MUST have at least min-elements + MaxElements uint64 // leaf-list or list has at most max-elements + // OrderedBy is deprecated. Use OrderedByUser instead. + OrderedBy *Value + // OrderedByUser indicates whether the entries are "ordered-by user". + // Otherwise the order is determined by the system. + OrderedByUser bool +} + +// parseOrderedBy parses the ordered-by value and classifies the list/leaf-list +// by whether the `ordered-by user` modifier is active. +// +// For more information see +// https://datatracker.ietf.org/doc/html/rfc7950#section-7.7.7 +func (l *ListAttr) parseOrderedBy(s *Value) error { + if s == nil { + return nil + } + l.OrderedBy = s + switch s.Name { + case "user": + l.OrderedByUser = true + case "system": + default: + return fmt.Errorf("%s: ordered-by has invalid argument: %q", Source(s), s.Name) + } + return nil +} + +// NewDefaultListAttr returns a new ListAttr object with min/max elements being +// set to 0/math.MaxUint64 respectively. +func NewDefaultListAttr() *ListAttr { + return &ListAttr{ + MinElements: 0, + MaxElements: math.MaxUint64, + } +} + +// A UsesStmt associates a *Uses with its referenced grouping *Entry +type UsesStmt struct { + Uses *Uses + Grouping *Entry +} + +// Modules returns the Modules structure that e is part of. 
This is needed +// when looking for rooted nodes not part of this Entry tree. +func (e *Entry) Modules() *Modules { + for e.Parent != nil { + e = e.Parent + } + return e.Node.(*Module).Modules +} + +// IsDir returns true if e is a directory. +func (e *Entry) IsDir() bool { + return e.Dir != nil +} + +// IsLeaf returns true if e is a leaf i.e. is not a container, list, leaf-list, +// choice or case statement. +func (e *Entry) IsLeaf() bool { + return !e.IsDir() && e.Kind == LeafEntry && e.ListAttr == nil +} + +// IsLeafList returns true if e is a leaf-list. +func (e *Entry) IsLeafList() bool { + return !e.IsDir() && e.Kind == LeafEntry && e.ListAttr != nil +} + +// IsList returns true if e is a list. +func (e *Entry) IsList() bool { + return e.IsDir() && e.ListAttr != nil +} + +// IsContainer returns true if e is a container. +func (e *Entry) IsContainer() bool { + return e.Kind == DirectoryEntry && e.ListAttr == nil +} + +// IsChoice returns true if the entry is a choice node within the schema. +func (e *Entry) IsChoice() bool { + return e.Kind == ChoiceEntry +} + +// IsCase returns true if the entry is a case node within the schema. +func (e *Entry) IsCase() bool { + return e.Kind == CaseEntry +} + +// Print prints e to w in human readable form. 
+func (e *Entry) Print(w io.Writer) { + if e.Description != "" { + fmt.Fprintln(w) + fmt.Fprintln(indent.NewWriter(w, "// "), e.Description) + } + if e.ReadOnly() { + fmt.Fprintf(w, "RO: ") + } else { + fmt.Fprintf(w, "rw: ") + } + if e.Type != nil { + fmt.Fprintf(w, "%s ", e.Type.Name) + } + switch { + case e.Dir == nil && e.ListAttr != nil: + fmt.Fprintf(w, "[]%s\n", e.Name) + return + case e.Dir == nil: + fmt.Fprintf(w, "%s\n", e.Name) + return + case e.ListAttr != nil: + fmt.Fprintf(w, "[%s]%s {\n", e.Key, e.Name) //} + default: + fmt.Fprintf(w, "%s {\n", e.Name) //} + } + var names []string + for k := range e.Dir { + names = append(names, k) + } + sort.Strings(names) + for _, k := range names { + e.Dir[k].Print(indent.NewWriter(w, " ")) + } + // { to match the brace below to keep brace matching working + fmt.Fprintln(w, "}") +} + +// An EntryKind is the kind of node an Entry is. All leaf nodes are of kind +// LeafEntry. A LeafList is also considered a leaf node. All other kinds are +// directory nodes. +type EntryKind int + +// Enumeration of the types of entries. +const ( + LeafEntry = EntryKind(iota) + DirectoryEntry + AnyDataEntry + AnyXMLEntry + CaseEntry + ChoiceEntry + InputEntry + NotificationEntry + OutputEntry + DeviateEntry +) + +// EntryKindToName maps EntryKind to their names +var EntryKindToName = map[EntryKind]string{ + LeafEntry: "Leaf", + DirectoryEntry: "Directory", + AnyDataEntry: "AnyData", + AnyXMLEntry: "AnyXML", + CaseEntry: "Case", + ChoiceEntry: "Choice", + InputEntry: "Input", + NotificationEntry: "Notification", + OutputEntry: "Output", + DeviateEntry: "Deviate", +} + +func (k EntryKind) String() string { + if s := EntryKindToName[k]; s != "" { + return s + } + return fmt.Sprintf("unknown-entry-%d", k) +} + +// newDirectory returns an empty directory Entry. 
+func newDirectory(n Node) *Entry { + return &Entry{ + Kind: DirectoryEntry, + Dir: make(map[string]*Entry), + Node: n, + Name: n.NName(), + Extra: map[string][]interface{}{}, + } +} + +// newLeaf returns an empty leaf Entry. +func newLeaf(n Node) *Entry { + return &Entry{ + Kind: LeafEntry, + Node: n, + Name: n.NName(), + Extra: map[string][]interface{}{}, + } +} + +// newError returns an error Entry using format and v to create the error +// contained in the node. The location of the error is prepended. +func newError(n Node, format string, v ...interface{}) *Entry { + e := &Entry{Node: n} + e.errorf("%s: "+format, append([]interface{}{Source(n)}, v...)...) + return e +} + +// errorf appends the error constructed from string and v to the list of errors +// on e. +func (e *Entry) errorf(format string, v ...interface{}) { + e.Errors = append(e.Errors, fmt.Errorf(format, v...)) +} + +// addError appends err to the list of errors on e if err is not nil. +func (e *Entry) addError(err error) { + if err != nil { + e.Errors = append(e.Errors, err) + } +} + +// importErrors imports all the errors from c and its children into e. +func (e *Entry) importErrors(c *Entry) { + if c == nil { + return + } + for _, err := range c.Errors { + e.addError(err) + } + // TODO(borman): need to determine if the extensions have errors + // for _, ce := range e.Exts { + // e.importErrors(ce) + // } + for _, ce := range c.Dir { + e.importErrors(ce) + } +} + +// checkErrors calls f on every error found in the tree e and its children. +func (e *Entry) checkErrors(f func(error)) { + if e == nil { + return + } + for _, e := range e.Dir { + e.checkErrors(f) + } + for _, err := range e.Errors { + f(err) + } + // TODO(borman): need to determine if the extensions have errors + // for _, e := range e.Exts { + // e.checkErrors(f) + // } +} + +// GetErrors returns a sorted list of errors found in e. +func (e *Entry) GetErrors() []error { + // the seen map is used to eliminate duplicate errors. 
+ // Some entries will be processed more than once + // (groupings in particular) and as such may cause + // duplication of errors. + seen := map[error]bool{} + var errs []error + e.checkErrors(func(err error) { + if !seen[err] { + errs = append(errs, err) + seen[err] = true + } + }) + return errorSort(errs) +} + +// add adds the directory entry key assigned to the provided value. +func (e *Entry) add(key string, value *Entry) *Entry { + value.Parent = e + if e.Dir[key] != nil { + e.errorf("%s: duplicate key from %s: %s", Source(e.Node), Source(value.Node), key) + return e + } + e.Dir[key] = value + return e +} + +// delete removes the directory entry key from the entry. +func (e *Entry) delete(key string) { + if _, ok := e.Dir[key]; !ok { + e.errorf("%s: unknown child key %s", Source(e.Node), key) + } + delete(e.Dir, key) +} + +// GetWhenXPath returns the when XPath statement of e if able. +func (e *Entry) GetWhenXPath() (string, bool) { + switch n := e.Node.(type) { + case *Container: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *Leaf: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *LeafList: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *List: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *Choice: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *Case: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *AnyXML: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *AnyData: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + case *Augment: + if n.When != nil && n.When.Statement() != nil { + return n.When.Statement().Arg() + } + } + return "", false +} + +// deviationType specifies an enumerated value 
covering the different substatements +// to the deviate statement. +type deviationType int64 + +const ( + // DeviationUnset specifies that the argument was unset, which is invalid. + DeviationUnset deviationType = iota + // DeviationNotSupported corresponds to the not-supported deviate argument. + DeviationNotSupported + // DeviationAdd corresponds to the add deviate argument to the deviate stmt. + DeviationAdd + // DeviationReplace corresponds to the replace argument to the deviate stmt. + DeviationReplace + // DeviationDelete corresponds to the delete argument to the deviate stmt. + DeviationDelete +) + +var ( + // fromDeviation maps from an enumerated deviation type to the YANG keyword. + fromDeviation = map[deviationType]string{ + DeviationNotSupported: "not-supported", + DeviationAdd: "add", + DeviationReplace: "replace", + DeviationDelete: "delete", + DeviationUnset: "unknown", + } + + // toDeviation maps from the YANG keyword to an enumerated deviation type. + toDeviation = map[string]deviationType{ + "not-supported": DeviationNotSupported, + "add": DeviationAdd, + "replace": DeviationReplace, + "delete": DeviationDelete, + } +) + +func (d deviationType) String() string { + return fromDeviation[d] +} + +// DeviatedEntry stores a wrapped Entry that corresponds to a deviation. +type DeviatedEntry struct { + Type deviationType // Type specifies the deviation type. + DeviatedPath string // DeviatedPath corresponds to the path that is being deviated. + // Entry is the embedded Entry storing the deviations that are made. Fields + // are set to the value in the schema after the deviation has been applied. + *Entry +} + +// semCheckMaxElements checks whether the max-element argument is valid, and returns the specified value. 
+func semCheckMaxElements(v *Value) (uint64, error) { + if v == nil || v.Name == "unbounded" { + return math.MaxUint64, nil + } + val, err := strconv.ParseUint(v.Name, 10, 64) + if err != nil { + return val, fmt.Errorf(`%s: invalid max-elements value %q (expect "unbounded" or a positive integer): %v`, Source(v), v.Name, err) + } + if val == 0 { + return val, fmt.Errorf(`%s: invalid max-elements value 0 (expect "unbounded" or a positive integer)`, Source(v)) + } + return val, nil +} + +// semCheckMinElements checks whether the min-element argument is valid, and returns the specified value. +func semCheckMinElements(v *Value) (uint64, error) { + if v == nil { + return 0, nil + } + val, err := strconv.ParseUint(v.Name, 10, 64) + if err != nil { + return val, fmt.Errorf(`%s: invalid min-elements value %q (expect a non-negative integer): %v`, Source(v), v.Name, err) + } + return val, nil +} + +// ToEntry expands node n into a directory Entry. Expansion is based on the +// YANG tags in the structure behind n. ToEntry must only be used +// with nodes that are directories, such as top level modules and sub-modules. +// ToEntry never returns nil. Any errors encountered are found in the Errors +// fields of the returned Entry and its children. Use GetErrors to determine +// if there were any errors. +func ToEntry(n Node) (e *Entry) { + if n == nil { + err := errors.New("ToEntry called on nil AST node") + return &Entry{ + Node: &ErrorNode{Error: err}, + Errors: []error{err}, + } + } + ms := RootNode(n).Modules + if e := ms.getEntryCache(n); e != nil { + return e + } + defer func() { + ms.setEntryCache(n, e) + }() + + // Copy in the extensions from our Node, if any. + defer func(n Node) { + if e != nil { + e.Exts = append(e.Exts, n.Exts()...) + } + }(n) + + // tristateValue returns TSTrue if i contains the value of true, TSFalse + // if it contains the value of false, and TSUnset if i does not have + // a set value (for instance, i is nil). 
An error is returned if i + // contains a value other than true or false. + tristateValue := func(i interface{}) (TriState, error) { + if v, ok := i.(*Value); ok && v != nil { + switch v.Name { + case "true": + return TSTrue, nil + case "false": + return TSFalse, nil + default: + return TSUnset, fmt.Errorf("%s: invalid config value: %s", Source(n), v.Name) + } + } + return TSUnset, nil + } + + var err error + // Handle non-directory nodes (leaf, leafref, and oddly enough, uses). + switch s := n.(type) { + case *Leaf: + e := newLeaf(n) + if errs := s.Type.resolve(ms.typeDict); errs != nil { + e.Errors = errs + } + if s.Description != nil { + e.Description = s.Description.Name + } + if s.Default != nil { + e.Default = []string{s.Default.Name} + } + e.Type = s.Type.YangType + e.Config, err = tristateValue(s.Config) + e.addError(err) + e.Prefix = getRootPrefix(e) + addExtraKeywordsToLeafEntry(n, e) + e.Mandatory, err = tristateValue(s.Mandatory) + e.addError(err) + return e + case *LeafList: + // Create the equivalent leaf element that we are a list of. + // We can then just annotate it as a list rather than a leaf. 
+ leaf := &Leaf{ + Name: s.Name, + Source: s.Source, + Parent: s.Parent, + Extensions: s.Extensions, + Config: s.Config, + Description: s.Description, + IfFeature: s.IfFeature, + Must: s.Must, + Reference: s.Reference, + Status: s.Status, + Type: s.Type, + Units: s.Units, + When: s.When, + } + + e = ToEntry(leaf) + e.ListAttr = NewDefaultListAttr() + if err := e.ListAttr.parseOrderedBy(s.OrderedBy); err != nil { + e.addError(err) + } + var err error + if e.ListAttr.MaxElements, err = semCheckMaxElements(s.MaxElements); err != nil { + e.addError(err) + } + if e.ListAttr.MinElements, err = semCheckMinElements(s.MinElements); err != nil { + e.addError(err) + } + if len(s.Default) != 0 { + for _, def := range s.Default { + e.Default = append(e.Default, def.Name) + } + } + e.Prefix = getRootPrefix(e) + return e + case *Uses: + g := FindGrouping(s, s.Name, map[string]bool{}) + if g == nil { + return newError(n, "unknown group: %s", s.Name) + } + // We need to return a duplicate so we resolve properly + // when the group is used in multiple locations and the + // grouping has a leafref that references outside the group. + e = ToEntry(g).dup() + addExtraKeywordsToLeafEntry(n, e) + return e + } + + e = newDirectory(n) + + // Special handling for individual Node types. Lists are like any other + // node except a List has a ListAttr. + // + // Nodes of identified special kinds have their Kind set here. 
+ switch s := n.(type) { + case *List: + e.ListAttr = NewDefaultListAttr() + if err := e.ListAttr.parseOrderedBy(s.OrderedBy); err != nil { + e.addError(err) + } + var err error + if e.ListAttr.MaxElements, err = semCheckMaxElements(s.MaxElements); err != nil { + e.addError(err) + } + if e.ListAttr.MinElements, err = semCheckMinElements(s.MinElements); err != nil { + e.addError(err) + } + case *Choice: + e.Kind = ChoiceEntry + if s.Default != nil { + e.Default = []string{s.Default.Name} + } + case *Case: + e.Kind = CaseEntry + case *AnyData: + e.Kind = AnyDataEntry + case *AnyXML: + e.Kind = AnyXMLEntry + case *Input: + e.Kind = InputEntry + case *Output: + e.Kind = OutputEntry + case *Notification: + e.Kind = NotificationEntry + case *Deviate: + e.Kind = DeviateEntry + } + + // Use Elem to get the Value of structure that n is pointing to. + v := reflect.ValueOf(n).Elem() + t := v.Type() + found := false + + for i := t.NumField() - 1; i > 0; i-- { + f := t.Field(i) + yang := f.Tag.Get("yang") + if yang == "" { + continue + } + fv := v.Field(i) + name := strings.Split(yang, ",")[0] + switch name { + case "": + e.addError(fmt.Errorf("%s: nil statement", Source(n))) + case "config": + e.Config, err = tristateValue(fv.Interface()) + e.addError(err) + case "description": + if v := fv.Interface().(*Value); v != nil { + e.Description = v.Name + } + case "prefix": + if v := fv.Interface().(*Value); v != nil { + e.Prefix = v + } + case "action": + for _, r := range fv.Interface().([]*Action) { + e.add(r.Name, ToEntry(r)) + } + case "augment": + for _, a := range fv.Interface().([]*Augment) { + ne := ToEntry(a) + ne.Parent = e + e.Augments = append(e.Augments, ne) + } + case "anydata": + for _, a := range fv.Interface().([]*AnyData) { + e.add(a.Name, ToEntry(a)) + } + case "anyxml": + for _, a := range fv.Interface().([]*AnyXML) { + e.add(a.Name, ToEntry(a)) + } + case "case": + for _, a := range fv.Interface().([]*Case) { + e.add(a.Name, ToEntry(a)) + } + case "choice": + 
for _, a := range fv.Interface().([]*Choice) { + e.add(a.Name, ToEntry(a)) + } + case "container": + for _, a := range fv.Interface().([]*Container) { + e.add(a.Name, ToEntry(a)) + } + case "grouping": + for _, a := range fv.Interface().([]*Grouping) { + // We just want to parse the grouping to + // collect errors. + e.importErrors(ToEntry(a)) + } + case "import": + // Import only makes types and such available. + // There is nothing else for us to do. + case "include": + for _, a := range fv.Interface().([]*Include) { + // Handle circular dependencies between submodules. This can occur in + // two ways: + // - Where submodule A imports submodule B, and vice versa then the + // whilst processing A we will also try and process A (learnt via + // B). The default case of the switch handles this case. + // - Where submodule A imports submodule B that imports C, which also + // imports A, then we need to check whether we already have merged + // the specified module during this parse attempt. We check this + // against a map of merged submodules. + // The key of the map used is a synthesised value which is formed by + // concatenating the name of this node and the included submodule, + // separated by a ":". + srcToIncluded := a.Module.Name + ":" + n.NName() + includedToSrc := n.NName() + ":" + a.Module.Name + + switch { + case ms.mergedSubmodule[srcToIncluded]: + // We have already merged this module, so don't try and do it + // again. + continue + case !ms.mergedSubmodule[includedToSrc] && a.Module.NName() != n.NName(): + // We have not merged A->B, and B != B hence go ahead and merge. + includedToParent := a.Module.Name + ":" + a.Module.BelongsTo.Name + if ms.mergedSubmodule[includedToParent] { + // Don't try and re-import submodules that have already been imported + // into the top-level module. Note that this ensures that we get to the + // top the tree (whichever the actual module for the chain of + // submodules is). 
The tracking of the immediate parent is achieved + // through 'key', which ensures that we do not end up in loops + // walking through a sub-cycle of the include graph. + continue + } + ms.mergedSubmodule[srcToIncluded] = true + ms.mergedSubmodule[includedToParent] = true + e.merge(a.Module.Prefix, nil, ToEntry(a.Module)) + case ms.ParseOptions.IgnoreSubmoduleCircularDependencies: + continue + default: + e.addError(fmt.Errorf("%s: has a circular dependency, importing %s", n.NName(), a.Module.NName())) + } + } + case "leaf": + for _, a := range fv.Interface().([]*Leaf) { + e.add(a.Name, ToEntry(a)) + } + case "leaf-list": + for _, a := range fv.Interface().([]*LeafList) { + e.add(a.Name, ToEntry(a)) + } + case "list": + for _, a := range fv.Interface().([]*List) { + e.add(a.Name, ToEntry(a)) + } + case "key": + if v := fv.Interface().(*Value); v != nil { + e.Key = v.Name + } + case "notification": + for _, a := range fv.Interface().([]*Notification) { + e.add(a.Name, ToEntry(a)) + } + case "rpc": + // TODO(borman): what do we do with these? + // seems fine to ignore them for now, we are + // just interested in the tree structure. 
+ for _, r := range fv.Interface().([]*RPC) { + switch rpc := ToEntry(r); { + case rpc.RPC == nil: + // When "rpc" has no "input" or "output" children + rpc.RPC = &RPCEntry{} + fallthrough + default: + e.add(r.Name, rpc) + } + } + + case "input": + if i := fv.Interface().(*Input); i != nil { + if e.RPC == nil { + e.RPC = &RPCEntry{} + } + in := ToEntry(i) + in.Parent = e + e.RPC.Input = in + e.RPC.Input.Name = "input" + e.RPC.Input.Kind = InputEntry + } + case "output": + if o := fv.Interface().(*Output); o != nil { + if e.RPC == nil { + e.RPC = &RPCEntry{} + } + out := ToEntry(o) + out.Parent = e + e.RPC.Output = out + e.RPC.Output.Name = "output" + e.RPC.Output.Kind = OutputEntry + } + case "identity": + if i := fv.Interface().([]*Identity); i != nil { + e.Identities = i + } + case "uses": + for _, a := range fv.Interface().([]*Uses) { + grouping := ToEntry(a) + e.merge(nil, nil, grouping) + // Apply inline augments from the uses statement. Their paths + // are relative to e (the node where the uses appears), so we + // resolve them directly rather than deferring to e.Augment(). + for _, aug := range a.Augment { + target := e.Find(aug.Name) + if target != nil { + augEntry := ToEntry(aug) + target.merge(nil, augEntry.Namespace(), augEntry) + } + } + if ms.ParseOptions.StoreUses { + e.Uses = append(e.Uses, &UsesStmt{a, grouping.shallowDup()}) + } + } + case "type": + // The type keyword is specific to deviate to change a type. Other type handling + // (e.g., leaf type resolution) is done outside of this case. + n, ok := n.(*Deviate) + if !ok { + e.addError(fmt.Errorf("unexpected type found, only valid under Deviate, is %T", n)) + continue + } + + if n.Type != nil { + if errs := n.Type.resolve(ms.typeDict); errs != nil { + e.addError(fmt.Errorf("deviation has unresolvable type, %v", errs)) + continue + } + e.Type = n.Type.YangType + } + continue + // Keywords that do not need to be handled as an Entry as they are added + // to other dictionaries. 
+ case "default": + switch e.Kind { + case LeafEntry, ChoiceEntry: + // default is handled separately for leaf, leaf-list and choice + case DeviateEntry: + // handle deviate statements. + // TODO(wenovus): support refine statement's default substatement. + d, ok := fv.Interface().(*Value) + if !ok { + e.addError(fmt.Errorf("%s: unexpected default type in %s:%s", Source(n), n.Kind(), n.NName())) + } + // TODO(wenovus): deviate statement and refine statement should + // allow multiple default substatements for leaf-list types (YANG1.1). + if d != nil { + e.Default = []string{d.asString()} + } + } + case "typedef": + continue + case "deviation": + if a := fv.Interface().([]*Deviation); a != nil { + for _, d := range a { + deviatedEntry := ToEntry(d) + e.importErrors(deviatedEntry) + e.Deviations = append(e.Deviations, &DeviatedEntry{ + Entry: deviatedEntry, + DeviatedPath: d.Statement().Argument, + }) + + for _, sd := range d.Deviate { + if sd.Type != nil { + sd.Type.resolve(ms.typeDict) + } + } + } + } + case "deviate": + if a := fv.Interface().([]*Deviate); a != nil { + for _, d := range a { + de := ToEntry(d) + + dt, ok := toDeviation[d.Statement().Argument] + if !ok { + e.addError(fmt.Errorf("%s: unknown deviation type in %s:%s", Source(n), n.Kind(), n.NName())) + continue + } + + if e.Deviate == nil { + e.Deviate = map[deviationType][]*Entry{} + } + + e.Deviate[dt] = append(e.Deviate[dt], de) + } + } + case "mandatory": + v, ok := fv.Interface().(*Value) + if !ok { + e.addError(fmt.Errorf("%s: did not get expected value type", Source(n))) + } + e.Mandatory, err = tristateValue(v) + e.addError(err) + case "max-elements", "min-elements": + if e.Kind != DeviateEntry { + continue + } + // we can get max-elements or min-elements in a deviate statement, so create the + // corresponding logic. 
+ v, ok := fv.Interface().(*Value) + if !ok { + e.addError(fmt.Errorf("%s: max or min elements had wrong type, %s:%s", Source(n), n.Kind(), n.NName())) + continue + } + + if e.ListAttr == nil { + e.ListAttr = NewDefaultListAttr() + } + + // Only record the deviation if the statement exists. + if v != nil { + var err error + if name == "max-elements" { + e.deviatePresence.hasMaxElements = true + if e.ListAttr.MaxElements, err = semCheckMaxElements(v); err != nil { + e.addError(err) + } + } else { + e.deviatePresence.hasMinElements = true + if e.ListAttr.MinElements, err = semCheckMinElements(v); err != nil { + e.addError(err) + } + } + } + case "units": + v, ok := fv.Interface().(*Value) + if !ok { + e.addError(fmt.Errorf("%s: units had wrong type, %s:%s", Source(n), n.Kind(), n.NName())) + } + if v != nil { + e.Units = v.asString() + } + // TODO(borman): unimplemented keywords + case "belongs-to", + "contact", + "extension", + "feature", + "if-feature", + "must", + "namespace", + "ordered-by", + "organization", + "presence", + "reference", + "revision", + "status", + "unique", + "when", + "yang-version": + if !fv.IsNil() { + addToExtrasSlice(fv, name, e) + } + continue + + case "Ext", "Name", "Parent", "Statement": + // These are meta-keywords used internally + continue + default: + e.addError(fmt.Errorf("%s: unexpected statement: %s", Source(n), name)) + continue + + } + // We found at least one field. + found = true + } + if !found { + return newError(n, "%T: cannot be converted to a *Entry", n) + } + // If prefix isn't set, provide it based on our root node (module) + if e.Prefix == nil { + e.Prefix = getRootPrefix(e) + } + + return e +} + +// addExtraKeywordsToLeafEntry stores the values for unimplemented keywords in leaf entries. 
func addExtraKeywordsToLeafEntry(n Node, e *Entry) {
	v := reflect.ValueOf(n).Elem()
	t := v.Type()

	// Walk the struct fields in reverse; field 0 is skipped (i > 0).
	// NOTE(review): presumably field 0 is the embedded parent/meta field —
	// confirm against the Node struct definitions.
	for i := t.NumField() - 1; i > 0; i-- {
		f := t.Field(i)
		yang := f.Tag.Get("yang")
		if yang == "" {
			continue
		}
		fv := v.Field(i)
		// The yang tag is "name[,flags...]"; only the name matters here.
		name := strings.Split(yang, ",")[0]
		switch name {
		case "if-feature",
			"must",
			"reference",
			"status",
			"when":
			if !fv.IsNil() {
				addToExtrasSlice(fv, name, e)
			}
		}
	}
}

// addToExtrasSlice appends the value(s) held by fv to e.Extra[name],
// flattening a slice field into individual elements.
func addToExtrasSlice(fv reflect.Value, name string, e *Entry) {
	if fv.Kind() == reflect.Slice {
		for j := 0; j < fv.Len(); j++ {
			e.Extra[name] = append(e.Extra[name], fv.Index(j).Interface())
		}
	} else {
		e.Extra[name] = append(e.Extra[name], fv.Interface())
	}
}

// getRootPrefix returns the prefix of e's root node (module), or nil if the
// root cannot be determined.
func getRootPrefix(e *Entry) *Value {
	if m := RootNode(e.Node); m != nil {
		return m.getPrefix()
	}
	return nil
}

// Augment processes augments in e, return the number of augments processed
// and the augments skipped. If addErrors is true then missing augments will
// generate errors.
func (e *Entry) Augment(addErrors bool) (processed, skipped int) {
	// Now process the augments we found
	// NOTE(borman): is it possible this will fail if the augment refers
	// to some removed sibling that has not been processed? Perhaps this
	// should be done after the entire tree is built. Is it correct to
	// assume augment paths are data tree paths and not schema tree paths?
	// Augments can depend upon augments. We need to figure out how to
	// order the augments (or just keep trying until we can make no further
	// progress)
	var unapplied []*Entry
	for _, a := range e.Augments {
		target := a.Find(a.Name)
		if target == nil {
			if addErrors {
				e.errorf("%s: augment %s not found", Source(a.Node), a.Name)
			}
			skipped++
			// Keep the augment around so a later pass can retry it once
			// more of the tree has been built.
			unapplied = append(unapplied, a)
			continue
		}
		// Augments do not have a prefix we merge in, just a node.
		// We retain the namespace from the original context of the
		// augment since the nodes have this namespace even though they
		// are merged into another entry.
		processed++
		target.merge(nil, a.Namespace(), a)
		target.Augmented = append(target.Augmented, a.shallowDup())
	}
	// Only the augments that could not be applied remain queued on e.
	e.Augments = unapplied
	return processed, skipped
}

// ApplyDeviate walks the deviations within the supplied entry, and applies them to the
// schema.
func (e *Entry) ApplyDeviate(deviateOpts ...DeviateOpt) []error {
	var errs []error
	appendErr := func(err error) { errs = append(errs, err) }
	for _, d := range e.Deviations {
		deviatedNode := e.Find(d.DeviatedPath)
		if deviatedNode == nil {
			appendErr(fmt.Errorf("cannot find target node to deviate, %s", d.DeviatedPath))
			continue
		}

		for dt, dv := range d.Deviate {
			for _, devSpec := range dv {
				switch dt {
				case DeviationAdd, DeviationReplace:
					if devSpec.Config != TSUnset {
						deviatedNode.Config = devSpec.Config
					}

					if len(devSpec.Default) > 0 {
						switch dt {
						case DeviationAdd:
							switch {
							case deviatedNode.IsLeafList():
								// Leaf-lists may accumulate multiple defaults.
								deviatedNode.Default = append(deviatedNode.Default, devSpec.Default...)
							case len(devSpec.Default) > 1:
								appendErr(fmt.Errorf("%s: tried to add more than one default to a non-leaflist entry at deviation", Source(e.Node)))
							case len(deviatedNode.Default) != 0:
								appendErr(fmt.Errorf("%s: tried to add a default value to an entry that already has a default value", Source(e.Node)))
							case len(devSpec.Default) == 1 && len(deviatedNode.Default) == 0:
								deviatedNode.Default = append([]string{}, devSpec.Default[0])
							}
						case DeviationReplace:
							// Replace discards any existing defaults.
							deviatedNode.Default = append([]string{}, devSpec.Default...)
						}
					}

					if devSpec.Mandatory != TSUnset {
						deviatedNode.Mandatory = devSpec.Mandatory
					}

					if devSpec.deviatePresence.hasMinElements {
						if !deviatedNode.IsList() && !deviatedNode.IsLeafList() {
							appendErr(fmt.Errorf("tried to deviate min-elements on a non-list type %s", deviatedNode.Kind))
							continue
						}
						deviatedNode.ListAttr.MinElements = devSpec.ListAttr.MinElements
					}

					if devSpec.deviatePresence.hasMaxElements {
						if !deviatedNode.IsList() && !deviatedNode.IsLeafList() {
							appendErr(fmt.Errorf("tried to deviate max-elements on a non-list type %s", deviatedNode.Kind))
							continue
						}
						deviatedNode.ListAttr.MaxElements = devSpec.ListAttr.MaxElements
					}

					if devSpec.Units != "" {
						deviatedNode.Units = devSpec.Units
					}

					if devSpec.Type != nil {
						deviatedNode.Type = devSpec.Type
					}

				case DeviationNotSupported:
					// Remove the deviated node from its parent's directory.
					dp := deviatedNode.Parent
					if dp == nil {
						appendErr(fmt.Errorf("%s: node %s does not have a valid parent, but deviate not-supported references one", Source(e.Node), e.Name))
						continue
					}
					if !hasIgnoreDeviateNotSupported(deviateOpts) {
						dp.delete(deviatedNode.Name)
					}
				case DeviationDelete:
					if devSpec.Config != TSUnset {
						deviatedNode.Config = TSUnset
					}

					if len(devSpec.Default) > 0 {
						switch {
						case deviatedNode.IsLeafList():
							// It is unclear from RFC7950 on how deviate delete works
							// when there are duplicate leaf-list values in config-false leafs.
							// TODO(wenbli): Add support for deleting default values when the leaf-list is a config leaf (duplicates are not allowed).
							appendErr(fmt.Errorf("%s: deviate delete on default statements unsupported for leaf-lists, please use replace instead", Source(e.Node)))
						case len(deviatedNode.Default) == 0:
							appendErr(fmt.Errorf("%s: tried to deviate delete a default statement that doesn't exist", Source(e.Node)))
						case devSpec.Default[0] != deviatedNode.Default[0]:
							appendErr(fmt.Errorf("%s: tried to deviate delete a default statement with a non-matching keyword", Source(e.Node)))
						default:
							deviatedNode.Default = nil
						}
					}

					if devSpec.Mandatory != TSUnset {
						deviatedNode.Mandatory = TSUnset
					}

					if devSpec.deviatePresence.hasMinElements {
						if !deviatedNode.IsList() && !deviatedNode.IsLeafList() {
							appendErr(fmt.Errorf("tried to deviate min-elements on a non-list type %s", deviatedNode.Kind))
							continue
						}
						if deviatedNode.ListAttr.MinElements != devSpec.ListAttr.MinElements {
							// Argument value must match:
							// https://tools.ietf.org/html/rfc7950#section-7.20.3.2
							appendErr(fmt.Errorf("min-element value %d differs from deviation's min-element value %d for entry %v", devSpec.ListAttr.MinElements, deviatedNode.ListAttr.MinElements, d.DeviatedPath))
						}
						// Deleting min-elements restores the default of 0.
						deviatedNode.ListAttr.MinElements = 0
					}

					if devSpec.deviatePresence.hasMaxElements {
						if !deviatedNode.IsList() && !deviatedNode.IsLeafList() {
							appendErr(fmt.Errorf("tried to deviate max-elements on a non-list type %s", deviatedNode.Kind))
							continue
						}
						if deviatedNode.ListAttr.MaxElements != devSpec.ListAttr.MaxElements {
							appendErr(fmt.Errorf("max-element value %d differs from deviation's max-element value %d for entry %v", devSpec.ListAttr.MaxElements, deviatedNode.ListAttr.MaxElements, d.DeviatedPath))
						}
						// Deleting max-elements restores the default (unbounded).
						deviatedNode.ListAttr.MaxElements = math.MaxUint64
					}

				default:
					appendErr(fmt.Errorf("invalid deviation type %s", dt))
				}
			}
		}
	}

	return errs
}

// FixChoice inserts missing Case entries for non-case entries within a choice
// entry.
func (e *Entry) FixChoice() {
	// Only synthesize cases when the choice itself parsed cleanly.
	if e.Kind == ChoiceEntry && len(e.Errors) == 0 {
		for k, ce := range e.Dir {
			if ce.Kind != CaseEntry {
				// Wrap the non-case child in a synthetic case entry, as
				// implied by the YANG shorthand case syntax.
				ne := &Entry{
					Parent: e,
					Node: &Case{
						Parent:     ce.Node.ParentNode(),
						Name:       ce.Node.NName(),
						Source:     ce.Node.Statement(),
						Extensions: ce.Node.Exts(),
					},
					Name:   ce.Name,
					Kind:   CaseEntry,
					Config: ce.Config,
					Prefix: ce.Prefix,
					Dir:    map[string]*Entry{ce.Name: ce},
					Extra:  map[string][]interface{}{},
				}
				ce.Parent = ne
				e.Dir[k] = ne
			}
		}
	}
	// Recurse into all children regardless of this entry's kind.
	for _, ce := range e.Dir {
		ce.FixChoice()
	}
}

// ReadOnly returns true if e is a read-only variable (config == false).
// If Config is unset in e, then false is returned if e has no parent,
// otherwise the value parent's ReadOnly is returned.
func (e *Entry) ReadOnly() bool {
	switch {
	case e == nil:
		// We made it all the way to the root of the tree
		return false
	case e.Kind == OutputEntry:
		return true
	case e.Config == TSUnset:
		return e.Parent.ReadOnly()
	default:
		return !e.Config.Value()
	}
}

// Find finds the Entry named by name relative to e.
func (e *Entry) Find(name string) *Entry {
	if e == nil || name == "" {
		return nil
	}
	parts := strings.Split(name, "/")

	// If parts[0] is "" then this path started with a /
	// and we need to find our parent.
	if parts[0] == "" {
		parts = parts[1:]
		contextNode := e.Node
		// Walk up to the root entry before resolving the absolute path.
		for e.Parent != nil {
			e = e.Parent
		}
		if prefix, _ := getPrefix(parts[0]); prefix != "" {
			// The path starts in another module; switch the root to the
			// module named by the prefix (resolved in the original context).
			mod := FindModuleByPrefix(contextNode, prefix)
			if mod == nil {
				e.addError(fmt.Errorf("cannot find module giving prefix %q within context entry %q", prefix, e.Path()))
				return nil
			}
			m := module(mod)
			if m == nil {
				e.addError(fmt.Errorf("cannot find which module %q belongs to within context entry %q",
					mod.NName(), e.Path()))
				return nil
			}
			if m != e.Node.(*Module) {
				e = ToEntry(m)
			}
		}
	}

	for _, part := range parts {
		switch {
		case e == nil:
			return nil
		case part == ".":
			// Stay at the current entry.
		case part == "..":
			e = e.Parent
		case e.RPC != nil:
			// Descend into the RPC's input/output, creating a placeholder
			// entry if the RPC did not declare one.
			_, part = getPrefix(part)
			switch part {
			case "input":
				if e.RPC.Input == nil {
					e.RPC.Input = &Entry{
						Name: "input",
						Kind: InputEntry,
						Dir:  make(map[string]*Entry),
					}
				}
				e = e.RPC.Input
			case "output":
				if e.RPC.Output == nil {
					e.RPC.Output = &Entry{
						Name: "output",
						Kind: OutputEntry,
						Dir:  make(map[string]*Entry),
					}
				}
				e = e.RPC.Output
			}
		default:
			_, part = getPrefix(part)
			switch part {
			case ".":
			case "", "..":
				return nil
			default:
				e = e.Dir[part]
			}
		}
	}
	return e
}

// Path returns the path to e. A nil Entry returns "".
func (e *Entry) Path() string {
	if e == nil {
		return ""
	}
	return e.Parent.Path() + "/" + e.Name
}

// Namespace returns the YANG/XML namespace Value for e as mounted in the Entry
// tree (e.g., as placed by grouping statements).
//
// Per RFC6020 section 7.12, the namespace on elements in the tree due to a
// "uses" statement is that of the where the uses statement occurs, i.e., the
// user, rather than creator (grouping) of those elements, so we follow the
// usage (Entry) tree up to the parent before obtaining the (then adjacent) root
// node for its namespace Value.
func (e *Entry) Namespace() *Value {
	// Make e the root parent entry
	// A namespace recorded anywhere on the ancestor chain (set e.g. by
	// merge() when applying augments/uses) takes precedence.
	for ; e.Parent != nil; e = e.Parent {
		if e.namespace != nil {
			return e.namespace
		}
	}

	// Return the namespace of a valid root parent entry
	if e != nil && e.Node != nil {
		if root := RootNode(e.Node); root != nil {
			if root.Kind() == "submodule" {
				// Submodules take their namespace from the module they
				// belong to; an orphan submodule yields an empty Value.
				root = root.Modules.Modules[root.BelongsTo.Name]
				if root == nil {
					return new(Value)
				}
			}
			return root.Namespace
		}
	}

	// Otherwise return an empty namespace Value (rather than nil)
	return new(Value)
}

// InstantiatingModule returns the YANG module which instantiated the Entry
// within the schema tree - using the same rules described in the documentation
// of the Namespace function. The namespace is resolved in the module name. This
// approach to namespacing is used when serialising YANG-modelled data to JSON as
// per RFC7951.
func (e *Entry) InstantiatingModule() (string, error) {
	n := e.Namespace()
	if n == nil {
		return "", fmt.Errorf("entry %s had nil namespace", e.Name)
	}

	module, err := e.Modules().FindModuleByNamespace(n.Name)
	if err != nil {
		return "", fmt.Errorf("could not find module %q when retrieving namespace for %s: %v", n.Name, e.Name, err)
	}
	return module.Name, nil
}

// shallowDup makes a shallow duplicate of e (only direct children are
// duplicated; grandchildren and deeper descendants are deleted).
func (e *Entry) shallowDup() *Entry {
	// Warning: if we add any elements to Entry that should not be
	// copied we will have to explicitly uncopy them.
	ne := *e

	// Now only copy direct children, clear their Dir, and fix up
	// Parent pointers.
	if e.Dir != nil {
		ne.Dir = make(map[string]*Entry, len(e.Dir))
		for k, v := range e.Dir {
			de := *v
			de.Dir = nil
			de.Parent = &ne
			ne.Dir[k] = &de
		}
	}
	return &ne
}

// dup makes a deep duplicate of e.
func (e *Entry) dup() *Entry {
	// Warning: if we add any elements to Entry that should not be
	// copied we will have to explicitly uncopy them.
	// It is possible we may want to do a deep copy on some other fields,
	// such as Exts, Choice and Case, but it is not clear that we need
	// to do that.
	ne := *e

	// Now recurse down to all of our children, fixing up Parent
	// pointers as we go.
	if e.Dir != nil {
		ne.Dir = make(map[string]*Entry, len(e.Dir))
		for k, v := range e.Dir {
			de := v.dup()
			de.Parent = &ne
			ne.Dir[k] = de
		}
	}

	// Extra gets a fresh map, but the value slices are shared with the
	// original (shallow copy of each slice header).
	ne.Extra = make(map[string][]interface{})
	for k, v := range e.Extra {
		ne.Extra[k] = v
	}

	return &ne
}

// merge merges a duplicate of oe.Dir into e.Dir, setting the prefix of each
// element to prefix, if not nil. It is an error if e and oe contain common
// elements.
func (e *Entry) merge(prefix *Value, namespace *Value, oe *Entry) {
	e.importErrors(oe)
	for k, v := range oe.Dir {
		// Deep-copy each child so the source tree is never aliased.
		v := v.dup()
		if prefix != nil {
			v.Prefix = prefix
		}
		if namespace != nil {
			v.namespace = namespace
		}
		if se := e.Dir[k]; se != nil {
			// Name collision: record an error citing both definitions.
			er := newError(oe.Node, `Duplicate node %q in %q from:
   %s: %s
   %s: %s`, k, e.Name, Source(v.Node), v.Name, Source(se.Node), se.Name)
			e.addError(er.Errors[0])
		} else {
			v.Parent = e
			// Carry over the source entry's extensions and extras.
			v.Exts = append(v.Exts, oe.Exts...)
			for lk := range oe.Extra {
				v.Extra[lk] = append(v.Extra[lk], oe.Extra[lk]...)
			}
			e.Dir[k] = v
		}
	}
}

// nless returns -1 if a is less than b, 0 if a == b, and 1 if a > b.
// If a and b are both numeric, then nless compares them as numbers,
// otherwise they are compared lexicographically.
func nless(a, b string) int {
	an, aerr := strconv.Atoi(a)
	bn, berr := strconv.Atoi(b)
	// Numeric comparison applies only when both strings parse as integers.
	if aerr == nil && berr == nil {
		switch {
		case an < bn:
			return -1
		case an > bn:
			return 1
		}
		return 0
	}
	// Otherwise fall back to plain lexicographic ordering.
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	}
	return 0
}

// sError pairs an error with its pre-rendered message so the message is
// computed once per sort, not once per comparison.
type sError struct {
	s   string
	err error
}

// sortedErrors orders errors by file, then numerically by line and column
// (see Less). It implements sort.Interface.
type sortedErrors []sError

func (s sortedErrors) Len() int      { return len(s) }
func (s sortedErrors) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sortedErrors) Less(i, j int) bool {
	// We expect the error strings to be composed of error messages,
	// line numbers, etc. delimited by ":".
	const errorSplitCount = 4
	left := strings.SplitN(s[i].s, ":", errorSplitCount)
	right := strings.SplitN(s[j].s, ":", errorSplitCount)
	// The file name is the primary sort key.
	if left[0] != right[0] {
		return left[0] < right[0]
	}

	// Compare the remaining fields (line, column, message) in order,
	// producing a total ordering. A missing field sorts first.
	for k := 1; k < errorSplitCount; k++ {
		if len(right) == k {
			return false
		}
		if len(left) == k {
			return true
		}
		if c := nless(left[k], right[k]); c != 0 {
			return c < 0
		}
	}
	return false
}

// errorSort sorts the strings in the errors slice assuming each line starts
// with file:line:col. Line and column number are sorted numerically.
// Duplicate errors are stripped.
func errorSort(errors []error) []error {
	switch len(errors) {
	case 0:
		return nil
	case 1:
		return errors
	}
	// Pair each error with its rendered message and sort by file:line:col.
	elist := make(sortedErrors, len(errors))
	for x, err := range errors {
		elist[x] = sError{err.Error(), err}
	}
	sort.Sort(elist)
	// Copy back, dropping adjacent duplicates (DeepEqual on the error values).
	errors = make([]error, len(errors))
	i := 0
	for _, err := range elist {
		if i > 0 && reflect.DeepEqual(err.err, errors[i-1]) {
			continue
		}
		errors[i] = err.err
		i++
	}
	return errors[:i]
}

// SingleDefaultValue returns the single schema default value for e and a bool
// indicating whether the entry contains one and only one default value. The
// empty string is returned when the entry has zero or multiple default values.
// This function is useful for determining the default values of a
// non-leaf-list leaf entry. If the leaf has no explicit default, its type
// default (if any) will be used.
//
// For a leaf-list entry, use DefaultValues() instead.
func (e *Entry) SingleDefaultValue() (string, bool) {
	if dvals := e.DefaultValues(); len(dvals) == 1 {
		return dvals[0], true
	}
	return "", false
}

// DefaultValues returns all default values for the leaf entry. This is useful
// for determining the default values for a leaf-list, which may have more than
// one default value. If the entry has no explicit default, its type default
// (if any) will be used. nil is returned when no default value exists.
//
// For a leaf entry, use SingleDefaultValue() instead.
func (e *Entry) DefaultValues() []string {
	// Explicit defaults win; return a copy so callers cannot mutate e.Default.
	if len(e.Default) > 0 {
		return append([]string{}, e.Default...)
	}

	// Fall back to the type's default, but only when the schema permits one:
	// a non-mandatory leaf, or a leaf-list with no minimum element count.
	// NOTE(review): the e.IsLeafList() arm is only reachable when e.Node is a
	// *Leaf — confirm whether leaf-lists backed by *LeafList nodes should also
	// pick up the type default here.
	if typ := e.Type; typ != nil && typ.HasDefault {
		switch leaf := e.Node.(type) {
		case *Leaf:
			switch {
			case e.IsLeaf() && (leaf.Mandatory == nil || leaf.Mandatory.Name == "false"), e.IsLeafList() && e.ListAttr.MinElements == 0:
				return []string{typ.Default}
			}
		}
	}
	return nil
}
diff --git a/src/webui/internal/goyang/pkg/yang/entry_test.go b/src/webui/internal/goyang/pkg/yang/entry_test.go
new file mode 100644
index 000000000..2000238b9
--- /dev/null
+++ b/src/webui/internal/goyang/pkg/yang/entry_test.go
@@ -0,0 +1,4141 @@
+// Copyright 2015 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

package yang

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/openconfig/gnmi/errdiff"
)

// TestNilEntry verifies ToEntry(nil) yields an ErrorNode carrying exactly one
// descriptive error rather than panicking.
func TestNilEntry(t *testing.T) {
	e := ToEntry(nil)
	_, ok := e.Node.(*ErrorNode)
	if !ok {
		t.Fatalf("ToEntry(nil) did not return an error node")
	}
	errs := e.GetErrors()
	switch len(errs) {
	default:
		t.Errorf("got %d errors, wanted 1", len(errs))
		fallthrough
	case 1:
		got := errs[0].Error()
		want := "ToEntry called on nil AST node"
		if got != want {
			t.Fatalf("got error %q, want %q", got, want)
		}
	case 0:
		t.Fatalf("GetErrors returned no error")
	}
}

// badInputs lists syntactically valid modules with semantic errors; the
// expected error strings embed file:line:col, so the fixture layout matters.
var badInputs = []struct {
	name   string
	in     string
	errors []string
}{
	{
		name: "bad.yang",
		in: `
// Base test yang module.
// This module is syntactally correct (we can build an AST) but it is has
// invalid parameters in many statements.
module base {
  namespace "urn:mod";
  prefix "base";

  container c {
    // bad config value in a container
    config bad;
  }
  container d {
    leaf bob {
      // bad config value
      config incorrect;
      type unknown;
    }
    // duplicate leaf entry bob
    leaf bob { type string; }
    // unknown grouping to uses
    uses the-beatles;
  }
  grouping the-group {
    leaf one { type string; }
    // duplicate leaf in unused grouping.
    leaf one { type int; }
  }
  uses the-group;
}
`,
		errors: []string{
			`bad.yang:9:3: invalid config value: bad`,
			`bad.yang:13:3: duplicate key from bad.yang:20:5: bob`,
			`bad.yang:14:5: invalid config value: incorrect`,
			`bad.yang:17:7: unknown type: base:unknown`,
			`bad.yang:22:5: unknown group: the-beatles`,
			`bad.yang:24:3: duplicate key from bad.yang:27:5: one`,
		},
	},
	{
		name: "bad-augment.yang",
		in: `
module base {
  namespace "urn:mod";
  prefix "base";
  // augmentation of unknown element
  augment erewhon {
    leaf bob {
      type string;
      // bad config value in unused augment
      config wrong;
    }
  }
}
`,
		errors: []string{
			`bad-augment.yang:6:3: augment erewhon not found`,
		},
	},
	{
		name: "bad-min-max-elements.yang",
		in: `
module base {
  namespace "urn:mod";
  prefix "base";
  list foo {
    // bad arguments to min-elements and max-elements
    min-elements bar;
    max-elements -5;
  }
  leaf-list bar {
    type string;
    // bad arguments to min-elements and max-elements
    min-elements unbounded;
    max-elements 122222222222222222222222222222222222222222222222222222222222;
  }
  list baz {
    // good arguments
    min-elements 0;
    max-elements unbounded;
  }
  list caz {
    // bad max element: has to be positive.
    min-elements 0;
    max-elements 0;
  }
}
`,
		errors: []string{
			`bad-min-max-elements.yang:7:5: invalid min-elements value`,
			`bad-min-max-elements.yang:8:5: invalid max-elements value`,
			`bad-min-max-elements.yang:13:5: invalid min-elements value`,
			`bad-min-max-elements.yang:14:5: invalid max-elements value`,
			`bad-min-max-elements.yang:24:5: invalid max-elements value`,
		},
	},
}

// TestBadYang checks that each badInputs module produces exactly the expected
// error messages (matched by substring), in order.
func TestBadYang(t *testing.T) {
	for _, tt := range badInputs {
		ms := NewModules()
		if err := ms.Parse(tt.in, tt.name); err != nil {
			t.Fatalf("unexpected error %s", err)
		}
		errs := ms.Process()
		if len(errs) != len(tt.errors) {
			t.Errorf("got %d errors, want %d", len(errs), len(tt.errors))
		} else {
			ok := true
			for x, err := range errs {
				if !strings.Contains(err.Error(), tt.errors[x]) {
					ok = false
					break
				}
			}
			if ok {
				continue
			}
		}

		// Mismatch: dump both lists for debugging.
		var b bytes.Buffer
		fmt.Fprint(&b, "got errors:\n")
		for _, err := range errs {
			fmt.Fprintf(&b, "\t%v\n", err)
		}
		fmt.Fprint(&b, "want errors:\n")
		for _, err := range tt.errors {
			fmt.Fprintf(&b, "\t%s\n", err)
		}
		t.Error(b.String())
	}
}

// parentTestModules exercises cross-module uses/augment: foo imports bar's
// grouping; baz (module + submodule) augments foo; qux-augment is an orphan
// submodule (its parent module is never parsed).
var parentTestModules = []struct {
	name string
	in   string
}{
	{
		name: "foo.yang",
		in: `
module foo {
  namespace "urn:foo";
  prefix "foo";

  import bar { prefix "temp-bar"; }
  container foo-c {
    leaf zzz { type string; }
    leaf-list foo-list { type string; }
    uses temp-bar:common;
  }
  uses temp-bar:common;
}
`,
	},
	{
		name: "bar.yang",
		in: `
module bar {
  namespace "urn:bar";
  prefix "bar";

  grouping common {
    container test1 { leaf str { type string; } }
    container test2 { leaf str { type string; } }
  }

  container bar-local {
    leaf test1 { type string; }
  }

}
`,
	},
	{
		name: "baz.yang",
		in: `
module baz {
  namespace "urn:baz";
  prefix "baz";

  import foo { prefix "f"; }

  grouping baz-common {
    leaf baz-common-leaf { type string; }
    container baz-dir {
      leaf aardvark { type string; }
    }
  }

  augment /f:foo-c {
    uses baz-common;
    leaf baz-direct-leaf { type string; }
  }
}
`,
	},
	{
		name: "baz-augment.yang",
		in: `
submodule baz-augment {
  belongs-to baz {
    prefix "baz";
  }

  import foo { prefix "f"; }

  augment "/f:foo-c" {
    leaf baz-submod-leaf { type string; }
  }
}
`,
	},
	{
		name: "qux-augment.yang",
		in: `
submodule qux-augment {
  belongs-to qux {
    prefix "qux";
  }

  import foo { prefix "f"; }

  augment "/f:foo-c" {
    leaf qux-submod-leaf { type string; }
  }
}
`,
	},
}

// TestUsesParent checks that entries pulled in via uses report paths rooted at
// the using module, not the grouping's module.
func TestUsesParent(t *testing.T) {
	ms := NewModules()
	for _, tt := range parentTestModules {
		_ = ms.Parse(tt.in, tt.name)
	}

	efoo, _ := ms.GetModule("foo")
	used := efoo.Dir["foo-c"].Dir["test1"]
	expected := "/foo/foo-c/test1"
	if used.Path() != expected {
		t.Errorf("want %s, got %s", expected, used.Path())
	}

	used = efoo.Dir["test1"]
	expected = "/foo/test1"
	if used.Path() != expected {
		t.Errorf("want %s, got %s", expected, used.Path())
	}
}

// TestPrefixes checks prefix assignment: locally defined nodes keep the local
// prefix; nodes brought in from bar's grouping keep bar's prefix.
func TestPrefixes(t *testing.T) {
	ms := NewModules()
	for _, tt := range parentTestModules {
		_ = ms.Parse(tt.in, tt.name)
	}

	efoo, _ := ms.GetModule("foo")
	if efoo.Prefix.Name != "foo" {
		t.Errorf(`want prefix "foo", got %q`, efoo.Prefix.Name)
	}

	used := efoo.Dir["foo-c"].Dir["zzz"]
	if used.Prefix == nil || used.Prefix.Name != "foo" {
		t.Errorf(`want prefix named "foo", got %#v`, used.Prefix)
	}

	used = efoo.Dir["foo-c"].Dir["foo-list"]
	if used.Prefix == nil || used.Prefix.Name != "foo" {
		t.Errorf(`want prefix named "foo", got %#v`, used.Prefix)
	}
	used = efoo.Dir["foo-c"].Dir["test1"]
	if used.Prefix.Name != "bar" {
		t.Errorf(`want prefix "bar", got %q`, used.Prefix.Name)
	}

	used = efoo.Dir["foo-c"].Dir["test1"].Dir["str"]
	if used.Prefix == nil || used.Prefix.Name != "bar" {
		t.Errorf(`want prefix named "bar", got %#v`, used.Prefix)
	}

}

// TestEntryNamespace validates Namespace() and InstantiatingModule() across
// uses, augments, submodule augments and an orphan submodule.
func TestEntryNamespace(t *testing.T) {
	ms := NewModules()
	for _, tt := range parentTestModules {
		if err := ms.Parse(tt.in, tt.name); err != nil {
			t.Fatalf("could not parse module %s: %v", tt.name, err)
		}
	}

	if errs := ms.Process(); len(errs) > 0 {
		t.Fatalf("could not process modules: %v", errs)
	}

	foo, _ := ms.GetModule("foo")
	bar, _ := ms.GetModule("bar")

	for _, tc := range []struct {
		descr        string
		entry        *Entry
		ns           string
		wantMod      string
		wantModError string
	}{
		{
			descr:   "grouping used in foo always have foo's namespace, even if it was defined in bar",
			entry:   foo.Dir["foo-c"].Dir["test1"],
			ns:      "urn:foo",
			wantMod: "foo",
		},
		{
			descr:   "grouping defined and used in foo has foo's namespace",
			entry:   foo.Dir["foo-c"].Dir["zzz"],
			ns:      "urn:foo",
			wantMod: "foo",
		},
		{
			descr:   "grouping defined and used in bar has bar's namespace",
			entry:   bar.Dir["bar-local"].Dir["test1"],
			ns:      "urn:bar",
			wantMod: "bar",
		},
		{
			descr:   "leaf within a used grouping in baz augmented into foo has baz's namespace",
			entry:   foo.Dir["foo-c"].Dir["baz-common-leaf"],
			ns:      "urn:baz",
			wantMod: "baz",
		},
		{
			descr:   "leaf directly defined within an augment to foo from baz has baz's namespace",
			entry:   foo.Dir["foo-c"].Dir["baz-direct-leaf"],
			ns:      "urn:baz",
			wantMod: "baz",
		},
		{
			descr:   "leaf directly defined within an augment to foo from submodule baz-augment of baz has baz's namespace",
			entry:   foo.Dir["foo-c"].Dir["baz-submod-leaf"],
			ns:      "urn:baz",
			wantMod: "baz",
		},
		{
			descr:        "leaf directly defined within an augment to foo from orphan submodule qux-augment has empty namespace",
			entry:        foo.Dir["foo-c"].Dir["qux-submod-leaf"],
			ns:           "",
			wantModError: `could not find module "" when retrieving namespace for qux-submod-leaf: "": no such namespace`,
		},
		{
			descr:   "children of a container within an augment to from baz have baz's namespace",
			entry:   foo.Dir["foo-c"].Dir["baz-dir"].Dir["aardvark"],
			ns:      "urn:baz",
			wantMod: "baz",
		},
	} {
		nsValue := tc.entry.Namespace()
		if nsValue == nil {
			t.Errorf("%s: want namespace %s, got nil", tc.descr, tc.ns)
		} else if tc.ns != nsValue.Name {
			t.Errorf("%s: want namespace %s, got %s", tc.descr, tc.ns, nsValue.Name)
		}

		m, err := tc.entry.InstantiatingModule()
		if err != nil {
			if tc.wantModError == "" {
				t.Errorf("%s: %s.InstantiatingModule(): got unexpected error: %v", tc.descr, tc.entry.Path(), err)
			} else if got := err.Error(); got != tc.wantModError {
				t.Errorf("%s: %s.InstantiatingModule(): got error: %q, want: %q", tc.descr, tc.entry.Path(), got, tc.wantModError)
			}
			continue
		} else if tc.wantModError != "" {
			t.Errorf("%s: %s.InstantiatingModule(): got no error, want: %q", tc.descr, tc.entry.Path(), tc.wantModError)
			continue
		}

		if m != tc.wantMod {
			t.Errorf("%s: %s.InstantiatingModule(): did not get expected name, got: %v, want: %v",
				tc.descr, tc.entry.Path(), m, tc.wantMod)
		}
	}
}

// testWhenModules places a when statement on every node kind that accepts one;
// each when's condition value equals the node's name so tests can derive it.
var testWhenModules = []struct {
	name string
	in   string
}{
	{
		name: "when.yang",
		in: `
module when {
  namespace "urn:when";
  prefix "when";

  leaf condition { type string; }

  container alpha {
    when "../condition = 'alpha'";
  }

  leaf beta {
    when "../condition = 'beta'";
    type string;
  }

  leaf-list gamma {
    when "../condition = 'gamma'";
    type string;
  }

  list delta {
    when "../condition = 'delta'";
  }

  choice epsilon {
    when "../condition = 'epsilon'";

    case zeta {
      when "../condition = 'zeta'";
    }
  }

  anyxml eta {
    when "../condition = 'eta'";
  }

  anydata theta {
    when "../condition = 'theta'";
  }

  uses iota {
    when "../condition = 'iota'";
  }

  grouping iota {
  }

  augment "../alpha" {
    when "../condition = 'kappa'";
  }
}
`,
	},
}

// TestGetWhenXPath checks GetWhenXPath for each statement kind, including
// cases nested in choices and augments recorded on the target entry.
func TestGetWhenXPath(t *testing.T) {
	ms := NewModules()
	ms.ParseOptions.StoreUses = true
	for _, tt := range testWhenModules {
		if err := ms.Parse(tt.in, tt.name); err != nil {
			t.Fatalf("could not parse module %s: %v", tt.name, err)
		}
	}

	if errs := ms.Process(); len(errs) > 0 {
		t.Fatalf("could not process modules: %v", errs)
	}

	when, _ := ms.GetModule("when")

	testcases := []struct {
		descr         string
		childName     string
		isCase        bool
		choiceName    string
		isAugment     bool
		augmentTarget string
	}{
		{
			descr:     "extract when statement from *Container",
			childName: "alpha",
		}, {
			descr:     "extract when statement from *Leaf",
			childName: "beta",
		}, {
			descr:     "extract when statement from *LeafList",
			childName: "gamma",
		}, {
			descr:     "extract when statement from *List",
			childName: "delta",
		}, {
			descr:     "extract when statement from *Choice",
			childName: "epsilon",
		}, {
			descr:      "extract when statement from *Case",
			childName:  "zeta",
			isCase:     true,
			choiceName: "epsilon",
		}, {
			descr:     "extract when statement from *AnyXML",
			childName: "eta",
		}, {
			descr:     "extract when statement from *AnyData",
			childName: "theta",
		}, {
			descr:         "extract when statement from *Augment",
			childName:     "kappa",
			isAugment:     true,
			augmentTarget: "alpha",
		},
	}

	for _, tc := range testcases {
		parentEntry := when
		t.Run(tc.descr, func(t *testing.T) {
			var child *Entry

			if tc.isAugment {
				// Augments are recorded on the target entry's Augmented list.
				child = parentEntry.Dir[tc.augmentTarget].Augmented[0]
			} else {
				if tc.isCase {
					parentEntry = parentEntry.Dir[tc.choiceName]
				}
				child = parentEntry.Dir[tc.childName]
			}

			expectedWhen := "../condition = '" + tc.childName + "'"

			if gotWhen, ok := child.GetWhenXPath(); !ok {
				t.Errorf("Cannot get when statement of child entry %v", tc.childName)
			} else if gotWhen != expectedWhen {
				t.Errorf("Expected when XPath %v, but got %v", expectedWhen, gotWhen)
			}
		})
	}
}

// testAugmentAndUsesModules — fixture truncated at the end of this chunk;
// the remainder of the literal continues beyond the visible source.
var testAugmentAndUsesModules = []struct {
	name string
	in   string
}{
	{
		name: "original.yang",
		in: `
module original {
  namespace "urn:original";
  prefix "orig";

  import groupings {
    prefix grp;
  }

  container alpha {
    leaf beta {
      type string;
    }
    leaf psi {
      type string;
    }
    leaf omega {
      type
string; + } + uses grp:nestedLevel0 { + when "beta = 'holaWorld'"; + } + } +} +`, + }, + { + name: "augments.yang", + in: ` +module augments { + namespace "urn:augments"; + prefix "aug"; + + import original { + prefix orig; + } + + import groupings { + prefix grp; + } + + augment "/orig:alpha" { + when "orig:beta = 'helloWorld'"; + + container charlie { + leaf charlieLeaf { + type string; + } + } + } + + grouping delta { + container echo { + leaf echoLeaf { + type string; + } + } + } + + augment "/orig:alpha" { + when "orig:omega = 'privetWorld'"; + uses delta { + when "current()/orig:beta = 'nihaoWorld'"; + } + } +} +`, + }, + { + name: "groupings.yang", + in: ` +module groupings { + namespace "urn:groupings"; + prefix "grp"; + + import "original" { + prefix orig; + } + + grouping nestedLevel0 { + leaf leafAtLevel0 { + type string; + } + uses nestedLevel1 { + when "orig:psi = 'geiasouWorld'"; + } + } + + grouping nestedLevel1 { + leaf leafAtLevel1 { + type string; + } + uses nestedLevel2 { + when "orig:omega = 'salveWorld'"; + } + } + + grouping nestedLevel2 { + leaf leafAtLevel2 { + type string; + } + } +} +`, + }, +} + +func TestAugmentedEntry(t *testing.T) { + ms := NewModules() + for _, tt := range testAugmentAndUsesModules { + if err := ms.Parse(tt.in, tt.name); err != nil { + t.Fatalf("could not parse module %s: %v", tt.name, err) + } + } + + if errs := ms.Process(); len(errs) > 0 { + t.Fatalf("could not process modules: %v", errs) + } + + orig, _ := ms.GetModule("original") + + testcases := []struct { + descr string + augmentEntry *Entry + augmentWhenStmt string + augmentChildNames map[string]bool + }{ + { + descr: "leaf charlie is augmented to container alpha", + augmentEntry: orig.Dir["alpha"].Augmented[0], + augmentWhenStmt: "orig:beta = 'helloWorld'", + augmentChildNames: map[string]bool{ + "charlie": false, + }, + }, { + descr: "grouping delta is augmented to container alpha", + augmentEntry: orig.Dir["alpha"].Augmented[1], + augmentWhenStmt: 
"orig:omega = 'privetWorld'", + augmentChildNames: map[string]bool{ + "echo": false, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.descr, func(t *testing.T) { + augment := tc.augmentEntry + + if tc.augmentWhenStmt != "" { + if gotAugmentWhenStmt, ok := augment.GetWhenXPath(); !ok { + t.Errorf("Expected augment when statement %v, but not present", + tc.augmentWhenStmt) + } else if gotAugmentWhenStmt != tc.augmentWhenStmt { + t.Errorf("Expected augment when statement %v, but got %v", + tc.augmentWhenStmt, gotAugmentWhenStmt) + } + } + + for name, entry := range augment.Dir { + if _, ok := tc.augmentChildNames[name]; ok { + tc.augmentChildNames[name] = true + } else { + t.Errorf("Got unexpected child name %v in augment", name) + } + + if entry.Dir != nil { + t.Errorf("Expected augment's child entry %v have nil dir, but got %v", + name, entry.Dir) + } + } + + for name, matched := range tc.augmentChildNames { + if !matched { + t.Errorf("Expected child name %v in augment, but not present", name) + } + } + + }) + } +} + +func TestUsesEntry(t *testing.T) { + ms := NewModules() + ms.ParseOptions.StoreUses = true + for _, tt := range testAugmentAndUsesModules { + if err := ms.Parse(tt.in, tt.name); err != nil { + t.Fatalf("could not parse module %s: %v", tt.name, err) + } + } + + if errs := ms.Process(); len(errs) > 0 { + t.Fatalf("could not process modules: %v", errs) + } + + orig, _ := ms.GetModule("original") + + testcases := []struct { + descr string + usesParentEntry *Entry + usesWhenStmts []string + groupingChildNames []map[string]bool + nestedLevel int + }{ + { + descr: "second augment in augments.yang uses grouping delta", + usesParentEntry: orig.Dir["alpha"].Augmented[1], + usesWhenStmts: []string{"current()/orig:beta = 'nihaoWorld'"}, + groupingChildNames: []map[string]bool{{"echo": false}}, + }, { + descr: "container alpha uses nested grouping nestedLevel0", + usesParentEntry: orig.Dir["alpha"], + usesWhenStmts: []string{ + "beta = 'holaWorld'", + 
"orig:psi = 'geiasouWorld'", + "orig:omega = 'salveWorld'", + }, + groupingChildNames: []map[string]bool{ + {"leafAtLevel0": false, "leafAtLevel1": false, "leafAtLevel2": false}, + {"leafAtLevel1": false, "leafAtLevel2": false}, + {"leafAtLevel2": false}, + }, + nestedLevel: 2, + }, + } + + for _, tc := range testcases { + t.Run(tc.descr, func(t *testing.T) { + usesParentEntry := tc.usesParentEntry + for i := 0; i <= tc.nestedLevel; i++ { + usesStmts := usesParentEntry.Uses + // want the usesStmts to have length 1, otherwise also need to verify + // every usesStmt slice element is expected. + if len(usesStmts) != 1 { + t.Errorf("Expected usesStmts to have length 1, but got %v", + len(usesStmts)) + } + + usesNode := usesStmts[0].Uses + grouping := usesStmts[0].Grouping + + if tc.usesWhenStmts[i] != "" { + if gotUsesWhenStmt, ok := usesNode.When.Statement().Arg(); !ok { + t.Errorf("Expected uses when statement %v, but not present", + tc.usesWhenStmts[i]) + } else if gotUsesWhenStmt != tc.usesWhenStmts[i] { + t.Errorf("Expected uses when statement %v, but got %v", + tc.usesWhenStmts[i], gotUsesWhenStmt) + } + } + + for name, entry := range grouping.Dir { + if _, ok := tc.groupingChildNames[i][name]; ok { + tc.groupingChildNames[i][name] = true + } else { + t.Errorf("Got unexpected child name %v in uses", name) + } + + if entry.Dir != nil { + t.Errorf("Expected uses's child entry %v have nil dir, but got %v", + name, entry.Dir) + } + } + + for name, matched := range tc.groupingChildNames[i] { + if !matched { + t.Errorf("Expected child name %v in grouping %v, but not present", + name, grouping.Name) + } + } + usesParentEntry = grouping + } + + }) + } +} + +func TestShallowDup(t *testing.T) { + testModule := struct { + name string + in string + }{ + + name: "mod.yang", + in: ` +module mod { + namespace "urn:mod"; + prefix "mod"; + + container level0 { + container level1-1 { + leaf level2-1 { type string;} + } + + container level1-2 { + leaf level2-2 { type string;} + } + 
+ container level1-3{ + container level2-3 { + leaf level3-1 { type string;} + } + } + } +} +`, + } + + ms := NewModules() + + if err := ms.Parse(testModule.in, testModule.name); err != nil { + t.Fatalf("could not parse module %s: %v", testModule.name, err) + } + + if errs := ms.Process(); len(errs) > 0 { + t.Fatalf("could not process modules: %v", errs) + } + + mod, _ := ms.GetModule("mod") + level0 := mod.Dir["level0"] + level0ShallowDup := level0.shallowDup() + + for name, entry := range level0.Dir { + shallowDupedEntry, ok := level0ShallowDup.Dir[name] + if !ok { + t.Errorf("Expect shallowDup() to duplicate direct child %v, but did not", name) + } + if len(entry.Dir) != 1 { + t.Errorf("Expect original entry's direct child have length 1 dir") + } + if shallowDupedEntry.Dir != nil { + t.Errorf("Expect shallowDup()'ed entry's direct child to have nil dir") + } + } +} + +func TestIgnoreCircularDependencies(t *testing.T) { + tests := []struct { + name string + inModules map[string]string + inIgnoreCircDep bool + wantErrs bool + }{{ + name: "validation that non-circular dependencies are correct", + inModules: map[string]string{ + "mod-a": ` + module mod-a { + namespace "urn:a"; + prefix "a"; + + include subm-x; + include subm-y; + + leaf marker { type string; } + } + `, + "subm-x": ` + submodule subm-x { + belongs-to mod-a { prefix a; } + } + `, + "subm-y": ` + submodule subm-y { + belongs-to mod-a { prefix a; } + // Not circular. 
+ include subm-x; + } + `}, + }, { + name: "circular dependency error identified", + inModules: map[string]string{ + "mod-a": ` + module mod-a { + namespace "urn:a"; + prefix "a"; + + include subm-x; + include subm-y; + + leaf marker { type string; } + } + `, + "subm-x": ` + submodule subm-x { + belongs-to mod-a { prefix a; } + // Circular + include subm-y; + } + `, + "subm-y": ` + submodule subm-y { + belongs-to mod-a { prefix a; } + // Circular + include subm-x; + } + `}, + wantErrs: true, + }, { + name: "circular dependency error skipped", + inModules: map[string]string{ + "mod-a": ` + module mod-a { + namespace "urn:a"; + prefix "a"; + + include subm-x; + include subm-y; + + leaf marker { type string; } + } + `, + "subm-x": ` + submodule subm-x { + belongs-to mod-a { prefix a; } + // Circular + include subm-y; + } + `, + "subm-y": ` + submodule subm-y { + belongs-to mod-a { prefix a; } + // Circular + include subm-x; + } + `}, + inIgnoreCircDep: true, + }} + + for _, tt := range tests { + ms := NewModules() + ms.ParseOptions.IgnoreSubmoduleCircularDependencies = tt.inIgnoreCircDep + for n, m := range tt.inModules { + if err := ms.Parse(m, n); err != nil { + if !tt.wantErrs { + t.Errorf("%s: could not parse modules, got: %v, want: nil", tt.name, err) + } + continue + } + } + } +} + +func TestEntryDefaultValue(t *testing.T) { + getdir := func(e *Entry, elements ...string) (*Entry, error) { + for _, elem := range elements { + next := e.Dir[elem] + if next == nil { + return nil, fmt.Errorf("%s missing directory %q", e.Path(), elem) + } + e = next + } + return e, nil + } + + modtext := ` +module defaults { + namespace "urn:defaults"; + prefix "defaults"; + + typedef string-default { + type string; + default "typedef default value"; + } + + typedef string-emptydefault { + type string; + default ""; + } + + grouping common { + container common-nodefault { + leaf string { + type string; + } + } + container common-withdefault { + leaf string { + type string; + default 
"default value"; + } + } + container common-withemptydefault { + leaf string { + type string; + default ""; + } + } + container common-typedef-withdefault { + leaf string { + type string-default; + } + } + container common-typedef-withemptydefault { + leaf string { + type string-emptydefault; + } + } + } + + container defaults { + leaf mandatory-default { + type string-default; + mandatory true; + } + leaf uint32-withdefault { + type uint32; + default 13; + } + leaf string-withdefault { + type string-default; + } + leaf nodefault { + type string; + } + uses common; + + choice choice-default { + case alpha { + leaf alpha { + type string; + } + } + case zeta { + leaf zeta { + type string; + } + } + default zeta; + } + } + + grouping leaflist-common { + container common-nodefault { + leaf string { + type string; + } + } + container common-withdefault { + leaf-list string { + type string; + default "default value"; + } + } + container common-typedef-withdefault { + leaf string { + type string-default; + } + } + } + + container leaflist-defaults { + leaf-list uint32-withdefault { + type uint32; + default "13"; + default 14; + } + leaf-list stringlist-withdefault { + type string-default; + } + leaf-list stringlist-withemptydefault { + type string-emptydefault; + } + leaf-list stringlist-withdefault-withminelem { + type string-default; + min-elements 1; + } + leaf-list emptydefault { + type string; + default ""; + } + leaf-list nodefault { + type string; + } + uses leaflist-common; + } + +} +` + + ms := NewModules() + if err := ms.Parse(modtext, "defaults.yang"); err != nil { + t.Fatal(err) + } + + for i, tc := range []struct { + wantSingle string + wantSingleOk bool + wantDefaults []string + path []string + }{ + { + path: []string{"defaults", "string-withdefault"}, + wantSingle: "typedef default value", + wantDefaults: []string{"typedef default value"}, + wantSingleOk: true, + }, + { + path: []string{"defaults", "uint32-withdefault"}, + wantSingle: "13", + wantDefaults: 
[]string{"13"}, + wantSingleOk: true, + }, + { + path: []string{"defaults", "nodefault"}, + wantSingle: "", + wantDefaults: nil, + }, + { + path: []string{"defaults", "common-withdefault", "string"}, + wantSingle: "default value", + wantDefaults: []string{"default value"}, + wantSingleOk: true, + }, + { + path: []string{"defaults", "common-withemptydefault", "string"}, + wantSingle: "", + wantDefaults: []string{""}, + wantSingleOk: true, + }, + { + path: []string{"defaults", "common-typedef-withdefault", "string"}, + wantSingle: "typedef default value", + wantDefaults: []string{"typedef default value"}, + wantSingleOk: true, + }, + { + path: []string{"defaults", "common-typedef-withemptydefault", "string"}, + wantSingle: "", + wantDefaults: []string{""}, + wantSingleOk: true, + }, + { + path: []string{"defaults", "common-nodefault", "string"}, + wantSingle: "", + wantDefaults: nil, + }, + { + path: []string{"defaults", "mandatory-default"}, + wantSingle: "", + wantDefaults: nil, + }, + { + path: []string{"defaults", "choice-default"}, + wantSingle: "zeta", + wantDefaults: []string{"zeta"}, + wantSingleOk: true, + }, + { + path: []string{"leaflist-defaults", "uint32-withdefault"}, + wantSingle: "", + wantDefaults: []string{"13", "14"}, + }, + { + path: []string{"leaflist-defaults", "stringlist-withdefault"}, + wantSingle: "typedef default value", + wantDefaults: []string{"typedef default value"}, + wantSingleOk: true, + }, + { + path: []string{"leaflist-defaults", "stringlist-withemptydefault"}, + wantSingle: "", + wantDefaults: []string{""}, + wantSingleOk: true, + }, + { + path: []string{"leaflist-defaults", "stringlist-withdefault-withminelem"}, + wantSingle: "", + wantDefaults: nil, + }, + { + path: []string{"leaflist-defaults", "emptydefault"}, + wantSingle: "", + wantDefaults: []string{""}, + wantSingleOk: true, + }, + { + path: []string{"leaflist-defaults", "nodefault"}, + wantSingle: "", + wantDefaults: nil, + }, + { + path: []string{"leaflist-defaults", 
"common-nodefault", "string"}, + wantSingle: "", + wantDefaults: nil, + }, + { + path: []string{"leaflist-defaults", "common-withdefault", "string"}, + wantSingle: "default value", + wantDefaults: []string{"default value"}, + wantSingleOk: true, + }, + { + path: []string{"leaflist-defaults", "common-typedef-withdefault", "string"}, + wantSingle: "typedef default value", + wantDefaults: []string{"typedef default value"}, + wantSingleOk: true, + }, + } { + tname := strings.Join(tc.path, "/") + + mod, ok := ms.Modules["defaults"] + if !ok { + t.Fatalf("[%d] module not found: %q", i, tname) + } + defaults := ToEntry(mod) + dir, err := getdir(defaults, tc.path...) + if err != nil { + t.Fatalf("[%d_%s] could not retrieve path: %v", i, tname, err) + } + if got, gotOk := dir.SingleDefaultValue(); got != tc.wantSingle || gotOk != tc.wantSingleOk { + t.Errorf("[%d_%s] got SingleDefaultValue (%q, %v), want (%q, %v)", i, tname, got, gotOk, tc.wantSingle, tc.wantSingleOk) + } + if diff := cmp.Diff(dir.DefaultValues(), tc.wantDefaults); diff != "" { + t.Errorf("[%d_%s] DefaultValues (-got, +want):\n%s", i, tname, diff) + } + } +} + +func TestFullModuleProcess(t *testing.T) { + tests := []struct { + name string + inModules map[string]string + inIgnoreCircDeps bool + wantLeaves map[string][]string + customVerify func(t *testing.T, module *Entry) + wantErr bool + }{{ + name: "circular import via child", + inModules: map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + include test-router; + include test-router-bgp; + include test-router-isis; + + container configure { + uses test-router; + } + }`, + "test-router": ` + submodule test-router { + belongs-to test { prefix "t"; } + + include test-router-bgp; + include test-router-isis; + include test-router-ldp; + + grouping test-router { + uses test-router-ldp; + } + }`, + "test-router-ldp": ` + submodule test-router-ldp { + belongs-to test { prefix "t"; } + + grouping test-router-ldp { } + }`, + 
"test-router-isis": ` + submodule test-router-isis { + belongs-to test { prefix "t"; } + + include test-router; + }`, + "test-router-bgp": ` + submodule test-router-bgp { + belongs-to test { prefix "t"; } + }`, + }, + inIgnoreCircDeps: true, + }, { + name: "non-circular via child", + inModules: map[string]string{ + "bgp": ` + module bgp { + prefix "bgp"; + namespace "urn:bgp"; + + include bgp-son; + include bgp-daughter; + + leaf parent { type string; } + }`, + "bgp-son": ` + submodule bgp-son { + belongs-to bgp { prefix "bgp"; } + + leaf son { type string; } + }`, + "bgp-daughter": ` + submodule bgp-daughter { + belongs-to bgp { prefix "bgp"; } + include bgp-son; + + leaf daughter { type string; } + }`, + }, + }, { + name: "simple circular via child", + inModules: map[string]string{ + "parent": ` + module parent { + prefix "p"; + namespace "urn:p"; + include son; + include daughter; + + leaf p { type string; } + } + `, + "son": ` + submodule son { + belongs-to parent { prefix "p"; } + include daughter; + + leaf s { type string; } + } + `, + "daughter": ` + submodule daughter { + belongs-to parent { prefix "p"; } + include son; + + leaf d { type string; } + } + `, + }, + wantErr: true, + }, { + name: "merge via grandchild", + inModules: map[string]string{ + "bgp": ` + module bgp { + prefix "bgp"; + namespace "urn:bgp"; + + include bgp-son; + + leaf parent { type string; } + }`, + "bgp-son": ` + submodule bgp-son { + belongs-to bgp { prefix "bgp"; } + + include bgp-grandson; + + leaf son { type string; } + }`, + "bgp-grandson": ` + submodule bgp-grandson { + belongs-to bgp { prefix "bgp"; } + + leaf grandson { type string; } + }`, + }, + wantLeaves: map[string][]string{ + "bgp": {"parent", "son", "grandson"}, + }, + }, { + name: "parent to son and daughter with common grandchild", + inModules: map[string]string{ + "parent": ` + module parent { + prefix "p"; + namespace "urn:p"; + include son; + include daughter; + + leaf p { type string; } + } + `, + "son": ` + 
submodule son { + belongs-to parent { prefix "p"; } + include grandchild; + + leaf s { type string; } + } + `, + "daughter": ` + submodule daughter { + belongs-to parent { prefix "p"; } + include grandchild; + + leaf d { type string; } + } + `, + "grandchild": ` + submodule grandchild { + belongs-to parent { prefix "p"; } + + leaf g { type string; } + } + `, + }, + wantLeaves: map[string][]string{ + "parent": {"p", "s", "d", "g"}, + }, + }, { + name: "parent to son and daughter, not a circdep", + inModules: map[string]string{ + "parent": ` + module parent { + prefix "p"; + namespace "urn:p"; + + include son; + include daughter; + + uses son-group; + } + `, + "son": ` + submodule son { + belongs-to parent { prefix "p"; } + include daughter; + + grouping son-group { + uses daughter-group; + } + } + `, + "daughter": ` + submodule daughter { + belongs-to parent { prefix "p"; } + + grouping daughter-group { + leaf s { type string; } + } + + leaf d { type string; } + } + `, + }, + wantLeaves: map[string][]string{ + "parent": {"s", "d"}, + }, + }, { + name: "parent with grouping and with extension", + inModules: map[string]string{ + "parent": ` + module parent { + prefix "p"; + namespace "urn:p"; + + import extensions { + prefix "ext"; + } + + container c { + ext:c-define "c's extension"; + uses daughter-group { + ext:u-define "uses's extension"; + } + } + + grouping daughter-group { + ext:g-define "daughter-group's extension"; + + leaf l { + ext:l-define "l's extension"; + type string; + } + + container c2 { + leaf l2 { + type string; + } + } + + // test nested grouping extensions. + uses son-group { + ext:sg-define "son-group's extension"; + } + } + + grouping son-group { + leaf s { + ext:s-define "s's extension"; + type string; + } + + } + } + `, + "extension": ` + module extensions { + prefix "q"; + namespace "urn:q"; + + extension c-define { + description + "Takes as an argument a name string. 
+ c's extension."; + argument "name"; + } + extension g-define { + description + "Takes as an argument a name string. + daughter-group's extension."; + argument "name"; + } + extension sg-define { + description + "Takes as an argument a name string. + son-groups's extension."; + argument "name"; + } + extension s-define { + description + "Takes as an argument a name string. + s's extension."; + argument "name"; + } + extension l-define { + description + "Takes as an argument a name string. + l's extension."; + argument "name"; + } + extension u-define { + description + "Takes as an argument a name string. + uses's extension."; + argument "name"; + } + } + `, + }, + wantLeaves: map[string][]string{ + "parent": {"c"}, + }, + customVerify: func(t *testing.T, module *Entry) { + // Verify that an extension within the uses statement + // and within a grouping's definition is copied to each + // of the top-level nodes within the grouping, and no + // one else above or below. + less := cmpopts.SortSlices(func(l, r *Statement) bool { return l.Keyword < r.Keyword }) + + if diff := cmp.Diff([]*Statement{ + {Keyword: "ext:c-define", HasArgument: true, Argument: "c's extension"}, + }, module.Dir["c"].Exts, cmpopts.IgnoreUnexported(Statement{}), less); diff != "" { + t.Errorf("container c Exts (-want, +got):\n%s", diff) + } + + if diff := cmp.Diff([]*Statement{ + {Keyword: "ext:g-define", HasArgument: true, Argument: "daughter-group's extension"}, + {Keyword: "ext:l-define", HasArgument: true, Argument: "l's extension"}, + {Keyword: "ext:u-define", HasArgument: true, Argument: "uses's extension"}, + }, module.Dir["c"].Dir["l"].Exts, cmpopts.IgnoreUnexported(Statement{}), less); diff != "" { + t.Errorf("leaf l Exts (-want, +got):\n%s", diff) + } + + if diff := cmp.Diff([]*Statement{ + {Keyword: "ext:g-define", HasArgument: true, Argument: "daughter-group's extension"}, + {Keyword: "ext:sg-define", HasArgument: true, Argument: "son-group's extension"}, + {Keyword: "ext:s-define", 
HasArgument: true, Argument: "s's extension"}, + {Keyword: "ext:u-define", HasArgument: true, Argument: "uses's extension"}, + }, module.Dir["c"].Dir["s"].Exts, cmpopts.IgnoreUnexported(Statement{}), less); diff != "" { + t.Errorf("leaf s Exts (-want, +got):\n%s", diff) + } + + if diff := cmp.Diff([]*Statement{ + {Keyword: "ext:g-define", HasArgument: true, Argument: "daughter-group's extension"}, + {Keyword: "ext:u-define", HasArgument: true, Argument: "uses's extension"}, + }, module.Dir["c"].Dir["c2"].Exts, cmpopts.IgnoreUnexported(Statement{}), less); diff != "" { + t.Errorf("container c2 Exts (-want, +got):\n%s", diff) + } + + if diff := cmp.Diff([]*Statement{}, module.Dir["c"].Dir["c2"].Dir["l2"].Exts, cmpopts.IgnoreUnexported(Statement{}), less, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("leaf l2 Exts (-want, +got):\n%s", diff) + } + }, + }} + + for _, tt := range tests { + ms := NewModules() + + ms.ParseOptions.IgnoreSubmoduleCircularDependencies = tt.inIgnoreCircDeps + for n, m := range tt.inModules { + if err := ms.Parse(m, n); err != nil { + t.Errorf("%s: error parsing module %s, got: %v, want: nil", tt.name, n, err) + } + } + + if errs := ms.Process(); len(errs) > 0 { + if !tt.wantErr { + t.Errorf("%s: error processing modules, got: %v, want: nil", tt.name, errs) + } + continue + } + + if tt.wantErr { + t.Errorf("%s: did not get expected errors", tt.name) + continue + } + + for m, l := range tt.wantLeaves { + mod, errs := ms.GetModule(m) + if len(errs) > 0 { + t.Errorf("%s: cannot retrieve expected module %s, got: %v, want: nil", tt.name, m, errs) + continue + } + + var leaves []string + for _, n := range mod.Dir { + leaves = append(leaves, n.Name) + } + + // Sort the two slices to ensure that we are comparing like with like. 
+ sort.Strings(l) + sort.Strings(leaves) + if !reflect.DeepEqual(l, leaves) { + t.Errorf("%s: did not get expected leaves in %s, got: %v, want: %v", tt.name, m, leaves, l) + } + + if tt.customVerify != nil { + tt.customVerify(t, mod) + } + } + } +} + +func TestAnyDataAnyXML(t *testing.T) { + tests := []struct { + name string + inModule string + wantNodeKind string + wantEntryKind EntryKind + }{ + { + name: "test anyxml", + wantNodeKind: "anyxml", + wantEntryKind: AnyXMLEntry, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + container c { + anyxml data { + description "anyxml"; + } + } +}`, + }, + { + name: "test anydata", + wantNodeKind: "anydata", + wantEntryKind: AnyDataEntry, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + container c { + anydata data { + description "anydata"; + } + } +}`, + }, + } + for _, tt := range tests { + ms := NewModules() + if err := ms.Parse(tt.inModule, "test"); err != nil { + t.Errorf("%s: error parsing module 'test', got: %v, want: nil", tt.name, err) + } + + if errs := ms.Process(); len(errs) > 0 { + t.Errorf("%s: got module parsing errors", tt.name) + for i, err := range errs { + t.Errorf("%s: error #%d: %v", tt.name, i, err) + } + continue + } + + mod, ok := ms.Modules["test"] + if !ok { + t.Errorf("%s: did not find `test` module", tt.name) + continue + } + e := ToEntry(mod) + c := e.Dir["c"] + if c == nil { + t.Errorf("%s: did not find container c", tt.name) + continue + } + data := c.Dir["data"] + if data == nil { + t.Errorf("%s: did not find leaf c/data", tt.name) + continue + } + if got := data.Node.Kind(); got != tt.wantNodeKind { + t.Errorf("%s: want Node.Kind(): %q, got: %q", tt.name, tt.wantNodeKind, got) + } + if got := data.Kind; got != tt.wantEntryKind { + t.Errorf("%s: want Kind: %v, got: %v", tt.name, tt.wantEntryKind, got) + } + if got := data.Description; got != tt.wantNodeKind { + t.Errorf("%s: want data.Description: %q, got: %q", tt.name, tt.wantNodeKind, got) + } + } 
+} + +func getEntry(root *Entry, path []string) *Entry { + for _, elem := range path { + if root = root.Dir[elem]; root == nil { + break + } + } + return root +} + +func TestActionRPC(t *testing.T) { + tests := []struct { + name string + inModule string + operationPath []string + wantNodeKind string + wantError string + noInput bool + noOutput bool + }{ + { + name: "test action in container", + wantNodeKind: "action", + operationPath: []string{"c", "operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + container c { + action operation { + description "action"; + input { leaf string { type string; } } + output { leaf string { type string; } } + } + } +}`, + }, + + { + name: "test action in list", + wantNodeKind: "action", + operationPath: []string{"list", "operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + list list { + action operation { + description "action"; + input { leaf string { type string; } } + output { leaf string { type string; } } + } + } +}`, + }, + + { + name: "test action in container via grouping", + wantNodeKind: "action", + operationPath: []string{"c", "operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + grouping g { + action operation { + description "action"; + input { leaf string { type string; } } + output { leaf string { type string; } } + } + } + container c { uses g; } +}`, + }, + + { + name: "test action in list via grouping", + wantNodeKind: "action", + operationPath: []string{"list", "operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + grouping g { + action operation { + description "action"; + input { leaf string { type string; } } + output { leaf string { type string; } } + } + } + list list { uses g; } +}`, + }, + + { + name: "test rpc", + wantNodeKind: "rpc", + operationPath: []string{"operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + rpc operation { + description "rpc"; + input { 
+ leaf string { type string; } + } + output { + leaf string { type string; } + } + } +}`, + }, + + { + name: "minimal rpc", + wantNodeKind: "rpc", + operationPath: []string{"operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + rpc operation { + description "rpc"; + } +}`, + noInput: true, + noOutput: true, + }, + + { + name: "input-only rpc", + wantNodeKind: "rpc", + operationPath: []string{"operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + rpc operation { + description "rpc"; + input { + leaf string { type string; } + } + } +}`, + noOutput: true, + }, + + { + name: "output-only rpc", + wantNodeKind: "rpc", + operationPath: []string{"operation"}, + inModule: `module test { + namespace "urn:test"; + prefix "test"; + rpc operation { + description "rpc"; + output { + leaf string { type string; } + } + } +}`, + noInput: true, + }, + + // test cases with errors (in module parsing) + { + name: "rpc not module child", + wantError: "test:6:5: unknown container field: rpc", + inModule: `module test { + namespace "urn:test"; + prefix "test"; + container c { + // error: "rpc" is not a valid sub-statement to "container" + rpc operation; + } +}`, + }, + + { + name: "action not valid leaf child", + wantError: "test:6:5: unknown leaf field: action", + inModule: `module test { + namespace "urn:test"; + prefix "test"; + leaf l { + // error: "operation" is not a valid sub-statement to "leaf" + action operation; + } +}`, + }, + + { + name: "action not valid leaf-list child", + wantError: "test:6:5: unknown leaf-list field: action", + inModule: `module test { + namespace "urn:test"; + prefix "test"; + leaf-list leaf-list { + // error: "operation" is not a valid sub-statement to "leaf-list" + action operation; + } +}`, + }, + } + for _, tt := range tests { + ms := NewModules() + if err := ms.Parse(tt.inModule, "test"); err != nil { + if got := err.Error(); got != tt.wantError { + t.Errorf("%s: error parsing module 'test', 
got error: %q, want: %q", tt.name, got, tt.wantError) + } + continue + } + + if errs := ms.Process(); len(errs) > 0 { + t.Errorf("%s: got %d module parsing errors", tt.name, len(errs)) + for i, err := range errs { + t.Errorf("%s: error #%d: %v", tt.name, i, err) + } + continue + } + + mod := ms.Modules["test"] + e := ToEntry(mod) + if e = getEntry(e, tt.operationPath); e == nil { + t.Errorf("%s: want child entry at: %v, got: nil", tt.name, tt.operationPath) + continue + } + if got := e.Node.Kind(); got != tt.wantNodeKind { + t.Errorf("%s: got `operation` node kind: %q, want: %q", tt.name, got, tt.wantNodeKind) + } else if got := e.Description; got != tt.wantNodeKind { + t.Errorf("%s: got `operation` Description: %q, want: %q", tt.name, got, tt.wantNodeKind) + } + // confirm the child RPCEntry was populated for the entry. + if e.RPC == nil { + t.Errorf("%s: entry at %v has nil RPC child, want: non-nil. Entry: %#v", tt.name, tt.operationPath, e) + } else if !tt.noInput && e.RPC.Input == nil { + t.Errorf("%s: RPCEntry has nil Input, want: non-nil. Entry: %#v", tt.name, e.RPC) + } else if !tt.noOutput && e.RPC.Output == nil { + t.Errorf("%s: RPCEntry has nil Output, want: non-nil. 
Entry: %#v", tt.name, e.RPC) + } + } +} + +var testIfFeatureModules = []struct { + name string + in string +}{ + { + name: "if-feature.yang", + in: `module if-feature { + namespace "urn:if-feature"; + prefix "feat"; + + feature ft-container; + feature ft-action; + feature ft-anydata1; + feature ft-anydata2; + feature ft-anyxml; + feature ft-choice; + feature ft-case; + feature ft-feature; + feature ft-leaf; + feature ft-bit; + feature ft-leaf-list; + feature ft-enum; + feature ft-list; + feature ft-notification; + feature ft-rpc; + feature ft-augment; + feature ft-identity; + feature ft-uses; + feature ft-refine; + feature ft-augment-uses; + + container cont { + if-feature ft-container; + action act { + if-feature ft-action; + } + } + + anydata data { + if-feature ft-anydata1; + if-feature ft-anydata2; + } + + anyxml xml { + if-feature ft-anyxml; + } + + choice ch { + if-feature ft-choice; + case cs { + if-feature ft-case; + } + } + + feature f { + if-feature ft-feature; + } + + leaf l { + if-feature ft-leaf; + type bits { + bit A { + if-feature ft-bit; + } + } + } + + leaf-list ll { + if-feature ft-leaf-list; + type enumeration { + enum zero { + if-feature ft-enum; + } + } + } + + list ls { + if-feature ft-list; + } + + notification n { + if-feature ft-notification; + } + + rpc r { + if-feature ft-rpc; + } + + augment "/cont" { + if-feature ft-augment; + uses g { + if-feature ft-augment-uses; + } + } + + identity id { + if-feature ft-identity; + } + + uses g { + if-feature ft-uses; + refine rf { + if-feature ft-refine; + } + } + + grouping g { + container gc {} + } +} +`, + }, +} + +func TestIfFeature(t *testing.T) { + entryIfFeatures := func(e *Entry) []*Value { + extra := e.Extra["if-feature"] + if len(extra) == 0 { + return nil + } + values := make([]*Value, len(extra)) + for i, ex := range extra { + values[i] = ex.(*Value) + } + return values + } + + featureByName := func(e *Entry, name string) *Feature { + for _, f := range e.Extra["feature"] { + ft := 
f.(*Feature) + if ft.Name == name { + return ft + } + } + return nil + } + + ms := NewModules() + for _, tt := range testIfFeatureModules { + if err := ms.Parse(tt.in, tt.name); err != nil { + t.Fatalf("could not parse module %s: %v", tt.name, err) + } + } + + if errs := ms.Process(); len(errs) > 0 { + t.Fatalf("could not process modules: %v", errs) + } + + mod, _ := ms.GetModule("if-feature") + + testcases := []struct { + name string + inIfFeatures []*Value + wantIfFeatures []string + }{ + // Node statements + { + name: "action", + inIfFeatures: entryIfFeatures(mod.Dir["cont"].Dir["act"]), + wantIfFeatures: []string{"ft-action"}, + }, + { + name: "anydata", + inIfFeatures: entryIfFeatures(mod.Dir["data"]), + wantIfFeatures: []string{"ft-anydata1", "ft-anydata2"}, + }, + { + name: "anyxml", + inIfFeatures: entryIfFeatures(mod.Dir["xml"]), + wantIfFeatures: []string{"ft-anyxml"}, + }, + { + name: "case", + inIfFeatures: entryIfFeatures(mod.Dir["ch"].Dir["cs"]), + wantIfFeatures: []string{"ft-case"}, + }, + { + name: "choice", + inIfFeatures: entryIfFeatures(mod.Dir["ch"]), + wantIfFeatures: []string{"ft-choice"}, + }, + { + name: "container", + inIfFeatures: entryIfFeatures(mod.Dir["cont"]), + wantIfFeatures: []string{"ft-container"}, + }, + { + name: "feature", + inIfFeatures: featureByName(mod, "f").IfFeature, + wantIfFeatures: []string{"ft-feature"}, + }, + { + name: "leaf", + inIfFeatures: entryIfFeatures(mod.Dir["l"]), + wantIfFeatures: []string{"ft-leaf"}, + }, + { + name: "leaf-list", + inIfFeatures: entryIfFeatures(mod.Dir["ll"]), + wantIfFeatures: []string{"ft-leaf-list"}, + }, + { + name: "list", + inIfFeatures: entryIfFeatures(mod.Dir["ls"]), + wantIfFeatures: []string{"ft-list"}, + }, + { + name: "notification", + inIfFeatures: entryIfFeatures(mod.Dir["n"]), + wantIfFeatures: []string{"ft-notification"}, + }, + { + name: "rpc", + inIfFeatures: entryIfFeatures(mod.Dir["r"]), + wantIfFeatures: []string{"ft-rpc"}, + }, + // Other statements + { + name: 
"augment", + inIfFeatures: entryIfFeatures(mod.Dir["cont"].Augmented[0]), + wantIfFeatures: []string{"ft-augment"}, + }, + { + name: "bit", + inIfFeatures: mod.Dir["l"].Node.(*Leaf).Type.Bit[0].IfFeature, + wantIfFeatures: []string{"ft-bit"}, + }, + { + name: "enum", + inIfFeatures: mod.Dir["ll"].Node.(*Leaf).Type.Enum[0].IfFeature, + wantIfFeatures: []string{"ft-enum"}, + }, + { + name: "identity", + inIfFeatures: mod.Identities[0].IfFeature, + wantIfFeatures: []string{"ft-identity"}, + }, + { + name: "refine", + inIfFeatures: ms.Modules["if-feature"].Uses[0].Refine[0].IfFeature, + wantIfFeatures: []string{"ft-refine"}, + }, + { + name: "uses", + inIfFeatures: ms.Modules["if-feature"].Uses[0].IfFeature, + wantIfFeatures: []string{"ft-uses"}, + }, + { + // Verify that if-feature field defined in "uses" is correctly propagated to container + name: "uses", + inIfFeatures: entryIfFeatures(mod.Dir["gc"]), + wantIfFeatures: []string{"ft-uses"}, + }, + { + // Verify that if-feature field defined in "augment" and in "augment > uses" is correctly propagated to container + name: "augment-uses", + inIfFeatures: entryIfFeatures(mod.Dir["cont"].Dir["gc"]), + wantIfFeatures: []string{"ft-augment-uses", "ft-augment"}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + var names []string + for _, f := range tc.inIfFeatures { + names = append(names, f.Name) + } + + if !reflect.DeepEqual(names, tc.wantIfFeatures) { + t.Errorf("%s: did not get expected if-features, got %v, want %v", tc.name, names, tc.wantIfFeatures) + } + }) + } +} + +var testNotificationModules = []struct { + name string + in string +}{ + { + name: "notification.yang", + in: `module notification { + namespace "urn:notification"; + prefix "n"; + + notification n {} + + grouping g { + notification g-n {} + } + + container cont { + notification cont-n {} + } + + list ls { + notification ls-n {} + uses g; + } + + augment "/cont" { + notification aug-n {} + } +} +`, + }, +} + +func 
TestNotification(t *testing.T) { + ms := NewModules() + for _, tt := range testNotificationModules { + if err := ms.Parse(tt.in, tt.name); err != nil { + t.Fatalf("could not parse module %s: %v", tt.name, err) + } + } + + if errs := ms.Process(); len(errs) > 0 { + t.Fatalf("could not process modules: %v", errs) + } + + mod, _ := ms.GetModule("notification") + + testcases := []struct { + name string + wantPath []string + }{ + { + name: "module", + wantPath: []string{"n"}, + }, + { + name: "container", + wantPath: []string{"cont", "cont-n"}, + }, + { + name: "list", + wantPath: []string{"ls", "ls-n"}, + }, + { + name: "grouping", + wantPath: []string{"ls", "g-n"}, + }, + { + name: "augment", + wantPath: []string{"cont", "aug-n"}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if e := getEntry(mod, tc.wantPath); e == nil || e.Node.Kind() != "notification" { + t.Errorf("%s: want notification entry at: %v, got: %+v", tc.name, tc.wantPath, e) + } + }) + } +} + +// addTreeE takes an input Entry and appends it to a directory, keyed by path, to the Entry. +// If the Entry has children, they are appended to the directory recursively. Used in test +// cases where a path is to be referred to. +func addTreeE(e *Entry, dir map[string]*Entry) { + for _, ch := range e.Dir { + dir[ch.Path()] = ch + if ch.Dir != nil { + addTreeE(ch, dir) + } + } +} + +func TestEntryFind(t *testing.T) { + tests := []struct { + name string + inModules map[string]string + inBaseEntryPath string + wantEntryPath map[string]string // keyed on path to find, with path expected as value. 
+ wantError string + }{{ + name: "intra module find", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + leaf a { type string; } + leaf b { type string; } + + container c { leaf d { type string; } } + + rpc rpc1 { + input { leaf input1 { type string; } } + } + + container e { + action operation { + description "action"; + input { leaf input1 { type string; } } + output { leaf output1 { type string; } } + } + } + + } + `, + }, + inBaseEntryPath: "/test/a", + wantEntryPath: map[string]string{ + // Absolute path with no prefixes. + "/b": "/test/b", + // Relative path with no prefixes. + "../b": "/test/b", + // Absolute path with prefixes. + "/t:b": "/test/b", + // Relative path with prefixes. + "../t:b": "/test/b", + // Find within a directory. + "/c/d": "/test/c/d", + // Find within a directory specified relatively. + "../c/d": "/test/c/d", + // Find within a relative directory with prefixes. + "../t:c/t:d": "/test/c/d", + "../t:c/d": "/test/c/d", + "../c/t:d": "/test/c/d", + // Find within an absolute directory with prefixes. 
+ "/t:c/d": "/test/c/d", + "/c/t:d": "/test/c/d", + "../t:rpc1/input": "/test/rpc1/input", + "/t:rpc1/input": "/test/rpc1/input", + "/t:rpc1/t:input": "/test/rpc1/input", + "/t:e/operation/input": "/test/e/operation/input", + "/t:e/operation/output": "/test/e/operation/output", + "/t:e/t:operation/t:input": "/test/e/operation/input", + "/t:e/t:operation/t:output": "/test/e/operation/output", + }, + }, { + name: "submodule find", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + include test1; + + leaf a { type string; } + leaf b { type string; } + + container c { leaf d { type string; } } + + rpc rpc1 { + input { leaf input1 { type string; } } + } + + container e { + action operation { + description "action"; + input { leaf input1 { type string; } } + output { leaf output1 { type string; } } + } + } + + } + `, + "test1.yang": ` + submodule test1 { + belongs-to test { + prefix "t"; + } + + leaf d { type string; } + } + `, + }, + inBaseEntryPath: "/test/d", + wantEntryPath: map[string]string{ + // Absolute path with no prefixes. + "/b": "/test/b", + // Relative path with no prefixes. + "../b": "/test/b", + // Absolute path with prefixes. + "/t:b": "/test/b", + // Relative path with prefixes. + "../t:b": "/test/b", + // Find within a directory. + "/c/d": "/test/c/d", + // Find within a directory specified relatively. + "../c/d": "/test/c/d", + // Find within a relative directory with prefixes. + "../t:c/t:d": "/test/c/d", + "../t:c/d": "/test/c/d", + "../c/t:d": "/test/c/d", + // Find within an absolute directory with prefixes. 
+ "/t:c/d": "/test/c/d", + "/c/t:d": "/test/c/d", + "../t:rpc1/input": "/test/rpc1/input", + "/t:rpc1/input": "/test/rpc1/input", + "/t:rpc1/t:input": "/test/rpc1/input", + "/t:e/operation/input": "/test/e/operation/input", + "/t:e/operation/output": "/test/e/operation/output", + "/t:e/t:operation/t:input": "/test/e/operation/input", + "/t:e/t:operation/t:output": "/test/e/operation/output", + }, + }, { + name: "inter-module find", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + import foo { prefix foo; } + import bar { prefix baz; } + + leaf ctx { type string; } + leaf other { type string; } + leaf conflict { type string; } + }`, + "foo.yang": ` + module foo { + prefix "foo"; // matches the import above + namespace "urn:foo"; + + container bar { + leaf baz { type string; } + } + + leaf conflict { type string; } + }`, + "bar.yang": ` + module bar { + prefix "bar"; // does not match import in test + namespace "urn:b"; + + container fish { + leaf chips { type string; } + } + + leaf conflict { type string; } + }`, + }, + inBaseEntryPath: "/test/ctx", + wantEntryPath: map[string]string{ + // Check we can still do intra module lookups + "../other": "/test/other", + "/other": "/test/other", + "/foo:bar/foo:baz": "/foo/bar/baz", + // Technically partially prefixed paths to remote modules are + // not legal - check whether we can resolve them. + "/foo:bar/baz": "/foo/bar/baz", + // With mismatched prefixes. 
+ "/baz:fish/baz:chips": "/bar/fish/chips", + // With conflicting node names + "/conflict": "/test/conflict", + "/foo:conflict": "/foo/conflict", + "/baz:conflict": "/bar/conflict", + "/t:conflict": "/test/conflict", + }, + }} + + for _, tt := range tests { + ms := NewModules() + var errs []error + for n, m := range tt.inModules { + if err := ms.Parse(m, n); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + t.Errorf("%s: ms.Parse(), got unexpected error parsing input modules: %v", tt.name, errs) + continue + } + + if errs := ms.Process(); len(errs) > 0 { + t.Errorf("%s: ms.Process(), got unexpected error processing entries: %v", tt.name, errs) + continue + } + + dir := map[string]*Entry{} + for _, m := range ms.Modules { + addTreeE(ToEntry(m), dir) + } + + if _, ok := dir[tt.inBaseEntryPath]; !ok { + t.Errorf("%s: could not find entry %s within the dir: %v", tt.name, tt.inBaseEntryPath, dir) + } + + for path, want := range tt.wantEntryPath { + got := dir[tt.inBaseEntryPath].Find(path) + if got.Path() != want { + t.Errorf("%s: (entry %s).Find(%s), did not find path, got: %v, want: %v, errors: %v", tt.name, dir[tt.inBaseEntryPath].Path(), path, got.Path(), want, dir[tt.inBaseEntryPath].Errors) + } + } + } +} + +func TestEntryTypes(t *testing.T) { + leafSchema := &Entry{Name: "leaf-schema", Kind: LeafEntry, Type: &YangType{Kind: Ystring}} + + containerSchema := &Entry{ + Name: "container-schema", + Kind: DirectoryEntry, + Dir: map[string]*Entry{ + "config": { + Dir: map[string]*Entry{ + "leaf1": { + Kind: LeafEntry, + Name: "Leaf1Name", + Type: &YangType{Kind: Ystring}, + }, + }, + }, + }, + } + + emptyContainerSchema := &Entry{ + Name: "empty-container-schema", + Kind: DirectoryEntry, + } + + leafListSchema := &Entry{ + Kind: LeafEntry, + ListAttr: &ListAttr{MinElements: 0}, + Type: &YangType{Kind: Ystring}, + Name: "leaf-list-schema", + } + + listSchema := &Entry{ + Name: "list-schema", + Kind: DirectoryEntry, + ListAttr: 
&ListAttr{MinElements: 0}, + Dir: map[string]*Entry{ + "leaf-name": { + Kind: LeafEntry, + Name: "LeafName", + Type: &YangType{Kind: Ystring}, + }, + }, + } + + choiceSchema := &Entry{ + Kind: ChoiceEntry, + Name: "Choice1Name", + Dir: map[string]*Entry{ + "case1": { + Kind: CaseEntry, + Name: "case1", + Dir: map[string]*Entry{ + "case1-leaf1": { + Kind: LeafEntry, + Name: "Case1Leaf1", + Type: &YangType{Kind: Ystring}, + }, + }, + }, + }, + } + + type SchemaType string + const ( + Leaf SchemaType = "Leaf" + Container SchemaType = "Container" + LeafList SchemaType = "LeafList" + List SchemaType = "List" + Choice SchemaType = "Choice" + Case SchemaType = "Case" + ) + + tests := []struct { + desc string + schema *Entry + wantType SchemaType + }{ + { + desc: "leaf", + schema: leafSchema, + wantType: Leaf, + }, + { + desc: "container", + schema: containerSchema, + wantType: Container, + }, + { + desc: "empty container", + schema: emptyContainerSchema, + wantType: Container, + }, + { + desc: "leaf-list", + schema: leafListSchema, + wantType: LeafList, + }, + { + desc: "list", + schema: listSchema, + wantType: List, + }, + { + desc: "choice", + schema: choiceSchema, + wantType: Choice, + }, + { + desc: "case", + schema: choiceSchema.Dir["case1"], + wantType: Case, + }, + } + + for _, tt := range tests { + gotm := map[SchemaType]bool{ + Leaf: tt.schema.IsLeaf(), + Container: tt.schema.IsContainer(), + LeafList: tt.schema.IsLeafList(), + List: tt.schema.IsList(), + Choice: tt.schema.IsChoice(), + Case: tt.schema.IsCase(), + } + + for stype, got := range gotm { + if want := (stype == tt.wantType); got != want { + t.Errorf("%s: got Is%v? %t, want Is%v? 
%t", tt.desc, stype, got, stype, want) + } + } + } +} + +func TestFixChoice(t *testing.T) { + choiceEntry := &Entry{ + Name: "choiceEntry", + Kind: ChoiceEntry, + Dir: map[string]*Entry{ + "unnamedAnyDataCase": { + Name: "unnamedAnyDataCase", + Kind: AnyDataEntry, + Node: &AnyData{ + Parent: &Container{ + Name: "AnyDataParentNode", + }, + Name: "unnamedAnyDataCase", + Source: &Statement{ + Keyword: "anyData-keyword", + HasArgument: true, + Argument: "anyData-argument", + statements: nil, + }, + Extensions: []*Statement{ + { + Keyword: "anyData-extension", + HasArgument: true, + Argument: "anyData-extension-arg", + statements: nil, + }, + }, + }, + }, + "unnamedAnyXMLCase": { + Name: "unnamedAnyXMLCase", + Kind: AnyXMLEntry, + Node: &AnyXML{ + Parent: &Container{ + Name: "AnyXMLParentNode", + }, + Name: "unnamedAnyXMLCase", + Source: &Statement{ + Keyword: "anyXML-keyword", + HasArgument: true, + Argument: "anyXML-argument", + statements: nil, + }, + Extensions: []*Statement{ + { + Keyword: "anyXML-extension", + HasArgument: true, + Argument: "anyXML-extension-arg", + statements: nil, + }, + }, + }, + }, + "unnamedContainerCase": { + Name: "unnamedContainerCase", + Kind: DirectoryEntry, + Node: &Container{ + Parent: &Container{ + Name: "AnyContainerNode", + }, + Name: "unnamedContainerCase", + Source: &Statement{ + Keyword: "container-keyword", + HasArgument: true, + Argument: "container-argument", + statements: nil, + }, + Extensions: []*Statement{ + { + Keyword: "container-extension", + HasArgument: true, + Argument: "container-extension-arg", + statements: nil, + }, + }, + }, + }, + "unnamedLeafCase": { + Name: "unnamedLeafCase", + Kind: LeafEntry, + Node: &Leaf{ + Parent: &Container{ + Name: "leafParentNode", + }, + Name: "unnamedLeafCase", + Source: &Statement{ + Keyword: "leaf-keyword", + HasArgument: true, + Argument: "leaf-argument", + statements: nil, + }, + Extensions: []*Statement{ + { + Keyword: "leaf-extension", + HasArgument: true, + Argument: 
"leaf-extension-arg", + statements: nil, + }, + }, + }, + }, + "unnamedLeaf-ListCase": { + Name: "unnamedLeaf-ListCase", + Kind: LeafEntry, + Node: &LeafList{ + Parent: &Container{ + Name: "LeafListNode", + }, + Name: "unnamedLeaf-ListCase", + Source: &Statement{ + Keyword: "leaflist-keyword", + HasArgument: true, + Argument: "leaflist-argument", + statements: nil, + }, + Extensions: []*Statement{ + { + Keyword: "leaflist-extension", + HasArgument: true, + Argument: "leaflist-extension-arg", + statements: nil, + }, + }, + }, + }, + "unnamedListCase": { + Name: "unnamedListCase", + Kind: DirectoryEntry, + Node: &List{ + Parent: &Container{ + Name: "ListNode", + }, + Name: "unnamedListCase", + Source: &Statement{ + Keyword: "list-keyword", + HasArgument: true, + Argument: "list-argument", + statements: nil, + }, + Extensions: []*Statement{ + { + Keyword: "list-extension", + HasArgument: true, + Argument: "list-extension-arg", + statements: nil, + }, + }, + }, + }, + }, + } + + choiceEntry.FixChoice() + + for _, e := range []string{"AnyData", "AnyXML", "Container", + "Leaf", "Leaf-List", "List"} { + entryName := "unnamed" + e + "Case" + t.Run(entryName, func(t *testing.T) { + + insertedCase := choiceEntry.Dir[entryName] + originalCase := insertedCase.Dir[entryName] + + insertedNode := insertedCase.Node + if insertedNode.Kind() != "case" { + t.Errorf("Got inserted node type %s, expected case", + insertedNode.Kind()) + } + + originalNode := originalCase.Node + if originalNode.Kind() != strings.ToLower(e) { + t.Errorf("Got original node type %s, expected %s", + originalNode.Kind(), strings.ToLower(e)) + } + + if insertedNode.ParentNode() != originalNode.ParentNode() { + t.Errorf("Got inserted node's parent node %v, expected %v", + insertedNode.ParentNode(), originalNode.ParentNode()) + } + + if insertedNode.NName() != originalNode.NName() { + t.Errorf("Got inserted node's name %s, expected %s", + insertedNode.NName(), originalNode.NName()) + } + + if 
insertedNode.Statement() != originalNode.Statement() { + t.Errorf("Got inserted node's statement %v, expected %v", + insertedNode.Statement(), originalNode.Statement()) + } + + if len(insertedNode.Exts()) != len(originalNode.Exts()) { + t.Errorf("Got inserted node extensions slice len %d, expected %v", + len(insertedNode.Exts()), len(originalNode.Exts())) + } + + for i, e := range insertedNode.Exts() { + if e != originalNode.Exts()[i] { + t.Errorf("Got inserted node's extension %v at index %d, expected %v", + e, i, originalNode.Exts()[i]) + } + } + }) + } +} + +func mustReadFile(path string) string { + s, err := ioutil.ReadFile(path) + if err != nil { + panic(err) + } + return string(s) +} + +func TestDeviation(t *testing.T) { + type deviationTest struct { + path string + entry *Entry // entry is the entry that is wanted at a particular path, if a field is left as nil, it is not checked. + } + tests := []struct { + desc string + inFiles map[string]string + inParseOptions Options + wants map[string][]deviationTest + wantParseErrSubstring string + wantProcessErrSubstring string + }{{ + desc: "deviation with add", + inFiles: map[string]string{"deviate": mustReadFile(filepath.Join("testdata", "deviate.yang"))}, + wants: map[string][]deviationTest{ + "deviate": {{ + path: "/target/add/config", + entry: &Entry{ + Config: TSFalse, + }, + }, { + path: "/target/add/default", + entry: &Entry{ + Default: []string{"a default value"}, + }, + }, { + path: "/target/add/default-typedef", + entry: &Entry{ + Default: nil, + }, + }, { + path: "/target/add/default-list", + entry: &Entry{ + Default: []string{"foo", "bar", "foo"}, + }, + }, { + path: "/target/add/default-list-typedef-default", + entry: &Entry{ + Default: nil, + }, + }, { + path: "/target/add/mandatory", + entry: &Entry{ + Mandatory: TSTrue, + }, + }, { + path: "/target/add/min-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MinElements: 42, + }, + deviatePresence: deviationPresence{ + hasMinElements: true, + }, + 
}, + }, { + path: "/target/add/max-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MaxElements: 42, + }, + deviatePresence: deviationPresence{ + hasMaxElements: true, + }, + }, + }, { + path: "/target/add/max-and-min-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MinElements: 42, + MaxElements: 42, + }, + deviatePresence: deviationPresence{ + hasMinElements: true, + hasMaxElements: true, + }, + }, + }, { + path: "/target/add/units", + entry: &Entry{ + Units: "fish per second", + }, + }}, + }, + }, { + desc: "error case - deviation add that already has a default", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { + type string; + default "fish"; + } + + deviation /a { + deviate add { + default "fishsticks"; + } + } + }`, + }, + wantProcessErrSubstring: "already has a default value", + }, { + desc: "error case - deviate type not recognized", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { type string; } + + deviation /a { + deviate shrink { + max-elements 42; + } + } + }`, + }, + wantProcessErrSubstring: "unknown deviation type", + }, { + desc: "error case - deviation add max-element to non-list", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { type string; } + + deviation /a { + deviate add { + max-elements 42; + } + } + }`, + }, + wantProcessErrSubstring: "tried to deviate max-elements on a non-list type", + }, { + desc: "error case - deviation add min elements to non-list", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { type string; } + + deviation /a { + deviate add { + min-elements 42; + } + } + }`, + }, + wantProcessErrSubstring: "tried to deviate min-elements on a non-list type", + }, { + desc: "error case - deviation delete max-element on non-list", + inFiles: map[string]string{ + "deviate": ` + 
module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { type string; } + + deviation /a { + deviate delete { + max-elements 42; + } + } + }`, + }, + wantProcessErrSubstring: "tried to deviate max-elements on a non-list type", + }, { + desc: "error case - deviation delete min elements on non-list", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { type string; } + + deviation /a { + deviate delete { + min-elements 42; + } + } + }`, + }, + wantProcessErrSubstring: "tried to deviate min-elements on a non-list type", + }, { + desc: "deviation - not supported", + inFiles: map[string]string{"deviate": mustReadFile(filepath.Join("testdata", "deviate-notsupported.yang"))}, + wants: map[string][]deviationTest{ + "deviate": {{ + path: "/target", + }, { + path: "/target-list", + }, { + path: "/a-leaf", + }, { + path: "/a-leaflist", + }, { + path: "survivor", + entry: &Entry{Name: "survivor"}, + }}, + }, + }, { + desc: "deviation - not supported but ignored by option", + inFiles: map[string]string{"deviate": mustReadFile(filepath.Join("testdata", "deviate-notsupported.yang"))}, + inParseOptions: Options{ + DeviateOptions: DeviateOptions{ + IgnoreDeviateNotSupported: true, + }, + }, + wants: map[string][]deviationTest{ + "deviate": {{ + path: "/target", + entry: &Entry{Name: "target"}, + }, { + path: "/target-list", + entry: &Entry{Name: "target-list"}, + }, { + path: "/a-leaf", + entry: &Entry{Name: "a-leaf"}, + }, { + path: "/a-leaflist", + entry: &Entry{Name: "a-leaflist"}, + }, { + path: "survivor", + entry: &Entry{Name: "survivor"}, + }}, + }, + }, { + desc: "deviation removing non-existent node", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + deviation /a/b/c { + deviate not-supported; + } + } + `, + }, + wantProcessErrSubstring: "cannot find target node to deviate", + }, { + desc: "deviation not supported across modules", + inFiles: 
map[string]string{ + "source": ` + module source { + prefix "s"; + namespace "urn:s"; + + leaf a { type string; } + leaf b { type string; } + }`, + "deviation": ` + module deviation { + prefix "d"; + namespace "urn:d"; + + import source { prefix s; } + + deviation /s:a { + deviate not-supported; + } + }`, + }, + wants: map[string][]deviationTest{ + "source": {{ + path: "/a", + }, { + path: "/b", + entry: &Entry{}, + }}, + }, + }, { + desc: "deviation with replace", + inFiles: map[string]string{"deviate": mustReadFile(filepath.Join("testdata", "deviate-replace.yang"))}, + wants: map[string][]deviationTest{ + "deviate": {{ + path: "/target/replace/config", + entry: &Entry{ + Config: TSFalse, + }, + }, { + path: "/target/replace/default", + entry: &Entry{ + Default: []string{"a default value"}, + }, + }, { + path: "/target/replace/default-list", + entry: &Entry{ + Default: []string{"nematodes"}, + }, + }, { + path: "/target/replace/mandatory", + entry: &Entry{ + Mandatory: TSTrue, + }, + }, { + path: "/target/replace/min-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MinElements: 42, + }, + deviatePresence: deviationPresence{ + hasMinElements: true, + }, + }, + }, { + path: "/target/replace/max-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MaxElements: 42, + }, + deviatePresence: deviationPresence{ + hasMaxElements: true, + }, + }, + }, { + path: "/target/replace/max-and-min-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MinElements: 42, + MaxElements: 42, + }, + deviatePresence: deviationPresence{ + hasMinElements: true, + hasMaxElements: true, + }, + }, + }, { + path: "/target/replace/units", + entry: &Entry{ + Units: "fish per second", + }, + }, { + path: "/target/replace/type", + entry: &Entry{ + Type: &YangType{ + Name: "uint16", + Kind: Yuint16, + }, + }, + }}, + }, + }, { + desc: "deviation with delete", + inFiles: map[string]string{"deviate": mustReadFile(filepath.Join("testdata", "deviate-delete.yang"))}, + wants: 
map[string][]deviationTest{ + "deviate": {{ + path: "/target/delete/config", + entry: &Entry{ + Config: TSUnset, + }, + }, { + path: "/target/delete/default", + entry: &Entry{}, + }, { + path: "/target/delete/mandatory", + entry: &Entry{ + Mandatory: TSUnset, + }, + }, { + path: "/target/delete/min-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MinElements: 0, + }, + deviatePresence: deviationPresence{ + hasMinElements: true, + }, + }, + }, { + path: "/target/delete/max-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MaxElements: math.MaxUint64, + }, + deviatePresence: deviationPresence{ + hasMaxElements: true, + }, + }, + }, { + path: "/target/delete/max-and-min-elements", + entry: &Entry{ + ListAttr: &ListAttr{ + MinElements: 0, + MaxElements: math.MaxUint64, + }, + deviatePresence: deviationPresence{ + hasMinElements: true, + hasMaxElements: true, + }, + }, + }, { + path: "/target/delete/units", + entry: &Entry{ + Units: "", + }, + }}, + }, + }, { + // TODO(wenovus): Support deviate delete for leaf-lists for config-false leafs once its semantics are clear. 
+ // https://github.com/mbj4668/pyang/issues/756 + desc: "error case - deviation delete on a leaf-list", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf-list a { + type string; + default "fish"; + } + + deviation /a { + deviate delete { + default "fishsticks"; + } + } + }`, + }, + wantProcessErrSubstring: "deviate delete on default statements unsupported for leaf-lists", + }, { + desc: "error case - deviation delete of default has different keyword value", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { + type string; + default "fish"; + } + + deviation /a { + deviate delete { + default "fishsticks"; + } + } + }`, + }, + wantProcessErrSubstring: "non-matching keyword", + }, { + desc: "error case - deviation delete where the default didn't exist", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf a { + type string; + } + + deviation /a { + deviate delete { + default "fishsticks"; + } + } + }`, + }, + wantProcessErrSubstring: "default statement that doesn't exist", + }, { + desc: "error case - deviation delete of min-elements has different keyword value", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf-list a { type string; } + + deviation /a { + deviate delete { + min-elements 42; + } + } + }`, + }, + wantProcessErrSubstring: "differs from deviation's min-element value", + }, { + desc: "error case - deviation delete of max-elements has different keyword value", + inFiles: map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + leaf-list a { + type string; + max-elements 100; + } + + deviation /a { + deviate delete { + max-elements 42; + } + } + }`, + }, + wantProcessErrSubstring: "differs from deviation's max-element value", + }, { + desc: "deviation using locally defined typedef", + inFiles: 
map[string]string{ + "deviate": ` + module deviate { + prefix "d"; + namespace "urn:d"; + + import source { prefix s; } + + typedef rstr { + type string { + pattern "a.*"; + } + } + + deviation /s:a { + deviate replace { + type rstr; + } + } + } + `, + "source": ` + module source { + prefix "s"; + namespace "urn:s"; + + leaf a { type uint16; } + } + `, + }, + wants: map[string][]deviationTest{ + "source": {{ + path: "/a", + entry: &Entry{ + Type: &YangType{ + Name: "rstr", + Kind: Ystring, + Pattern: []string{"a.*"}, + }, + }, + }}, + }, + }, { + desc: "complex deviation of multiple leaves", + inFiles: map[string]string{ + "foo": ` + module foo { + prefix "f"; + namespace "urn:f"; + + container a { leaf b { type string; } } + + typedef abc { type boolean; } + typedef abt { type uint32; } + + deviation /a/b { + // typedef is not valid here. + //typedef abc { + // type boolean; + //} + deviate replace { type abc; } + } + + deviation /a/b { + // typedef is not valid here. + //typedef abt { + // type uint16; + //} + deviate replace { type abt; } + } + }`, + }, + wants: map[string][]deviationTest{ + "foo": {{ + path: "/a/b", + entry: &Entry{ + Type: &YangType{ + Name: "abt", + Kind: Yuint32, + }, + }, + }}, + }, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + ms.ParseOptions = tt.inParseOptions + + for name, mod := range tt.inFiles { + if err := ms.Parse(mod, name); err != nil { + if diff := errdiff.Substring(err, tt.wantParseErrSubstring); diff != "" { + t.Fatalf("error parsing module %s, %s", name, diff) + } + } + } + + errs := ms.Process() + if len(errs) == 0 { + // Add a nil error to compare against the wanted error string. 
+ errs = append(errs, nil) + } + var match bool + for _, err := range errs { + if diff := errdiff.Substring(err, tt.wantProcessErrSubstring); diff == "" { + match = true + break + } + } + if !match { + t.Fatalf("got errs: %v, want: %v", errs, tt.wantProcessErrSubstring) + } + + if tt.wantProcessErrSubstring == "" && len(tt.wants) == 0 { + t.Fatalf("test case expects no error and no entry. Please change your test case to contain one of them.") + } + + for mod, tcs := range tt.wants { + m, errs := ms.GetModule(mod) + if errs != nil { + t.Errorf("couldn't find module %s", mod) + continue + } + + for idx, want := range tcs { + got := m.Find(want.path) + switch { + case got == nil && want.entry != nil: + t.Errorf("%d: expected entry %s does not exist", idx, want.path) + continue + case got != nil && want.entry == nil: + t.Errorf("%d: unexpected entry %s exists, got: %v", idx, want.path, got) + continue + case want.entry == nil: + continue + } + + if got.Config != want.entry.Config { + t.Errorf("%d (%s): did not get expected config statement, got: %v, want: %v", idx, want.path, got.Config, want.entry.Config) + } + + if diff := cmp.Diff(got.Default, want.entry.Default, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("%d (%s): did not get expected default statement, (-got, +want): %s", idx, want.path, diff) + } + + if got.Mandatory != want.entry.Mandatory { + t.Errorf("%d (%s): did not get expected mandatory statement, got: %v, want: %v", idx, want.path, got.Mandatory, want.entry.Mandatory) + } + + if want.entry.ListAttr != nil { + if got.ListAttr == nil { + t.Errorf("%d (%s): listattr was nil for an entry expected to be a list at %s", idx, want.path, want.path) + continue + } + if want.entry.deviatePresence.hasMinElements { + if gotMin, wantMin := got.ListAttr.MinElements, want.entry.ListAttr.MinElements; gotMin != wantMin { + t.Errorf("%d (%s): min-elements, got: %v, want: %v", idx, want.path, gotMin, wantMin) + } + } + if want.entry.deviatePresence.hasMaxElements { + if 
gotMax, wantMax := got.ListAttr.MaxElements, want.entry.ListAttr.MaxElements; gotMax != wantMax { + t.Errorf("%d (%s): max-elements, got: %v, want: %v", idx, want.path, gotMax, wantMax) + } + } + } + + if want.entry.Type != nil { + if got.Type.Name != want.entry.Type.Name { + t.Errorf("%d (%s): type name, got: %s, want: %s", idx, want.path, got.Type.Name, want.entry.Type.Name) + } + + if got.Type.Kind != want.entry.Type.Kind { + t.Errorf("%d (%s): type kind, got: %s, want: %s", idx, want.path, got.Type.Kind, want.entry.Type.Kind) + } + } + + if got.Units != want.entry.Units { + t.Errorf("%d (%s): did not get expected units statement, got: %s, want: %s", idx, want.path, got.Units, want.entry.Units) + } + } + } + }) + } +} + +func TestLeafEntry(t *testing.T) { + tests := []struct { + name string + inModules map[string]string + wantEntryPath string + wantEntryCustomTest func(t *testing.T, e *Entry) + wantErrSubstr string + }{{ + name: "direct decimal64 type", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + leaf "gain-adjustment" { + type "decimal64" { + fraction-digits "1"; + range "-12.0..12.0"; + } + default "0.0"; + } + } + `, + }, + wantEntryPath: "/test/gain-adjustment", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.Type.FractionDigits, 1; got != want { + t.Errorf("got %d, want %d", got, want) + } + if got, want := e.Mandatory, TSUnset; got != want { + t.Errorf("got %d, want %d", got, want) + } + if got, want := e.Type.Range, (YangRange{Rf(-120, 120, 1)}); !cmp.Equal(got, want) { + t.Errorf("Range got: %v, want: %v", got, want) + } + }, + }, { + name: "typedef decimal64 type", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + typedef "optical-dB" { + type "decimal64" { + fraction-digits "1"; + } + } + + leaf "gain-adjustment" { + type "optical-dB" { + range "-12.0..12.0"; + } + default "0.0"; + } + } + `, + }, + wantEntryPath: 
"/test/gain-adjustment", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.Type.FractionDigits, 1; got != want { + t.Errorf("got %d, want %d", got, want) + } + if diff := cmp.Diff(e.Type.Range, YangRange{Rf(-120, 120, 1)}); diff != "" { + t.Errorf("Range (-got, +want):\n%s", diff) + } + }, + }, { + name: "typedef decimal64 type with overriding fraction-digits", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + typedef "optical-dB" { + type "decimal64" { + fraction-digits "1"; + } + } + + leaf "gain-adjustment" { + type "optical-dB" { + fraction-digits "2"; + range "-12.0..12.0"; + } + default "0.0"; + } + } + `, + }, + wantErrSubstr: "overriding of fraction-digits not allowed", + }, { + name: "leaf mandatory true", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + leaf "mandatory" { + type "string" { + } + mandatory true; + } + } + `, + }, + wantEntryPath: "/test/mandatory", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.Mandatory, TSTrue; got != want { + t.Errorf("got %d, want %d", got, want) + } + }, + }, { + name: "leaf mandatory false", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + leaf "mandatory" { + type "string" { + } + mandatory false; + } + } + `, + }, + wantEntryPath: "/test/mandatory", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.Mandatory, TSFalse; got != want { + t.Errorf("got %d, want %d", got, want) + } + }, + }, { + name: "leaf description", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + leaf "mandatory" { + type "string" { + } + description "I am a leaf"; + } + } + `, + }, + wantEntryPath: "/test/mandatory", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.Description, "I am a leaf"; got != want { + t.Errorf("got %q, want 
%q", got, want) + } + }, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ms := NewModules() + var errs []error + for n, m := range tt.inModules { + if err := ms.Parse(m, n); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + t.Fatalf("ms.Parse(), got unexpected error parsing input modules: %v", errs) + } + + if errs := ms.Process(); len(errs) > 0 { + if len(errs) == 1 { + if diff := errdiff.Substring(errs[0], tt.wantErrSubstr); diff != "" { + t.Fatalf("did not get expected error, %s", diff) + } + return + } + t.Fatalf("ms.Process(), got too many errors processing entries: %v", errs) + } + + dir := map[string]*Entry{} + for _, m := range ms.Modules { + addTreeE(ToEntry(m), dir) + } + + e, ok := dir[tt.wantEntryPath] + if !ok { + t.Fatalf("could not find entry %s within the dir: %v", tt.wantEntryPath, dir) + } + tt.wantEntryCustomTest(t, e) + }) + } +} + +func TestLess(t *testing.T) { + sErrors := sortedErrors{ + {"testfile0", errors.New("test error0")}, + {"testfile1", errors.New("test error1")}, + {"testfile1:1", errors.New("test error2")}, + {"testfile2:1", errors.New("test error3")}, + {"testfile2:1:1", errors.New("test error4")}, + {"testfile3:1:1:error5", errors.New("test error5")}, + {"testfile3:1:2:error6", errors.New("test error6")}, + {"testfile3:1:1:error7", errors.New("test error7")}, + } + + tests := []struct { + desc string + i int + j int + want bool + }{{ + desc: "compare two different strings without seperator ':'", + i: 0, + j: 1, + want: true, + }, { + desc: "compare two different strings without seperator ':'", + i: 1, + j: 0, + want: false, + }, { + desc: "compare one slice in a string with two slices in another string", + i: 1, + j: 2, + want: true, + }, { + desc: "compare two different strings with two slices each", + i: 2, + j: 3, + want: true, + }, { + desc: "compare two different strings with two slices each", + i: 3, + j: 2, + want: false, + }, { + desc: "compare two slices in a string with 
three slices in another string", + i: 3, + j: 4, + want: true, + }, { + desc: "compare three slices in a string with two slices in another string", + i: 4, + j: 3, + want: false, + }, { + desc: "compare two different strings with four slices each", + i: 5, + j: 6, + want: true, + }, { + desc: "compare two different strings with four slices each", + i: 6, + j: 5, + want: false, + }, { + desc: "compare two identical strings without separator ':'", + i: 1, + j: 1, + want: false, + }, { + desc: "compare two identical strings with two slices", + i: 2, + j: 2, + want: false, + }, { + desc: "compare two identical strings with three slices", + i: 4, + j: 4, + want: false, + }, { + desc: "compare two identical strings with four slices", + i: 5, + j: 5, + want: false, + }, { + desc: "compare different strings with four slices", + i: 7, + j: 5, + want: false, + }, { + desc: "compare different strings with four slices", + i: 5, + j: 7, + want: true, + }} + var cmpSymbol byte + for _, tt := range tests { + want := sErrors.Less(tt.i, tt.j) + if want != tt.want { + if want { + cmpSymbol = '<' + } else { + cmpSymbol = '>' + } + t.Errorf("%s: incorrect less comparison: \"%s\" %c \"%s\"", tt.desc, sErrors[tt.i].s, cmpSymbol, sErrors[tt.j].s) + } + } +} + +type customTestCases struct { + wantEntryPath string + wantEntryCustomTest func(t *testing.T, e *Entry) +} + +func TestOrderedBy(t *testing.T) { + tests := []struct { + name string + inModules map[string]string + testcases []customTestCases + wantErrSubstr string + }{{ + name: "ordered-by user", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + list ordered-list { + key "name"; + ordered-by user; + leaf name { + type string; + } + } + + list unordered-list { + key "name"; + ordered-by system; + leaf name { + type string; + } + } + + list unordered-list2 { + key "name"; + leaf name { + type string; + } + } + + leaf-list ordered-leaflist { + ordered-by user; + type string; + } + + 
leaf-list unordered-leaflist { + ordered-by system; + type string; + } + + leaf-list unordered-leaflist2 { + type string; + } + } + `, + }, + testcases: []customTestCases{{ + wantEntryPath: "/test/ordered-list", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.ListAttr.OrderedByUser, true; got != want { + t.Errorf("got %v, want %v", got, want) + } + }, + }, { + wantEntryPath: "/test/unordered-list", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.ListAttr.OrderedByUser, false; got != want { + t.Errorf("got %v, want %v", got, want) + } + }, + }, { + wantEntryPath: "/test/unordered-list2", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.ListAttr.OrderedByUser, false; got != want { + t.Errorf("got %v, want %v", got, want) + } + }, + }, { + wantEntryPath: "/test/ordered-leaflist", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.ListAttr.OrderedByUser, true; got != want { + t.Errorf("got %v, want %v", got, want) + } + }, + }, { + wantEntryPath: "/test/unordered-leaflist", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.ListAttr.OrderedByUser, false; got != want { + t.Errorf("got %v, want %v", got, want) + } + }, + }, { + wantEntryPath: "/test/unordered-leaflist2", + wantEntryCustomTest: func(t *testing.T, e *Entry) { + if got, want := e.ListAttr.OrderedByUser, false; got != want { + t.Errorf("got %v, want %v", got, want) + } + }, + }}, + }, { + name: "ordered-by client: invalid argument", + inModules: map[string]string{ + "test.yang": ` + module test { + prefix "t"; + namespace "urn:t"; + + list ordered-list { + key "name"; + ordered-by client; + leaf name { + type string; + } + } + } + `, + }, + wantErrSubstr: "ordered-by has invalid argument", + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ms := NewModules() + var errs []error + for n, m := range tt.inModules { + if err := ms.Parse(m, n); err != nil { + errs = 
append(errs, err) + } + } + + if len(errs) > 0 { + t.Fatalf("ms.Parse(), got unexpected error parsing input modules: %v", errs) + } + + if errs := ms.Process(); len(errs) > 0 { + if len(errs) == 1 { + if diff := errdiff.Substring(errs[0], tt.wantErrSubstr); diff != "" { + t.Fatalf("did not get expected error, %s", diff) + } + return + } + t.Fatalf("ms.Process(), got too many errors processing entries: %v", errs) + } + + dir := map[string]*Entry{} + for _, m := range ms.Modules { + addTreeE(ToEntry(m), dir) + } + + for _, tc := range tt.testcases { + e, ok := dir[tc.wantEntryPath] + if !ok { + t.Fatalf("could not find entry %s within the dir: %v", tc.wantEntryPath, dir) + } + tc.wantEntryCustomTest(t, e) + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yang/file.go b/src/webui/internal/goyang/pkg/yang/file.go new file mode 100644 index 000000000..48ff76be1 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/file.go @@ -0,0 +1,167 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "sort" + "strings" +) + +var ( + // revisionDateSuffixRegex matches on the revision-date portion of a YANG + // file's name. 
+ revisionDateSuffixRegex = regexp.MustCompile(`^@\d{4}-\d{2}-\d{2}\.yang$`) +) + +// PathsWithModules returns all paths under and including the +// root containing files with a ".yang" extension, as well as +// any error encountered +func PathsWithModules(root string) (paths []string, err error) { + pm := map[string]bool{} + filepath.Walk(root, func(p string, info os.FileInfo, e error) error { + err = e + if err == nil { + if info == nil { + return nil + } + if !info.IsDir() && strings.HasSuffix(p, ".yang") { + dir := filepath.Dir(p) + if !pm[dir] { + pm[dir] = true + paths = append(paths, dir) + } + } + return nil + } + return err + }) + return +} + +// AddPath adds the directories specified in p, a colon separated list +// of directory names, to Path, if they are not already in Path. Using +// multiple arguments is also supported. +func (ms *Modules) AddPath(paths ...string) { + for _, path := range paths { + for _, p := range strings.Split(path, ":") { + if !ms.pathMap[p] { + ms.pathMap[p] = true + ms.Path = append(ms.Path, p) + } + } + } +} + +// readFile makes testing of findFile easier. +var readFile = ioutil.ReadFile + +// scanDir makes testing of findFile easier. +var scanDir = findInDir + +// findFile returns the name and contents of the .yang file associated with +// name, or an error. If name is a module name rather than a file name (it does +// not have a .yang extension and there is no / in name), .yang is appended to +// the name. The directory that the .yang file is found in is added to Path +// if not already in Path. If a file is not found by exact match, directories +// are scanned for "name@revision-date.yang" files, the latest (sorted by +// YYYY-MM-DD revision-date) of these will be selected. +// +// If a path has the form dir/... then dir and all direct or indirect +// subdirectories of dir are searched. +// +// The current directory (.) is always checked first, no matter the value of +// Path.
+func (ms *Modules) findFile(name string) (string, string, error) { + slash := strings.Index(name, "/") + if slash < 0 && !strings.HasSuffix(name, ".yang") { + name += ".yang" + if best := scanDir(".", name, false); best != "" { + // we found a matching candidate in the local directory + name = best + } + } + + switch data, err := readFile(name); true { + case err == nil: + ms.AddPath(filepath.Dir(name)) + return name, string(data), nil + case slash >= 0: + // If there are any /'s in the name then don't search Path. + return "", "", fmt.Errorf("no such file: %s", name) + } + + for _, dir := range ms.Path { + var n string + if filepath.Base(dir) == "..." { + n = scanDir(filepath.Dir(dir), name, true) + } else { + n = scanDir(dir, name, false) + } + if n == "" { + continue + } + if data, err := readFile(n); err == nil { + return n, string(data), nil + } + } + return "", "", fmt.Errorf("no such file: %s", name) +} + +// findInDir looks for a file named name in dir or any of its subdirectories if +// recurse is true. if recurse is false, scan only the directory dir. +// If no matching file is found, an empty string is returned. +// +// The file SHOULD have the following name, per +// https://tools.ietf.org/html/rfc7950#section-5.2: +// module-or-submodule-name ['@' revision-date] '.yang' +// where revision-date = 4DIGIT "-" 2DIGIT "-" 2DIGIT +// +// If a perfect name match is found, then that file's path is returned. +// Else if file(s) with otherwise matching names but which contain a +// revision-date pattern exactly matching the above are found, then path of the +// one with the latest date is returned. 
+func findInDir(dir, name string, recurse bool) string { + fis, err := ioutil.ReadDir(dir) + if err != nil { + return "" + } + + var revisions []string + mname := strings.TrimSuffix(name, ".yang") + for _, fi := range fis { + switch { + case !fi.IsDir(): + if fn := fi.Name(); fn == name { + return filepath.Join(dir, name) + } else if strings.HasPrefix(fn, mname) && revisionDateSuffixRegex.MatchString(strings.TrimPrefix(fn, mname)) { + revisions = append(revisions, fn) + } + case recurse: + if n := findInDir(filepath.Join(dir, fi.Name()), name, recurse); n != "" { + return n + } + } + } + if len(revisions) == 0 { + return "" + } + sort.Strings(revisions) + return filepath.Join(dir, revisions[len(revisions)-1]) +} diff --git a/src/webui/internal/goyang/pkg/yang/file_test.go b/src/webui/internal/goyang/pkg/yang/file_test.go new file mode 100644 index 000000000..6500a6de4 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/file_test.go @@ -0,0 +1,166 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yang + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" +) + +func TestFindFile(t *testing.T) { + sep := string(os.PathSeparator) + + for _, tt := range []struct { + name string + path []string + check []string + }{ + { + name: "one", + check: []string{"one.yang"}, + }, + { + name: "./two", + check: []string{"./two"}, + }, + { + name: "three.yang", + check: []string{"three.yang"}, + }, + { + name: "four", + path: []string{"dir1", "dir2"}, + check: []string{"four.yang", "dir1" + sep + "four.yang", "dir2" + sep + "four.yang"}, + }, + } { + var checked []string + ms := NewModules() + ms.Path = tt.path + readFile = func(path string) ([]byte, error) { + checked = append(checked, path) + return nil, errors.New("no such file") + } + scanDir = func(dir, name string, recurse bool) string { + return filepath.Join(dir, name) + } + if _, _, err := ms.findFile(tt.name); err == nil { + t.Errorf("%s unexpectedly succeeded", tt.name) + continue + } + if !reflect.DeepEqual(tt.check, checked) { + t.Errorf("%s: got %v, want %v", tt.name, checked, tt.check) + } + } +} + +func TestScanForPathsAndAddModules(t *testing.T) { + // disable any readFile mock setup by other tests + readFile = ioutil.ReadFile + + // Scan the directory tree for YANG modules + paths, err := PathsWithModules("../../testdata") + if err != nil { + t.Fatal(err) + } + // we should have seen two directories being testdata and + // testdata/subdir. + if len(paths) != 2 { + t.Errorf("got %d paths imported, want 2", len(paths)) + } + ms := NewModules() + // add the paths found in the scan to the module path + ms.AddPath(paths...) + + // confirm we can load the four modules that exist in + // the two paths we scanned. + modules := []string{"aug", "base", "other", "subdir1"} + for _, name := range modules { + if _, err := ms.GetModule(name); err != nil { + t.Errorf("getting %s: %v", name, err) + } + } + + // however, a sub module is not a valid argument to GetModule. 
+ if _, err := ms.GetModule("sub"); err == nil { + t.Error("want an error when loading 'sub', got nil") + } + +} + +func TestFindInDir(t *testing.T) { + testDir := "testdata/find-file-test" + + tests := []struct { + desc string + inDir string + inName string + inRecurse bool + want string + }{{ + desc: "file not found", + inDir: testDir, + inName: "green.yang", + inRecurse: true, + want: "", + }, { + desc: "input directory does not exist", + inDir: filepath.Join(testDir, "dne"), + inName: "red.yang", + inRecurse: true, + want: "", + }, { + desc: "exact match", + inDir: testDir, + inName: "blue.yang", + inRecurse: false, + want: filepath.Join(testDir, "blue.yang"), + }, { + desc: "exact match, recursive", + inDir: testDir, + inName: "blue.yang", + inRecurse: true, + want: filepath.Join(testDir, "blue.yang"), + }, { + desc: "exact match with non-standard name", + inDir: testDir, + inName: "non-standard.name", + inRecurse: false, + want: filepath.Join(testDir, "non-standard.name"), + }, { + desc: "revision match without recursion, and ignoring invalid revision", + inDir: testDir, + inName: "red.yang", + inRecurse: false, + want: filepath.Join(testDir, "red@2010-10-10.yang"), + }, { + desc: "revision match with recursion", + inDir: testDir, + inName: "red.yang", + inRecurse: true, + want: filepath.Join(testDir, "dir", "dirdir", "red@2022-02-22.yang"), + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if got, want := findInDir(tt.inDir, tt.inName, tt.inRecurse), tt.want; got != want { + t.Errorf("got: %q, want: %q", got, want) + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yang/find.go b/src/webui/internal/goyang/pkg/yang/find.go new file mode 100644 index 000000000..9f0575d47 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/find.go @@ -0,0 +1,96 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// This file has functions that search the AST for specified nodes. + +import ( + "reflect" + "strings" +) + +// localPrefix returns the local prefix used by the containing (sub)module to +// refer to its own module. +func localPrefix(n Node) string { + return RootNode(n).GetPrefix() +} + +// trimLocalPrefix trims the current module's prefix from the given name. If the +// name is not prefixed with the local module's prefix or is unprefixed +// entirely, then the same string is returned unchanged. +func trimLocalPrefix(n Node, name string) string { + pfx := localPrefix(n) + if pfx != "" { + pfx += ":" + } + return strings.TrimPrefix(name, pfx) +} + +// FindGrouping finds the grouping named name according to YANG namespace rules +// using the input node as the initial context node. The seen parameter +// provides a list of the modules previously seen by FindGrouping during +// traversal. If the named grouping cannot be found, nil is returned. +// +// FindGrouping works by recursively looking through the context node's parent +// nodes for grouping fields, or in included or imported submodules/modules for +// externally-defined groupings. Note that any prefix in the name must match +// the module prefix of its import statement in the context node's module. +func FindGrouping(n Node, name string, seen map[string]bool) *Grouping { + name = trimLocalPrefix(n, name) + for n != nil { + // Grab the Grouping field of the underlying structure. 
n is + // always a pointer to a structure, + e := reflect.ValueOf(n).Elem() + v := e.FieldByName("Grouping") + if v.IsValid() { + for _, g := range v.Interface().([]*Grouping) { + if g.Name == name { + return g + } + } + } + v = e.FieldByName("Import") + if v.IsValid() { + for _, i := range v.Interface().([]*Import) { + // If the prefix matches the import statement, + // then search for the trimmed name in that module. + pname := strings.TrimPrefix(name, i.Prefix.Name+":") + if pname == name { + continue + } + if g := FindGrouping(i.Module, pname, seen); g != nil { + return g + } + } + } + v = e.FieldByName("Include") + if v.IsValid() { + for _, i := range v.Interface().([]*Include) { + if seen[i.Module.Name] { + // Prevent infinite loops in the case that we have already looked at + // this submodule. This occurs where submodules have include statements + // in them, or there is a circular dependency. + continue + } + seen[i.Module.Name] = true + if g := FindGrouping(i.Module, name, seen); g != nil { + return g + } + } + } + n = n.ParentNode() + } + return nil +} diff --git a/src/webui/internal/goyang/pkg/yang/find_test.go b/src/webui/internal/goyang/pkg/yang/find_test.go new file mode 100644 index 000000000..da39d5047 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/find_test.go @@ -0,0 +1,358 @@ +package yang + +import ( + "testing" +) + +func TestFindGrouping(t *testing.T) { + tests := []struct { + desc string + inMods map[string]string + inNode func(*Modules) (Node, error) + inName string + wantGroupNodePath string + // wantCannotFound indicates that the grouping cannot be found. 
+ wantCannotFound bool + }{{ + desc: "grouping within module", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } } + + container c { leaf b { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev"], "c") + }, + inName: "g", + wantGroupNodePath: "/dev/g", + }, { + desc: "nested grouping within module", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + + revision 01-01-01 { description "the start of time"; } + + grouping g { grouping gg { leaf a { type string; } } uses gg; } + + container c { leaf b { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev"], "g") + }, + inName: "gg", + wantGroupNodePath: "/dev/g/gg", + }, { + desc: "grouping that uses another grouping both within the same module", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + + revision 01-01-01 { description "the start of time"; } + + grouping gg { leaf a { type string; } } + + grouping g { uses gg; } + + container c { leaf b { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev"], "g") + }, + inName: "gg", + wantGroupNodePath: "/dev/gg", + }, { + desc: "grouping in included submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + container c { leaf b { type string; } } + + revision 01-01-01 { description "the start of time"; } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev"], "c") + }, + inName: "g", + wantGroupNodePath: "/sys/g", + }, { + desc: 
"grouping in indirectly-included submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + + container c { leaf b { type string; } } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + include sysdb; + + revision 01-01-01 { description "the start of time"; } + }`, + "sysdb": ` + submodule sysdb { + belongs-to dev { + prefix "d"; + } + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev"], "c") + }, + inName: "g", + wantGroupNodePath: "/sysdb/g", + }, { + desc: "grouping in indirectly-included submodule with node in submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + include sysdb; + + revision 01-01-01 { description "the start of time"; } + + container c { leaf b { type string; } } + }`, + "sysdb": ` + submodule sysdb { + belongs-to dev { + prefix "d"; + } + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.SubModules["sys"], "c") + }, + inName: "g", + wantGroupNodePath: "/sysdb/g", + }, { + desc: "grouping in submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + import sysdb { prefix "s"; } + + revision 01-01-01 { description "the start of time"; } + + container c { leaf b { type string; } uses s:g; } + }`, + "sysdb": ` + module sysdb { + prefix sd; + namespace "urn:sd"; + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + 
return FindNode(ms.Modules["dev"], "c") + }, + inName: "s:g", + wantGroupNodePath: "/sysdb/g", + }, { + desc: "grouping that uses another grouping both in different modules", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + import dev2 { prefix "de2"; } + + revision 01-01-01 { description "the start of time"; } + + container c { leaf l { type string; } uses de2:g; } + }`, + "dev2": ` + module dev2 { + prefix d2; + namespace "urn:d2"; + import dev3 { prefix "de3"; } + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } uses de3:gg; } + }`, + "dev3": ` + module dev3 { + prefix d3; + namespace "urn:d3"; + + revision 01-01-01 { description "the start of time"; } + + grouping gg { leaf b { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev2"], "g") + }, + inName: "de3:gg", + wantGroupNodePath: "/dev3/gg", + }, { + desc: "grouping that uses another grouping both in different modules but prefix is wrong", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + import dev2 { prefix "de2"; } + + revision 01-01-01 { description "the start of time"; } + + container c { leaf l { type string; } uses de2:g; } + }`, + "dev2": ` + module dev2 { + prefix d2; + namespace "urn:d2"; + import dev3 { prefix "de3"; } + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } uses de3:gg; } + }`, + "dev3": ` + module dev3 { + prefix dev3; + namespace "urn:dev3"; + + revision 01-01-01 { description "the start of time"; } + + grouping gg { leaf b { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev2"], "g") + }, + inName: "d3:gg", + wantCannotFound: true, + }, { + desc: "grouping that uses another grouping both in different modules but uses wrong context node", + inMods: map[string]string{ + "dev": ` + module dev { + prefix 
d; + namespace "urn:d"; + import dev2 { prefix "de2"; } + + revision 01-01-01 { description "the start of time"; } + + container c { leaf l { type string; } uses de2:g; } + }`, + "dev2": ` + module dev2 { + prefix d2; + namespace "urn:d2"; + import dev3 { prefix "dev3"; } + + revision 01-01-01 { description "the start of time"; } + + grouping g { leaf a { type string; } uses dev3:gg; } + }`, + "dev3": ` + module dev3 { + prefix dev3; + namespace "urn:dev3"; + + revision 01-01-01 { description "the start of time"; } + + grouping gg { leaf b { type string; } } + }`, + }, + inNode: func(ms *Modules) (Node, error) { + return FindNode(ms.Modules["dev"], "c") + }, + inName: "dev3:gg", + wantCannotFound: true, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + + for n, m := range tt.inMods { + if err := ms.Parse(m, n); err != nil { + t.Fatalf("cannot parse module %s, err: %v", n, err) + } + } + + if errs := ms.Process(); errs != nil { + t.Fatalf("cannot process modules: %v", errs) + } + + seen := map[string]bool{} + node, err := tt.inNode(ms) + if err != nil { + t.Fatalf("cannot find input node: %v", err) + } + g := FindGrouping(node, tt.inName, seen) + if got, want := g == nil, tt.wantCannotFound; got != want { + t.Fatalf("got grouping: %v, wantCannotFound: %v", got, want) + } + if tt.wantCannotFound { + return + } + if got, want := NodePath(g), tt.wantGroupNodePath; got != want { + t.Errorf("found grouping path doesn't match expected, got: %s, want: %s", got, want) + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yang/identity.go b/src/webui/internal/goyang/pkg/yang/identity.go new file mode 100644 index 000000000..615bff79f --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/identity.go @@ -0,0 +1,192 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "fmt" + "sort" + "sync" +) + +// This file implements data structures and functions that relate to the +// identity type. + +// identityDictionary stores a set of identities across all parsed Modules that +// have been resolved to be identified by their module and name. +type identityDictionary struct { + mu sync.Mutex + // dict is a global cache of identities keyed by + // modulename:identityname, where modulename is the full name of the + // module to which the identity belongs. If the identity were defined + // in a submodule, then the parent module name is used instead. + dict map[string]resolvedIdentity +} + +// resolvedIdentity is an Identity that has been disambiguated. +type resolvedIdentity struct { + Module *Module + Identity *Identity +} + +// isEmpty determines whether the resolvedIdentity struct value is populated. +func (r resolvedIdentity) isEmpty() bool { + return r.Module == nil && r.Identity == nil +} + +// newResolvedIdentity creates a resolved identity from an identity and its +// associated module, and returns the prefixed name (Prefix:IdentityName) +// along with the resolved identity. 
+func newResolvedIdentity(m *Module, i *Identity) (string, *resolvedIdentity) { + r := &resolvedIdentity{ + Module: m, + Identity: i, + } + return i.modulePrefixedName(), r +} + +func appendIfNotIn(ids []*Identity, chk *Identity) []*Identity { + for _, id := range ids { + if id == chk { + return ids + } + } + return append(ids, chk) +} + +// addChildren adds identity r and all of its children to ids +// deterministically. +func addChildren(r *Identity, ids []*Identity) []*Identity { + ids = appendIfNotIn(ids, r) + + // Iterate through the values of r. + for _, ch := range r.Values { + ids = addChildren(ch, ids) + } + return ids +} + +// findIdentityBase returns the resolved identity that is corresponds to the +// baseStr string in the context of the module/submodule mod. +func (mod *Module) findIdentityBase(baseStr string) (*resolvedIdentity, []error) { + var base resolvedIdentity + var ok bool + var errs []error + + basePrefix, baseName := getPrefix(baseStr) + rootPrefix := mod.GetPrefix() + source := Source(mod) + typeDict := mod.Modules.typeDict + + switch basePrefix { + case "", rootPrefix: + // This is a local identity which is defined within the current + // module + keyName := fmt.Sprintf("%s:%s", module(mod).Name, baseName) + base, ok = typeDict.identities.dict[keyName] + if !ok { + errs = append(errs, fmt.Errorf("%s: can't resolve the local base %s as %s", source, baseStr, keyName)) + } + default: + // This is an identity which is defined within another module + extmod := FindModuleByPrefix(mod, basePrefix) + if extmod == nil { + errs = append(errs, + fmt.Errorf("%s: can't find external module with prefix %s", source, basePrefix)) + break + } + // The identity we are looking for is modulename:basename. + if id, ok := typeDict.identities.dict[fmt.Sprintf("%s:%s", module(extmod).Name, baseName)]; ok { + base = id + break + } + + // Error if we did not find the identity that had the name specified in + // the module it was expected to be in. 
+ if base.isEmpty() { + errs = append(errs, fmt.Errorf("%s: can't resolve remote base %s", source, baseStr)) + } + } + return &base, errs +} + +func (ms *Modules) resolveIdentities() []error { + defer ms.typeDict.identities.mu.Unlock() + ms.typeDict.identities.mu.Lock() + + var errs []error + + // Across all modules, read the identity values that have been extracted + // from them, and compile them into a "fully resolved" map that means that + // we can look them up based on the 'real' prefix of the module and the + // name of the identity. + for _, mod := range ms.Modules { + for _, i := range mod.Identities() { + keyName, r := newResolvedIdentity(mod, i) + ms.typeDict.identities.dict[keyName] = *r + } + + // Hoist up all identities in our included submodules. + // We could just do a range on ms.SubModules, but that + // might process a submodule that no module included. + for _, in := range mod.Include { + if in.Module == nil { + continue + } + for _, i := range in.Module.Identities() { + keyName, r := newResolvedIdentity(in.Module, i) + ms.typeDict.identities.dict[keyName] = *r + } + } + } + + // Now, we want to create for all identities a view of all of their children. + // A child identity here means an inherited identity. + // + // We start by finding the direct children of all identities using the + // 'base' statement. + for _, i := range ms.typeDict.identities.dict { + if i.Identity.Base != nil { + // This identity inherits from one or more other identities. + + root := RootNode(i.Identity) + for _, b := range i.Identity.Base { + base, baseErr := root.findIdentityBase(b.asString()) + + if baseErr != nil { + errs = append(errs, baseErr...) + continue + } + + // Build up a list of direct children of this identity. + base.Identity.Values = append(base.Identity.Values, i.Identity) + } + } + } + + // Now, we can find all transitive identities by recursively populating + // the children of each identity. 
+ for _, i := range ms.typeDict.identities.dict { + newValues := []*Identity{} + for _, j := range i.Identity.Values { + newValues = addChildren(j, newValues) + } + sort.SliceStable(newValues, func(j, k int) bool { + return newValues[j].Name < newValues[k].Name + }) + i.Identity.Values = newValues + } + + return errs +} diff --git a/src/webui/internal/goyang/pkg/yang/identity_test.go b/src/webui/internal/goyang/pkg/yang/identity_test.go new file mode 100644 index 000000000..631680c9a --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/identity_test.go @@ -0,0 +1,802 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/openconfig/gnmi/errdiff" +) + +// inputModule is a mock input YANG module. +type inputModule struct { + name string // The filename of the YANG module. + content string // The contents of the YANG module. +} + +type idrefOut struct { + module string // The module that the identityref is within. + name string // The name of the identityref. + values []string // Names of the identities that the identityref relates to. +} + +// identityOut is the output for a particular identity within the test case. +type identityOut struct { + module string // The module that the identity is within. + name string // The name of the identity. + baseNames []string // The base(s) of the identity as string(s). 
+ values []string // The string names of derived identities. +} + +// identityTestCase is a test case for a module which contains identities. +type identityTestCase struct { + name string + in []inputModule // The set of input modules for the test + identities []identityOut // Slice of the identity values expected + idrefs []idrefOut // Slice of identityref results expected + wantErrSubstr string // wanErrSubstr is a substring of the wanted error. +} + +// getBaseNamesFrom is a utility function for getting the base name(s) of an identity +func getBaseNamesFrom(i *Identity) []string { + baseNames := []string{} + for _, base := range i.Base { + baseNames = append(baseNames, base.Name) + } + return baseNames +} + +// Test cases for basic identity extraction. +var basicTestCases = []identityTestCase{ + { + name: "basic-test-case-1: Check identity is found in module.", + in: []inputModule{ + { + name: "idtest-one", + content: ` + module idtest-one { + namespace "urn:idone"; + prefix "idone"; + + identity TEST_ID; + } + `}, + }, + identities: []identityOut{ + {module: "idtest-one", name: "TEST_ID"}, + }, + }, + { + name: "basic-test-case-2: Check identity with base is found in module.", + in: []inputModule{ + { + name: "idtest-two", + content: ` + module idtest-two { + namespace "urn:idtwo"; + prefix "idone"; + + identity TEST_ID; + identity TEST_ID_TWO; + identity TEST_CHILD { + base TEST_ID; + } + } + `}, + }, + identities: []identityOut{ + {module: "idtest-two", name: "TEST_ID"}, + {module: "idtest-two", name: "TEST_ID_TWO"}, + {module: "idtest-two", name: "TEST_CHILD", baseNames: []string{"TEST_ID"}}, + }, + }, + { + name: "basic-test-case-3: Check identity with multiple bases.", + in: []inputModule{ + { + name: "idtest-three", + content: ` + module idtest-three { + namespace "urn:idthree"; + prefix "idthree"; + + identity BASE_ONE; + identity BASE_TWO; + identity TEST_CHILD_WITH_MULTIPLE_BASES { + base BASE_ONE; + base BASE_TWO; + } + } + `}, + }, + identities: 
[]identityOut{ + {module: "idtest-three", name: "BASE_ONE"}, + {module: "idtest-three", name: "BASE_TWO"}, + {module: "idtest-three", name: "TEST_CHILD_WITH_MULTIPLE_BASES", baseNames: []string{"BASE_ONE", "BASE_TWO"}}, + }, + }, + { + name: "basic-test-case-4: Check identity base is found from submodule.", + in: []inputModule{ + { + name: "idtest-one", + content: ` + module idtest-one { + namespace "urn:idone"; + prefix "idone"; + + include "idtest-one-sub"; + + identity TEST_ID_DERIVED { + base TEST_ID; + } + } + `}, + { + name: "idtest-one-sub", + content: ` + submodule idtest-one-sub { + belongs-to idtest-one { + prefix "idone"; + } + + identity TEST_ID; + } + `}, + }, + identities: []identityOut{ + {module: "idtest-one", name: "TEST_ID"}, + {module: "idtest-one", name: "TEST_ID_DERIVED", baseNames: []string{"TEST_ID"}}, + }, + }, + { + name: "basic-test-case-5: Check identity base is found from module.", + in: []inputModule{ + { + name: "idtest-one", + content: ` + module idtest-one { + namespace "urn:idone"; + prefix "idone"; + + include "idtest-one-sub"; + + identity TEST_ID; + } + `}, + { + name: "idtest-one-sub", + content: ` + submodule idtest-one-sub { + belongs-to idtest-one { + prefix "idone"; + } + + identity TEST_ID_DERIVED { + base TEST_ID; + } + } + `}, + }, + identities: []identityOut{ + {module: "idtest-one", name: "TEST_ID_DERIVED", baseNames: []string{"TEST_ID"}}, + {module: "idtest-one", name: "TEST_ID"}, + }, + }, +} + +// Test the ability to extract identities from a module with the correct base +// statements. 
+func TestIdentityExtract(t *testing.T) { + for _, tt := range basicTestCases { + ms := NewModules() + for _, mod := range tt.in { + _ = ms.Parse(mod.content, mod.name) + } + + for _, ti := range tt.identities { + _, err := ms.GetModule(ti.module) + + if err != nil { + t.Errorf("Could not parse module : %s", ti.module) + continue + } + + foundIdentity := false + var thisID *Identity + for _, ri := range ms.typeDict.identities.dict { + moduleName := module(ri.Module).Name + if ri.Identity.Name == ti.name && moduleName == ti.module { + foundIdentity = true + thisID = ri.Identity + break + } + } + + if !foundIdentity { + t.Errorf("Could not find identity %s in module %s, identity dict:\n%+v", ti.name, ti.module, ms.typeDict.identities.dict) + continue + } + + actualBaseNames := getBaseNamesFrom(thisID) + if len(ti.baseNames) > 0 { + if diff := cmp.Diff(actualBaseNames, ti.baseNames); diff != "" { + t.Errorf("(-got, +want):\n%s", diff) + } + } else { + if thisID.Base != nil { + t.Errorf("Identity %s had unexpected base(s) %s", thisID.Name, + actualBaseNames) + } + } + } + } +} + +// Test cases for validating that identities can be resolved correctly. 
+var treeTestCases = []identityTestCase{ + { + name: "tree-test-case-0: Validate identity resolution across submodules", + in: []inputModule{ + { + name: "base.yang", + content: ` + module base { + namespace "urn:base"; + prefix "base"; + + include side; + + identity REMOTE_BASE; + } + `}, + { + name: "remote.yang", + content: ` + submodule side { + belongs-to base { + prefix "r"; + } + + identity LOCAL_REMOTE_BASE { + base r:REMOTE_BASE; + } + } + `}, + }, + identities: []identityOut{ + { + module: "base", + name: "REMOTE_BASE", + values: []string{"LOCAL_REMOTE_BASE"}, + }, + }, + }, + { + name: "tree-test-case-1: Validate identity resolution across modules", + in: []inputModule{ + { + name: "base.yang", + content: ` + module base { + namespace "urn:base"; + prefix "base"; + + import remote { prefix "r"; } + import remote2 { prefix "r2"; } + + identity LOCAL_REMOTE_BASE { + base r:REMOTE_BASE; + } + + identity LOCAL_REMOTE_BASE2 { + base r2:REMOTE_BASE2; + } + } + `}, + { + name: "remote.yang", + content: ` + module remote { + namespace "urn:remote"; + prefix "r"; + + identity REMOTE_BASE; + } + `}, + { + name: "remote2.yang", + content: ` + module remote2 { + namespace "urn:remote2"; + prefix "remote"; + + identity REMOTE_BASE2; + } + `}, + }, + identities: []identityOut{ + { + module: "remote", + name: "REMOTE_BASE", + values: []string{"LOCAL_REMOTE_BASE"}, + }, + { + module: "remote2", + name: "REMOTE_BASE2", + values: []string{"LOCAL_REMOTE_BASE2"}, + }, + { + module: "base", + name: "LOCAL_REMOTE_BASE", + baseNames: []string{"r:REMOTE_BASE"}, + }, + { + module: "base", + name: "LOCAL_REMOTE_BASE2", + baseNames: []string{"r2:REMOTE_BASE2"}, + }, + }, + }, + { + name: "tree-test-case-2: Multi-level inheritance validation.", + in: []inputModule{ + { + name: "base.yang", + content: ` + module base { + namespace "urn:base"; + prefix "base"; + + identity GREATGRANDFATHER; + identity GRANDFATHER { + base "GREATGRANDFATHER"; + } + identity FATHER { + base 
"GRANDFATHER"; + } + identity SON { + base "FATHER"; + } + identity UNCLE { + base "GRANDFATHER"; + } + identity BROTHER { + base "FATHER"; + } + identity GREATUNCLE { + base "GREATGRANDFATHER"; + } + } + `}, + }, + identities: []identityOut{ + { + module: "base", + name: "GREATGRANDFATHER", + values: []string{ + "BROTHER", // Order is alphabetical + "FATHER", + "GRANDFATHER", + "GREATUNCLE", + "SON", + "UNCLE", + }, + }, + { + module: "base", + name: "GRANDFATHER", + baseNames: []string{"GREATGRANDFATHER"}, + values: []string{"BROTHER", "FATHER", "SON", "UNCLE"}, + }, + { + module: "base", + name: "GREATUNCLE", + baseNames: []string{"GREATGRANDFATHER"}, + }, + { + module: "base", + name: "FATHER", + baseNames: []string{"GRANDFATHER"}, + values: []string{"BROTHER", "SON"}, + }, + { + module: "base", + name: "UNCLE", + baseNames: []string{"GRANDFATHER"}, + }, + { + module: "base", + name: "BROTHER", + baseNames: []string{"FATHER"}, + }, + }, + }, + { + name: "tree-test-case-3", + in: []inputModule{ + { + name: "base.yang", + content: ` + module base { + namespace "urn:base"; + prefix "base"; + + identity BASE; + identity NOTBASE { + base BASE; + } + + leaf idref { + type identityref { + base "BASE"; + } + } + } + `}, + }, + identities: []identityOut{ + { + module: "base", + name: "BASE", + values: []string{"NOTBASE"}, + }, + { + module: "base", + name: "NOTBASE", + baseNames: []string{"BASE"}, + }, + }, + idrefs: []idrefOut{ + { + module: "base", + name: "idref", + values: []string{"NOTBASE"}, + }, + }, + }, + { + name: "tree-test-case-4", + in: []inputModule{ + { + name: "base.yang", + content: ` + module base4 { + namespace "urn:base"; + prefix "base4"; + + identity BASE4; + identity CHILD4 { + base BASE4; + } + + typedef t { + type identityref { + base BASE4; + } + } + + leaf tref { + type t; + } + } + `}, + }, + identities: []identityOut{ + { + module: "base4", + name: "BASE4", + values: []string{"CHILD4"}, + }, + { + module: "base4", + name: "CHILD4", + 
baseNames: []string{"BASE4"}, + }, + }, + idrefs: []idrefOut{ + { + module: "base4", + name: "tref", + values: []string{"CHILD4"}, + }, + }, + }, + { + name: "tree-test-case-5", + in: []inputModule{ + { + name: "base.yang", + content: ` + module base5 { + namespace "urn:base"; + prefix "base5"; + + identity BASE5A; + identity BASE5B; + + identity FIVE_ONE { + base BASE5A; + } + + identity FIVE_TWO { + base BASE5B; + } + + leaf union { + type union { + type identityref { + base BASE5A; + } + type identityref { + base BASE5B; + } + } + } + }`}, + }, + identities: []identityOut{ + { + module: "base5", + name: "BASE5A", + values: []string{"FIVE_ONE"}, + }, + { + module: "base5", + name: "BASE5B", + values: []string{"FIVE_TWO"}, + }, + }, + idrefs: []idrefOut{ + { + module: "base5", + name: "union", + values: []string{"FIVE_ONE", "FIVE_TWO"}, + }, + }, + }, + { + name: "identity's base can't be found", + in: []inputModule{ + { + name: "idtest", + content: ` + module idtest{ + namespace "urn:idtwo"; + prefix "idone"; + + identity TEST_ID_TWO; + identity TEST_CHILD { + base TEST_ID; + } + } + `}, + }, + identities: []identityOut{ + {module: "idtest", name: "TEST_ID2"}, + }, + wantErrSubstr: "can't resolve the local base", + }, + { + name: "identity's base can't be found in remote", + in: []inputModule{ + { + name: "remote.yang", + content: ` + module remote { + namespace "urn:remote"; + prefix "remote"; + + identity REMOTE_BASE_ESCAPE; + } + `}, + { + name: "base.yang", + content: ` + module base { + namespace "urn:base"; + prefix "base"; + + import remote { prefix "r"; } + + identity LOCAL_REMOTE_BASE { + base r:REMOTE_BASE; + } + } + `}, + }, + identities: []identityOut{ + {module: "base", name: "LOCAL_REMOTE_BASE"}, + }, + wantErrSubstr: "can't resolve remote base", + }, + { + name: "identity's base's module can't be found", + in: []inputModule{ + { + name: "remote.yang", + content: ` + module remote { + namespace "urn:remote"; + prefix "remote"; + + identity 
REMOTE_BASE; + } + `}, + { + name: "base.yang", + content: ` + module base { + namespace "urn:base"; + prefix "base"; + + import remote { prefix "r"; } + + identity LOCAL_REMOTE_BASE { + base roe:REMOTE_BASE; + } + } + `}, + }, + identities: []identityOut{ + {module: "base", name: "LOCAL_REMOTE_BASE"}, + }, + wantErrSubstr: "can't find external module", + }, +} + +// TestIdentityTree - check inheritance of identities from local and remote +// sources. The Values of an Identity correspond to the values that are +// referenced by that identity, which need to be inherited. +func TestIdentityTree(t *testing.T) { + for _, tt := range treeTestCases { + t.Run(tt.name, func(t *testing.T) { + ms := NewModules() + + for _, mod := range tt.in { + _ = ms.Parse(mod.content, mod.name) + } + + errs := ms.Process() + + var err error + switch len(errs) { + case 1: + err = errs[0] + if diff := errdiff.Substring(err, tt.wantErrSubstr); diff != "" { + t.Fatalf("%s", diff) + } + return + case 0: + if diff := errdiff.Substring(err, tt.wantErrSubstr); diff != "" { + t.Fatalf("%s", diff) + } + default: + t.Fatalf("got multiple errors: %v", errs) + } + + // Walk through the identities that are defined in the test case output + // and validate that they exist, and their base and values are as expected. 
+ for _, chkID := range tt.identities { + m, errs := ms.GetModule(chkID.module) + if errs != nil { + t.Errorf("Couldn't find expected module: %v", errs) + continue + } + + var foundID *Identity + for _, i := range m.Identities { + if i.Name == chkID.name { + foundID = i + break + } + } + + if foundID == nil { + t.Errorf("Couldn't find identity %s in module %s", chkID.name, + chkID.module) + continue + } + + if len(chkID.baseNames) > 0 { + actualBaseNames := getBaseNamesFrom(foundID) + if diff := cmp.Diff(actualBaseNames, chkID.baseNames); diff != "" { + t.Errorf("(-got, +want):\n%s", diff) + } + } + + valueMap := make(map[string]bool) + + for i, val := range chkID.values { + valueMap[val] = false + // Check that IsDefined returns the right result + if !foundID.IsDefined(val) { + t.Errorf("Couldn't find defined value %s for %s", val, chkID.name) + } + + // Check that the values are sorted in a consistent order + if foundID.Values[i].Name != val { + t.Errorf("Invalid order for value #%d. Expecting %s Got %s", i, foundID.Values[i].Name, val) + } + // Check that GetValue returns the right Identity + idval := foundID.GetValue(val) + if idval == nil { + t.Errorf("Couldn't GetValue(%s) for %s", val, chkID.name) + } + } + + // Ensure that IsDefined does not return false positives + if foundID.IsDefined("DoesNotExist") { + t.Errorf("Non-existent value IsDefined for %s", foundID.Name) + } + + if foundID.GetValue("DoesNotExist") != nil { + t.Errorf("Non-existent value GetValue not nil for %s", foundID.Name) + } + + for _, chkv := range foundID.Values { + _, ok := valueMap[chkv.Name] + if !ok { + t.Errorf("Found unexpected value %s for %s", chkv.Name, chkID.name) + continue + } + valueMap[chkv.Name] = true + } + + for k, v := range valueMap { + if v == false { + t.Errorf("Could not find identity %s for %s", k, chkID.name) + } + } + } + + for _, idr := range tt.idrefs { + m, errs := ms.GetModule(idr.module) + if errs != nil { + t.Errorf("Couldn't find expected module %s: %v", 
idr.module, errs) + continue + } + + if _, ok := m.Dir[idr.name]; !ok { + t.Errorf("Could not find expected identity, got: nil, want: %v", idr.name) + continue + } + + identity := m.Dir[idr.name] + var vals []*Identity + switch len(identity.Type.Type) { + case 0: + vals = identity.Type.IdentityBase.Values + default: + for _, b := range identity.Type.Type { + if b.IdentityBase != nil { + vals = append(vals, b.IdentityBase.Values...) + } + } + } + + var valNames []string + for _, v := range vals { + valNames = append(valNames, v.Name) + } + + if diff := cmp.Diff(idr.values, valNames); diff != "" { + t.Errorf("Identity %s did not have expected values, (-got, +want):\n%s", idr.name, diff) + } + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yang/lex.go b/src/webui/internal/goyang/pkg/yang/lex.go new file mode 100644 index 000000000..49a0515b2 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/lex.go @@ -0,0 +1,522 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// This file implements the lexical tokenization of yang. 
The lexer returns +// a series of tokens with one of the following codes: +// +// tError // an error was encountered +// tEOF // end-of-file +// tString // A de-quoted string (e.g., "\"bob\"" becomes "bob") +// tUnquoted // An un-quoted string +// '{' +// ';' +// '}' + +import ( + "bytes" + "fmt" + "io" + "os" + "reflect" + "runtime" + "strings" + "unicode/utf8" +) + +const ( + eof = 0x7fffffff // end of file, also an invalid rune + maxErrors = 8 + tooMany = "too many errors...\n" +) + +// stateFn represents a state in the lexer as a function, returning the next +// state the lexer should move to. +type stateFn func(*lexer) stateFn + +// A lexer holds the internal state of the lexer. +type lexer struct { + errout io.Writer // destination for errors, defaults to os.Stderr + errcnt int // number of errors encountered + + file string // name of file we are processing + input string // contents of the file + start int // start position in input of unconsumed data. + pos int // current position in the input. + line int // the current line number (1's based) + col int // the current column number (0 based, add 1 before displaying) + + debug bool // set to true to include internal debugging + inPattern bool // set when parsing the argument to a pattern + items chan *token // channel of scanned items. + tcol int // column with tabs expanded (for multi-line strings) + scol int // starting col of current token + sline int // starting line of current token + state stateFn // current state of the lexer + width int // width of last rune read from input. +} + +// A code is a token code. Single character tokens (i.e., punctuation) +// are represented by their unicode code point. +type code int + +const ( + tEOF = code(-1 - iota) // Reached end of file + tError // An error + tString // A dequoted string + tUnquoted // A non-quoted string +) + +// String returns c as a string. 
+func (c code) String() string { + switch c { + case tError: + return "Error" + case tString: + return "String" + case tUnquoted: + return "Unquoted" + } + if c < 0 || c > '~' { + return fmt.Sprintf("%d", c) + } + return fmt.Sprintf("'%c'", c) +} + +// A token represents one lexical unit read from the input. +// Line and Col are both 1's based. +type token struct { + code code + Text string // the actual text of the token + File string // the source file the token is from + Line int // the source line number the token is from + Col int // the source column number the token is from (8 space tabs) +} + +// Code returns the code of t. If t is nil, tEOF is returned. +func (t *token) Code() code { + if t == nil { + return tEOF + } + return t.code +} + +// String returns the location, code, and text of t as a string. +func (t *token) String() string { + var s []string + if t.File != "" { + s = append(s, t.File+":") + } + if t.Line != 0 { + s = append(s, fmt.Sprintf("%d:%d:", t.Line, t.Col)) + } + if t.Text == "" { + s = append(s, fmt.Sprintf(" %v", t.code)) + } else { + s = append(s, " ", t.Text) + } + return strings.Join(s, "") +} + +// A note on writing to errout. Errors should always be written to errout +// in a single Write call. The test code makes this assumption for testing +// expected errors. + +// newLexer returns a new lexer, importing into it the provided input and path. +// The provided path should indicate where the source originated. +func newLexer(input, path string) *lexer { + // Force input to be newline terminated. + if len(input) > 0 && input[len(input)-1] != '\n' { + input += "\n" + } + return &lexer{ + file: path, + input: input, + line: 1, // humans start with 1 + items: make(chan *token, maxErrors), + state: lexGround, + errout: os.Stderr, + } +} + +// NextToken returns the next token from the input, returning nil on EOF. 
+func (l *lexer) NextToken() *token { + for { + select { + case item := <-l.items: + return item + default: + if l.state == nil { + return nil + } + if l.debug { + name := runtime.FuncForPC(reflect.ValueOf(l.state).Pointer()).Name() + name = name[strings.LastIndex(name, ".")+1:] + name = strings.TrimPrefix(name, "lex") + input := l.input[l.pos:] + if len(input) > 8 { + input = input[:8] + "..." + } + fmt.Fprintf(os.Stderr, "%d:%d: state %s %q\n", l.line, l.col+1, name, input) + } + l.state = l.state(l) + } + } +} + +// emit emits the currently parsed token marked with code c using emitText. +func (l *lexer) emit(c code) { + l.emitText(c, l.input[l.start:l.pos]) +} + +// emitText emits text as a token marked with c. +// All input up to the current cursor (pos) is consumed. +func (l *lexer) emitText(c code, text string) { + if l.debug { + fmt.Fprintf(os.Stderr, "%v: %q\n", c, text) + } + select { + case l.items <- &token{ + code: c, + Text: text, + File: l.file, + Line: l.sline, + Col: l.scol + 1, + }: + default: + } + l.consume() +} + +// consume consumes all input to the current cursor. +func (l *lexer) consume() { + l.start = l.pos +} + +// backup steps back one rune. It can be called only immediately after a call +// of next. Backing up over a tab will set tcol to the last position of the +// tab, not where the tab started. This is okay as when we call next again it +// will move tcol back to where it was before backup was called. +func (l *lexer) backup() { + l.pos -= l.width + if l.width > 0 { + l.col-- + l.tcol-- + if l.col < 0 { + // We must have backuped up over a newline. + // Don't bother to figure out the column number + // as the next call to next will reset it to 0. + l.line-- + l.col = 0 + l.tcol = 0 + } + } +} + +// peek returns but does not move past the next rune in the input. backup +// is not supported over peeked characters. 
+func (l *lexer) peek() rune { + rune := l.next() + l.backup() + return rune +} + +// next returns the next rune in the input. If next encounters the end of input +// then it will return eof. +func (l *lexer) next() (rune rune) { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + // l.width is what limits more than a single backup. + rune, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + switch rune { + case '\n': + l.line++ + l.col = 0 + l.tcol = 0 + case '\t': + l.tcol = (l.tcol + 8) & ^7 + l.col++ // should this be l.width? + default: + l.tcol++ + l.col++ // should this be l.width? + } + return rune +} + +// acceptRun moves the cursor forward up to, but not including, the first rune +// not found in the valid set. It returns true if any runes were accepted. +func (l *lexer) acceptRun(valid string) bool { + ret := false + for strings.ContainsRune(valid, l.next()) { + ret = true + } + l.backup() + return ret +} + +// skipTo moves the cursor up to, but not including, s. +// Returns whether s was found in the remaining input. +func (l *lexer) skipTo(s string) bool { + if x := strings.Index(l.input[l.pos:], s); x >= 0 { + l.updateCursor(x) + return true + } + return false +} + +// updateCursor moves the cursor forward n bytes. updateCursor does not +// correctly handle tabs. This is okay as it is only used by skipTo, and skipTo +// is never used to skip to an initial " (which is the only time that tcol is +// necessary, as per YANG's multi-line quoted string requirement). +func (l *lexer) updateCursor(n int) { + s := l.input[l.pos : l.pos+n] + l.pos += n + // we could get away without updating width at all because backup is + // only promised to work after a call to next. + l.width = n + + if c := strings.Count(s, "\n"); c > 0 { + l.line += c + l.col = 0 + } + l.col += utf8.RuneCountInString(s[strings.LastIndex(s, "\n")+1:]) +} + +// Errorf writes an error on l.errout and increments the error count. 
+// If too many errors (8) are encountered then lexing will stop and +// eof is returned as the next token. +func (l *lexer) Errorf(f string, v ...interface{}) { + buf := &bytes.Buffer{} + + if l.debug { + // For internal debugging, print the file and line number + // of the call to Errorf + _, name, line, _ := runtime.Caller(1) + + fmt.Fprintf(buf, "%s:%d: ", name, line) + } + fmt.Fprintf(buf, "%s:%d:%d: ", l.file, l.line, l.col+1) + fmt.Fprintf(buf, f, v...) + b := buf.Bytes() + if b[len(b)-1] != '\n' { + buf.Write([]byte{'\n'}) + } + l.emit(tError) + l.adderror(buf.Bytes()) +} + +func (l *lexer) ErrorfAt(line, col int, f string, v ...interface{}) { + oline, ocol := l.line, l.col + defer func() { + l.line, l.col = oline, ocol + }() + l.line, l.col = line, col + l.Errorf(f, v...) +} + +// adderror writes out the error string err and increases the error count. +// If more than maxErrors are encountered, a "too many errors" message is +// displayed and processing stops (by clearing the input). +func (l *lexer) adderror(err []byte) { + if l.errcnt == maxErrors { + l.pos = 0 + l.start = 0 + l.input = "" + l.errout.Write([]byte(tooMany)) + l.errcnt++ + return + } else if l.errcnt == maxErrors+1 { + return + } + l.errout.Write(err) + l.errcnt++ +} + +// Below are all the states + +// lexGround is the state when the lexer is not in the middle of a token. The +// ground state is left once the start of a token is found. Pure comment lines +// leave the lexer in the ground state. 
+func lexGround(l *lexer) stateFn { + l.acceptRun(" \t\r\n") // Skip leading spaces + l.consume() + l.sline = l.line + l.scol = l.col + + switch c := l.peek(); c { + case eof: + return nil + case ';', '{', '}': + l.next() + l.emit(code(c)) + return lexGround + case '\'': + l.next() + l.consume() // Toss the leading ' + if !l.skipTo("'") { + l.ErrorfAt(l.line, l.col-1, `missing closing '`) + return nil + } + l.emit(tString) + l.next() // Either EOF or the matching ' + return lexGround + case '"': + l.next() + return lexQString + case '/': + l.next() + switch l.peek() { + case '/': + // Start of a // comment + if !l.skipTo("\n") { + // Here "\n" should always be found, since we force all + // input to be "\n" terminated. + l.ErrorfAt(l.line, l.col-1, `lexer internal error: all lines should be newline-terminated.`) + return nil + } + return lexGround + case '*': + // Start of a /* comment + if !l.skipTo("*/") { + l.ErrorfAt(l.line, l.col-1, `missing closing */`) + return nil + } + // Now actually skip the */ + l.next() + l.next() + return lexGround + default: + return lexUnquoted + } + case '+': + l.next() + switch l.peek() { + case '"', '\'': + l.emit(tUnquoted) + return lexGround + default: + return lexUnquoted + } + default: + return lexUnquoted + } +} + +// From the YANG standard: +// +// If the double-quoted string contains a line break followed by space +// or tab characters that are used to indent the text according to the +// layout in the YANG file, this leading whitespace is stripped from the +// string, up to and including the column of the double quote character, +// or to the first non-whitespace character, whichever occurs first. In +// this process, a tab character is treated as 8 space characters. +// +// If the double-quoted string contains space or tab characters before a +// line break, this trailing whitespace is stripped from the string. + +// lexQString handles double quoted strings, see the above text on how they +// work. 
The leading " has already been parsed. +func lexQString(l *lexer) stateFn { + indent := l.tcol // the column our text starts on + over := true // set to false when we are not past the indent + + // Keep track of where the starting quote was + line, col := l.line, l.col-1 + + var text []byte + for { + // l.next can return non-8bit unicode code points. + // c cannot be treated as only a single byte. + switch c := l.next(); c { + case eof: + l.ErrorfAt(line, col, `missing closing "`) + return nil + case '"': + l.emitText(tString, string(text)) + + return lexGround + case '\n': + Loop: + // Trim trailing white space from the line. + for i := len(text); i > 0; { + i-- + switch text[i] { + case ' ', '\t': + text = text[:i] + default: + break Loop + } + } + text = append(text, []byte(string(c))...) + over = false + case ' ', '\t': + // Ignore leading white space up to our indent. + if !over && l.tcol <= indent { + break + } + over = true + text = append(text, []byte(string(c))...) + case '\\': + switch c = l.next(); c { + case 'n': + c = '\n' + case 't': + c = '\t' + case '"': + case '\\': + default: + // Strings are use both in descriptions and + // in patterns. In strings only \n, \t, \" + // and \\ are defined. In patterns the \ + // can either mean to escape the character + // (e..g., \{) or to be part of of a special + // sequence such as \S. + if !l.inPattern { + l.ErrorfAt(l.line, l.col-2, `invalid escape sequence: \`+string(c)) + } + text = append(text, '\\') + } + fallthrough + default: + over = true + text = append(text, []byte(string(c))...) + } + } +} + +// lexUnquoted reads one identifier/number/un-quoted-string/... +// +// From https://tools.ietf.org/html/rfc7950#section-6.1.3: +// An unquoted string is any sequence of characters that does not +// contain any space, tab, carriage return, or line feed characters, a +// single or double quote character, a semicolon (";"), braces ("{" or +// "}"), or comment sequences ("//", "/*", or "*/"). 
+func lexUnquoted(l *lexer) stateFn { + for { + switch c := l.peek(); c { + // TODO: Support detection of comment immediately following an + // unquoted string, likely through supporting two peeks instead + // of just one. + case ' ', '\r', '\n', '\t', ';', '"', '\'', '{', '}', eof: + l.emit(tUnquoted) + return lexGround + default: + l.next() + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/lex_test.go b/src/webui/internal/goyang/pkg/yang/lex_test.go new file mode 100644 index 000000000..5d1c56a3d --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/lex_test.go @@ -0,0 +1,309 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "bytes" + "runtime" + "testing" +) + +// line returns the line number from which it was called. +// Used to mark where test entries are in the source. +func line() int { + _, _, line, _ := runtime.Caller(1) + return line + +} + +// Equal returns true if t and tt are equal (have the same code and text), +// false if not. +func (t *token) Equal(tt *token) bool { + return t.code == tt.code && t.Text == tt.Text +} + +// T Creates a new token from the provided code and string. 
+func T(c code, text string) *token { return &token{code: c, Text: text} } + +func TestLex(t *testing.T) { +Tests: + for _, tt := range []struct { + line int + in string + tokens []*token + }{ + {line(), "", nil}, + {line(), "bob", []*token{ + T(tUnquoted, "bob"), + }}, + {line(), "bob //bob", []*token{ + T(tUnquoted, "bob"), + }}, + {line(), "/the/path", []*token{ + T(tUnquoted, "/the/path"), + }}, + {line(), "+the/path", []*token{ + T(tUnquoted, "+the/path"), + }}, + {line(), "+the+path", []*token{ + T(tUnquoted, "+the+path"), + }}, + {line(), "+ the/path", []*token{ + T(tUnquoted, "+"), + T(tUnquoted, "the/path"), + }}, + {line(), "{bob}", []*token{ + T('{', "{"), + T(tUnquoted, "bob"), + T('}', "}"), + }}, + {line(), "bob;fred", []*token{ + T(tUnquoted, "bob"), + T(';', ";"), + T(tUnquoted, "fred"), + }}, + {line(), "\t bob\t; fred ", []*token{ + T(tUnquoted, "bob"), + T(';', ";"), + T(tUnquoted, "fred"), + }}, + {line(), ` + bob; + fred +`, []*token{ + T(tUnquoted, "bob"), + T(';', ";"), + T(tUnquoted, "fred"), + }}, + {line(), ` + // This is a comment + bob; + fred +`, []*token{ + T(tUnquoted, "bob"), + T(';', ";"), + T(tUnquoted, "fred"), + }}, + {line(), ` + /* This is a comment */ + bob; + fred +`, []*token{ + T(tUnquoted, "bob"), + T(';', ";"), + T(tUnquoted, "fred"), + }}, + {line(), ` + /* + * This is a comment + */ + bob; + fred +`, []*token{ + T(tUnquoted, "bob"), + T(';', ";"), + T(tUnquoted, "fred"), + }}, + {line(), ` + bob; // This is bob + fred // This is fred +`, []*token{ + T(tUnquoted, "bob"), + T(';', ";"), + T(tUnquoted, "fred"), + }}, + {line(), ` +pattern '[a-zA-Z0-9!#$%&'+"'"+'*+/=?^_` + "`" + `{|}~-]+'; +`, []*token{ + T(tUnquoted, "pattern"), + T(tString, "[a-zA-Z0-9!#$%&"), + T(tUnquoted, "+"), + T(tString, "'"), + T(tUnquoted, "+"), + T(tString, "*+/=?^_`{|}~-]+"), + T(';', ";"), + }}, + {line(), ` +// tab indent both lines + "Broken + line" +`, []*token{ + T(tString, "Broken\nline"), + }}, + {line(), ` +// tab indent both lines, 
trailing spaces and tabs + "Broken + line" +`, []*token{ + T(tString, "Broken\nline"), + }}, + {line(), ` +// tab indent first line, spaces and tab second line + "Broken + line" +`, []*token{ + T(tString, "Broken\nline"), + }}, + {line(), ` +// tab indent first line, spaces second linfe + "Broken + line" +`, []*token{ + T(tString, "Broken\nline"), + }}, + {line(), ` +// extra space in second line + "Broken + space" +`, []*token{ + T(tString, "Broken\n space"), + }}, + {line(), ` +// spaces first line, tab on second + "Broken + space" +`, []*token{ + T(tString, "Broken\nspace"), + }}, + {line(), ` +// Odd indenting + "Broken + space" +`, []*token{ + T(tString, "Broken\nspace"), + }}, + {line(), ` +// Odd indenting + "Broken \t + space with trailing space" +`, []*token{ + T(tString, "Broken\nspace with trailing space"), + }}, + } { + l := newLexer(tt.in, "") + // l.debug = true + for i := 0; ; i++ { + token := l.NextToken() + if token == nil { + if len(tt.tokens) != i { + t.Errorf("%d: got %d tokens, want %d", tt.line, i, len(tt.tokens)) + } + continue Tests + } + if len(tt.tokens) > i && !token.Equal(tt.tokens[i]) { + t.Errorf("%d, %d: got (%v, %q) want (%v, %q)", tt.line, i, token.code, token.Text, tt.tokens[i].code, tt.tokens[i].Text) + } + } + } +} + +func TestLexErrors(t *testing.T) { + for _, tt := range []struct { + line int + in string + errcnt int + errs string + }{ + {line(), + `1: "no closing quote`, + 1, + `test.yang:1:4: missing closing " +`, + }, + {line(), + `1: on another line +2: there is "no closing quote\"`, + 1, + `test.yang:2:13: missing closing " +`, + }, + {line(), + `1: +2: "Mares eat oats," +3: "And does eat oats," +4: "But little lambs eat ivy," +5: "and if I were a little lamb," +6: "I'ld eat ivy too. 
+5: So saith the sage.`, + 1, + `test.yang:6:4: missing closing " +`, + }, + {line(), + `1: +2: "Quoted string" +3: "Missing quote +4: "Another quoted string" +`, + 1, + `test.yang:4:26: missing closing " +`, + }, + {line(), + `1: +2: 'Quoted string' +3: 'Missing quote +4: 'Another quoted string' +`, + 1, + `test.yang:4:26: missing closing ' +`, + }, + {line(), + `1: "Quoted string\" +2: Missing end-quote\q`, + 2, + `test.yang:2:21: invalid escape sequence: \q +test.yang:1:4: missing closing " +`, + }, + {line(), + `/* This is a comment +without an ending. +`, + 1, + `test.yang:1:1: missing closing */ +`, + }, + {line(), + // Two errors too many. + `yang-version 1.1;description "\/\/\/\/\/\/\/\/\/\/";`, + 9, + `test.yang:1:31: invalid escape sequence: \/ +test.yang:1:33: invalid escape sequence: \/ +test.yang:1:35: invalid escape sequence: \/ +test.yang:1:37: invalid escape sequence: \/ +test.yang:1:39: invalid escape sequence: \/ +test.yang:1:41: invalid escape sequence: \/ +test.yang:1:43: invalid escape sequence: \/ +test.yang:1:45: invalid escape sequence: \/ +` + tooMany, + }, + } { + l := newLexer(tt.in, "test.yang") + errbuf := &bytes.Buffer{} + l.errout = errbuf + for l.NextToken() != nil { + + } + if l.errcnt != tt.errcnt { + t.Errorf("%d: got %d errors, want %v", tt.line, l.errcnt, tt.errcnt) + } + errs := errbuf.String() + if errs != tt.errs { + t.Errorf("%d: got errors:\n%s\nwant:\n%s", tt.line, errs, tt.errs) + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/marshal_test.go b/src/webui/internal/goyang/pkg/yang/marshal_test.go new file mode 100644 index 000000000..6151aa150 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/marshal_test.go @@ -0,0 +1,832 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/kylelemons/godebug/pretty" +) + +func TestMarshalJSON(t *testing.T) { + tests := []struct { + name string + in *Entry + want string + wantErr bool + }{{ + name: "simple leaf entry", + in: &Entry{ + Name: "leaf", + Node: &Leaf{ + Name: "leaf", + }, + Description: "This is a fake leaf.", + Default: []string{"default-leaf-value"}, + Errors: []error{fmt.Errorf("error one")}, + Kind: LeafEntry, + Config: TSTrue, + Prefix: &Value{ + Name: "ModulePrefix", + Source: &Statement{ + Keyword: "prefix", + Argument: "ModulePrefix", + HasArgument: true, + }, + }, + Type: &YangType{ + Name: "string", + Kind: Ystring, + Default: "string-value", + }, + Annotation: map[string]interface{}{ + "fish": struct{ Side string }{"chips"}, + }, + }, + want: `{ + "Name": "leaf", + "Description": "This is a fake leaf.", + "Default": [ + "default-leaf-value" + ], + "Kind": 0, + "Config": 1, + "Prefix": { + "Name": "ModulePrefix", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "ModulePrefix" + } + }, + "Type": { + "Name": "string", + "Kind": 18, + "Default": "string-value" + }, + "Annotation": { + "fish": { + "Side": "chips" + } + } +}`, + }, { + name: "simple container entry with parent", + in: &Entry{ + Name: "container", + Node: &Container{ + Name: "container", + }, + Kind: DirectoryEntry, + Config: TSFalse, + Prefix: &Value{ + Name: "ModulePrefix", + Source: &Statement{ + Keyword: "prefix", + Argument: "ModulePrefix", + HasArgument: 
true, + }, + }, + Dir: map[string]*Entry{ + "child": { + Name: "leaf", + Node: &Leaf{ + Name: "leaf", + }, + Kind: LeafEntry, + Config: TSUnset, + Prefix: &Value{ + Name: "ModulePrefix", + Source: &Statement{ + Keyword: "prefix", + Argument: "ModulePrefix", + HasArgument: true, + }, + }, + Type: &YangType{ + Name: "union", + Type: []*YangType{{ + Name: "string", + Pattern: []string{"^a.*$"}, + Kind: Ystring, + Length: YangRange{{ + Min: FromInt(10), + Max: FromInt(20), + }}, + }}, + }, + }, + }, + Augments: []*Entry{{ + Name: "augment", + Node: &Leaf{ + Name: "leaf", + }, + Kind: LeafEntry, + Config: TSFalse, + Prefix: &Value{ + Name: "ModulePrefix", + Source: &Statement{ + Keyword: "prefix", + Argument: "ModulePrefix", + HasArgument: true, + }, + }, + }}, + Augmented: []*Entry{{ + Name: "augmented", + Node: &Leaf{ + Name: "leaf", + }, + Kind: LeafEntry, + Config: TSTrue, + Prefix: &Value{ + Name: "ModulePrefix", + Source: &Statement{ + Keyword: "prefix", + Argument: "ModulePrefix", + HasArgument: true, + }, + }, + }}, + Uses: []*UsesStmt{{ + Uses: &Uses{ + Name: "grouping", + }, + Grouping: &Entry{ + Name: "grouping", + Node: &Grouping{ + Name: "grouping", + Leaf: []*Leaf{{ + Name: "groupingLeaf", + }}, + }, + Config: TSFalse, + Prefix: &Value{ + Name: "ModulePrefix", + Source: &Statement{ + Keyword: "prefix", + Argument: "ModulePrefix", + HasArgument: true, + }, + }, + }, + }}, + }, + want: `{ + "Name": "container", + "Kind": 1, + "Config": 2, + "Prefix": { + "Name": "ModulePrefix", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "ModulePrefix" + } + }, + "Dir": { + "child": { + "Name": "leaf", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "ModulePrefix", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "ModulePrefix" + } + }, + "Type": { + "Name": "union", + "Kind": 0, + "Type": [ + { + "Name": "string", + "Kind": 18, + "Length": [ + { + "Min": { + "Value": 10, + "FractionDigits": 0, + "Negative": false + 
}, + "Max": { + "Value": 20, + "FractionDigits": 0, + "Negative": false + } + } + ], + "Pattern": [ + "^a.*$" + ] + } + ] + } + } + }, + "Augments": [ + { + "Name": "augment", + "Kind": 0, + "Config": 2, + "Prefix": { + "Name": "ModulePrefix", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "ModulePrefix" + } + } + } + ], + "Augmented": [ + { + "Name": "augmented", + "Kind": 0, + "Config": 1, + "Prefix": { + "Name": "ModulePrefix", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "ModulePrefix" + } + } + } + ], + "Uses": [ + { + "Uses": { + "Name": "grouping" + }, + "Grouping": { + "Name": "grouping", + "Kind": 0, + "Config": 2, + "Prefix": { + "Name": "ModulePrefix", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "ModulePrefix" + } + } + } + } + ] +}`, + }, { + name: "Entry with list and leaflist", + in: &Entry{ + Name: "list", + Kind: DirectoryEntry, + Config: TSUnset, + Dir: map[string]*Entry{ + "leaf": { + Name: "string", + Kind: LeafEntry, + }, + "leaf-list": { + Name: "leaf-list", + ListAttr: &ListAttr{ + MaxElements: 18446744073709551615, + MinElements: 0, + }, + }, + }, + ListAttr: &ListAttr{ + MaxElements: 42, + MinElements: 48, + }, + Identities: []*Identity{{ + Name: "ID_ONE", + }}, + Exts: []*Statement{{ + Keyword: "some-extension:ext", + Argument: "ext-value", + HasArgument: true, + }}, + }, + want: `{ + "Name": "list", + "Kind": 1, + "Config": 0, + "Dir": { + "leaf": { + "Name": "string", + "Kind": 0, + "Config": 0 + }, + "leaf-list": { + "Name": "leaf-list", + "Kind": 0, + "Config": 0, + "ListAttr": { + "MinElements": 0, + "MaxElements": 18446744073709551615, + "OrderedBy": null, + "OrderedByUser": false + } + } + }, + "Exts": [ + { + "Keyword": "some-extension:ext", + "HasArgument": true, + "Argument": "ext-value" + } + ], + "ListAttr": { + "MinElements": 48, + "MaxElements": 42, + "OrderedBy": null, + "OrderedByUser": false + }, + "Identities": [ + { + "Name": "ID_ONE" + 
} + ] +}`, + }} + + for _, tt := range tests { + got, err := json.MarshalIndent(tt.in, "", " ") + if err != nil { + if !tt.wantErr { + t.Errorf("%s: json.MarshalIndent(%v, ...): got unexpected error: %v", tt.name, tt.in, err) + } + continue + } + + if diff := pretty.Compare(string(got), tt.want); diff != "" { + t.Errorf("%s: jsonMarshalIndent(%v, ...): did not get expected JSON, diff(-got,+want):\n%s", tt.name, tt.in, diff) + } + } +} + +func TestParseAndMarshal(t *testing.T) { + tests := []struct { + name string + in []inputModule + want map[string]string + }{{ + name: "simple single module", + in: []inputModule{{ + name: "test.yang", + content: `module test { + prefix "t"; + namespace "urn:t"; + + typedef foobar { + type string { + length "10"; + } + } + + identity "BASE"; + identity "DERIVED" { base "BASE"; } + + container test { + list a { + key "k"; + min-elements 10; + max-elements "unbounded"; + leaf k { type string; } + + leaf bar { + type foobar; + } + } + + leaf d { + type decimal64 { + fraction-digits 8; + } + } + + leaf-list zip { + type string; + } + + leaf-list zip2 { + max-elements 1000; + type string; + } + + leaf x { + type union { + type string; + type identityref { + base "BASE"; + } + } + } + } + }`, + }}, + want: map[string]string{ + "test": `{ + "Name": "test", + "Kind": 1, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Dir": { + "test": { + "Name": "test", + "Kind": 1, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Dir": { + "a": { + "Name": "a", + "Kind": 1, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Dir": { + "bar": { + "Name": "bar", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + 
"Type": { + "Name": "foobar", + "Kind": 18, + "Length": [ + { + "Min": { + "Value": 10, + "FractionDigits": 0, + "Negative": false + }, + "Max": { + "Value": 10, + "FractionDigits": 0, + "Negative": false + } + } + ] + } + }, + "k": { + "Name": "k", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Type": { + "Name": "string", + "Kind": 18 + } + } + }, + "Key": "k", + "ListAttr": { + "MinElements": 10, + "MaxElements": 18446744073709551615, + "OrderedBy": null, + "OrderedByUser": false + } + }, + "d": { + "Name": "d", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Type": { + "Name": "decimal64", + "Kind": 12, + "FractionDigits": 8, + "Range": [ + { + "Min": { + "Value": 9223372036854775808, + "FractionDigits": 8, + "Negative": true + }, + "Max": { + "Value": 9223372036854775807, + "FractionDigits": 8, + "Negative": false + } + } + ] + } + }, + "x": { + "Name": "x", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Type": { + "Name": "union", + "Kind": 19, + "Type": [ + { + "Name": "string", + "Kind": 18 + }, + { + "Name": "identityref", + "Kind": 15, + "IdentityBase": { + "Name": "BASE", + "Values": [ + { + "Name": "DERIVED" + } + ] + } + } + ] + } + }, + "zip": { + "Name": "zip", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Type": { + "Name": "string", + "Kind": 18 + }, + "ListAttr": { + "MinElements": 0, + "MaxElements": 18446744073709551615, + "OrderedBy": null, + "OrderedByUser": false + } + }, + "zip2": { + "Name": "zip2", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + 
"Type": { + "Name": "string", + "Kind": 18 + }, + "ListAttr": { + "MinElements": 0, + "MaxElements": 1000, + "OrderedBy": null, + "OrderedByUser": false + } + } + } + } + }, + "Identities": [ + { + "Name": "BASE", + "Values": [ + { + "Name": "DERIVED" + } + ] + }, + { + "Name": "DERIVED" + } + ], + "extra-unstable": { + "namespace": [ + { + "Name": "urn:t", + "Source": { + "Keyword": "namespace", + "HasArgument": true, + "Argument": "urn:t" + } + } + ] + } +}`, + }, + }, { + name: "multiple modules with extension", + in: []inputModule{{ + name: "ext.yang", + content: `module ext { + prefix "e"; + namespace "urn:e"; + + extension foobar { + argument "baz"; + } + }`, + }, { + name: "test.yang", + content: `module test { + prefix "t"; + namespace "urn:t"; + + import ext { prefix ext; } + + leaf t { + type string; + ext:foobar "marked"; + } + }`, + }}, + want: map[string]string{ + "test": `{ + "Name": "test", + "Kind": 1, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Dir": { + "t": { + "Name": "t", + "Kind": 0, + "Config": 0, + "Prefix": { + "Name": "t", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "t" + } + }, + "Type": { + "Name": "string", + "Kind": 18 + }, + "Exts": [ + { + "Keyword": "ext:foobar", + "HasArgument": true, + "Argument": "marked" + } + ] + } + }, + "extra-unstable": { + "namespace": [ + { + "Name": "urn:t", + "Source": { + "Keyword": "namespace", + "HasArgument": true, + "Argument": "urn:t" + } + } + ] + } +}`, + "ext": `{ + "Name": "ext", + "Kind": 1, + "Config": 0, + "Prefix": { + "Name": "e", + "Source": { + "Keyword": "prefix", + "HasArgument": true, + "Argument": "e" + } + }, + "extra-unstable": { + "extension": [ + { + "Name": "foobar", + "Argument": { + "Name": "baz" + } + } + ], + "namespace": [ + { + "Name": "urn:e", + "Source": { + "Keyword": "namespace", + "HasArgument": true, + "Argument": "urn:e" + } + } + ] + } +}`, + }, + 
}} + + for _, tt := range tests { + ms := NewModules() + + for _, mod := range tt.in { + if err := ms.Parse(mod.content, mod.name); err != nil { + t.Errorf("%s: ms.Parse(..., %v): parsing error with module: %v", tt.name, mod.name, err) + continue + } + + if errs := ms.Process(); len(errs) != 0 { + t.Errorf("%s: ms.Process(): could not parse modules: %v", tt.name, errs) + continue + } + + entries := make(map[string]*Entry) + for _, m := range ms.Modules { + if _, ok := entries[m.Name]; !ok { + entries[m.Name] = ToEntry(m) + + got, err := json.MarshalIndent(entries[m.Name], "", " ") + if err != nil { + t.Errorf("%s: json.MarshalIndent(...): got unexpected error: %v", tt.name, err) + continue + } + + if diff := cmp.Diff(string(got), tt.want[m.Name]); diff != "" { + t.Errorf("%s: json.MarshalIndent(...): did not get expected JSON, diff(-got,+want):\n%s", tt.name, diff) + } + } + } + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/modules.go b/src/webui/internal/goyang/pkg/yang/modules.go new file mode 100644 index 000000000..ab543d259 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/modules.go @@ -0,0 +1,466 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// This file implements the Modules type. This includes the processing of +// include and import statements, which must be done prior to turning the +// module into an Entry tree. 
+ +import ( + "fmt" + "sync" +) + +// Modules contains information about all the top level modules and +// submodules that are read into it via its Read method. +type Modules struct { + Modules map[string]*Module // All "module" nodes + SubModules map[string]*Module // All "submodule" nodes + includes map[*Module]bool // Modules we have already done include on + nsMu sync.Mutex // nsMu protects the byNS map. + byNS map[string]*Module // Cache of namespace lookup + typeDict *typeDictionary // Cache for type definitions. + entryCacheMu sync.RWMutex // entryCacheMu protects the entryCache map. + // entryCache is used to prevent unnecessary recursion into previously + // converted nodes. To access the map, use the get/set/ClearEntryCache() + // thread-safe functions. + entryCache map[Node]*Entry + // mergedSubmodule is used to prevent re-parsing a submodule that has already + // been merged into a particular entity when circular dependencies are being + // ignored. The keys of the map are a string that is formed by concatenating + // the name of the including (sub)module and the included submodule. + mergedSubmodule map[string]bool + // ParseOptions sets the options for the current YANG module parsing. It can be + // directly set by the caller to influence how goyang will behave in the presence + // of certain exceptional cases. + ParseOptions Options + // Path is the list of directories to look for .yang files in. + Path []string + // pathMap is used to prevent adding dups in Path. + pathMap map[string]bool +} + +// NewModules returns a newly created and initialized Modules. +func NewModules() *Modules { + ms := &Modules{ + Modules: map[string]*Module{}, + SubModules: map[string]*Module{}, + includes: map[*Module]bool{}, + byNS: map[string]*Module{}, + typeDict: newTypeDictionary(), + mergedSubmodule: map[string]bool{}, + entryCache: map[Node]*Entry{}, + pathMap: map[string]bool{}, + } + return ms +} + +// Read reads the named yang module into ms. 
The name can be the name of an +// actual .yang file or a module/submodule name (the base name of a .yang file, +// e.g., foo.yang is named foo). An error is returned if the file is not +// found or there was an error parsing the file. +func (ms *Modules) Read(name string) error { + name, data, err := ms.findFile(name) + if err != nil { + return err + } + return ms.Parse(data, name) +} + +// Parse parses data as YANG source and adds it to ms. The name should reflect +// the source of data. +// Note: If an error is returned, valid modules might still have been added to +// the Modules cache. +func (ms *Modules) Parse(data, name string) error { + ss, err := Parse(data, name) + if err != nil { + return err + } + for _, s := range ss { + n, err := buildASTWithTypeDict(s, ms.typeDict) + if err != nil { + return err + } + if err := ms.add(n); err != nil { + return err + } + } + return nil +} + +// GetModule returns the Entry of the module named by name. GetModule will +// search for and read the file named name + ".yang" if it cannot satisfy the +// request from what it has currently read. +// +// GetModule is a convenience function for calling Read and Process, and +// then looking up the module name. It is safe to call Read and Process prior +// to calling GetModule. +func (ms *Modules) GetModule(name string) (*Entry, []error) { + if ms.Modules[name] == nil { + if err := ms.Read(name); err != nil { + return nil, []error{err} + } + if ms.Modules[name] == nil { + return nil, []error{fmt.Errorf("module not found: %s", name)} + } + } + // Make sure that the modules have all been processed and have no + // errors. + if errs := ms.Process(); len(errs) != 0 { + return nil, errs + } + return ToEntry(ms.Modules[name]), nil +} + +// GetModule optionally reads in a set of YANG source files, named by sources, +// and then returns the Entry for the module named module. 
If sources is +// missing, or the named module is not yet known, GetModule searches for name +// with the suffix ".yang". GetModule either returns an Entry or returns +// one or more errors. +// +// GetModule is a convenience function for calling NewModules, Read, and Process, +// and then looking up the module name. +func GetModule(name string, sources ...string) (*Entry, []error) { + var errs []error + ms := NewModules() + for _, source := range sources { + if err := ms.Read(source); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return nil, errs + } + return ms.GetModule(name) +} + +// add adds Node n to ms. n must be assignable to *Module (i.e., it is a +// "module" or "submodule"). An error is returned if n is a duplicate of +// a name already added, or n is not assignable to *Module. +func (ms *Modules) add(n Node) error { + var m map[string]*Module + + name := n.NName() + kind := n.Kind() + switch kind { + case "module": + m = ms.Modules + case "submodule": + m = ms.SubModules + default: + return fmt.Errorf("not a module or submodule: %s is of type %s", name, kind) + } + + mod := n.(*Module) + fullName := mod.FullName() + mod.Modules = ms + + if o := m[fullName]; o != nil { + return fmt.Errorf("duplicate %s %s at %s and %s", kind, fullName, Source(o), Source(n)) + } + m[fullName] = mod + if fullName == name { + return nil + } + + // Add us to the map if: + // name has not been added before + // fullname is a more recent version of the entry. + if o := m[name]; o == nil || o.FullName() < fullName { + m[name] = mod + } + return nil +} + +// FindModule returns the Module/Submodule specified by n, which must be a +// *Include or *Import. If n is a *Include then a submodule is returned. If n +// is a *Import then a module is returned. 
+func (ms *Modules) FindModule(n Node) *Module { + name := n.NName() + rev := name + var m map[string]*Module + + switch i := n.(type) { + case *Include: + m = ms.SubModules + if i.RevisionDate != nil { + rev = name + "@" + i.RevisionDate.Name + } + // TODO(borman): we should check the BelongsTo field below? + case *Import: + m = ms.Modules + if i.RevisionDate != nil { + rev = name + "@" + i.RevisionDate.Name + } + default: + return nil + } + if n := m[rev]; n != nil { + return n + } + if n := m[name]; n != nil { + return n + } + + // Try to read first a module by revision + if err := ms.Read(rev); err != nil { + // if failed, try to read a module by its bare name + if err := ms.Read(name); err != nil { + return nil + } + } + if n := m[rev]; n != nil { + return n + } + return m[name] +} + +// FindModuleByNamespace either returns the Module specified by the namespace +// or returns an error. +func (ms *Modules) FindModuleByNamespace(ns string) (*Module, error) { + // Protect the byNS map from concurrent accesses + ms.nsMu.Lock() + defer ms.nsMu.Unlock() + + if m, ok := ms.byNS[ns]; ok { + return m, nil + } + var found *Module + for _, m := range ms.Modules { + if m.Namespace.Name == ns { + switch { + case m == found: + case found != nil: + return nil, fmt.Errorf("namespace %s matches two or more modules (%s, %s)", + ns, found.Name, m.Name) + default: + found = m + } + } + } + if found == nil { + return nil, fmt.Errorf("%q: no such namespace", ns) + } + // Don't cache negative results because new modules could be added. + ms.byNS[ns] = found + return found, nil +} + +// process satisfies all include and import statements and verifies that all +// link ref paths reference a known node. If an import or include references +// a [sub]module that is not already known, Process will search for a .yang +// file that contains it, returning an error if not found. An error is also +// returned if there is an unknown link ref path or other parsing errors. 
+// +// Process must be called once all the source modules have been read in and +// prior to converting Node tree into an Entry tree. +func (ms *Modules) process() []error { + var mods []*Module + var errs []error + + // Collect the list of modules we know about now so when we range + // below we don't pick up new modules. We assume the user tells + // us explicitly which modules they are interested in. + for _, m := range ms.Modules { + mods = append(mods, m) + } + for _, m := range mods { + if err := ms.include(m); err != nil { + errs = append(errs, err) + } + } + + // Resolve identities before resolving typedefs, otherwise when we resolve a + // typedef that has an identityref within it, then the identity dictionary + // has not yet been built. + errs = append(errs, ms.resolveIdentities()...) + // Append any errors found trying to resolve typedefs + errs = append(errs, ms.typeDict.resolveTypedefs()...) + + return errs +} + +// Process processes all the modules and submodules that have been read into +// ms. While processing, if an include or import is found for which there +// is no matching module, Process attempts to locate the source file (using +// Path) and automatically load them. If a file cannot be found then an +// error is returned. When looking for a source file, Process searches for a +// file using the module's or submodule's name with ".yang" appended. After +// searching the current directory, the directories in Path are searched. +// +// Process builds Entry trees for each modules and submodules in ms. These +// trees are accessed using the ToEntry function. Process does augmentation +// on Entry trees once all the modules and submodules in ms have been built. +// Following augmentation, Process inserts implied case statements. I.e., +// +// choice interface-type { +// container ethernet { ... } +// } +// +// has a case statement inserted to become: +// +// choice interface-type { +// case ethernet { +// container ethernet { ... 
} +// } +// } +// +// Process may return multiple errors if multiple errors were encountered +// while processing. Even though multiple errors may be returned, this does +// not mean these are all the errors. Process will terminate processing early +// based on the type and location of the error. +func (ms *Modules) Process() []error { + // Reset globals that may remain stale if multiple Process() calls are + // made by the same caller. + ms.mergedSubmodule = map[string]bool{} + ms.ClearEntryCache() + + errs := ms.process() + if len(errs) > 0 { + return errorSort(errs) + } + + for _, m := range ms.Modules { + errs = append(errs, ToEntry(m).GetErrors()...) + } + for _, m := range ms.SubModules { + errs = append(errs, ToEntry(m).GetErrors()...) + } + + if len(errs) > 0 { + return errorSort(errs) + } + + // Now handle all the augments. We don't have a good way to know + // what order to process them in, so repeat until no progress is made + + mods := make([]*Module, 0, len(ms.Modules)+len(ms.SubModules)) + for _, m := range ms.Modules { + mods = append(mods, m) + } + for _, m := range ms.SubModules { + mods = append(mods, m) + } + for len(mods) > 0 { + var processed int + for i := 0; i < len(mods); { + m := mods[i] + p, s := ToEntry(m).Augment(false) + processed += p + if s == 0 { + mods[i] = mods[len(mods)-1] + mods = mods[:len(mods)-1] + continue + } + i++ + } + if processed == 0 { + break + } + } + + // Now fix up all the choice statements to add in the missing case + // statements. + for _, m := range ms.Modules { + ToEntry(m).FixChoice() + } + for _, m := range ms.SubModules { + ToEntry(m).FixChoice() + } + + // Go through any modules that have remaining augments and collect + // the errors. + for _, m := range mods { + ToEntry(m).Augment(true) + errs = append(errs, ToEntry(m).GetErrors()...) 
+ } + + // The deviation statement is only valid under a module or submodule, + // which allows us to avoid having to process it within ToEntry, and + // rather we can just walk all modules and submodules *after* entries + // are resolved. This means we do not need to concern ourselves that + // an entry does not exist. + dvP := map[string]bool{} // cache the modules we've handled since we have both modname and modname@revision-date + for _, devmods := range []map[string]*Module{ms.Modules, ms.SubModules} { + for _, m := range devmods { + e := ToEntry(m) + if !dvP[e.Name] { + errs = append(errs, e.ApplyDeviate(ms.ParseOptions.DeviateOptions)...) + dvP[e.Name] = true + } + } + } + + return errorSort(errs) +} + +// include resolves all the include and import statements for m. It returns +// an error if m, or recursively, any of the modules it includes or imports, +// reference a module that cannot be found. +func (ms *Modules) include(m *Module) error { + if ms.includes[m] { + return nil + } + ms.includes[m] = true + + // First process any includes in this module. + for _, i := range m.Include { + im := ms.FindModule(i) + if im == nil { + return fmt.Errorf("no such submodule: %s", i.Name) + } + // Process the include statements in our included module. + if err := ms.include(im); err != nil { + return err + } + i.Module = im + } + + // Next process any imports in this module. Imports are used + // when searching. + for _, i := range m.Import { + im := ms.FindModule(i) + if im == nil { + return fmt.Errorf("no such module: %s", i.Name) + } + // Process the include statements in our included module. 
+ if err := ms.include(im); err != nil { + return err + } + + i.Module = im + } + return nil +} + +func (ms *Modules) getEntryCache(n Node) *Entry { + ms.entryCacheMu.RLock() + defer ms.entryCacheMu.RUnlock() + return ms.entryCache[n] +} + +func (ms *Modules) setEntryCache(n Node, e *Entry) { + ms.entryCacheMu.Lock() + defer ms.entryCacheMu.Unlock() + ms.entryCache[n] = e +} + +// ClearEntryCache clears the entryCache containing previously converted nodes +// used by the ToEntry function. +func (ms *Modules) ClearEntryCache() { + ms.entryCacheMu.Lock() + defer ms.entryCacheMu.Unlock() + ms.entryCache = map[Node]*Entry{} +} diff --git a/src/webui/internal/goyang/pkg/yang/modules_test.go b/src/webui/internal/goyang/pkg/yang/modules_test.go new file mode 100644 index 000000000..b0a260c42 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/modules_test.go @@ -0,0 +1,414 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yang + +import ( + "strings" + "testing" + + "github.com/openconfig/gnmi/errdiff" +) + +var testdataFindModulesText = map[string]string{ + "foo": `module foo { prefix "foo"; namespace "urn:foo"; }`, + "bar": `module bar { prefix "bar"; namespace "urn:bar"; }`, + "baz": `module baz { prefix "baz"; namespace "urn:baz"; }`, + "dup-pre-one": `module dup-pre-one { prefix duplicate; namespace urn:duplicate:one; }`, + "dup-pre-two": `module dup-pre-two { prefix duplicate; namespace urn:duplicate:two; }`, + "dup-ns-one": `module dup-ns-one { prefix ns-one; namespace urn:duplicate; }`, + "dup-ns-two": `module dup-ns-two { prefix ns-two; namespace urn:duplicate; }`, +} + +func TestDupModule(t *testing.T) { + tests := []struct { + desc string + inModules map[string]string + wantErr bool + }{{ + desc: "two modules with the same name", + inModules: map[string]string{ + "foo": `module foo { prefix "foo"; namespace "urn:foo"; }`, + "bar": `module foo { prefix "foo"; namespace "urn:foo"; }`, + }, + wantErr: true, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + var err error + for name, modtext := range tt.inModules { + if err = ms.Parse(modtext, name+".yang"); err != nil { + break + } + } + if gotErr := err != nil; gotErr != tt.wantErr { + t.Fatalf("wantErr: %v, got error: %v", tt.wantErr, err) + } + }) + } +} + +func testModulesForTestdataModulesText(t *testing.T) *Modules { + ms := NewModules() + for name, modtext := range testdataFindModulesText { + if err := ms.Parse(modtext, name+".yang"); err != nil { + t.Fatalf("error importing testdataFindModulesText[%q]: %v", name, err) + } + } + if errs := ms.Process(); errs != nil { + for _, err := range errs { + t.Errorf("error: %v", err) + } + t.Fatalf("fatal error(s) calling Process()") + } + return ms +} + +func testModulesFindByCommonHandler(t *testing.T, i int, got, want *Module, wantError string, err error) { + if err != nil { + if wantError != "" { + if 
!strings.Contains(err.Error(), wantError) { + t.Errorf("[%d] want error containing %q, got %q", + i, wantError, err.Error()) + } + } else { + t.Errorf("[%d] unexpected error: %v", i, err) + } + } else if wantError != "" { + t.Errorf("[%d] want error containing %q, got nil", i, wantError) + } else if want != got { + t.Errorf("[%d] want module %#v, got %#v", i, want, got) + } +} + +func TestModulesFindByNamespace(t *testing.T) { + ms := testModulesForTestdataModulesText(t) + + for i, tc := range []struct { + namespace string + want *Module + wantError string + }{ + { + namespace: "does-not-exist", + wantError: `"does-not-exist": no such namespace`, + }, + { + namespace: "urn:foo", + want: ms.Modules["foo"], + }, + { + namespace: "urn:bar", + want: ms.Modules["bar"], + }, + { + namespace: "urn:baz", + want: ms.Modules["baz"], + }, + { + namespace: "urn:duplicate", + wantError: "namespace urn:duplicate matches two or more modules (dup-ns-", + }, + } { + got, err := ms.FindModuleByNamespace(tc.namespace) + testModulesFindByCommonHandler(t, i, got, tc.want, tc.wantError, err) + } +} + +func TestModuleLinkage(t *testing.T) { + tests := []struct { + desc string + inMods map[string]string + wantErrSubstr string + }{{ + desc: "invalid import", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + import sys { prefix sys; } + + revision 01-01-01 { description "the start of time"; } + + deviation /sys:sys/sys:hostname { + deviate not-supported; + } + }`, + }, + wantErrSubstr: "no such module", + }, { + desc: "valid include", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + + revision 01-01-01 { description "the start of time"; } + + container sys { leaf hostname { type string; } } + }`, + }, + }, { + desc: "invalid include", + inMods: map[string]string{ + 
"dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + }`, + "sysdb": ` + submodule sysdb { + belongs-to dev { + prefix "d"; + } + + revision 01-01-01 { description "the start of time"; } + + container sys { leaf hostname { type string; } } + }`, + }, + wantErrSubstr: "no such submodule", + }, { + desc: "valid include in submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + include sysdb; + + revision 01-01-01 { description "the start of time"; } + + container sys { leaf hostname { type string; } } + }`, + "sysdb": ` + submodule sysdb { + belongs-to dev { + prefix "d"; + } + + revision 01-01-01 { description "the start of time"; } + + container sysdb { leaf hostname { type string; } } + }`, + }, + }, { + desc: "invalid include in submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + include sysdb; + + revision 01-01-01 { description "the start of time"; } + + container sys { leaf hostname { type string; } } + }`, + "syyysdb": ` + submodule syyysdb { + belongs-to dev { + prefix "d"; + } + + revision 01-01-01 { description "the start of time"; } + + container sysdb { leaf hostname { type string; } } + }`, + }, + wantErrSubstr: "no such submodule", + }, { + desc: "valid import in submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + import sysdb { + prefix "sd"; + } + + revision 01-01-01 { description "the start of 
time"; } + + container sys { leaf hostname { type string; } } + }`, + "sysdb": ` + module sysdb { + prefix sd; + namespace "urn:sd"; + + revision 01-01-01 { description "the start of time"; } + + container sysdb { leaf hostname { type string; } } + }`, + }, + }, { + desc: "invalid import in submodule", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + include sys; + + revision 01-01-01 { description "the start of time"; } + }`, + "sys": ` + submodule sys { + belongs-to dev { + prefix "d"; + } + import sysdb { + prefix "sd"; + } + + revision 01-01-01 { description "the start of time"; } + + container sys { leaf hostname { type string; } } + }`, + "syyysdb": ` + module syyysdb { + prefix sd; + namespace "urn:sd"; + + revision 01-01-01 { description "the start of time"; } + + container sysdb { leaf hostname { type string; } } + }`, + }, + wantErrSubstr: "no such module", + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + + for n, m := range tt.inMods { + if err := ms.Parse(m, n); err != nil { + t.Fatalf("cannot parse module %s, err: %v", n, err) + } + } + + errs := ms.Process() + var err error + switch len(errs) { + case 1: + err = errs[0] + fallthrough + case 0: + if diff := errdiff.Substring(err, tt.wantErrSubstr); diff != "" { + t.Fatalf("%s", diff) + } + default: + t.Fatalf("got multiple errors: %v", errs) + } + }) + } +} + +func TestModulesTotalProcess(t *testing.T) { + tests := []struct { + desc string + inMods map[string]string + wantErr bool + }{{ + desc: "import with deviation", + inMods: map[string]string{ + "dev": ` + module dev { + prefix d; + namespace "urn:d"; + import sys { prefix sys; } + + revision 01-01-01 { description "the start of time"; } + + deviation /sys:sys/sys:hostname { + deviate not-supported; + } + }`, + "sys": ` + module sys { + prefix s; + namespace "urn:s"; + + revision 01-01-01 { description "the start of time"; } + + container sys { leaf hostname { 
type string; } } + }`, + }, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + + for n, m := range tt.inMods { + if err := ms.Parse(m, n); err != nil { + t.Fatalf("cannot parse module %s, err: %v", n, err) + } + } + + errs := ms.Process() + switch { + case len(errs) == 0 && tt.wantErr: + t.Fatalf("did not get expected errors, got: %v, wantErr: %v", errs, tt.wantErr) + case len(errs) != 0 && !tt.wantErr: + t.Fatalf("got unexpected errors, got: %v, wantErr: %v", errs, tt.wantErr) + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yang/node.go b/src/webui/internal/goyang/pkg/yang/node.go new file mode 100644 index 000000000..ee52efeb8 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/node.go @@ -0,0 +1,388 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + + "github.com/openconfig/goyang/pkg/indent" +) + +// A Node contains a yang statement and all attributes and sub-statements. +// Only pointers to structures should implement Node. +type Node interface { + // Kind returns the kind of yang statement (the keyword). + Kind() string + // NName returns the node's name (the argument) + NName() string + // Statement returns the original Statement of this Node. + Statement() *Statement + // ParentNode returns the parent of this Node, or nil if the + // Node has no parent. 
+ ParentNode() Node + // Exts returns the list of extension statements found. + Exts() []*Statement +} + +// A Typedefer is a Node that defines typedefs. +type Typedefer interface { + Node + Typedefs() []*Typedef +} + +// An ErrorNode is a node that only contains an error. +type ErrorNode struct { + Parent Node `yang:"Parent,nomerge"` + + Error error +} + +func (ErrorNode) Kind() string { return "error" } +func (s *ErrorNode) ParentNode() Node { return s.Parent } +func (s *ErrorNode) NName() string { return "error" } +func (s *ErrorNode) Statement() *Statement { return &Statement{} } +func (s *ErrorNode) Exts() []*Statement { return nil } + +// isRPCNode is a terrible hack to return back that a path points into +// an RPC and we should ignore it. +var isRPCNode = &ErrorNode{Error: errors.New("rpc is unsupported")} + +// Source returns the location of the source where n was defined. +func Source(n Node) string { + if n != nil && n.Statement() != nil { + return n.Statement().Location() + } + return "unknown" +} + +// getPrefix returns the prefix and base name of s. If s has no prefix +// then the returned prefix is "". +func getPrefix(s string) (string, string) { + f := strings.SplitN(s, ":", 2) + if len(f) == 1 { + return "", s + } + return f[0], f[1] +} + +// Prefix notes for types: +// +// If there is prefix, look in nodes ancestors. +// +// If prefix matches the module's prefix statement, look in nodes ancestors. +// +// If prefix matches the submodule's belongs-t statement, look in nodes +// ancestors. +// +// Finally, look in the module imported with prefix. + +// FindModuleByPrefix finds the module or submodule with the provided prefix +// relative to where n was defined. If the prefix cannot be resolved then nil +// is returned. 
+func FindModuleByPrefix(n Node, prefix string) *Module { + if n == nil { + return nil + } + mod := RootNode(n) + + if prefix == "" || prefix == mod.GetPrefix() { + return mod + } + + for _, i := range mod.Import { + if prefix == i.Prefix.Name { + return mod.Modules.FindModule(i) + } + } + return nil +} + +// MatchingExtensions returns the subset of the given node's extensions +// that match the given module and identifier. +func MatchingExtensions(n Node, module, identifier string) ([]*Statement, error) { + return matchingExtensions(n, n.Exts(), module, identifier) +} + +// MatchingEntryExtensions returns the subset of the given entry's extensions +// that match the given module and identifier. +func MatchingEntryExtensions(e *Entry, module, identifier string) ([]*Statement, error) { + return matchingExtensions(e.Node, e.Exts, module, identifier) +} + +// matchingEntryExtensions returns the subset of the given node's extensions +// that match the given module and identifier. +func matchingExtensions(n Node, exts []*Statement, module, identifier string) ([]*Statement, error) { + var matchingExtensions []*Statement + for _, ext := range exts { + names := strings.SplitN(ext.Keyword, ":", 2) + mod := FindModuleByPrefix(n, names[0]) + if mod == nil { + return nil, fmt.Errorf("matchingExtensions: module prefix %q not found", names[0]) + } + if len(names) == 2 && names[1] == identifier && mod.Name == module { + matchingExtensions = append(matchingExtensions, ext) + } + } + return matchingExtensions, nil +} + +// RootNode returns the submodule or module that n was defined in. +func RootNode(n Node) *Module { + for ; n.ParentNode() != nil; n = n.ParentNode() { + } + if mod, ok := n.(*Module); ok { + return mod + } + return nil +} + +// module returns the Module to which n belongs. If n resides in a submodule, +// the belonging module will be returned. +// If n is nil or a module could not be find, nil is returned. 
+func module(n Node) *Module { + m := RootNode(n) + if m.Kind() == "submodule" { + m = m.Modules.Modules[m.BelongsTo.Name] + } + return m +} + +// NodePath returns the full path of the node from the module name. +func NodePath(n Node) string { + var path string + for n != nil { + path = "/" + n.NName() + path + n = n.ParentNode() + } + return path +} + +// FindNode finds the node referenced by path relative to n. If path does not +// reference a node then nil is returned (i.e. path not found). The path looks +// similar to an XPath but currently has no wildcarding. For example: +// "/if:interfaces/if:interface" and "../config". +func FindNode(n Node, path string) (Node, error) { + if path == "" { + return n, nil + } + // / is not a valid path, it needs a module name + if path == "/" { + return nil, fmt.Errorf("invalid path %q", path) + } + // Paths do not end in /'s + if path[len(path)-1] == '/' { + return nil, fmt.Errorf("invalid path %q", path) + } + + parts := strings.Split(path, "/") + + // An absolute path has a leading component of "". + // We need to discover which module they are part of + // based on our imports. + if parts[0] == "" { + parts = parts[1:] + + // TODO(borman): merge this with FindModuleByPrefix? + // The base is always a module + mod := RootNode(n) + n = mod + prefix, _ := getPrefix(parts[0]) + if mod.Kind() == "submodule" { + m := mod.Modules.Modules[mod.BelongsTo.Name] + if m == nil { + return nil, fmt.Errorf("%s: unknown module %s", m.Name, mod.BelongsTo.Name) + } + if prefix == "" || prefix == mod.BelongsTo.Prefix.Name { + goto processing + } + mod = m + } + + if prefix == "" || prefix == mod.Prefix.Name { + goto processing + } + + for _, i := range mod.Import { + if prefix == i.Prefix.Name { + n = i.Module + goto processing + } + } + // We didn't find a matching prefix. 
+ return nil, fmt.Errorf("unknown prefix: %q", prefix) + processing: + // At this point, n should be pointing to the Module node + // of module we are rooted in + } + + for _, part := range parts { + // If we encounter an RPC node in our search then we + // return the magic isRPCNode Node which just contains + // an error that it is an RPC node. isRPCNode is a singleton + // and can be checked against. + if n.Kind() == "rpc" { + return isRPCNode, nil + } + if part == ".." { + Loop: + for { + n = n.ParentNode() + if n == nil { + return nil, fmt.Errorf(".. with no parent") + } + // choice, leaf, and case nodes + // are "invisible" when doing ".." + // up the tree. + switch n.Kind() { + case "choice", "leaf", "case": + default: + break Loop + } + } + continue + } + // For now just strip off any prefix + // TODO(borman): fix this + _, spart := getPrefix(part) + n = ChildNode(n, spart) + if n == nil { + return nil, fmt.Errorf("%s: no such element", part) + } + } + return n, nil +} + +// ChildNode finds n's child node named name. It returns nil if the node +// could not be found. ChildNode looks at every direct Node pointer in +// n as well as every node in all slices of Node pointers. Names must +// be non-ambiguous, otherwise ChildNode has a non-deterministic result. 
+func ChildNode(n Node, name string) Node { + v := reflect.ValueOf(n).Elem() + t := v.Type() + nf := t.NumField() + +Loop: + for i := 0; i < nf; i++ { + ft := t.Field(i) + yang := ft.Tag.Get("yang") + if yang == "" { + continue + } + parts := strings.Split(yang, ",") + for _, p := range parts[1:] { + if p == "nomerge" { + continue Loop + } + } + + f := v.Field(i) + if !f.IsValid() || f.IsNil() { + continue + } + + check := func(n Node) Node { + if n.NName() == name { + return n + } + return nil + } + if parts[0] == "uses" { + check = func(n Node) Node { + uname := n.NName() + // unrooted uses are rooted at root + if !strings.HasPrefix(uname, "/") { + uname = "/" + uname + } + if n, _ = FindNode(n, uname); n != nil { + return ChildNode(n, name) + } + return nil + } + } + + switch ft.Type.Kind() { + case reflect.Ptr: + if n = check(f.Interface().(Node)); n != nil { + return n + } + case reflect.Slice: + sl := f.Len() + for i := 0; i < sl; i++ { + n = f.Index(i).Interface().(Node) + if n = check(n); n != nil { + return n + } + } + } + } + return nil +} + +// PrintNode prints node n to w, recursively. +// TODO(borman): display more information +func PrintNode(w io.Writer, n Node) { + v := reflect.ValueOf(n).Elem() + t := v.Type() + nf := t.NumField() + fmt.Fprintf(w, "%s [%s]\n", n.NName(), n.Kind()) +Loop: + for i := 0; i < nf; i++ { + ft := t.Field(i) + yang := ft.Tag.Get("yang") + if yang == "" { + continue + } + parts := strings.Split(yang, ",") + for _, p := range parts[1:] { + if p == "nomerge" { + continue Loop + } + } + + // Skip uppercase elements. 
+ if parts[0][0] >= 'A' && parts[0][0] <= 'Z' { + continue + } + + f := v.Field(i) + if !f.IsValid() || f.IsNil() { + continue + } + + switch ft.Type.Kind() { + case reflect.Ptr: + n = f.Interface().(Node) + if v, ok := n.(*Value); ok { + fmt.Fprintf(w, "%s = %s\n", ft.Name, v.Name) + } else { + PrintNode(indent.NewWriter(w, " "), n) + } + case reflect.Slice: + sl := f.Len() + for i := 0; i < sl; i++ { + n = f.Index(i).Interface().(Node) + if v, ok := n.(*Value); ok { + fmt.Fprintf(w, "%s[%d] = %s\n", ft.Name, i, v.Name) + } else { + PrintNode(indent.NewWriter(w, " "), n) + } + } + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/node_test.go b/src/webui/internal/goyang/pkg/yang/node_test.go new file mode 100644 index 000000000..5fdac6283 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/node_test.go @@ -0,0 +1,622 @@ +// Copyright 2019 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yang + +import ( + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/openconfig/gnmi/errdiff" +) + +func TestNodePath(t *testing.T) { + tests := []struct { + desc string + in Node + want string + }{{ + desc: "basic", + in: &Leaf{ + Name: "bar", + Parent: &Container{ + Name: "c", + Parent: &List{ + Name: "b", + Parent: &Module{ + Name: "foo", + }, + }, + }, + }, + want: "/foo/b/c/bar", + }, { + desc: "nil input node", + in: nil, + want: "", + }, { + desc: "single node", + in: &Module{ + Name: "foo", + }, + want: "/foo", + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if diff := cmp.Diff(NodePath(tt.in), tt.want); diff != "" { + t.Errorf("(-got, +want):\n%s", diff) + } + }) + } +} + +// TestNode provides a framework for processing tests that can check particular +// nodes being added to the grammar. It can be used to ensure that particular +// statement combinations are supported, especially where they are opaque to +// the YANG library. 
+func TestNode(t *testing.T) { + tests := []struct { + desc string + inFn func(*Modules) (Node, error) + inModules map[string]string + wantNode func(Node) error + wantErrSubstr string + }{{ + desc: "import reference statement", + inFn: func(ms *Modules) (Node, error) { + + const module = "test" + m, ok := ms.Modules[module] + if !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + if len(m.Import) == 0 { + return nil, fmt.Errorf("node %v is missing imports", m) + } + + return m.Import[0], nil + }, + inModules: map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + import foo { + prefix "f"; + reference "bar"; + } + } + `, + "foo": ` + module foo { + prefix "f"; + namespace "urn:f"; + } + `, + }, + wantNode: func(n Node) error { + is, ok := n.(*Import) + if !ok { + return fmt.Errorf("got node: %v, want type: import", n) + } + + switch { + case is.Reference == nil: + return errors.New("did not get expected reference, got: nil, want: *yang.Statement") + case is.Reference.Statement().Argument != "bar": + return fmt.Errorf("did not get expected reference, got: %v, want: 'bar'", is.Reference.Statement()) + } + + return nil + }, + }, { + desc: "get submodule from prefix in submodule", + inFn: func(ms *Modules) (Node, error) { + + m, ok := ms.SubModules["foo"] + if !ok { + return nil, fmt.Errorf("can't find submodule in %v", ms) + } + + if m.BelongsTo == nil { + return nil, fmt.Errorf("node %v is missing belongs-to", m) + } + + return m.BelongsTo, nil + }, + inModules: map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + include foo { + revision-date 2008-01-01; + } + } + `, + "foo": ` + submodule foo { + belongs-to test { + prefix "t"; + } + } + `, + }, + wantNode: func(n Node) error { + is, ok := n.(*BelongsTo) + if !ok { + return fmt.Errorf("got node: %v, want type: belongs-to", n) + } + + switch { + case is.Prefix == nil: + return errors.New("did not get expected reference, got: nil, 
want: *yang.Statement") + case is.Prefix.Statement().Argument != "t": + return fmt.Errorf("did not get expected reference, got: %v, want: 't'", is.Prefix.Statement()) + } + + m := FindModuleByPrefix(is, is.Prefix.Statement().Argument) + if m == nil { + return fmt.Errorf("can't find module from submodule's belongs-to prefix value") + } + if want := "foo"; m.Name != want { + return fmt.Errorf("module from submodule's belongs-to prefix value doesn't match, got %q, want %q", m.Name, want) + } + + return nil + }, + }, { + desc: "import statement from submodule", + inFn: func(ms *Modules) (Node, error) { + + m, ok := ms.SubModules["foo"] + if !ok { + return nil, fmt.Errorf("can't find submodule in %v", ms) + } + + if len(m.Import) == 0 { + return nil, fmt.Errorf("node %v is missing import statement", m) + } + + return m.Import[0], nil + }, + inModules: map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + include foo { + revision-date 2008-01-01; + } + + typedef t { + type string; + } + } + `, + "foo": ` + submodule foo { + belongs-to test { + prefix "t"; + } + + import test2 { + prefix "t2"; + description "test2 module"; + } + } + `, + "test2": ` + module test2 { + prefix "t2"; + namespace "urn:t2"; + } + `, + }, + wantNode: func(n Node) error { + is, ok := n.(*Import) + if !ok { + return fmt.Errorf("got node: %v, want type: belongs-to", n) + } + + switch { + case is.Prefix == nil: + return errors.New("did not get expected reference, got: nil, want: *yang.Statement") + case is.Prefix.Statement().Argument != "t2": + return fmt.Errorf("did not get expected reference, got: %v, want: 't'", is.Prefix.Statement()) + } + + m := FindModuleByPrefix(is, is.Prefix.Statement().Argument) + if m == nil { + return fmt.Errorf("can't find module from submodule's import prefix value") + } + if want := "test2"; m.Name != want { + return fmt.Errorf("module from submodule's import prefix value doesn't match, got %q, want %q", m.Name, want) + } + + return nil 
+ }, + }, { + desc: "import description statement", + inFn: func(ms *Modules) (Node, error) { + + const module = "test" + m, ok := ms.Modules[module] + if !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + if len(m.Import) == 0 { + return nil, fmt.Errorf("node %v is missing imports", m) + } + + return m.Import[0], nil + }, + inModules: map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + import foo { + prefix "f"; + description "foo module"; + } + } + `, + "foo": ` + module foo { + prefix "f"; + namespace "urn:f"; + } + `, + }, + wantNode: func(n Node) error { + is, ok := n.(*Import) + if !ok { + return fmt.Errorf("got node: %v, want type: import", n) + } + + switch { + case is.Description == nil: + return errors.New("did not get expected reference, got: nil, want: *yang.Statement") + case is.Description.Statement().Argument != "foo module": + return fmt.Errorf("did not get expected reference, got: '%v', want: 'foo module'", is.Description.Statement().Argument) + } + + return nil + }, + }, { + desc: "Test matchingExtensions", + inFn: func(ms *Modules) (Node, error) { + + module := "test" + m, ok := ms.Modules[module] + if !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + if len(m.Leaf) == 0 { + return nil, fmt.Errorf("node %v is missing imports", m) + } + + module = "foo" + if _, ok := ms.Modules[module]; !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + return m.Leaf[0].Type, nil + }, + inModules: map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + import foo { + prefix "f"; + description "foo module"; + } + + import foo2 { + prefix "f2"; + description "foo2 module"; + } + + leaf test-leaf { + type string { + pattern 'alpha'; + // Test different modules and different ext names. 
+ f:bar 'boo'; + f2:bar 'boo2'; + + f:bar 'coo'; + f2:bar 'coo2'; + + f:far 'doo'; + f2:far 'doo2'; + + f:bar 'foo'; + f2:bar 'foo2'; + + f:far 'goo'; + f2:far 'goo2'; + } + } + } + `, + "foo": ` + module foo { + prefix "f"; + namespace "urn:f"; + + extension bar { + argument "baz"; + } + + extension far { + argument "baz"; + } + } + `, + "foo2": ` + module foo2 { + prefix "f2"; + namespace "urn:f2"; + + extension bar { + argument "baz"; + } + + extension far { + argument "baz"; + } + } + `, + }, + wantNode: func(n Node) error { + n, ok := n.(*Type) + if !ok { + return fmt.Errorf("got node: %v, want type: Leaf", n) + } + + var bars []string + matches, err := matchingExtensions(n, n.Exts(), "foo", "bar") + if err != nil { + return err + } + for _, ext := range matches { + bars = append(bars, ext.Argument) + } + + if diff := cmp.Diff(bars, []string{"boo", "coo", "foo"}); diff != "" { + return fmt.Errorf("matchingExtensions (-got, +want):\n%s", diff) + } + + return nil + }, + }, { + desc: "Test matchingExtensions when module is not found", + inFn: func(ms *Modules) (Node, error) { + + module := "test" + m, ok := ms.Modules[module] + if !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + if len(m.Leaf) == 0 { + return nil, fmt.Errorf("node %v is missing imports", m) + } + + module = "foo" + if _, ok := ms.Modules[module]; !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + return m.Leaf[0].Type, nil + }, + inModules: map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + import foo { + prefix "f"; + description "foo module"; + } + + leaf test-leaf { + type string { + pattern 'alpha'; + not-found:bar 'foo'; + } + } + } + `, + "foo": ` + module foo { + prefix "f"; + namespace "urn:f"; + + extension bar { + argument "baz"; + } + + extension far { + argument "baz"; + } + } + `, + }, + wantNode: func(n Node) error { + n, ok := n.(*Type) + if !ok { + return fmt.Errorf("got node: %v, want type: Leaf", n) + } 
+ + var bars []string + matches, err := matchingExtensions(n, n.Exts(), "foo", "bar") + if err != nil { + return err + } + for _, ext := range matches { + bars = append(bars, ext.Argument) + } + + if diff := cmp.Diff(bars, []string{"boo", "coo", "foo"}); diff != "" { + return fmt.Errorf("matchingExtensions (-got, +want):\n%s", diff) + } + + return nil + }, + wantErrSubstr: `module prefix "not-found" not found`, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + + for n, m := range tt.inModules { + if err := ms.Parse(m, n); err != nil { + t.Errorf("error parsing module %s, got: %v, want: nil", n, err) + } + } + + errs := ms.Process() + var err error + if len(errs) > 1 { + t.Fatalf("Got more than 1 error: %v", errs) + } else if len(errs) == 1 { + err = errs[0] + } + if diff := errdiff.Substring(err, tt.wantErrSubstr); diff != "" { + t.Errorf("Did not get expected error: %s", diff) + } + if err != nil { + return + } + + node, err := tt.inFn(ms) + if err != nil { + t.Fatalf("cannot run in function, %v", err) + } + + if err := tt.wantNode(node); err != nil { + t.Fatalf("failed check function, %v", err) + } + }) + } +} + +func TestModulesFindByPrefix(t *testing.T) { + // Some examples of where prefixes might be used are in the following + // YANG statements: extension, uses, augment, deviation, type, leafref. + // Not all are put into the test here, since the logic is the same for + // each. 
+ modules := map[string]string{ + "foo": `module foo { prefix "foo"; namespace "urn:foo"; include bar; leaf leafref { type leafref { path "../foo:leaf"; } } uses foo:lg; }`, + "bar": `submodule bar { belongs-to foo { prefix "bar"; } container c { uses bar:lg; } grouping lg { leaf leaf { type string; } } }`, + "baz": `module baz { prefix "foo"; namespace "urn:foo"; import foo { prefix f; } extension e; uses f:lg; foo:e; }`, + } + + ms := NewModules() + for name, modtext := range modules { + if err := ms.Parse(modtext, name+".yang"); err != nil { + t.Fatalf("error parsing module %q: %v", name, err) + } + } + if errs := ms.Process(); errs != nil { + for _, err := range errs { + t.Errorf("error: %v", err) + } + t.Fatalf("fatal error(s) calling Process()") + } + + for _, tt := range []struct { + desc string + node Node + prefix string + want *Module + }{ + { + desc: "nil node", + node: nil, + prefix: "does-not-exist", + want: nil, + }, + { + desc: "module foo", + node: ms.Modules["foo"], + prefix: "foo", + want: ms.Modules["foo"], + }, + { + desc: "submodule bar", + node: ms.SubModules["bar"], + prefix: "bar", + want: ms.SubModules["bar"], + }, + { + desc: "module baz", + node: ms.Modules["baz"], + prefix: "foo", + want: ms.Modules["baz"], + }, + { + desc: "foo leafref", + node: ms.Modules["foo"].Leaf[0].Type, + prefix: "foo", + want: ms.Modules["foo"], + }, + { + desc: "foo uses", + node: ms.Modules["foo"].Uses[0], + prefix: "foo", + want: ms.Modules["foo"], + }, + { + desc: "bar uses", + node: ms.SubModules["bar"].Container[0].Uses[0], + prefix: "bar", + want: ms.SubModules["bar"], + }, + { + desc: "baz uses", + node: ms.Modules["baz"].Uses[0], + prefix: "f", + want: ms.Modules["foo"], + }, + { + desc: "baz extension", + node: ms.Modules["baz"], + prefix: "foo", + want: ms.Modules["baz"], + }, + } { + t.Run(tt.desc, func(t *testing.T) { + if got := FindModuleByPrefix(tt.node, tt.prefix); got != tt.want { + t.Errorf("got: %v, want: %v", got, tt.want) + } + }) + } +} 
diff --git a/src/webui/internal/goyang/pkg/yang/options.go b/src/webui/internal/goyang/pkg/yang/options.go new file mode 100644 index 000000000..2de2ebd57 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/options.go @@ -0,0 +1,59 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// Options defines the options that should be used when parsing YANG modules, +// including specific overrides for potentially problematic YANG constructs. +type Options struct { + // IgnoreSubmoduleCircularDependencies specifies whether circular dependencies + // between submodules. Setting this value to true will ensure that this + // package will explicitly ignore the case where a submodule will include + // itself through a circular reference. + IgnoreSubmoduleCircularDependencies bool + // StoreUses controls whether the Uses field of each YANG entry should be + // populated. Setting this value to true will cause each Entry which is + // generated within the schema to store the logical grouping from which it + // is derived. + StoreUses bool + // DeviateOptions contains options for how deviations are handled. + DeviateOptions DeviateOptions +} + +// DeviateOptions contains options for how deviations are handled. +type DeviateOptions struct { + // IgnoreDeviateNotSupported indicates to the parser to retain nodes + // that are marked with "deviate not-supported". 
An example use case is + // where the user wants to interact with different targets that have + // different support for a leaf without having to use a second instance + // of an AST. + IgnoreDeviateNotSupported bool +} + +// IsDeviateOpt ensures that DeviateOptions satisfies the DeviateOpt interface. +func (DeviateOptions) IsDeviateOpt() {} + +// DeviateOpt is an interface that can be used in function arguments. +type DeviateOpt interface { + IsDeviateOpt() +} + +func hasIgnoreDeviateNotSupported(opts []DeviateOpt) bool { + for _, o := range opts { + if opt, ok := o.(DeviateOptions); ok { + return opt.IgnoreDeviateNotSupported + } + } + return false +} diff --git a/src/webui/internal/goyang/pkg/yang/parse.go b/src/webui/internal/goyang/pkg/yang/parse.go new file mode 100644 index 000000000..60c388b68 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/parse.go @@ -0,0 +1,338 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// This file implements Parse, which parses the input as generic YANG and +// returns a slice of base Statements (which in turn may contain more +// Statements, i.e., a slice of Statement trees.) + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" +) + +// a parser is used to parse the contents of a single .yang file. 
+type parser struct { + lex *lexer + errout *bytes.Buffer + tokens []*token // stack of pushed tokens (for backing up) + + // Depth of statements in nested braces + statementDepth int + + // hitBrace is returned when we encounter a '}'. The statement location + // is updated with the location of the '}'. The brace may be legitimate + // but only the caller will know if it is. That is, the brace may be + // closing our parent or may be an error (we didn't expect it). + // hitBrace is updated with the file, line, and column of the brace's + // location. + hitBrace *Statement +} + +// Statement is a generic YANG statement that may have sub-statements. +// It implements the Node interface. +// +// Within the parser, it represents a non-terminal token. +// From https://tools.ietf.org/html/rfc7950#section-6.3: +// statement = keyword [argument] (";" / "{" *statement "}") +// The argument is a string. +type Statement struct { + Keyword string + HasArgument bool + Argument string + statements []*Statement + + file string + line int // 1's based line number + col int // 1's based column number +} + +func (s *Statement) NName() string { return s.Argument } +func (s *Statement) Kind() string { return s.Keyword } +func (s *Statement) Statement() *Statement { return s } +func (s *Statement) ParentNode() Node { return nil } +func (s *Statement) Exts() []*Statement { return nil } + +// Arg returns the optional argument to s. It returns false if s has no +// argument. +func (s *Statement) Arg() (string, bool) { return s.Argument, s.HasArgument } + +// SubStatements returns a slice of Statements found in s. +func (s *Statement) SubStatements() []*Statement { return s.statements } + +// Location returns the location in the source where s was defined. 
+func (s *Statement) Location() string { + switch { + case s.file == "" && s.line == 0: + return "unknown" + case s.file == "": + return fmt.Sprintf("line %d:%d", s.line, s.col) + case s.line == 0: + return s.file + default: + return fmt.Sprintf("%s:%d:%d", s.file, s.line, s.col) + } +} + +// Write writes the tree in s to w, each line indented by indent. Children +// nodes are indented further by a tab. Typically indent is "" at the top +// level. Write is intended to display the contents of Statement, but +// not necessarily reproduce the input of Statement. +func (s *Statement) Write(w io.Writer, indent string) error { + if s.Keyword == "" { + // We are just a collection of statements at the top level. + for _, s := range s.statements { + if err := s.Write(w, indent); err != nil { + return err + } + } + return nil + } + + parts := []string{fmt.Sprintf("%s%s", indent, s.Keyword)} + if s.HasArgument { + args := strings.Split(s.Argument, "\n") + if len(args) == 1 { + parts = append(parts, fmt.Sprintf(" %q", s.Argument)) + } else { + parts = append(parts, ` "`, args[0], "\n") + i := fmt.Sprintf("%*s", len(s.Keyword)+1, "") + for x, p := range args[1:] { + s := fmt.Sprintf("%q", p) + s = s[1 : len(s)-1] + parts = append(parts, indent, " ", i, s) + if x == len(args[1:])-1 { + // last part just needs the closing " + parts = append(parts, `"`) + } else { + parts = append(parts, "\n") + } + } + } + } + + if len(s.statements) == 0 { + _, err := fmt.Fprintf(w, "%s;\n", strings.Join(parts, "")) + return err + } + if _, err := fmt.Fprintf(w, "%s {\n", strings.Join(parts, "")); err != nil { + return err + } + for _, s := range s.statements { + if err := s.Write(w, indent+"\t"); err != nil { + return err + } + } + if _, err := fmt.Fprintf(w, "%s}\n", indent); err != nil { + return err + } + return nil +} + +// ignoreMe is an error recovery token used by the parser in order +// to continue processing for other errors in the file. 
+var ignoreMe = &Statement{} + +// Parse parses the input as generic YANG and returns the statements parsed. +// The path parameter should be the source name where input was read from (e.g., +// the file name the input was read from). If one or more errors are +// encountered, nil and an error are returned. The error's text includes all +// errors encountered. +func Parse(input, path string) ([]*Statement, error) { + var statements []*Statement + p := &parser{ + lex: newLexer(input, path), + errout: &bytes.Buffer{}, + hitBrace: &Statement{}, + } + p.lex.errout = p.errout +Loop: + for { + switch ns := p.nextStatement(); ns { + case nil: + break Loop + case p.hitBrace: + fmt.Fprintf(p.errout, "%s:%d:%d: unexpected %c\n", ns.file, ns.line, ns.col, '}') + default: + statements = append(statements, ns) + } + } + + p.checkStatementDepthIsZero() + + if p.errout.Len() == 0 { + return statements, nil + } + return nil, errors.New(strings.TrimSpace(p.errout.String())) +} + +// push pushes tokens t back on the input stream so they will be the next +// tokens returned by next. The tokens list is a LIFO so the final token +// listed to push will be the next token returned. +func (p *parser) push(t ...*token) { + p.tokens = append(p.tokens, t...) +} + +// pop returns the last token pushed, or nil if the token stack is empty. +func (p *parser) pop() *token { + if n := len(p.tokens); n > 0 { + n-- + defer func() { p.tokens = p.tokens[:n] }() + return p.tokens[n] + } + return nil +} + +// next returns the next token from the lexer. If the next token is a +// concatenated string, it returns the concatenated string as the token. +func (p *parser) next() *token { + if t := p.pop(); t != nil { + return t + } + // next returns the next unprocessed lexer token. + next := func() *token { + for { + if t := p.lex.NextToken(); t.Code() != tError { + return t + } + } + } + t := next() + if t.Code() != tString { + return t + } + // Process string concatenation (both single and double quote). 
+ // See https://tools.ietf.org/html/rfc7950#section-6.1.3.1 + // The lexer trimmed the quotes already. + for { + nt := next() + switch nt.Code() { + case tEOF: + return t + case tUnquoted: + if nt.Text != "+" { + p.push(nt) + return t + } + default: + p.push(nt) + return t + } + // Invariant: nt is a + sign. + nnt := next() + switch nnt.Code() { + case tEOF: + p.push(nt) + return t + case tString: + // Accumulate the concatenation. + t.Text += nnt.Text + default: + p.push(nnt, nt) + return t + } + } +} + +// nextStatement returns the next statement in the input, which may in turn +// recurse to read sub statements. +// nil is returned when EOF has been reached, or is reached halfway through +// parsing the next statement (with associated syntax errors printed to +// errout). +func (p *parser) nextStatement() *Statement { + t := p.next() + switch t.Code() { + case tEOF: + return nil + case '}': + p.statementDepth -= 1 + p.hitBrace.file = t.File + p.hitBrace.line = t.Line + p.hitBrace.col = t.Col + return p.hitBrace + case tUnquoted: + default: + fmt.Fprintf(p.errout, "%v: keyword token not an unquoted string\n", t) + return ignoreMe + } + // Invariant: t represents a keyword token. + + s := &Statement{ + Keyword: t.Text, + file: t.File, + line: t.Line, + col: t.Col, + } + + // The keyword "pattern" must be treated specially. When + // parsing the argument for "pattern", escape sequences + // must be expanded differently. + p.lex.inPattern = t.Text == "pattern" + t = p.next() + p.lex.inPattern = false + switch t.Code() { + case tString, tUnquoted: + s.HasArgument = true + s.Argument = t.Text + t = p.next() + } + + switch t.Code() { + case tEOF: + fmt.Fprintf(p.errout, "%s: unexpected EOF\n", s.file) + return nil + case ';': + return s + case '{': + p.statementDepth += 1 + for { + switch ns := p.nextStatement(); ns { + case nil: + // Signal EOF reached. 
+ return nil + case p.hitBrace: + return s + default: + s.statements = append(s.statements, ns) + } + } + default: + fmt.Fprintf(p.errout, "%v: syntax error, expected ';' or '{'\n", t) + return ignoreMe + } +} + +// checkStatementDepthIsZero checks that we aren't missing closing +// braces. Note: the parser will error out for the case where we +// start with an unmatched close brace, i.e. depth < 0 +// +// This test should only be done if there are no other errors as +// we may exit early due to those errors -- and therefore there *might* +// not really be a mismatched brace issue. +func (p *parser) checkStatementDepthIsZero() { + if p.errout.Len() > 0 || p.statementDepth == 0 { + return + } + + plural := "" + if p.statementDepth > 1 { + plural = "s" + } + fmt.Fprintf(p.errout, "%s:%d:%d: missing %d closing brace%s\n", + p.lex.file, p.lex.line, p.lex.col, p.statementDepth, plural) +} diff --git a/src/webui/internal/goyang/pkg/yang/parse_test.go b/src/webui/internal/goyang/pkg/yang/parse_test.go new file mode 100644 index 000000000..33269966d --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/parse_test.go @@ -0,0 +1,539 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yang + +import ( + "bytes" + "testing" +) + +func (s1 *Statement) equal(s2 *Statement) bool { + if s1.Keyword != s2.Keyword || + s1.HasArgument != s2.HasArgument || + s1.Argument != s2.Argument || + len(s1.statements) != len(s2.statements) { + return false + } + + for x, ss := range s1.statements { + if !ss.equal(s2.statements[x]) { + return false + } + } + return true +} + +// SA returns a statement with an argument and optional substatements. +func SA(k, a string, ss ...*Statement) *Statement { + return &Statement{ + Keyword: k, + Argument: a, + HasArgument: true, + statements: ss, + } +} + +// S returns a statement with no argument and optional substatements. +func S(k string, ss ...*Statement) *Statement { + return &Statement{ + Keyword: k, + statements: ss, + } +} + +func TestParse(t *testing.T) { + for _, tt := range []struct { + line int + in string + out []*Statement + err string + }{ + {line: line()}, + {line: line(), in: ` +foo; +`, + out: []*Statement{ + S("foo"), + }, + }, + {line: line(), in: ` +foo {} +`, + out: []*Statement{ + S("foo"), + }, + }, + {line: line(), in: ` +foo ""; +`, + out: []*Statement{ + SA("foo", ""), + }, + }, + {line: line(), in: ` +foo bar; +`, + out: []*Statement{ + SA("foo", "bar"), + }, + }, + {line: line(), in: ` +foo "bar"; +`, + out: []*Statement{ + SA("foo", "bar"), + }, + }, + {line: line(), in: ` +foo "\\ \S \n"; +`, + err: `test.yang:2:9: invalid escape sequence: \S`, + }, + {line: line(), in: ` +pattern "\\ \S \n"; +`, + out: []*Statement{ + SA("pattern", `\ \S +`), + }, + }, + {line: line(), in: ` +foo '\\ \S \n'; +`, + out: []*Statement{ + SA("foo", `\\ \S \n`), + }, + }, + {line: line(), in: ` +pattern '\\ \S \n'; +`, + out: []*Statement{ + SA("pattern", `\\ \S \n`), + }, + }, + {line: line(), in: ` +foo "bar" + "baz"; +`, + out: []*Statement{ + SA("foo", "barbaz"), + }, + }, + {line: line(), in: ` +foo "bar" + "+" + "baz"; +`, + out: []*Statement{ + SA("foo", "bar+baz"), + }, + }, + {line: line(), in: ` 
+foo "bar" +`, + err: `test.yang: unexpected EOF`, + }, + {line: line(), in: ` +foo "bar" + "baz" +`, + err: `test.yang: unexpected EOF`, + }, + {line: line(), in: ` +foo "bar" baz; +`, + err: `test.yang:2:11: baz: syntax error, expected ';' or '{' +test.yang:2:14: ;: keyword token not an unquoted string`, + }, + {line: line(), in: ` +foo "bar" + baz; +`, + err: `test.yang:2:11: +: syntax error, expected ';' or '{'`, + }, + {line: line(), in: ` +foo "bar" + +`, + err: `test.yang:2:11: +: syntax error, expected ';' or '{'`, + }, + {line: line(), in: ` +foo "bar"; +`, + out: []*Statement{ + SA("foo", "bar"), + }, + }, + {line: line(), in: ` +foo "bar" {} +`, + out: []*Statement{ + SA("foo", "bar"), + }, + }, + {line: line(), in: ` +foo 'bar' + 'baz'; +`, + out: []*Statement{ + SA("foo", "barbaz"), + }, + }, + {line: line(), in: ` +foo 'bar' + '+' + 'baz'; +`, + out: []*Statement{ + SA("foo", "bar+baz"), + }, + }, + {line: line(), in: ` +foo 'bar' +`, + err: `test.yang: unexpected EOF`, + }, + {line: line(), in: ` +foo 'bar' + 'baz' +`, + err: `test.yang: unexpected EOF`, + }, + {line: line(), in: ` +foo 'bar' baz; +`, + err: `test.yang:2:11: baz: syntax error, expected ';' or '{' +test.yang:2:14: ;: keyword token not an unquoted string`, + }, + {line: line(), in: ` +foo 'bar' + baz; +`, + err: `test.yang:2:11: +: syntax error, expected ';' or '{'`, + }, + {line: line(), in: ` +foo 'bar' + +`, + err: `test.yang:2:11: +: syntax error, expected ';' or '{'`, + }, + {line: line(), in: ` +foo 'bar'; +`, + out: []*Statement{ + SA("foo", "bar"), + }, + }, + {line: line(), in: ` +foo 'bar' {} +`, + out: []*Statement{ + SA("foo", "bar"), + }, + }, + {line: line(), in: ` +foo bar; +red black; +`, + out: []*Statement{ + SA("foo", "bar"), + SA("red", "black"), + }, + }, + {line: line(), in: ` +foo { + key value; +} +`, + out: []*Statement{ + S("foo", + SA("key", "value"), + ), + }, + }, + {line: line(), in: ` +foo { + key value; +} +`, + out: []*Statement{ + S("foo", + SA("key", 
"value"), + ), + }, + }, + {line: line(), in: ` +foo { + key "value1 value2 + + value3"; +} +`, + out: []*Statement{ + S("foo", + SA("key", "value1 value2\n\n value3"), + ), + }, + }, + {line: line(), in: ` +foo { + key value; + key2; +} +`, + out: []*Statement{ + S("foo", + SA("key", "value"), + S("key2"), + ), + }, + }, + {line: line(), in: ` +foo1 { + key value1; +} +foo2 { + key value2; +} +foo3 value3; +`, + out: []*Statement{ + S("foo1", + SA("key", "value1"), + ), + S("foo2", + SA("key", "value2"), + ), + SA("foo3", "value3"), + }, + }, + {line: line(), in: ` +foo1 { + key value1; + foo2 { + key value2; + } +} +`, + out: []*Statement{ + S("foo1", + SA("key", "value1"), + S("foo2", + SA("key", "value2"), + ), + ), + }, + }, + {line: line(), in: ` +foo1 { + key value1; + foo2 { + pattern '[a-zA-Z0-9!#$%&'+"'"+'*+/=?^_` + "`" + `{|}~-]+' + + '(\.[a-zA-Z0-9!#$%&'+"'"+'*+/=?^_` + "`" + `{|}~-]+)*' + + '@' + + '[a-zA-Z0-9!#$%&'+"'"+'*+/=?^_` + "`" + `{|}~-]+' + + '(\.[a-zA-Z0-9!#$%&'+"'"+'*+/=?^_` + "`" + `{|}~-]+)*'; + } +} +`, + out: []*Statement{ + S("foo1", + SA("key", "value1"), + S("foo2", + SA("pattern", "[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*@[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*"), + ), + ), + }, + }, + {line: line(), in: ` + } +`, + err: `test.yang:2:2: unexpected }`, + }, + {line: line(), in: ` +id +`, + err: `test.yang: unexpected EOF`, + }, + {line: line(), in: ` + { +`, + err: `test.yang:2:4: {: keyword token not an unquoted string`, + }, + {line: line(), in: ` +; +`, + err: `test.yang:2:1: ;: keyword token not an unquoted string`, + }, + {line: line(), in: ` +statement one two { } +`, + err: `test.yang:2:15: two: syntax error, expected ';' or '{' +test.yang:2:19: {: keyword token not an unquoted string +test.yang:2:21: unexpected }`, + }, + {line: line(), in: ` + } +foo { + key: "value"; +} +`, + err: `test.yang:2:5: unexpected }`, + }, + {line: line(), in: ` +{ + something: "bad"; +} +foo 
{ + key: "\Value"; + key2: "value2"; + bar { + key3: "value\3; + } +}`, + err: `test.yang:2:1: {: keyword token not an unquoted string +test.yang:4:1: unexpected } +test.yang:6:8: invalid escape sequence: \V +test.yang:9:15: invalid escape sequence: \3 +test.yang:9:9: missing closing " +test.yang: unexpected EOF`, + }, + {line: line(), in: ` +module base { + container top-missing-close-brace { + leaf my-leaf { + type string; + } + } +`, + err: "test.yang:8:0: missing 1 closing brace", + }, + {line: line(), in: ` +module base { + container top-missing-close-brace { + leaf my-leaf { + type string; + } +`, + err: "test.yang:7:0: missing 2 closing braces", + }, + } { + s, err := Parse(tt.in, "test.yang") + if (s == nil) != (tt.out == nil) { + if s == nil { + t.Errorf("%d: did not get expected statements: %v", tt.line, tt.out) + } else { + t.Errorf("%d: get unexpected statements: %v", tt.line, s) + } + } + switch { + case err == nil && tt.err == "": + case tt.err == "": + t.Errorf("%d: unexpected error %v", tt.line, err) + continue + case err == nil: + t.Errorf("%d: did not get expected error %v", tt.line, tt.err) + continue + case err.Error() == tt.err: + continue + default: + t.Errorf("%d: got error:\n%s\nwant:\n%s", tt.line, err, tt.err) + continue + } + s1 := &Statement{statements: s} + s2 := &Statement{statements: tt.out} + if !s1.equal(s2) { + t.Errorf("%d: got:\n%v\nwant:\n%v", tt.line, s1, s2) + } + } +} + +func TestWrite(t *testing.T) { +Testing: + for _, tt := range []struct { + line int + in string + out string + }{ + {line: line(), + in: `key arg { substatement; }`, + out: `key "arg" { + substatement; +} +`, + }, + {line: line(), + in: `key { substatement { key arg; }}`, + out: `key { + substatement { + key "arg"; + } +} +`, + }, + {line: line(), + in: ` +module base { + namespace "urn:mod"; + prefix "base"; + + typedef base-type { type int32; } + + grouping base-group { + description + "The base-group is used to test the + 'uses' statement below. 
This description + is here to simply include a multi-line + string as an example of multi-line strings"; + leaf base-group-leaf { + config false; + type string; + } + } + uses base-group; +} +`, out: `module "base" { + namespace "urn:mod"; + prefix "base"; + typedef "base-type" { + type "int32"; + } + grouping "base-group" { + description "The base-group is used to test the + 'uses' statement below. This description + is here to simply include a multi-line + string as an example of multi-line strings"; + leaf "base-group-leaf" { + config "false"; + type "string"; + } + } + uses "base-group"; +} +`, + }, + } { + in := tt.in + // Run twice. The first time we are parsing tt.in, the second + // time we are parsing the output from the first parsing. + for i := 0; i < 2; i++ { + s, err := Parse(in, "test.yang") + if err != nil { + t.Errorf("%d: unexpected error %v", tt.line, err) + continue Testing + } + if len(s) != 1 { + t.Errorf("%d: got %d statements, expected 1", tt.line, len(s)) + continue Testing + } + var buf bytes.Buffer + s[0].Write(&buf, "") + out := buf.String() + if out != tt.out { + t.Errorf("%d: got:\n%swant:\n%s", tt.line, out, tt.out) + continue Testing + } + in = out + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/testdata/deviate-delete.yang b/src/webui/internal/goyang/pkg/yang/testdata/deviate-delete.yang new file mode 100644 index 000000000..8e8646665 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/testdata/deviate-delete.yang @@ -0,0 +1,89 @@ +module deviate { + prefix "d"; + namespace "urn:d"; + + grouping substmts { + leaf config { + type string; + config true; + } + leaf default { + type string; + default "fish"; + } + leaf mandatory { + type string; + mandatory false; + } + leaf-list max-elements { + type string; + max-elements 1000; + } + leaf-list min-elements { + type string; + min-elements 1000; + } + leaf-list max-and-min-elements { + type string; + max-elements 1024; + min-elements 1; + } + leaf type { + type string; + } 
+ // TODO(robjs): unique for deviation + leaf units { + type uint16; + units "nanofish per millenium"; + } + } + + container target { + container delete { + uses substmts; + } + } + + deviation /target/delete/config { + deviate delete { + config true; + } + } + + deviation /target/delete/default { + deviate delete { + default "fish"; + } + } + + deviation /target/delete/mandatory { + deviate delete { + mandatory false; + } + } + + deviation /target/delete/min-elements { + deviate delete { + min-elements 1000; + } + } + + deviation /target/delete/max-elements { + deviate delete { + max-elements 1000; + } + } + + deviation /target/delete/max-and-min-elements { + deviate delete { + max-elements 1024; + min-elements 1; + } + } + + deviation /target/delete/units { + deviate delete { + units "nanofish per millenium"; + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/testdata/deviate-notsupported.yang b/src/webui/internal/goyang/pkg/yang/testdata/deviate-notsupported.yang new file mode 100644 index 000000000..5c59af2db --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/testdata/deviate-notsupported.yang @@ -0,0 +1,42 @@ +module deviate { + prefix "d"; + namespace "urn:d"; + + grouping substmts { + container child { + leaf zzz { type string; } + } + } + + container target { + uses substmts; + } + + list target-list { + key "k"; + + leaf k { type string; } + uses substmts; + } + + leaf a-leaf { type string; } + leaf a-leaflist { type string; } + + leaf survivor { type string; } + + deviation /target { + deviate not-supported; + } + + deviation /target-list { + deviate not-supported; + } + + deviation /a-leaf { + deviate not-supported; + } + + deviation /a-leaflist { + deviate not-supported; + } +} diff --git a/src/webui/internal/goyang/pkg/yang/testdata/deviate-replace.yang b/src/webui/internal/goyang/pkg/yang/testdata/deviate-replace.yang new file mode 100644 index 000000000..9f1166e05 --- /dev/null +++ 
b/src/webui/internal/goyang/pkg/yang/testdata/deviate-replace.yang @@ -0,0 +1,106 @@ +module deviate { + prefix "d"; + namespace "urn:d"; + + grouping substmts { + leaf config { + type string; + config true; + } + leaf default { + type string; + default "fish"; + } + leaf-list default-list { + type string; + default "fish"; + default "sticks"; + } + leaf mandatory { + type string; + mandatory false; + } + leaf-list max-elements { + type string; + max-elements 1000; + } + leaf-list min-elements { + type string; + min-elements 1000; + } + leaf-list max-and-min-elements { + type string; + max-elements 1024; + min-elements 1; + } + leaf type { + type string; + } + // TODO(robjs): unique for deviation + leaf units { + type uint16; + units "nanofish per millenium"; + } + } + + container target { + container replace { + uses substmts; + } + } + + deviation /target/replace/config { + deviate replace { + config false; + } + } + + deviation /target/replace/default { + deviate replace { + default "a default value"; + } + } + + deviation /target/replace/default-list { + deviate replace { + default "nematodes"; + } + } + + deviation /target/replace/mandatory { + deviate replace { + mandatory true; + } + } + + deviation /target/replace/min-elements { + deviate replace { + min-elements 42; + } + } + + deviation /target/replace/max-elements { + deviate replace { + max-elements 42; + } + } + + deviation /target/replace/max-and-min-elements { + deviate replace { + max-elements 42; + min-elements 42; + } + } + + deviation /target/replace/type { + deviate replace { + type uint16; + } + } + + deviation /target/replace/units { + deviate replace { + units "fish per second"; + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/testdata/deviate.yang b/src/webui/internal/goyang/pkg/yang/testdata/deviate.yang new file mode 100644 index 000000000..d8fbaa60e --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/testdata/deviate.yang @@ -0,0 +1,81 @@ +module deviate { + prefix "d"; + 
namespace "urn:d"; + + typedef derived-string { + type string; + default "barnacles"; + } + + grouping substmts { + leaf config { type string; } + leaf default { type string; } + leaf default-typedef { type derived-string; } + leaf-list default-list { type string; default "foo"; default "bar"; } + leaf-list default-list-typedef-default { type derived-string; } + leaf mandatory { type string; } + leaf-list max-elements { type string; } + leaf-list min-elements { type string; } + leaf-list max-and-min-elements { type string; } + leaf type { type string; } + // TODO(robjs): unique requires a list target + leaf units { type uint16; } + } + + container target { + container add { + uses substmts; + } + } + + deviation /target/add/config { + deviate add { + config false; + } + } + + deviation /target/add/default { + deviate add { + default "a default value"; + } + } + + deviation /target/add/default-list { + deviate add { + default "foo"; + // TODO(wenovus): support multiple default statements for deviate. 
+ //default "baz"; + } + } + + deviation /target/add/mandatory { + deviate add { + mandatory true; + } + } + + deviation /target/add/min-elements { + deviate add { + min-elements 42; + } + } + + deviation /target/add/max-elements { + deviate add { + max-elements 42; + } + } + + deviation /target/add/max-and-min-elements { + deviate add { + max-elements 42; + min-elements 42; + } + } + + deviation /target/add/units { + deviate add { + units "fish per second"; + } + } +} diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/blue.yang b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/blue.yang new file mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/blue@2000-10-10.yang b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/blue@2000-10-10.yang new file mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/dir/dirdir/red@2022-02-22.yang b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/dir/dirdir/red@2022-02-22.yang new file mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/dir/red@2020-02-02.yang b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/dir/red@2020-02-02.yang new file mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/dir/red@2020-02-20.yang b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/dir/red@2020-02-20.yang new file mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/non-standard.name b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/non-standard.name new file mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/red@2010-10-10.yang b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/red@2010-10-10.yang new file 
mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/red@2222-2-22.yang b/src/webui/internal/goyang/pkg/yang/testdata/find-file-test/red@2222-2-22.yang new file mode 100644 index 000000000..e69de29bb diff --git a/src/webui/internal/goyang/pkg/yang/types.go b/src/webui/internal/goyang/pkg/yang/types.go new file mode 100644 index 000000000..2475fa4a4 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/types.go @@ -0,0 +1,425 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// This file implements the functions relating to types and typedefs. + +import ( + "errors" + "fmt" + "regexp/syntax" + "sync" +) + +// A typeDictionary is a dictionary of all Typedefs defined in all Typedefers. +// A map of Nodes is used rather than a map of Typedefers to simplify usage +// when traversing up a Node tree. +type typeDictionary struct { + mu sync.Mutex + dict map[Node]map[string]*Typedef + // identities contains a dictionary of resolved identities. + identities identityDictionary +} + +func newTypeDictionary() *typeDictionary { + return &typeDictionary{ + dict: map[Node]map[string]*Typedef{}, + identities: identityDictionary{dict: map[string]resolvedIdentity{}}, + } +} + +// add adds an entry to the typeDictionary d. 
+func (d *typeDictionary) add(n Node, name string, td *Typedef) { + defer d.mu.Unlock() + d.mu.Lock() + if d.dict[n] == nil { + d.dict[n] = map[string]*Typedef{} + } + d.dict[n][name] = td +} + +// find returns the Typedef name define in node n, or nil. +func (d *typeDictionary) find(n Node, name string) *Typedef { + defer d.mu.Unlock() + d.mu.Lock() + if d.dict[n] == nil { + return nil + } + return d.dict[n][name] +} + +// findExternal finds the externally-defined typedef name in a module imported +// by n's root with the specified prefix. +func (d *typeDictionary) findExternal(n Node, prefix, name string) (*Typedef, error) { + root := FindModuleByPrefix(n, prefix) + if root == nil { + return nil, fmt.Errorf("%s: unknown prefix: %s for type %s", Source(n), prefix, name) + } + if td := d.find(root, name); td != nil { + return td, nil + } + if prefix != "" { + name = prefix + ":" + name + } + return nil, fmt.Errorf("%s: unknown type %s", Source(n), name) +} + +// typedefs returns a slice of all typedefs in d. +func (d *typeDictionary) typedefs() []*Typedef { + var tds []*Typedef + defer d.mu.Unlock() + d.mu.Lock() + for _, dict := range d.dict { + for _, td := range dict { + tds = append(tds, td) + } + } + return tds +} + +// addTypedefs is called from BuildAST after each Typedefer is defined. There +// are no error conditions in this process as it is simply used to build up the +// typedef dictionary. +func (d *typeDictionary) addTypedefs(t Typedefer) { + for _, td := range t.Typedefs() { + d.add(t, td.Name, td) + } +} + +// resolveTypedefs is called after all of modules and submodules have been read, +// as well as their imports and includes. It resolves all typedefs found in all +// modules and submodules read in. +func (d *typeDictionary) resolveTypedefs() []error { + var errs []error + + // When resolve typedefs, we may need to look up other typedefs. + // We gather all typedefs into a slice so we don't deadlock on + // typeDict. 
+ for _, td := range d.typedefs() { + errs = append(errs, td.resolve(d)...) + } + return errs +} + +// resolve creates a YangType for t, if not already done. Resolving t +// requires resolving the Type that t is based on. +func (t *Typedef) resolve(d *typeDictionary) []error { + // If we have no parent we are a base type and + // are already resolved. + if t.Parent == nil || t.YangType != nil { + return nil + } + + if errs := t.Type.resolve(d); len(errs) != 0 { + return errs + } + + // Make a copy of the YangType we are based on and then + // update it with local information. + y := *t.Type.YangType + y.Name = t.Name + y.Base = t.Type + + if t.Units != nil { + y.Units = t.Units.Name + } + if t.Default != nil { + y.HasDefault = true + y.Default = t.Default.Name + } + + if t.Type.IdentityBase != nil { + // We need to copy over the IdentityBase statement if the type has one + if idBase, err := RootNode(t).findIdentityBase(t.Type.IdentityBase.Name); err == nil { + y.IdentityBase = idBase.Identity + } else { + return []error{fmt.Errorf("could not resolve identity base for typedef: %s", t.Type.IdentityBase.Name)} + } + } + + // If we changed something, we are the new root. + if y.Root == t.Type.YangType || !y.Equal(y.Root) { + y.Root = &y + } + t.YangType = &y + return nil +} + +// resolve resolves Type t, as well as the underlying typedef for t. If t +// cannot be resolved then one or more errors are returned. +func (t *Type) resolve(d *typeDictionary) (errs []error) { + if t.YangType != nil { + return nil + } + + // If t.Name is a base type then td will not be nil, otherwise + // td will be nil and of type *Typedef. 
+ td := BaseTypedefs[t.Name] + + prefix, name := getPrefix(t.Name) + root := RootNode(t) + rootPrefix := root.GetPrefix() + + source := "unknown" +check: + switch { + case td != nil: + source = "builtin" + // This was a base type + case prefix == "" || rootPrefix == prefix: + source = "local" + // If we have no prefix, or the prefix is what we call our own + // root, then we look in our ancestors for a typedef of name. + for n := Node(t); n != nil; n = n.ParentNode() { + if td = d.find(n, name); td != nil { + break check + } + } + // We need to check our sub-modules as well + for _, in := range root.Include { + if td = d.find(in.Module, name); td != nil { + break check + } + } + var pname string + switch { + case prefix == "", prefix == root.Prefix.Name: + pname = root.Prefix.Name + ":" + t.Name + default: + pname = fmt.Sprintf("%s[%s]:%s", prefix, root.Prefix.Name, t.Name) + } + + return []error{fmt.Errorf("%s: unknown type: %s", Source(t), pname)} + + default: + source = "imported" + // prefix is not local to our module, so we have to go find + // what module it is part of and if it is defined at the top + // level of that module. + var err error + td, err = d.findExternal(t, prefix, name) + if err != nil { + return []error{err} + } + } + if errs := td.resolve(d); len(errs) > 0 { + return errs + } + + // Make a copy of the typedef we are based on so we can + // augment it. 
+ if td.YangType == nil { + return []error{fmt.Errorf("%s: no YangType defined for %s %s", Source(td), source, td.Name)} + } + y := *td.YangType + + y.Base = td.Type + t.YangType = &y + + if v := t.RequireInstance; v != nil { + b, err := v.asBool() + if err != nil { + errs = append(errs, err) + } + y.OptionalInstance = !b + } + if v := t.Path; v != nil { + y.Path = v.asString() + } + isDecimal64 := y.Kind == Ydecimal64 && (t.Name == "decimal64" || y.FractionDigits != 0) + switch { + case isDecimal64 && y.FractionDigits != 0: + if t.FractionDigits != nil { + return append(errs, fmt.Errorf("%s: overriding of fraction-digits not allowed", Source(t))) + } + // FractionDigits already set via type inheritance. + case isDecimal64: + // If we are directly of type decimal64 then we must specify + // fraction-digits in the range from 1-18. + i, err := t.FractionDigits.asRangeInt(1, 18) + if err != nil { + errs = append(errs, fmt.Errorf("%s: %v", Source(t), err)) + } + y.FractionDigits = int(i) + // We only know to how to populate Range after knowing the + // fractional digit value. + y.Range = YangRange{{ + Number{Value: AbsMinInt64, Negative: true, FractionDigits: uint8(i)}, + Number{Value: MaxInt64, FractionDigits: uint8(i)}, + }} + case t.FractionDigits != nil: + errs = append(errs, fmt.Errorf("%s: fraction-digits only allowed for decimal64 values", Source(t))) + case y.Kind == Yidentityref: + if source != "builtin" { + // This is a typedef that refers to an identityref, so we want to simply + // maintain the base that the typedef resolution provided + break + } + + if t.IdentityBase == nil { + errs = append(errs, fmt.Errorf("%s: an identityref must specify a base", Source(t))) + break + } + + root := RootNode(t.Parent) + resolvedBase, baseErr := root.findIdentityBase(t.IdentityBase.Name) + if baseErr != nil { + errs = append(errs, baseErr...) 
+ break + } + + if resolvedBase.Identity == nil { + errs = append(errs, fmt.Errorf("%s: identity has a null base", t.IdentityBase.Name)) + break + } + y.IdentityBase = resolvedBase.Identity + } + + if t.Range != nil { + yr, err := y.Range.parseChildRanges(t.Range.Name, isDecimal64, uint8(y.FractionDigits)) + switch { + case err != nil: + errs = append(errs, fmt.Errorf("%s: bad range: %v", Source(t.Range), err)) + case yr.Equal(y.Range): + default: + y.Range = yr + } + } + + if t.Length != nil { + parentRange := Uint64Range + if y.Length != nil { + parentRange = y.Length + } + yr, err := parentRange.parseChildRanges(t.Length.Name, false, 0) + switch { + case err != nil: + errs = append(errs, fmt.Errorf("%s: bad length: %v", Source(t.Length), err)) + case yr.Equal(y.Length): + default: + for _, r := range yr { + if r.Min.Negative { + errs = append(errs, fmt.Errorf("%s: negative length: %v", Source(t.Length), yr)) + break + } + } + y.Length = yr + } + } + + set := func(e *EnumType, name string, value *Value) error { + if value == nil { + return e.SetNext(name) + } + n, err := ParseInt(value.Name) + if err != nil { + return err + } + i, err := n.Int() + if err != nil { + return err + } + return e.Set(name, i) + } + + if len(t.Enum) > 0 { + enum := NewEnumType() + for _, e := range t.Enum { + if err := set(enum, e.Name, e.Value); err != nil { + errs = append(errs, fmt.Errorf("%s: %v", Source(e), err)) + } + } + y.Enum = enum + } + + if len(t.Bit) > 0 { + bit := NewBitfield() + for _, e := range t.Bit { + if err := set(bit, e.Name, e.Position); err != nil { + errs = append(errs, fmt.Errorf("%s: %v", Source(e), err)) + } + } + y.Bit = bit + } + + // Append any newly found patterns to the end of the list of patterns. + // Patterns are ANDed according to section 9.4.6. If all the patterns + // declared by t were also declared by the type t is based on, then + // no patterns are added. 
+ seenPatterns := map[string]bool{} + for _, p := range y.Pattern { + seenPatterns[p] = true + } + seenPOSIXPatterns := map[string]bool{} + for _, p := range y.POSIXPattern { + seenPOSIXPatterns[p] = true + } + + // First parse out the pattern statements. + // These patterns are not checked because there is no support for W3C regexes by Go. + for _, pv := range t.Pattern { + if !seenPatterns[pv.Name] { + seenPatterns[pv.Name] = true + y.Pattern = append(y.Pattern, pv.Name) + } + } + + // Then, parse out the posix-pattern statements, if they exist. + // A YANG module could make use of either or both, so we deal with each separately. + posixPatterns, err := MatchingExtensions(t, "openconfig-extensions", "posix-pattern") + if err != nil { + return []error{err} + } + + checkPattern := func(n Node, p string, flags syntax.Flags) { + if _, err := syntax.Parse(p, flags); err != nil { + if re, ok := err.(*syntax.Error); ok { + // Error adds "error parsing regexp" to + // the error, re.Code is the real error. + err = errors.New(re.Code.String()) + } + errs = append(errs, fmt.Errorf("%s: bad pattern: %v: %s", Source(n), err, p)) + } + } + for _, ext := range posixPatterns { + checkPattern(ext, ext.Argument, syntax.POSIX) + if !seenPOSIXPatterns[ext.Argument] { + seenPOSIXPatterns[ext.Argument] = true + y.POSIXPattern = append(y.POSIXPattern, ext.Argument) + } + } + + // I don't know of an easy way to use a type as a key to a map, + // so we have to check equality the hard way. +looking: + for _, ut := range t.Type { + errs = append(errs, ut.resolve(d)...) + if ut.YangType != nil { + for _, yt := range y.Type { + if ut.YangType.Equal(yt) { + continue looking + } + } + y.Type = append(y.Type, ut.YangType) + } + } + + // If we changed something, we are the new root. 
+ if !y.Equal(y.Root) { + y.Root = &y + } + + return errs +} diff --git a/src/webui/internal/goyang/pkg/yang/types_builtin.go b/src/webui/internal/goyang/pkg/yang/types_builtin.go new file mode 100644 index 000000000..f87e1b93a --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/types_builtin.go @@ -0,0 +1,700 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +// This module contains all the builtin types as well as types related +// to types (such as ranges, enums, etc). + +import ( + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +// This file handles interpretation of types + +// These are the default ranges defined by the YANG standard. +var ( + Int8Range = mustParseRangesInt("-128..127") + Int16Range = mustParseRangesInt("-32768..32767") + Int32Range = mustParseRangesInt("-2147483648..2147483647") + Int64Range = mustParseRangesInt("-9223372036854775808..9223372036854775807") + + Uint8Range = mustParseRangesInt("0..255") + Uint16Range = mustParseRangesInt("0..65535") + Uint32Range = mustParseRangesInt("0..4294967295") + Uint64Range = mustParseRangesInt("0..18446744073709551615") +) + +const ( + // MaxInt64 corresponds to the maximum value of a signed int64. + MaxInt64 = 1<<63 - 1 + // MinInt64 corresponds to the maximum value of a signed int64. + MinInt64 = -1 << 63 + // Min/MaxDecimal64 are the max/min decimal64 values. 
+ MinDecimal64 float64 = -922337203685477580.8 + MaxDecimal64 float64 = 922337203685477580.7 + // AbsMinInt64 is the absolute value of MinInt64. + AbsMinInt64 = 1 << 63 + // MaxEnum is the maximum value of an enumeration. + MaxEnum = 1<<31 - 1 + // MinEnum is the minimum value of an enumeration. + MinEnum = -1 << 31 + // MaxBitfieldSize is the maximum number of bits in a bitfield. + MaxBitfieldSize = 1 << 32 + // MaxFractionDigits is the maximum number of fractional digits as per RFC6020 Section 9.3.4. + MaxFractionDigits uint8 = 18 + + space18 = "000000000000000000" // used for prepending 0's +) + +// A Number is either an integer the range of [-(1<<64) - 1, (1<<64)-1], or a +// YANG decimal conforming to https://tools.ietf.org/html/rfc6020#section-9.3.4. +type Number struct { + // Absolute value of the number. + Value uint64 + // Number of fractional digits. + // 0 means it's an integer. For decimal64 it falls within [1, 18]. + FractionDigits uint8 + // Negative indicates whether the number is negative. + Negative bool +} + +// IsDecimal reports whether n is a decimal number. +func (n Number) IsDecimal() bool { + return n.FractionDigits != 0 +} + +// String returns n as a string in decimal. +func (n Number) String() string { + out := strconv.FormatUint(n.Value, 10) + + if n.IsDecimal() { + if fd := int(n.FractionDigits); fd > 0 { + ofd := len(out) - fd + if ofd <= 0 { + // We want 0.1 not .1 + out = space18[:-ofd+1] + out + ofd = 1 + } + out = out[:ofd] + "." + out[ofd:] + } + } + if n.Negative { + out = "-" + out + } + + return out +} + +// Int returns n as an int64. It returns an error if n overflows an int64 or +// the number is decimal. 
+func (n Number) Int() (int64, error) { + if n.IsDecimal() { + return 0, errors.New("called Int() on decimal64 value") + } + if n.Negative { + return -int64(n.Value), nil + } + if n.Value <= MaxInt64 { + return int64(n.Value), nil + } + return 0, errors.New("signed integer overflow") +} + +// addQuantum adds the smallest quantum to n without checking overflow. +func (n Number) addQuantum(i uint64) Number { + switch n.Negative { + case true: + if n.Value <= i { + n.Value = i - n.Value + n.Negative = false + } else { + n.Value -= i + } + case false: + n.Value += i + } + return n +} + +// Less returns true if n is less than m. Panics if n and m are a mix of integer +// and decimal. +func (n Number) Less(m Number) bool { + switch { + case n.Negative && !m.Negative: + return true + case !n.Negative && m.Negative: + return false + } + + nt, mt := n.Trunc(), m.Trunc() + lt := nt < mt + if nt == mt { + nf, mf := n.frac(), m.frac() + if nf == mf { + return false + } + lt = nf < mf + } + + if n.Negative { + return !lt + } + return lt +} + +// Equal returns true if n is equal to m. +func (n Number) Equal(m Number) bool { + return !n.Less(m) && !m.Less(n) +} + +// Trunc returns the whole part of abs(n) as a signed integer. +func (n Number) Trunc() uint64 { + nv := n.Value + e := pow10(n.FractionDigits) + return nv / e +} + +// frac returns the fraction part with a precision of 18 fractional digits. +// E.g. if n is 3.1 then n.frac() returns 100,000,000,000,000,000 +func (n Number) frac() uint64 { + frac := n.FractionDigits + i := n.Trunc() * pow10(frac) + return (n.Value - i) * pow10(uint8(18-frac)) +} + +// YRange is a single range of consecutive numbers, inclusive. +type YRange struct { + Min Number + Max Number +} + +// Valid returns false if r is not a valid range (min > max). +func (r YRange) Valid() bool { + return !r.Max.Less(r.Min) +} + +// String returns r as a string using YANG notation, either a simple +// value if min == max or min..max. 
+func (r YRange) String() string { + if r.Min.Equal(r.Max) { + return r.Min.String() + } + return r.Min.String() + ".." + r.Max.String() +} + +// Equal compares whether two YRanges are equal. +func (r YRange) Equal(s YRange) bool { + return r.Min.Equal(s.Min) && r.Max.Equal(s.Max) +} + +// A YangRange is a set of non-overlapping ranges. +type YangRange []YRange + +// String returns the ranges r using YANG notation. Individual ranges +// are separated by pipes (|). +func (r YangRange) String() string { + s := make([]string, len(r)) + for i, r := range r { + s[i] = r.String() + } + return strings.Join(s, "|") +} + +func (r YangRange) Len() int { return len(r) } +func (r YangRange) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r YangRange) Less(i, j int) bool { + switch { + case r[i].Min.Less(r[j].Min): + return true + case r[j].Min.Less(r[i].Min): + return false + default: + return r[i].Max.Less(r[j].Max) + } +} + +// Validate returns an error if r has either an invalid range or has +// overlapping ranges. +// r is expected to be sorted use YangRange.Sort() +func (r YangRange) Validate() error { + if !sort.IsSorted(r) { + return errors.New("range not sorted") + } + switch { + case len(r) == 0: + return nil + case !r[0].Valid(): + return errors.New("invalid number") + } + p := r[0] + + for _, n := range r[1:] { + if n.Min.Less(p.Max) { + return errors.New("overlapping ranges") + } + } + return nil +} + +// Sort r. Must be called before Validate and coalesce if unsorted +func (r YangRange) Sort() { + sort.Sort(r) +} + +// Equal returns true if ranges r and q are identically equivalent. +// TODO(borman): should we coalesce ranges in the comparison? +func (r YangRange) Equal(q YangRange) bool { + if len(r) != len(q) { + return false + } + for i, r := range r { + if !r.Equal(q[i]) { + return false + } + } + return true +} + +// Contains returns true if all possible values in s are also possible values +// in r. 
An empty range is assumed to be min..max when it is the receiver +// argument. +func (r YangRange) Contains(s YangRange) bool { + if len(r) == 0 || len(s) == 0 { + return true + } + + // Check if every range in s is subsumed under r. + // Both range lists should be in order and non-adjacent (coalesced). + ri := 0 + for _, ss := range s { + for r[ri].Max.Less(ss.Min) { + ri++ + if ri == len(r) { + return false + } + } + if ss.Min.Less(r[ri].Min) || r[ri].Max.Less(ss.Max) { + return false + } + } + return true +} + +// FromInt creates a Number from an int64. +func FromInt(i int64) Number { + if i < 0 { + return Number{Negative: true, Value: uint64(-i)} + } + return Number{Value: uint64(i)} +} + +// FromUint creates a Number from a uint64. +func FromUint(i uint64) Number { + return Number{Value: i} +} + +// FromFloat creates a Number from a float64. Input values with absolute value +// outside the boundaries specified for the decimal64 value specified in +// RFC6020/RFC7950 are clamped down to the closest boundary value. +func FromFloat(f float64) Number { + if f > MaxDecimal64 { + return Number{ + Value: FromInt(MaxInt64).Value, + FractionDigits: 1, + } + } + if f < MinDecimal64 { + return Number{ + Negative: true, + Value: FromInt(MaxInt64).Value, + FractionDigits: 1, + } + } + + // Per RFC7950/6020, fraction-digits must be at least 1. + fracDig := uint8(1) + f *= 10.0 + for ; Frac(f) != 0.0 && fracDig <= MaxFractionDigits; fracDig++ { + f *= 10.0 + } + negative := false + if f < 0 { + negative = true + f = -f + } + v := uint64(f) + + return Number{Negative: negative, Value: v, FractionDigits: fracDig} +} + +// ParseInt returns s as a Number with FractionDigits=0. 
+// octal, or hexadecimal using the standard prefix notations (e.g., 0 and 0x) +func ParseInt(s string) (Number, error) { + s = strings.TrimSpace(s) + var n Number + switch s { + case "": + return n, errors.New("converting empty string to number") + case "+", "-": + return n, errors.New("sign with no value") + } + + ns := s + switch s[0] { + case '+': + ns = s[1:] + case '-': + n.Negative = true + ns = s[1:] + } + + var err error + n.Value, err = strconv.ParseUint(ns, 0, 64) + return n, err +} + +// ParseDecimal returns s as a Number with a non-zero FractionDigits. +// octal, or hexadecimal using the standard prefix notations (e.g., 0 and 0x) +func ParseDecimal(s string, fracDigRequired uint8) (n Number, err error) { + s = strings.TrimSpace(s) + switch s { + case "": + return n, errors.New("converting empty string to number") + case "+", "-": + return n, errors.New("sign with no value") + } + + return decimalValueFromString(s, fracDigRequired) +} + +// decimalValueFromString returns a decimal Number representation of numStr. +// fracDigRequired is used to set the number of fractional digits, which must +// be at least the greatest precision seen in numStr. +// which must be between 1 and 18. +// numStr must conform to Section 9.3.4. 
+func decimalValueFromString(numStr string, fracDigRequired uint8) (n Number, err error) { + if fracDigRequired > MaxFractionDigits || fracDigRequired < 1 { + return n, fmt.Errorf("invalid number of fraction digits %d > max of %d, minimum 1", fracDigRequired, MaxFractionDigits) + } + + s := numStr + dx := strings.Index(s, ".") + var fracDig uint8 + if dx >= 0 { + fracDig = uint8(len(s) - 1 - dx) + // remove first decimal, if dx > 1, will fail ParseInt below + s = s[:dx] + s[dx+1:] + } + + if fracDig > fracDigRequired { + return n, fmt.Errorf("%s has too much precision, expect <= %d fractional digits", s, fracDigRequired) + } + + s += space18[:fracDigRequired-fracDig] + + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return n, fmt.Errorf("%s is not a valid decimal number: %s", numStr, err) + } + + negative := false + if v < 0 { + negative = true + v = -v + } + + return Number{Value: uint64(v), FractionDigits: fracDigRequired, Negative: negative}, nil +} + +// ParseRangesInt parses s into a series of ranges. Each individual range is in s +// is separated by the pipe character (|). The min and max value of a range +// are separated by "..". An error is returned if the range is invalid. The +// output range is sorted and coalesced. +func ParseRangesInt(s string) (YangRange, error) { + return YangRange{}.parseChildRanges(s, false, 0) +} + +// ParseRangesDecimal parses s into a series of ranges. Each individual range is in s +// is separated by the pipe character (|). The min and max value of a range +// are separated by "..". An error is returned if the range is invalid. The +// output range is sorted and coalesced. +func ParseRangesDecimal(s string, fracDigRequired uint8) (YangRange, error) { + return YangRange{}.parseChildRanges(s, true, fracDigRequired) +} + +// parseChildRanges parses a child ranges statement 's' into a series of ranges +// based on an already-parsed parent YangRange. 
Each individual range is in s +// is separated by the pipe character (|). The min and max value of a range are +// separated by "..". An error is returned if the child ranges are not +// equally-limiting or more limiting than the parent range +// (rfc7950#section-9.2.5). The output range is sorted and coalesced. +// fracDigRequired is ignored when decimal=false. +func (y YangRange) parseChildRanges(s string, decimal bool, fracDigRequired uint8) (YangRange, error) { + parseNumber := func(s string) (Number, error) { + switch { + case s == "max": + if len(y) == 0 { + return Number{}, errors.New("cannot resolve 'max' keyword using an empty YangRange parent object") + } + max := y[len(y)-1].Max + max.FractionDigits = fracDigRequired + return max, nil + case s == "min": + if len(y) == 0 { + return Number{}, errors.New("cannot resolve 'min' keyword using an empty YangRange parent object") + } + min := y[0].Min + min.FractionDigits = fracDigRequired + return min, nil + case decimal: + return ParseDecimal(s, fracDigRequired) + default: + return ParseInt(s) + } + } + + parts := strings.Split(s, "|") + r := make(YangRange, len(parts)) + for i, s := range parts { + parts := strings.Split(s, "..") + min, err := parseNumber(strings.TrimSpace(parts[0])) + if err != nil { + return nil, err + } + var max Number + switch len(parts) { + case 1: + max = min + case 2: + if max, err = parseNumber(strings.TrimSpace(parts[1])); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many '..' in %s", s) + } + if max.Less(min) { + return nil, fmt.Errorf("range boundaries out of order (%s less than %s): %s", max, min, s) + } + r[i] = YRange{min, max} + } + r.Sort() + r = coalesce(r) + + if !y.Contains(r) { + return nil, fmt.Errorf("%v not within %v", s, y) + } + + if err := r.Validate(); err != nil { + return nil, err + } + return r, nil +} + +// coalesce coalesces r into as few ranges as possible. For example, +// 1..5|6..10 would become 1..10. 
r is assumed to be sorted. +func coalesce(r YangRange) YangRange { + // coalesce the ranges if we have more than 1. + if len(r) < 2 { + return r + } + cr := make(YangRange, len(r)) + i := 0 + cr[i] = r[0] + for _, r1 := range r[1:] { + // r1.Min is always at least as large as cr[i].Min + // Cases are: + // r1 is contained in cr[i] + // r1 starts inside of cr[i] + // r1.Min cr[i].Max+1 + // r1 is beyond cr[i] + if cr[i].Max.addQuantum(1).Less(r1.Min) { + // r1 starts after cr[i], this is a new range + i++ + cr[i] = r1 + } else if cr[i].Max.Less(r1.Max) { + cr[i].Max = r1.Max + } + } + return cr[:i+1] +} + +func mustParseRangesInt(s string) YangRange { + r, err := ParseRangesInt(s) + if err != nil { + panic(err) + } + return r +} + +func mustParseRangesDecimal(s string, fracDigRequired uint8) YangRange { + r, err := ParseRangesDecimal(s, fracDigRequired) + if err != nil { + panic(err) + } + return r +} + +// Frac returns the fractional part of f. +func Frac(f float64) float64 { + return f - math.Trunc(f) +} + +// pow10 returns 10^e without checking for overflow. +func pow10(e uint8) uint64 { + var out uint64 = 1 + for i := uint8(0); i < e; i++ { + out *= 10 + } + return out +} + +// A EnumType represents a mapping of strings to integers. It is used both +// for enumerations as well as bitfields. +type EnumType struct { + last int64 // maximum value assigned thus far + min int64 // minimum value allowed + max int64 // maximum value allowed + unique bool // numeric values must be unique (enums) + ToString map[int64]string `json:",omitempty"` // map of enum entries by value (integer) + ToInt map[string]int64 `json:",omitempty"` // map of enum entries by name (string) +} + +// NewEnumType returns an initialized EnumType. 
+func NewEnumType() *EnumType { + return &EnumType{ + last: -1, // +1 will start at 0 + min: MinEnum, + max: MaxEnum, + unique: true, + ToString: map[int64]string{}, + ToInt: map[string]int64{}, + } +} + +// NewBitfield returns an EnumType initialized as a bitfield. Multiple string +// values may map to the same numeric values. Numeric values must be small +// non-negative integers. +func NewBitfield() *EnumType { + return &EnumType{ + last: -1, // +1 will start at 0 + min: 0, + max: MaxBitfieldSize - 1, + ToString: map[int64]string{}, + ToInt: map[string]int64{}, + } +} + +// Set sets name in e to the provided value. Set returns an error if the value +// is invalid, name is already signed, or when used as an enum rather than a +// bitfield, the value has previousl been used. When two different names are +// assigned to the same value, the conversion from value to name will result in +// the most recently assigned name. +func (e *EnumType) Set(name string, value int64) error { + if _, ok := e.ToInt[name]; ok { + return fmt.Errorf("field %s already assigned", name) + } + if oname, ok := e.ToString[value]; e.unique && ok { + return fmt.Errorf("fields %s and %s conflict on value %d", name, oname, value) + } + if value < e.min { + return fmt.Errorf("value %d for %s too small (minimum is %d)", value, name, e.min) + } + if value > e.max { + return fmt.Errorf("value %d for %s too large (maximum is %d)", value, name, e.max) + } + e.ToString[value] = name + e.ToInt[name] = value + if value >= e.last { + e.last = value + } + return nil +} + +// SetNext sets the name in e using the next possible value that is greater than +// all previous values. +func (e *EnumType) SetNext(name string) error { + if e.last == MaxEnum { + return fmt.Errorf("enum %q must specify a value since previous enum is the maximum value allowed", name) + } + return e.Set(name, e.last+1) +} + +// Name returns the name in e associated with value. 
The empty string is +// returned if no name has been assigned to value. +func (e *EnumType) Name(value int64) string { return e.ToString[value] } + +// Value returns the value associated with name in e associated. 0 is returned +// if name is not in e, or if it is the first value in an unnumbered enum. Use +// IsDefined to definitively confirm name is in e. +func (e *EnumType) Value(name string) int64 { return e.ToInt[name] } + +// IsDefined returns true if name is defined in e, else false. +func (e *EnumType) IsDefined(name string) bool { + _, defined := e.ToInt[name] + return defined +} + +// Names returns the sorted list of enum string names. +func (e *EnumType) Names() []string { + names := make([]string, len(e.ToInt)) + i := 0 + for name := range e.ToInt { + names[i] = name + i++ + } + sort.Strings(names) + return names +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// Values returns the sorted list of enum values. +func (e *EnumType) Values() []int64 { + values := make([]int64, len(e.ToInt)) + i := 0 + for _, value := range e.ToInt { + values[i] = value + i++ + } + sort.Sort(int64Slice(values)) + return values +} + +// NameMap returns a map of names to values. +func (e *EnumType) NameMap() map[string]int64 { + m := make(map[string]int64, len(e.ToInt)) + for name, value := range e.ToInt { + m[name] = value + } + return m +} + +// ValueMap returns a map of values to names. 
+func (e *EnumType) ValueMap() map[int64]string { + m := make(map[int64]string, len(e.ToString)) + for name, value := range e.ToString { + m[name] = value + } + return m +} diff --git a/src/webui/internal/goyang/pkg/yang/types_builtin_test.go b/src/webui/internal/goyang/pkg/yang/types_builtin_test.go new file mode 100644 index 000000000..5ae654da3 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/types_builtin_test.go @@ -0,0 +1,915 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/openconfig/gnmi/errdiff" +) + +const ( + maxUint64 uint64 = 18446744073709551615 + maxUint32 = 0xFFFFFFFF + maxUint16 = 0xFFFF + maxUint8 = 0xFF + maxInt32 = 1<<31 - 1 + minInt32 = -1 << 31 + maxInt16 = 1<<15 - 1 + minInt16 = -1 << 15 + maxInt8 = 1<<7 - 1 + minInt8 = -1 << 7 +) + +// R is a test helper for creating an int-based YRange. +func R(a, b int64) YRange { + return YRange{FromInt(a), FromInt(b)} +} + +// Rf is a test helper for creating a float-based YRange. 
+func Rf(a, b int64, fracDig uint8) YRange { + n1 := Number{Value: uint64(a), FractionDigits: fracDig} + n2 := Number{Value: uint64(b), FractionDigits: fracDig} + if a < 0 { + n1.Value = uint64(-a) + n1.Negative = true + } + if b < 0 { + n2.Value = uint64(-b) + n2.Negative = true + } + return YRange{n1, n2} +} + +func TestFromFloat(t *testing.T) { + tests := []struct { + desc string + in float64 + want Number + }{{ + desc: "positive - no decimals", + in: 10.0, + want: Number{ + Negative: false, + Value: 10, + FractionDigits: 0, + }, + }, { + desc: "positive - decimals", + in: 10.15, + want: Number{ + Negative: false, + Value: 1015, + FractionDigits: 2, + }, + }, { + desc: "negative - no decimals", + in: -10.0, + want: Number{ + Negative: true, + Value: 10, + FractionDigits: 0, + }, + }, { + desc: "negative - decimals", + in: -10.15, + want: Number{ + Negative: true, + Value: 1015, + FractionDigits: 2, + }, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if got := FromFloat(tt.in); !cmp.Equal(got, tt.want) { + t.Fatalf("FromFloat(%v): did not get expected value, got: %+v, want: %+v", tt.in, got, tt.want) + } + }) + } +} + +func TestNumberInt(t *testing.T) { + tests := []struct { + desc string + in Number + want int64 + wantErr bool + }{{ + desc: "zero", + in: FromInt(0), + want: 0, + }, { + desc: "positive", + in: FromInt(42), + want: 42, + }, { + desc: "negative", + in: FromInt(-42), + want: -42, + }, { + desc: "decimal", + in: FromFloat(42), + wantErr: true, + }, { + desc: "overflow", + in: FromUint(maxUint64), + wantErr: true, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + got, err := tt.in.Int() + if got != tt.want { + t.Errorf("got: %v, want: %v", got, tt.want) + } + if (err != nil) != tt.wantErr { + t.Errorf("gotErr: %v, wantErr: %v", err, tt.wantErr) + } + }) + } +} + +func TestRangeEqual(t *testing.T) { + tests := []struct { + desc string + inBaseRange YangRange + inTestRange YangRange + want bool + 
}{{ + desc: "empty range equals empty range", + want: true, + }, { + desc: "test range is default", + inBaseRange: YangRange{R(1, 2)}, want: false, + }, { + desc: "base range is default", + inTestRange: YangRange{R(1, 2)}, want: false, + }, { + desc: "equal ranges", + inBaseRange: YangRange{R(1, 2)}, + inTestRange: YangRange{R(1, 2)}, + want: true, + }, { + desc: "wider base range", + inBaseRange: YangRange{R(1, 3)}, + inTestRange: YangRange{R(1, 2)}, + want: false, + }, { + desc: "equal ranges with multiple subranges", + inBaseRange: YangRange{R(1, 2), R(4, 5)}, + inTestRange: YangRange{R(1, 2), R(4, 5)}, + want: true, + }, { + desc: "multiple subranges with one unequal", + inBaseRange: YangRange{R(1, 2), R(4, 6)}, + inTestRange: YangRange{R(1, 2), R(4, 5)}, + want: false, + }, { + desc: "extra subrange in base range", + inBaseRange: YangRange{R(1, 2)}, + inTestRange: YangRange{R(1, 2), R(4, 5)}, + want: false, + }, { + desc: "extra subrange in test range", + inBaseRange: YangRange{R(1, 2), R(4, 5)}, + inTestRange: YangRange{R(1, 2)}, + want: false, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if want := tt.inBaseRange.Equal(tt.inTestRange); want != tt.want { + t.Errorf("got %v, want %v", want, tt.want) + } + }) + } +} + +func TestRangeContains(t *testing.T) { + tests := []struct { + desc string + inBaseRange YangRange + inTestRange YangRange + want bool + }{{ + desc: "empty range contained in empty range", + want: true, + }, { + desc: "empty range contained in non-empty range", + inBaseRange: YangRange{R(1, 2)}, + want: true, + }, { + desc: "non-empty range contained in empty range", + inTestRange: YangRange{R(1, 2)}, + want: true, + }, { + desc: "equal ranges contain", + inBaseRange: YangRange{R(1, 2)}, + inTestRange: YangRange{R(1, 2)}, + want: true, + }, { + desc: "superset contains", + inBaseRange: YangRange{R(1, 5)}, + inTestRange: YangRange{R(2, 3)}, + want: true, + }, { + desc: "subset doesn't contain", + inBaseRange: 
YangRange{R(2, 3)}, + inTestRange: YangRange{R(1, 5)}, + want: false, + }, { + desc: "contain subranges", + inBaseRange: YangRange{R(1, 10)}, + inTestRange: YangRange{R(1, 2), R(4, 5), R(7, 10)}, + want: true, + }, { + desc: "subranges leaks out", + inBaseRange: YangRange{R(1, 10)}, + inTestRange: YangRange{R(1, 2), R(7, 11)}, + want: false, + }, { + desc: "subranges containing a subset", + inBaseRange: YangRange{R(1, 9), R(11, 19), R(21, 29)}, + inTestRange: YangRange{R(23, 25)}, + want: true, + }, { + desc: "subranges containing a single valued range", + inBaseRange: YangRange{R(1, 9), R(11, 19), R(21, 29)}, + inTestRange: YangRange{R(23, 23)}, + want: true, + }, { + desc: "subranges doesn't contain a single outside value", + inBaseRange: YangRange{R(1, 9), R(11, 19), R(21, 29)}, + inTestRange: YangRange{R(20, 20)}, + want: false, + }, { + desc: "smaller range doesn't contain min..max", + inBaseRange: YangRange{R(1, 10)}, + inTestRange: YangRange{R(MinInt64, MaxInt64)}, + want: false, + }, { + desc: "full range contains any", + inBaseRange: YangRange{R(MinInt64, MaxInt64)}, + inTestRange: YangRange{R(1, 10)}, + want: true, + }, { + desc: "smaller range doesn't contain min..a|b..max", + inBaseRange: YangRange{R(1024, 65535)}, + inTestRange: YangRange{R(MinInt64, 4096), R(5120, MaxInt64)}, + want: false, + }, { + desc: "ranges don't overlap with max word used", + inBaseRange: YangRange{R(1024, 65535)}, + inTestRange: YangRange{R(-999999, 4096), R(5120, MaxInt64)}, + want: false, + }, { + desc: "ranges don't overlap with min word used", + inBaseRange: YangRange{R(1024, 65535)}, + inTestRange: YangRange{R(MinInt64, 4096), R(5120, 999999)}, + want: false, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if got := tt.inBaseRange.Contains(tt.inTestRange); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestParseRangesInt(t *testing.T) { + tests := []struct { + desc string + inParentRange YangRange + in 
string + want YangRange + wantErrSubstring string + }{{ + desc: "small numbers, coalescing", + in: "0|2..3|4..5", + want: YangRange{R(0, 0), R(2, 5)}, + }, { + desc: "small numbers, out of order, coalescing", + in: "4..5|0|2..3", + want: YangRange{R(0, 0), R(2, 5)}, + }, { + desc: "invalid input: too many ..s", + in: "0|2..3|4..5..6", + wantErrSubstring: "too many '..' in 4..5..6", + }, { + desc: "invalid input: range boundaries out of order", + in: "0|2..3|5..4", + wantErrSubstring: "range boundaries out of order", + }, { + desc: "range with min", + inParentRange: Int64Range, + in: "min..0|2..3|4..5", + want: YangRange{R(MinInt64, 0), R(2, 5)}, + }, { + desc: "range with min but without parent range", + in: "min..0|2..3|4..5", + wantErrSubstring: "empty YangRange parent object", + }, { + desc: "range with max", + inParentRange: Int32Range, + in: "min..0|2..3|4..5|7..max", + want: YangRange{R(minInt32, 0), R(2, 5), R(7, maxInt32)}, + }, { + desc: "coalescing from min to max for uint64", + inParentRange: Uint64Range, + in: "min..0|1..max", + want: YangRange{YRange{FromInt(0), FromUint(maxUint64)}}, + }, { + desc: "coalescing from min to max for uint32", + inParentRange: Uint32Range, + in: "min..0|1..max", + want: YangRange{R(0, maxUint32)}, + }, { + desc: "coalescing from min to max for uint16", + inParentRange: Uint16Range, + in: "min..0|1..max", + want: YangRange{R(0, maxUint16)}, + }, { + desc: "coalescing from min to max for uint8", + inParentRange: Uint8Range, + in: "min..0|1..max", + want: YangRange{R(0, maxUint8)}, + }, { + desc: "coalescing from min to max for int64", + inParentRange: Int64Range, + in: "min..0|1..max", + want: YangRange{R(MinInt64, MaxInt64)}, + }, { + desc: "coalescing from min to max for int32", + inParentRange: Int32Range, + in: "min..0|1..max", + want: YangRange{R(minInt32, maxInt32)}, + }, { + desc: "coalescing from min to max for int16", + inParentRange: Int16Range, + in: "min..0|1..max", + want: YangRange{R(minInt16, maxInt16)}, + }, 
{ + desc: "coalescing from min to max for int8", + inParentRange: Int8Range, + in: "min..0|1..max", + want: YangRange{R(minInt8, maxInt8)}, + }, { + desc: "spelling error", + inParentRange: Int64Range, + in: "mean..0|1..max", + wantErrSubstring: "invalid syntax", + }, { + desc: "big numbers, coalescing", + in: "0..69|4294967294|4294967295", + want: YangRange{R(0, 69), R(4294967294, 4294967295)}, + }, { + desc: "no ranges", + in: "250|500|1000", + want: YangRange{R(250, 250), R(500, 500), R(1000, 1000)}, + }, { + desc: "no ranges unsorted", + in: "1000|500|250", + want: YangRange{R(250, 250), R(500, 500), R(1000, 1000)}, + }, { + desc: "negative numbers", + in: "-31..-1|1..31", + want: YangRange{R(-31, -1), R(1, 31)}, + }, { + desc: "spaces", + in: "-22 | -15 | -7 | 0", + want: YangRange{R(-22, -22), R(-15, -15), R(-7, -7), R(0, 0)}, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + got, err := tt.inParentRange.parseChildRanges(tt.in, false, 0) + if err != nil { + if diff := errdiff.Substring(err, tt.wantErrSubstring); diff != "" { + t.Fatalf("did not get expected error, %s", diff) + } + return + } + + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("parseChildRanges (-want, +got):\n%s", diff) + } + + if tt.inParentRange == nil { + if got, err = ParseRangesInt(tt.in); err != nil { + t.Fatalf("ParseRangesInt: unexpected error: %v", err) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("ParseRangesInt (-want, +got):\n%s", diff) + } + } + }) + } +} + +func TestCoalesce(t *testing.T) { + for x, tt := range []struct { + in, out YangRange + }{ + {}, + {YangRange{R(1, 4)}, YangRange{R(1, 4)}}, + {YangRange{R(1, 2), R(3, 4)}, YangRange{R(1, 4)}}, + {YangRange{Rf(10, 25, 1), Rf(30, 40, 1)}, YangRange{Rf(10, 25, 1), Rf(30, 40, 1)}}, + {YangRange{Rf(10, 29, 1), Rf(30, 40, 1)}, YangRange{Rf(10, 40, 1)}}, + {YangRange{R(1, 2), R(2, 4)}, YangRange{R(1, 4)}}, + {YangRange{R(1, 2), R(4, 5)}, YangRange{R(1, 2), R(4, 5)}}, + 
{YangRange{R(1, 3), R(2, 5)}, YangRange{R(1, 5)}}, + {YangRange{R(1, 10), R(2, 5)}, YangRange{R(1, 10)}}, + {YangRange{R(1, 10), R(1, 2), R(4, 5), R(7, 8)}, YangRange{R(1, 10)}}, + {YangRange{Rf(1, 10, 3), Rf(1, 2, 3), Rf(4, 5, 3), Rf(7, 8, 3)}, YangRange{Rf(1, 10, 3)}}, + } { + out := coalesce(tt.in) + if !out.Equal(tt.out) { + t.Errorf("#%d: got %v, want %v", x, out, tt.out) + } + } +} + +func TestYangRangeSort(t *testing.T) { + for x, tt := range []struct { + in, out YangRange + }{ + {YangRange{}, YangRange{}}, + {YangRange{R(1, 4), R(6, 10)}, YangRange{R(1, 4), R(6, 10)}}, + {YangRange{R(6, 10), R(1, 4)}, YangRange{R(1, 4), R(6, 10)}}, + {YangRange{Rf(10, 25, 1), Rf(30, 40, 1)}, YangRange{Rf(10, 25, 1), Rf(30, 40, 1)}}, + {YangRange{Rf(30, 40, 1), Rf(10, 25, 1)}, YangRange{Rf(10, 25, 1), Rf(30, 40, 1)}}, + {YangRange{R(1, 2)}, YangRange{R(1, 2)}}, + {YangRange{R(1, 2), R(4, 5)}, YangRange{R(1, 2), R(4, 5)}}, + {YangRange{R(1, 3), R(2, 5)}, YangRange{R(1, 3), R(2, 5)}}, + {YangRange{R(1, 10), R(2, 5)}, YangRange{R(1, 10), R(2, 5)}}, + {YangRange{R(1, 10), R(1, 2), R(4, 5), R(7, 8)}, YangRange{R(1, 2), R(1, 10), R(4, 5), R(7, 8)}}, + } { + tt.in.Sort() + if !tt.in.Equal(tt.out) { + t.Errorf("#%d: got %v, want %v", x, tt.in, tt.out) + } + } +} + +func TestParseRangesDecimal(t *testing.T) { + rangeMax := mustParseRangesDecimal("-922337203685477580.8..922337203685477580.7", 1) + rangeRestricted := mustParseRangesDecimal("-42..42|100", 5) + + tests := []struct { + desc string + inParentRange YangRange + in string + inFracDig uint8 + want YangRange + wantErrSubstring string + }{{ + desc: "min..max fraction-digits 1", + inParentRange: rangeMax, + in: "min..max", + inFracDig: 1, + want: YangRange{Rf(MinInt64, MaxInt64, 1)}, + }, { + desc: "min..max fraction-digits 2", + inParentRange: rangeMax, + in: "min..max", + inFracDig: 2, + want: YangRange{Rf(MinInt64, MaxInt64, 2)}, + }, { + desc: "min..max no parent range", + in: "min..max", + inFracDig: 2, + want: 
YangRange{Rf(MinInt64, MaxInt64, 2)}, + wantErrSubstring: "empty YangRange parent object", + }, { + desc: "min..max on fragmented range", + inParentRange: rangeRestricted, + in: "min..max", + inFracDig: 5, + wantErrSubstring: "not within", + }, { + desc: "small decimals", + inParentRange: rangeMax, + in: "0.0|2.0..30.0|1.34..1.99", + inFracDig: 2, + want: YangRange{Rf(0, 0, 2), Rf(134, 3000, 2)}, + }, { + desc: "small decimals on restricted range", + inParentRange: rangeRestricted, + in: "0.0|2.0..30.0|1.34..1.99999", + inFracDig: 5, + want: YangRange{Rf(0, 0, 5), Rf(134000, 3000000, 5)}, + }, { + desc: "small decimals with coalescing", + inParentRange: rangeMax, + in: "0.0|2.0..30.0", + inFracDig: 1, + want: YangRange{Rf(0, 0, 1), Rf(20, 300, 1)}, + }, { + desc: "fractional digit cannot be too high", + in: "0.0|2.0..30.0", + inFracDig: 19, + wantErrSubstring: "invalid number of fraction digits", + }, { + desc: "fractional digit cannot be 0", + in: "0.0|2.0..30.0", + inFracDig: 0, + wantErrSubstring: "invalid number of fraction digits", + }, { + desc: "big decimals", + in: "0.0..69|4294967294.1234|4294967295.1234", + inFracDig: 4, + want: YangRange{Rf(0, 690000, 4), Rf(42949672941234, 42949672941234, 4), Rf(42949672951234, 42949672951234, 4)}, + }, { + desc: "small decimals, out of order", + in: "4.0..5.55|0|2.32..3.23", + inFracDig: 3, + want: YangRange{Rf(0, 0, 3), Rf(2320, 3230, 3), Rf(4000, 5550, 3)}, + }, { + desc: "invalid input: too many ..s", + in: "4.0..5.55..6.66|0|2.32..3.23", + inFracDig: 3, + wantErrSubstring: "too many '..'", + }, { + desc: "invalid input: range boundaries out of order", + in: "5..4.0|0|2.32..3.23", + inFracDig: 3, + wantErrSubstring: "range boundaries out of order", + }, { + desc: "range with min", + inParentRange: rangeMax, + in: "4.0..5.55|min..0|2.32..3.23", + inFracDig: 3, + want: YangRange{Rf(MinInt64, 0, 3), Rf(2320, 3230, 3), Rf(4000, 5550, 3)}, + }, { + desc: "range with max", + inParentRange: rangeMax, + in: 
"4.0..max|min..0|2.32..3.23", + inFracDig: 3, + want: YangRange{Rf(MinInt64, 0, 3), Rf(2320, 3230, 3), Rf(4000, MaxInt64, 3)}, + }, { + desc: "coalescing from min to max", + inParentRange: rangeMax, + in: "min..0.9|1..max", + inFracDig: 1, + want: YangRange{Rf(MinInt64, MaxInt64, 1)}, + }, { + desc: "spelling error", + inParentRange: rangeMax, + in: "min..0.9|1..masks", + inFracDig: 1, + wantErrSubstring: "invalid syntax", + }, { + desc: "no ranges", + in: "250.55|500.0|1000", + inFracDig: 2, + want: YangRange{Rf(25055, 25055, 2), Rf(50000, 50000, 2), Rf(100000, 100000, 2)}, + }, { + desc: "no ranges unsorted", + in: "1000|500.0|250.55", + inFracDig: 2, + want: YangRange{Rf(25055, 25055, 2), Rf(50000, 50000, 2), Rf(100000, 100000, 2)}, + }, { + desc: "negative decimals", + in: "-31.2..-1.5|1.5..31.2", + inFracDig: 1, + want: YangRange{Rf(-312, -15, 1), Rf(15, 312, 1)}, + }, { + desc: "spaces", + in: "-22.5 | -15 | -7.5 | 0", + inFracDig: 1, + want: YangRange{Rf(-225, -225, 1), Rf(-150, -150, 1), Rf(-75, -75, 1), Rf(0, 0, 1)}, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + got, err := tt.inParentRange.parseChildRanges(tt.in, true, tt.inFracDig) + if err != nil { + if diff := errdiff.Substring(err, tt.wantErrSubstring); diff != "" { + t.Fatalf("did not get expected error, %s", diff) + } + return + } + + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("(-want, +got):\n%s", diff) + } + + if tt.inParentRange == nil { + if got, err = ParseRangesDecimal(tt.in, tt.inFracDig); err != nil { + t.Fatalf("ParseRangesDecimal: unexpected error: %v", err) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("ParseRangesDecimal (-want, +got):\n%s", diff) + } + } + }) + } +} + +func TestAdd(t *testing.T) { + tests := []struct { + desc string + inVal Number + inAdd uint64 + want Number + }{{ + desc: "add one to integer", + inVal: FromInt(1), + inAdd: 1, + want: FromInt(2), + }, { + desc: "add one to decimal64", + inVal: 
FromFloat(1.0), + inAdd: 1, + want: FromFloat(1.1), + }, { + desc: "negative int becomes positive", + inVal: FromInt(-2), + inAdd: 3, + want: FromInt(1), + }, { + desc: "negative int stays negative", + inVal: FromInt(-3), + inAdd: 1, + want: FromInt(-2), + }, { + desc: "negative decimal becomes positive", + inVal: FromFloat(-2), + inAdd: 35, + want: FromFloat(1.5), + }, { + desc: "negative decimal stays negative", + inVal: FromFloat(-42.22), + inAdd: 4122, + want: FromFloat(-1.0), + }, { + desc: "explicitly set fraction digits", + inVal: Number{Value: 10000, FractionDigits: 5}, + inAdd: 1, + want: Number{Value: 10001, FractionDigits: 5}, + }, { + desc: "explicitly set fraction digits - negative", + inVal: Number{Value: 0, FractionDigits: 3}, + inAdd: 42, + want: FromFloat(0.042), + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + got := tt.inVal.addQuantum(tt.inAdd) + if !cmp.Equal(got, tt.want) { + t.Fatalf("did get expected result, got: %s, want: %s", got.String(), tt.want.String()) + } + }) + } +} + +func TestParseInt(t *testing.T) { + tests := []struct { + desc string + inStr string + want Number + wantErrSubstring string + }{{ + desc: "invalid string supplied", + inStr: "fish", + wantErrSubstring: "valid syntax", + }, { + desc: "negative int", + inStr: "-42", + want: FromInt(-42), + }, { + desc: "positive int", + inStr: "42", + want: FromInt(42), + }, { + desc: "positive int with plus sign", + inStr: "+42", + want: FromInt(42), + }, { + desc: "zero", + inStr: "0", + want: FromInt(0), + }, { + desc: "min", + inStr: "min", + wantErrSubstring: "invalid syntax", + }, { + desc: "max", + inStr: "max", + wantErrSubstring: "invalid syntax", + }, { + desc: "just a sign", + inStr: "-", + wantErrSubstring: "sign with no value", + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + got, err := ParseInt(tt.inStr) + if err != nil { + if diff := errdiff.Substring(err, tt.wantErrSubstring); diff != "" { + t.Fatalf("did not get 
expected error, %s", diff) + } + return + } + + if !cmp.Equal(got, tt.want) { + t.Errorf("did not get expected Number, got: %s, want: %s", got, tt.want) + } + + if got.IsDecimal() { + t.Errorf("Got decimal value instead of int: %v", got) + } + }) + } +} + +func TestParseDecimal(t *testing.T) { + tests := []struct { + desc string + inStr string + inFracDig uint8 + skipFractionDigitsCheck bool + want Number + wantErrSubstring string + }{{ + desc: "too few fractional digits", + inStr: "1.000", + inFracDig: 0, + wantErrSubstring: "invalid number of fraction digits", + }, { + desc: "too many fraction digits", + inStr: "1.000", + inFracDig: 24, + wantErrSubstring: "invalid number of fraction digits", + }, { + desc: "more digits supplied", + inStr: "1.14242", + inFracDig: 2, + wantErrSubstring: "has too much precision", + }, { + desc: "single digit precision", + inStr: "1.1", + inFracDig: 1, + want: Number{Value: 11, FractionDigits: 1}, + }, { + desc: "max precision", + inStr: "0.100000000000000000", + inFracDig: 18, + skipFractionDigitsCheck: true, + want: FromFloat(0.1), + }, { + desc: "max precision but not supplied", + inStr: "0.1", + inFracDig: 4, + skipFractionDigitsCheck: true, + want: FromFloat(0.1), + }, { + desc: "invalid string supplied", + inStr: "fish", + inFracDig: 17, + wantErrSubstring: "not a valid decimal number", + }, { + desc: "negative number", + inStr: "-42.0", + inFracDig: 1, + want: FromFloat(-42), + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + got, err := ParseDecimal(tt.inStr, tt.inFracDig) + if err != nil { + if diff := errdiff.Substring(err, tt.wantErrSubstring); diff != "" { + t.Fatalf("did not get expected error, %s", diff) + } + return + } + + if !cmp.Equal(got, tt.want) { + t.Errorf("did not get expected Number, got: %s, want: %s", got, tt.want) + } + + if !tt.skipFractionDigitsCheck { + if got, want := got.FractionDigits, tt.want.FractionDigits; got != want { + t.Errorf("fractional digits not equal, got: %d, 
want: %d", got, want) + } + } + + if !got.IsDecimal() { + t.Errorf("Got non-decimal value: %v", got) + } + }) + } +} + +func TestNumberString(t *testing.T) { + tests := []struct { + desc string + in Number + want string + }{{ + desc: "min", + in: FromInt(MinInt64), + want: "-9223372036854775808", + }, { + desc: "max", + in: FromInt(MaxInt64), + want: "9223372036854775807", + }, { + desc: "integer", + in: Number{Value: 1}, + want: "1", + }, { + desc: "negative integer", + in: Number{Value: 1, Negative: true}, + want: "-1", + }, { + desc: "decimal, fractional digits = 1", + in: Number{Value: 1, FractionDigits: 1}, + want: "0.1", + }, { + desc: "decimal, fractional digits = 18", + in: Number{Value: 123456789012345678, FractionDigits: 18}, + want: "0.123456789012345678", + }, { + desc: "negative decimal", + in: Number{Value: 100, FractionDigits: 2, Negative: true}, + want: "-1.00", + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if got := tt.in.String(); got != tt.want { + t.Fatalf("did not get expected number, got: %s, want: %s", got, tt.want) + } + }) + } +} + +func TestEnumToJson(t *testing.T) { + tests := []struct { + desc string + in *EnumType + want string + wantErr bool + }{{ + "empty enum to JSON", + &EnumType{ + last: -1, // +1 will start at 0 + min: 0, + max: MaxBitfieldSize - 1, + ToString: map[int64]string{}, + ToInt: map[string]int64{}, + }, + `{}`, + false, + }, { + "2 value enum to JSON", + &EnumType{ + last: 2, + min: 0, + max: MaxBitfieldSize - 1, + ToString: map[int64]string{ + 1: "value1", + 2: "value2", + }, + ToInt: map[string]int64{ + "value1": 1, + "value2": 2, + }, + }, + `{"ToString":{"1":"value1","2":"value2"},"ToInt":{"value1":1,"value2":2}}`, + false, + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + got, err := json.Marshal(tt.in) + if string(got) != tt.want { + t.Errorf("got: %v, want: %v", string(got), tt.want) + } + if (err != nil) != tt.wantErr { + t.Errorf("gotErr: %v, wantErr: %v", 
err, tt.wantErr) + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yang/types_test.go b/src/webui/internal/goyang/pkg/yang/types_test.go new file mode 100644 index 000000000..d66a4f43f --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/types_test.go @@ -0,0 +1,1720 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/openconfig/gnmi/errdiff" +) + +func TestTypeResolve(t *testing.T) { + tests := []struct { + desc string + in *Type + err string + out *YangType + }{{ + desc: "basic int64", + in: &Type{ + Name: "int64", + }, + out: &YangType{ + Name: "int64", + Kind: Yint64, + Range: Int64Range, + }, + }, { + desc: "basic int64 with a range", + in: &Type{ + Name: "int64", + Range: &Range{Name: "-42..42"}, + }, + out: &YangType{ + Name: "int64", + Kind: Yint64, + Range: YangRange{{Min: FromInt(-42), Max: FromInt(42)}}, + }, + }, { + desc: "basic uint64 with an invalid range", + in: &Type{ + Name: "uint64", + Range: &Range{Name: "-42..42"}, + }, + err: "unknown: bad range: -42..42 not within 0..18446744073709551615", + }, { + desc: "basic uint64 with an unparseable range", + in: &Type{ + Name: "uint64", + Range: &Range{Name: "-42..forty-two"}, + }, + err: `unknown: bad range: strconv.ParseUint: parsing "forty-two": invalid syntax`, + }, { + desc: "basic string with a length", + in: 
&Type{ + Name: "string", + Length: &Length{Name: "24..42"}, + }, + out: &YangType{ + Name: "string", + Kind: Ystring, + Length: YangRange{{Min: FromInt(24), Max: FromInt(42)}}, + }, + }, { + desc: "basic string with an invalid range", + in: &Type{ + Name: "string", + Length: &Length{Name: "-42..42"}, + }, + err: `unknown: bad length: -42..42 not within 0..18446744073709551615`, + }, { + desc: "basic binary with a length", + in: &Type{ + Name: "binary", + Length: &Length{Name: "24..42"}, + }, + out: &YangType{ + Name: "binary", + Kind: Ybinary, + Length: YangRange{{Min: FromInt(24), Max: FromInt(42)}}, + }, + }, { + desc: "basic binary with an unparseable range", + in: &Type{ + Name: "binary", + Length: &Length{Name: "42..forty-two"}, + }, + err: `unknown: bad length: strconv.ParseUint: parsing "forty-two": invalid syntax`, + }, { + desc: "invalid fraction-digits argument for boolean value", + in: &Type{ + Name: "boolean", + FractionDigits: &Value{Name: "42"}, + }, + err: "unknown: fraction-digits only allowed for decimal64 values", + }, { + desc: "required field fraction-digits not supplied for decimal64", + in: &Type{ + Name: "decimal64", + }, + err: "unknown: value is required in the range of [1..18]", + }, { + desc: "invalid identityref that doesn't have a base identity name", + in: &Type{ + Name: "identityref", + }, + err: "unknown: an identityref must specify a base", + }, { + desc: "invalid decimal64 having an invalid fraction-digits value", + in: &Type{ + Name: "decimal64", + FractionDigits: &Value{Name: "42"}, + }, + err: "unknown: value 42 out of range [1..18]", + }, { + desc: "decimal64", + in: &Type{ + Name: "decimal64", + FractionDigits: &Value{Name: "7"}, + }, + out: &YangType{ + Name: "decimal64", + Kind: Ydecimal64, + FractionDigits: 7, + Range: YangRange{Rf(MinInt64, MaxInt64, 7)}, + }, + }, { + desc: "instance-identifier with unspecified require-instance value (default true)", + in: &Type{ + Name: "instance-identifier", + RequireInstance: nil, + }, 
+ out: &YangType{ + Name: "instance-identifier", + Kind: YinstanceIdentifier, + // https://tools.ietf.org/html/rfc7950#section-9.9.3 + // require-instance defaults to true. + OptionalInstance: false, + }, + }, { + desc: "instance-identifier with true require-instance value", + in: &Type{ + Name: "instance-identifier", + RequireInstance: &Value{Name: "true"}, + }, + out: &YangType{ + Name: "instance-identifier", + Kind: YinstanceIdentifier, + OptionalInstance: false, + }, + }, { + desc: "instance-identifier with false require-instance value", + in: &Type{ + Name: "instance-identifier", + RequireInstance: &Value{Name: "false"}, + }, + out: &YangType{ + Name: "instance-identifier", + Kind: YinstanceIdentifier, + OptionalInstance: true, + }, + }, { + desc: "instance-identifier with invalid require-instance value", + in: &Type{ + Name: "instance-identifier", + RequireInstance: &Value{Name: "foo"}, + }, + err: "invalid boolean: foo", + }, { + desc: "enum with unspecified values", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY"}, + {Name: "VENUS"}, + {Name: "EARTH"}, + }, + }, + out: &YangType{ + Name: "enumeration", + Kind: Yenum, + Enum: &EnumType{ + last: 2, + min: MinEnum, + max: MaxEnum, + unique: true, + ToString: map[int64]string{ + 0: "MERCURY", + 1: "VENUS", + 2: "EARTH", + }, + ToInt: map[string]int64{ + "MERCURY": 0, + "VENUS": 1, + "EARTH": 2, + }, + }, + }, + }, { + desc: "enum with specified values", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "-1"}}, + {Name: "VENUS", Value: &Value{Name: "10"}}, + {Name: "EARTH", Value: &Value{Name: "30"}}, + }, + }, + out: &YangType{ + Name: "enumeration", + Kind: Yenum, + Enum: &EnumType{ + last: 30, + min: MinEnum, + max: MaxEnum, + unique: true, + ToString: map[int64]string{ + -1: "MERCURY", + 10: "VENUS", + 30: "EARTH", + }, + ToInt: map[string]int64{ + "MERCURY": -1, + "VENUS": 10, + "EARTH": 30, + }, + }, + }, + }, { + desc: "enum with some 
values specified", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "-1"}}, + {Name: "VENUS", Value: &Value{Name: "10"}}, + {Name: "EARTH"}, + }, + }, + out: &YangType{ + Name: "enumeration", + Kind: Yenum, + Enum: &EnumType{ + last: 11, + min: MinEnum, + max: MaxEnum, + unique: true, + ToString: map[int64]string{ + -1: "MERCURY", + 10: "VENUS", + 11: "EARTH", + }, + ToInt: map[string]int64{ + "MERCURY": -1, + "VENUS": 10, + "EARTH": 11, + }, + }, + }, + }, { + desc: "enum with repeated specified values", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "1"}}, + {Name: "VENUS", Value: &Value{Name: "10"}}, + {Name: "EARTH", Value: &Value{Name: "1"}}, + }, + }, + err: "unknown: fields EARTH and MERCURY conflict on value 1", + }, { + desc: "enum with repeated specified names", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "-1"}}, + {Name: "VENUS", Value: &Value{Name: "10"}}, + {Name: "MERCURY", Value: &Value{Name: "30"}}, + }, + }, + err: "unknown: field MERCURY already assigned", + }, { + desc: "enum with last specified value equal to the max enum value", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "-2147483648"}}, + {Name: "VENUS", Value: &Value{Name: "2147483647"}}, + {Name: "EARTH"}, + }, + }, + err: `unknown: enum "EARTH" must specify a value since previous enum is the maximum value allowed`, + }, { + desc: "enum value too small", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "-2147483649"}}, + {Name: "VENUS", Value: &Value{Name: "0"}}, + {Name: "EARTH"}, + }, + }, + err: `unknown: value -2147483649 for MERCURY too small (minimum is -2147483648)`, + }, { + desc: "enum value too large", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "-2147483648"}}, + {Name: "VENUS", Value: &Value{Name: 
"2147483648"}}, + {Name: "EARTH"}, + }, + }, + err: `unknown: value 2147483648 for VENUS too large (maximum is 2147483647)`, + }, { + desc: "enum with an unparseable value", + in: &Type{ + Name: "enumeration", + Enum: []*Enum{ + {Name: "MERCURY", Value: &Value{Name: "-1"}}, + {Name: "VENUS", Value: &Value{Name: "10"}}, + {Name: "EARTH", Value: &Value{Name: "five"}}, + }, + }, + err: `unknown: strconv.ParseUint: parsing "five": invalid syntax`, + // TODO(borman): Add in more tests as we honor more fields + // in Type. + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + // We can initialize a value to ourself, so to it here. + errs := tt.in.resolve(newTypeDictionary()) + + // TODO(borman): Do not hack out Root and Base. These + // are hacked out for now because they can be self-referential, + // making construction of them difficult. + tt.in.YangType.Root = nil + tt.in.YangType.Base = nil + + switch { + case tt.err == "" && len(errs) > 0: + t.Fatalf("unexpected errors: %v", errs) + case tt.err != "" && len(errs) == 0: + t.Fatalf("did not get expected errors: %v", tt.err) + case len(errs) > 1: + t.Fatalf("too many errors: %v", errs) + case len(errs) == 1 && errs[0].Error() != tt.err: + t.Fatalf("got error %v, want %s", errs[0], tt.err) + case len(errs) != 0: + return + } + + if diff := cmp.Diff(tt.in.YangType, tt.out); diff != "" { + t.Errorf("YangType (-got, +want):\n%s", diff) + } + }) + } +} + +func TestTypedefResolve(t *testing.T) { + tests := []struct { + desc string + in *Typedef + err string + out *YangType + }{{ + desc: "basic int64", + in: &Typedef{ + Name: "time", + Parent: baseTypes["int64"].typedef(), + Default: &Value{Name: "42"}, + Type: &Type{ + Name: "int64", + }, + Units: &Value{Name: "nanoseconds"}, + }, + out: &YangType{ + Name: "time", + Kind: Yint64, + Base: &Type{ + Name: "int64", + }, + Units: "nanoseconds", + Default: "42", + HasDefault: true, + Range: Int64Range, + }, + }, { + desc: "uint32 with more specific range", + 
in: &Typedef{ + Name: "another-counter", + Parent: &Typedef{ + Name: "counter", + Parent: baseTypes["uint32"].typedef(), + Type: &Type{ + Name: "uint32", + Range: &Range{Name: "0..42"}, + }, + }, + Type: &Type{ + Name: "uint32", + Range: &Range{Name: "10..20"}, + }, + }, + out: &YangType{ + Name: "another-counter", + Kind: Yuint32, + Base: &Type{ + Name: "uint32", + }, + Range: YangRange{{Min: FromInt(10), Max: FromInt(20)}}, + }, + // TODO(wenovus): Add tests on range and length inheritance once those are fixed. + }} + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + // We can initialize a value to ourself, so to it here. + errs := tt.in.resolve(newTypeDictionary()) + + switch { + case tt.err == "" && len(errs) > 0: + t.Fatalf("unexpected errors: %v", errs) + case tt.err != "" && len(errs) == 0: + t.Fatalf("did not get expected errors: %v", tt.err) + case len(errs) > 1: + t.Fatalf("too many errors: %v", errs) + case len(errs) == 1 && errs[0].Error() != tt.err: + t.Fatalf("got error %v, want %s", errs[0], tt.err) + case len(errs) != 0: + return + } + + if diff := cmp.Diff(tt.in.YangType, tt.out); diff != "" { + t.Errorf("YangType (-got, +want):\n%s", diff) + } + }) + } +} + +func TestTypeResolveUnions(t *testing.T) { + tests := []struct { + desc string + leafNode string + wantType *testEnumTypeStruct + wantErrSubstr string + }{{ + desc: "simple union", + leafNode: ` + typedef alpha { + type union { + type string; + type uint32; + type enumeration { + enum zero; + enum one; + enum seven { + value 7; + } + } + } + } + + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testEnumTypeStruct{ + Name: "alpha", + Type: []*testEnumTypeStruct{{ + Name: "string", + }, { + Name: "uint32", + }, { + Name: "enumeration", + ToInt: map[string]int64{"one": 1, "seven": 7, "zero": 0}, + }}, + }, + }, { + desc: "union with typedef", + leafNode: ` + typedef alpha { + type union { + type string; + type uint32; + type enumeration { + enum zero; + enum 
one; + enum seven { + value 7; + } + } + type bravo; + } + } + + typedef bravo { + type union { + type uint8; + type uint16; + type enumeration { + enum two { + value 2; + } + enum three; + enum four; + } + } + } + + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testEnumTypeStruct{ + Name: "alpha", + Type: []*testEnumTypeStruct{{ + Name: "string", + }, { + Name: "uint32", + }, { + Name: "enumeration", + ToInt: map[string]int64{"one": 1, "seven": 7, "zero": 0}, + }, { + Name: "bravo", + Type: []*testEnumTypeStruct{{ + Name: "uint8", + }, { + Name: "uint16", + }, { + Name: "enumeration", + ToInt: map[string]int64{"two": 2, "three": 3, "four": 4}, + }}, + }}, + }, + }, { + desc: "nested unions with typedef", + leafNode: ` + typedef alpha { + type union { + type union { + type uint32; + type string; + type enumeration { + enum zero; + enum one; + enum seven { + value 7; + } + } + } + type bravo; + } + } + + typedef bravo { + type union { + type uint8; + type uint16; + type enumeration { + enum two { + value 2; + } + enum three; + enum four; + } + } + } + + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testEnumTypeStruct{ + Name: "alpha", + Type: []*testEnumTypeStruct{{ + Name: "union", + Type: []*testEnumTypeStruct{{ + Name: "uint32", + }, { + Name: "string", + }, { + Name: "enumeration", + ToInt: map[string]int64{"one": 1, "seven": 7, "zero": 0}, + }}, + }, { + Name: "bravo", + Type: []*testEnumTypeStruct{{ + Name: "uint8", + }, { + Name: "uint16", + }, { + Name: "enumeration", + ToInt: map[string]int64{"two": 2, "three": 3, "four": 4}, + }}, + }}, + }, + }, { + desc: "simple union with multiple enumerations", + leafNode: ` + leaf test-leaf { + type union { + type string; + type uint32; + type enumeration { + enum zero; + enum one; + enum seven { + value 7; + } + } + type enumeration { + enum two { + value 2; + } + enum three; + enum four; + } + } + } + } // end module`, + wantType: &testEnumTypeStruct{ + Name: "union", + 
Type: []*testEnumTypeStruct{{ + Name: "string", + }, { + Name: "uint32", + }, { + Name: "enumeration", + ToInt: map[string]int64{"one": 1, "seven": 7, "zero": 0}, + }, { + Name: "enumeration", + ToInt: map[string]int64{"two": 2, "three": 3, "four": 4}, + }}, + }, + }, { + desc: "typedef union with multiple enumerations", + leafNode: ` + typedef alpha { + type union { + type string; + type uint32; + type enumeration { + enum zero; + enum one; + enum seven { + value 7; + } + } + type enumeration { + enum two { + value 2; + } + enum three; + enum four; + } + } + } + + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testEnumTypeStruct{ + Name: "alpha", + Type: []*testEnumTypeStruct{{ + Name: "string", + }, { + Name: "uint32", + }, { + Name: "enumeration", + ToInt: map[string]int64{"one": 1, "seven": 7, "zero": 0}, + }, { + Name: "enumeration", + ToInt: map[string]int64{"two": 2, "three": 3, "four": 4}, + }}, + }, + }, { + desc: "simple union containing typedef union, both with enumerations", + leafNode: ` + typedef alpha { + type union { + type string; + type uint32; + type enumeration { + enum zero; + enum one; + enum seven { + value 7; + } + } + } + } + + leaf test-leaf { + type union { + type alpha; + type enumeration { + enum two { + value 2; + } + enum three; + enum four; + } + } + } + } // end module`, + wantType: &testEnumTypeStruct{ + Name: "union", + Type: []*testEnumTypeStruct{{ + Name: "alpha", + Type: []*testEnumTypeStruct{{ + Name: "string", + }, { + Name: "uint32", + }, { + Name: "enumeration", + ToInt: map[string]int64{"one": 1, "seven": 7, "zero": 0}, + }}, + }, { + Name: "enumeration", + ToInt: map[string]int64{"two": 2, "three": 3, "four": 4}, + }}, + }, + }, { + desc: "simple union containing typedef union containing another typedef union, all with multiple simple and typedef enumerations", + leafNode: ` + typedef a { + type enumeration { + enum un { + value 1; + } + enum deux; + } + } + + typedef b { + type enumeration { + enum 
trois { + value 3; + } + enum quatre; + } + } + + typedef c { + type enumeration { + enum cinq { + value 5; + } + enum sept { + value 7; + } + } + } + + typedef d { + type enumeration { + enum huit { + value 8; + } + enum neuf; + } + } + + typedef e { + type enumeration { + enum dix { + value 10; + } + enum onze; + } + } + + typedef f { + type enumeration { + enum douze { + value 12; + } + enum treize; + } + } + + typedef bravo { + type union { + type uint32; + type enumeration { + enum eight { + value 8; + } + enum nine; + } + type enumeration { + enum ten { + value 10; + } + enum eleven; + } + type e; + type f; + } + } + + typedef alpha { + type union { + type uint16; + type enumeration { + enum four { + value 4; + } + enum five; + } + type enumeration { + enum six { + value 6; + } + enum seven; + } + type c; + type d; + type bravo; + } + } + + leaf test-leaf { + type union { + type uint8; + type enumeration { + enum zero; + enum one; + } + type enumeration { + enum two { + value 2; + } + enum three; + } + type a; + type b; + type alpha; + } + } + } // end module`, + wantType: &testEnumTypeStruct{ + Name: "union", + Type: []*testEnumTypeStruct{{ + Name: "uint8", + }, { + Name: "enumeration", + ToInt: map[string]int64{"zero": 0, "one": 1}, + }, { + Name: "enumeration", + ToInt: map[string]int64{"two": 2, "three": 3}, + }, { + Name: "a", + ToInt: map[string]int64{"un": 1, "deux": 2}, + }, { + Name: "b", + ToInt: map[string]int64{"trois": 3, "quatre": 4}, + }, { + Name: "alpha", + Type: []*testEnumTypeStruct{{ + Name: "uint16", + }, { + Name: "enumeration", + ToInt: map[string]int64{"four": 4, "five": 5}, + }, { + Name: "enumeration", + ToInt: map[string]int64{"six": 6, "seven": 7}, + }, { + Name: "c", + ToInt: map[string]int64{"cinq": 5, "sept": 7}, + }, { + Name: "d", + ToInt: map[string]int64{"huit": 8, "neuf": 9}, + }, { + Name: "bravo", + Type: []*testEnumTypeStruct{{ + Name: "uint32", + }, { + Name: "enumeration", + ToInt: map[string]int64{"eight": 8, "nine": 
9}, + }, { + Name: "enumeration", + ToInt: map[string]int64{"ten": 10, "eleven": 11}, + }, { + Name: "e", + ToInt: map[string]int64{"dix": 10, "onze": 11}, + }, { + Name: "f", + ToInt: map[string]int64{"douze": 12, "treize": 13}, + }}, + }}, + }}, + }, + }} + + getTestLeaf := func(ms *Modules) (*YangType, error) { + const module = "test" + m, ok := ms.Modules[module] + if !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + if len(m.Leaf) == 0 { + return nil, fmt.Errorf("node %v is missing imports", m) + } + e := ToEntry(m) + return e.Dir["test-leaf"].Type, nil + } + + for _, tt := range tests { + inModules := map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + ` + tt.leafNode, + } + + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + for n, m := range inModules { + if err := ms.Parse(m, n); err != nil { + t.Fatalf("error parsing module %s, got: %v, want: nil", n, err) + } + } + errs := ms.Process() + var err error + if len(errs) > 1 { + t.Fatalf("Got more than 1 error: %v", errs) + } else if len(errs) == 1 { + err = errs[0] + } + if diff := errdiff.Substring(err, tt.wantErrSubstr); diff != "" { + t.Errorf("Did not get expected error: %s", diff) + } + if err != nil { + return + } + + gotType, err := getTestLeaf(ms) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(filterTypeNames(gotType), tt.wantType); diff != "" { + t.Errorf("Type.resolve() union types test (-got, +want):\n%s", diff) + } + }) + } +} + +type testEnumTypeStruct struct { + Name string + // ToInt is the ToInt map representing the enum value (if present). + ToInt map[string]int64 + Type []*testEnumTypeStruct +} + +// filterTypeNames returns a testEnumTypeStruct with only the +// YangType.Name fields of the given type, preserving +// the recursive structure of the type, to work around cmp not +// having an allowlist way of specifying which fields to +// compare and YangType having a custom Equal function. 
+func filterTypeNames(ytype *YangType) *testEnumTypeStruct { + filteredNames := &testEnumTypeStruct{Name: ytype.Name} + if ytype.Enum != nil { + filteredNames.ToInt = ytype.Enum.ToInt + } + for _, subtype := range ytype.Type { + filteredNames.Type = append(filteredNames.Type, filterTypeNames(subtype)) + } + return filteredNames +} + +func TestPattern(t *testing.T) { + tests := []struct { + desc string + leafNode string + wantType *YangType + wantErrSubstr string + }{{ + desc: "Only normal patterns", + leafNode: ` + leaf test-leaf { + type string { + o:bar 'coo'; + o:bar 'foo'; + pattern 'charlie'; + o:bar 'goo'; + } + } + } // end module`, + wantType: &YangType{ + Pattern: []string{"charlie"}, + }, + }, { + desc: "Only posix patterns", + leafNode: ` + leaf test-leaf { + type string { + o:bar 'coo'; + o:posix-pattern 'bravo'; + o:bar 'foo'; + o:posix-pattern 'charlie'; + o:bar 'goo'; + } + } + } // end module`, + wantType: &YangType{ + POSIXPattern: []string{"bravo", "charlie"}, + }, + }, { + desc: "No patterns", + leafNode: ` + leaf test-leaf { + type string; + } + }`, + wantType: &YangType{ + Pattern: nil, + POSIXPattern: nil, + }, + }, { + desc: "Both patterns", + leafNode: ` + leaf test-leaf { + type string { + pattern 'alpha'; + o:posix-pattern 'bravo'; + o:posix-pattern 'charlie'; + o:bar 'coo'; + o:posix-pattern 'delta'; + } + } + } // end module`, + wantType: &YangType{ + Pattern: []string{"alpha"}, + POSIXPattern: []string{"bravo", "charlie", "delta"}, + }, + }, { + desc: "Both patterns, but with non-openconfig-extensions pretenders", + leafNode: ` + leaf test-leaf { + type string { + pattern 'alpha'; + o:bar 'coo'; + o:posix-pattern 'delta'; + + n:posix-pattern 'golf'; + + pattern 'bravo'; + o:bar 'foo'; + o:posix-pattern 'echo'; + + pattern 'charlie'; + o:bar 'goo'; + o:posix-pattern 'foxtrot'; + + n:posix-pattern 'hotel'; + } + } + } // end module`, + wantType: &YangType{ + Pattern: []string{"alpha", "bravo", "charlie"}, + POSIXPattern: []string{"delta", 
"echo", "foxtrot"}, + }, + }, { + desc: "Union type", + leafNode: ` + leaf test-leaf { + type union { + type string { + pattern 'alpha'; + o:bar 'coo'; + o:posix-pattern 'delta'; + + pattern 'bravo'; + o:bar 'foo'; + o:posix-pattern 'echo'; + n:posix-pattern 'echo2'; + + pattern 'charlie'; + o:bar 'goo'; + o:posix-pattern 'foxtrot'; + } + type uint64; + } + } + } // end module`, + wantType: &YangType{ + Type: []*YangType{{ + Pattern: []string{"alpha", "bravo", "charlie"}, + POSIXPattern: []string{"delta", "echo", "foxtrot"}, + }, { + Pattern: nil, + POSIXPattern: nil, + }}, + }, + }, { + desc: "Union type -- de-duping string types", + leafNode: ` + leaf test-leaf { + type union { + type string { + pattern 'alpha'; + o:posix-pattern 'alpha'; + } + type string { + pattern 'alpha'; + o:posix-pattern 'alpha'; + } + } + } + } // end module`, + wantType: &YangType{ + Type: []*YangType{{ + Pattern: []string{"alpha"}, + POSIXPattern: []string{"alpha"}, + }}, + }, + }, { + desc: "Union type -- different string types due to different patterns", + leafNode: ` + leaf test-leaf { + type union { + type string { + pattern 'alpha'; + } + type string { + pattern 'bravo'; + } + } + } + } // end module`, + wantType: &YangType{ + Type: []*YangType{{ + Pattern: []string{"alpha"}, + }, { + Pattern: []string{"bravo"}, + }}, + }, + }, { + desc: "Union type -- different string types due to different posix-patterns", + leafNode: ` + leaf test-leaf { + type union { + type string { + o:posix-pattern 'alpha'; + } + type string { + o:posix-pattern 'bravo'; + } + } + } + } // end module`, + wantType: &YangType{ + Type: []*YangType{{ + POSIXPattern: []string{"alpha"}, + }, { + POSIXPattern: []string{"bravo"}, + }}, + }, + }, { + desc: "typedef", + leafNode: ` + leaf test-leaf { + type leaf-type; + } + + typedef leaf-type { + type string { + pattern 'alpha'; + o:bar 'coo'; + o:posix-pattern 'delta'; + + pattern 'bravo'; + o:bar 'foo'; + o:posix-pattern 'echo'; + + pattern 'charlie'; + o:bar 'goo'; 
+ o:posix-pattern 'foxtrot'; + } + } + } // end module`, + wantType: &YangType{ + Pattern: []string{"alpha", "bravo", "charlie"}, + POSIXPattern: []string{"delta", "echo", "foxtrot"}, + }, + }, { + desc: "invalid POSIX pattern", + leafNode: ` + leaf test-leaf { + type leaf-type; + } + + typedef leaf-type { + type string { + o:posix-pattern '?'; + } + } + } // end module`, + wantErrSubstr: "bad pattern", + }} + + getTestLeaf := func(ms *Modules) (*YangType, error) { + const module = "test" + m, ok := ms.Modules[module] + if !ok { + return nil, fmt.Errorf("can't find module %q", module) + } + + if len(m.Leaf) == 0 { + return nil, fmt.Errorf("node %v is missing imports", m) + } + e := ToEntry(m) + return e.Dir["test-leaf"].Type, nil + } + + for _, tt := range tests { + inModules := map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + + import non-openconfig-extensions { + prefix "n"; + description "non-openconfig-extensions module"; + } + import openconfig-extensions { + prefix "o"; + description "openconfig-extensions module"; + }` + tt.leafNode, + "openconfig-extensions": ` + module openconfig-extensions { + prefix "o"; + namespace "urn:o"; + + extension bar { + argument "baz"; + } + + extension posix-pattern { + argument "pattern"; + } + } + `, + "non-openconfig-extensions": ` + module non-openconfig-extensions { + prefix "n"; + namespace "urn:n"; + + extension bar { + argument "baz"; + } + + extension posix-pattern { + argument "pattern"; + } + } + `, + } + + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + for n, m := range inModules { + if err := ms.Parse(m, n); err != nil { + t.Fatalf("error parsing module %s, got: %v, want: nil", n, err) + } + } + errs := ms.Process() + var err error + if len(errs) > 1 { + t.Fatalf("Got more than 1 error: %v", errs) + } else if len(errs) == 1 { + err = errs[0] + } + if diff := errdiff.Substring(err, tt.wantErrSubstr); diff != "" { + t.Errorf("Did not get expected error: %s", diff) + } + 
if err != nil { + return + } + + yangType, err := getTestLeaf(ms) + if err != nil { + t.Fatal(err) + } + + gotType := &YangType{} + populatePatterns(yangType, gotType) + if diff := cmp.Diff(gotType, tt.wantType, cmpopts.EquateEmpty()); diff != "" { + t.Errorf("Type.resolve() pattern test (-got, +want):\n%s", diff) + } + }) + } +} + +// populatePatterns populates targetType with only the +// Pattern/POSIXPattern fields of the given type, preserving +// the recursive structure of the type, to work around cmp not +// having an allowlist way of specifying which fields to +// compare. +func populatePatterns(ytype *YangType, targetType *YangType) { + targetType.Pattern = ytype.Pattern + targetType.POSIXPattern = ytype.POSIXPattern + for _, subtype := range ytype.Type { + targetSubtype := &YangType{} + targetType.Type = append(targetType.Type, targetSubtype) + populatePatterns(subtype, targetSubtype) + } +} + +func TestTypeLengthRange(t *testing.T) { + tests := []struct { + desc string + leafNode string + wantType *testRangeTypeStruct + wantErrSubstr string + }{{ + desc: "simple uint32", + leafNode: ` + typedef alpha { + type uint32 { + range "1..4 | 10..20"; + } + } + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "alpha", + Range: YangRange{R(1, 4), R(10, 20)}, + }, + }, { + desc: "inherited uint32", + leafNode: ` + typedef alpha { + type uint32 { + range "1..4 | 10..20"; + } + } + typedef bravo { + type alpha { + range "min..3 | 12..max"; + } + } + leaf test-leaf { + type bravo; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "bravo", + Range: YangRange{R(1, 3), R(12, 20)}, + }, + }, { + desc: "inherited uint32 range violation", + leafNode: ` + typedef alpha { + type uint32 { + range "1..4 | 10..20"; + } + } + typedef bravo { + type alpha { + range "min..max"; + } + } + leaf test-leaf { + type bravo; + } + } // end module`, + wantErrSubstr: "not within", + }, { + desc: "unrestricted decimal64", + 
leafNode: ` + typedef alpha { + type decimal64 { + fraction-digits 2; + } + } + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "alpha", + Range: YangRange{Rf(MinInt64, MaxInt64, 2)}, + }, + }, { + desc: "simple restricted decimal64", + leafNode: ` + typedef alpha { + type decimal64 { + fraction-digits 2; + range "1 .. 3.14 | 10 | 20..max"; + } + } + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "alpha", + Range: YangRange{Rf(100, 314, 2), Rf(1000, 1000, 2), Rf(2000, MaxInt64, 2)}, + }, + }, { + desc: "simple decimal64 with inherited ranges", + leafNode: ` + typedef alpha { + type decimal64 { + fraction-digits 3; + range "1 .. 3.14 | 10 | 20..max"; + } + } + typedef bravo { + type alpha { + range "min .. 2.72 | 42 .. max"; + } + } + leaf test-leaf { + type bravo; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "bravo", + Range: YangRange{Rf(1000, 2720, 3), Rf(42000, MaxInt64, 3)}, + }, + }, { + desc: "triple-inherited decimal64", + leafNode: ` + typedef alpha { + type decimal64 { + fraction-digits 2; + } + } + typedef bravo { + type alpha { + range "1 .. 3.14 | 10 | 20..max"; + } + } + typedef charlie { + type bravo { + range "min .. 2.72 | 42 .. max"; + } + } + leaf test-leaf { + type charlie; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "charlie", + Range: YangRange{Rf(100, 272, 2), Rf(4200, MaxInt64, 2)}, + }, + }, { + desc: "simple decimal64 with inherited ranges", + leafNode: ` + typedef alpha { + type decimal64 { + fraction-digits 2; + range "1 .. 3.14 | 10 | 20..max"; + } + } + typedef bravo { + type alpha { + range "min..max"; + } + } + leaf test-leaf { + type alpha; + } + } // end module`, + wantErrSubstr: "not within", + }, { + desc: "simple decimal64 with too few fractional digits", + leafNode: ` + typedef alpha { + type decimal64 { + fraction-digits 1; + range "1 .. 
3.14 | 10 | 20..max"; + } + } + leaf test-leaf { + type alpha; + } + } // end module`, + wantErrSubstr: "has too much precision", + }, { + desc: "simple decimal64 fractional digit on inherited decimal64 type", + leafNode: ` + typedef alpha { + type decimal64 { + fraction-digits 2; + range "1 .. 3.14 | 10 | 20..max"; + } + } + typedef bravo { + type alpha { + fraction-digits 2; + range "25..max"; + } + } + leaf test-leaf { + type bravo; + } + } // end module`, + wantErrSubstr: "overriding of fraction-digits not allowed", + }, { + desc: "simple string with length", + leafNode: ` + typedef alpha { + type string { + length "1..4 | 10..20 | 30..max"; + } + } + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "alpha", + Length: YangRange{R(1, 4), R(10, 20), YRange{FromInt(30), FromUint(maxUint64)}}, + }, + }, { + desc: "inherited string", + leafNode: ` + typedef alpha { + type string { + length "1..4 | 10..20 | 30..max"; + } + } + typedef bravo { + type alpha { + length "min..3 | 42..max"; + } + } + leaf test-leaf { + type bravo; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "bravo", + Length: YangRange{R(1, 3), YRange{FromInt(42), FromUint(maxUint64)}}, + }, + }, { + desc: "inherited binary", + leafNode: ` + typedef alpha { + type binary { + length "1..4 | 10..20 | 30..max"; + } + } + typedef bravo { + type alpha { + length "min..3 | 42..max"; + } + } + leaf test-leaf { + type bravo; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "bravo", + Length: YangRange{R(1, 3), YRange{FromInt(42), FromUint(maxUint64)}}, + }, + }, { + desc: "inherited string length violation", + leafNode: ` + typedef alpha { + type string { + length "1..4 | 10..20 | 30..max"; + } + } + typedef bravo { + type alpha { + length "min..max"; + } + } + leaf test-leaf { + type bravo; + } + } // end module`, + wantErrSubstr: "not within", + }, { + desc: "simple union", + leafNode: ` + typedef alpha { + type union { + 
type string; + type binary { + length "min..5|999..max"; + } + type int8 { + range "min..-42|42..max"; + } + type enumeration { + enum zero; + enum one; + enum seven { + value 7; + } + } + } + } + leaf test-leaf { + type alpha; + } + } // end module`, + wantType: &testRangeTypeStruct{ + Name: "alpha", + Type: []*testRangeTypeStruct{{ + Name: "string", + }, { + Name: "binary", + Length: YangRange{R(0, 5), YRange{FromInt(999), FromUint(maxUint64)}}, + }, { + Name: "int8", + Range: YangRange{R(minInt8, -42), R(42, maxInt8)}, + }, { + Name: "enumeration", + }}, + }, + }} + + getTestLeaf := func(ms *Modules) (*YangType, error) { + const moduleName = "test" + m, ok := ms.Modules[moduleName] + if !ok { + return nil, fmt.Errorf("module not found: %q", moduleName) + } + if len(m.Leaf) == 0 { + return nil, fmt.Errorf("node %v is missing imports", m) + } + e := ToEntry(m) + return e.Dir["test-leaf"].Type, nil + } + + for _, tt := range tests { + inModules := map[string]string{ + "test": ` + module test { + prefix "t"; + namespace "urn:t"; + ` + tt.leafNode, + } + + t.Run(tt.desc, func(t *testing.T) { + ms := NewModules() + for n, m := range inModules { + if err := ms.Parse(m, n); err != nil { + t.Fatalf("error parsing module %s, got: %v, want: nil", n, err) + } + } + errs := ms.Process() + var err error + if len(errs) > 1 { + t.Fatalf("Got more than 1 error: %v", errs) + } else if len(errs) == 1 { + err = errs[0] + } + if diff := errdiff.Substring(err, tt.wantErrSubstr); diff != "" { + t.Errorf("Did not get expected error: %s", diff) + } + if err != nil { + return + } + + gotType, err := getTestLeaf(ms) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(filterRanges(gotType), tt.wantType); diff != "" { + t.Errorf("Type.resolve() union types test (-got, +want):\n%s", diff) + } + }) + } +} + +// testRangeTypeStruct is a filtered-down version of YangType where only certain +// fields are preserved for targeted testing. 
+type testRangeTypeStruct struct { + Name string + Length YangRange + Range YangRange + Type []*testRangeTypeStruct +} + +// filterRanges returns a testRangeTypeStruct with only the Name, Length, and Range +// fields of the given YangType, preserving the recursive structure of the +// type, to work around cmp not having an allowlist way of specifying which +// fields to compare and YangType having a custom Equal function. +func filterRanges(ytype *YangType) *testRangeTypeStruct { + filteredType := &testRangeTypeStruct{Name: ytype.Name} + filteredType.Length = ytype.Length + filteredType.Range = ytype.Range + for _, subtype := range ytype.Type { + filteredType.Type = append(filteredType.Type, filterRanges(subtype)) + } + return filteredType +} diff --git a/src/webui/internal/goyang/pkg/yang/yang.go b/src/webui/internal/goyang/pkg/yang/yang.go new file mode 100644 index 000000000..fad667e25 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/yang.go @@ -0,0 +1,1101 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import "fmt" + +// This file contains the definitions for all nodes of the yang AST. +// The actual building of the AST is in ast.go + +// Some field names have specific meanings: +// +// Grouping - This field must always be of type []*Grouping +// Typedef - This field must always be of type []*Typedef + +// A Value is just a string that can have extensions. 
+type Value struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:",omitempty"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + Description *Value `yang:"description" json:",omitempty"` + Reference *Value `yang:"reference" json:",omitempty"` +} + +func (Value) Kind() string { return "string" } +func (s *Value) ParentNode() Node { return s.Parent } +func (s *Value) NName() string { return s.Name } +func (s *Value) Statement() *Statement { return s.Source } +func (s *Value) Exts() []*Statement { return s.Extensions } + +// asRangeInt returns the value v as an int64 if it is between the values of +// min and max inclusive. An error is returned if v is out of range or does +// not parse into a number. If v is nil then an error is returned. +func (s *Value) asRangeInt(min, max int64) (int64, error) { + if s == nil { + return 0, fmt.Errorf("value is required in the range of [%d..%d]", min, max) + } + n, err := ParseInt(s.Name) + if err != nil { + return 0, err + } + i, err := n.Int() + if err != nil { + return 0, err + } + if i < min || i > max { + return 0, fmt.Errorf("value %s out of range [%d..%d]", s.Name, min, max) + } + return i, nil +} + +// asBool returns v as a boolean (true or flase) or returns an error if v +// is neither true nor false. If v is nil then false is returned. +func (s *Value) asBool() (bool, error) { + // A missing value is considered false + if s == nil { + return false, nil + } + switch s.Name { + case "true": + return true, nil + case "false": + return false, nil + default: + return false, fmt.Errorf("invalid boolean: %s", s.Name) + } +} + +// asString simply returns the string value of v. If v is nil then an empty +// string is returned. +func (s *Value) asString() string { + if s == nil { + return "" + } + return s.Name +} + +// See http://tools.ietf.org/html/rfc6020#section-7 for a description of the +// following structures. 
The structures are derived from that document. + +// A Module is defined in: http://tools.ietf.org/html/rfc6020#section-7.1 +// +// A SubModule is defined in: http://tools.ietf.org/html/rfc6020#section-7.2 +type Module struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Anyxml []*AnyXML `yang:"anyxml"` + Augment []*Augment `yang:"augment"` + BelongsTo *BelongsTo `yang:"belongs-to,required=submodule,nomerge"` + Choice []*Choice `yang:"choice"` + Contact *Value `yang:"contact,nomerge"` + Container []*Container `yang:"container"` + Description *Value `yang:"description,nomerge"` + Deviation []*Deviation `yang:"deviation"` + Extension []*Extension `yang:"extension"` + Feature []*Feature `yang:"feature"` + Grouping []*Grouping `yang:"grouping"` + Identity []*Identity `yang:"identity"` + Import []*Import `yang:"import"` + Include []*Include `yang:"include"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Namespace *Value `yang:"namespace,required=module,nomerge"` + Notification []*Notification `yang:"notification"` + Organization *Value `yang:"organization,nomerge"` + Prefix *Value `yang:"prefix,required=module,nomerge"` + Reference *Value `yang:"reference,nomerge"` + Revision []*Revision `yang:"revision,nomerge"` + RPC []*RPC `yang:"rpc"` + Typedef []*Typedef `yang:"typedef"` + Uses []*Uses `yang:"uses"` + YangVersion *Value `yang:"yang-version,nomerge"` + + // Modules references the Modules object from which this Module node + // was parsed. 
+ Modules *Modules +} + +func (s *Module) Kind() string { + if s.BelongsTo != nil { + return "submodule" + } + return "module" +} +func (s *Module) ParentNode() Node { return s.Parent } +func (s *Module) NName() string { return s.Name } +func (s *Module) Statement() *Statement { return s.Source } +func (s *Module) Exts() []*Statement { return s.Extensions } +func (s *Module) Groupings() []*Grouping { return s.Grouping } +func (s *Module) Typedefs() []*Typedef { return s.Typedef } +func (s *Module) Identities() []*Identity { return s.Identity } + +// Current returns the most recent revision of this module, or "" if the module +// has no revisions. +func (s *Module) Current() string { + var rev string + for _, r := range s.Revision { + if r.Name > rev { + rev = r.Name + } + } + return rev +} + +// FullName returns the full name of the module including the most recent +// revision, if any. +func (s *Module) FullName() string { + if rev := s.Current(); rev != "" { + return s.Name + "@" + rev + } + return s.Name +} + +// GetPrefix returns the proper prefix of m. Useful when looking up types +// in modules found by FindModuleByPrefix. +func (s *Module) GetPrefix() string { + pfx := s.getPrefix() + if pfx == nil { + // This case can be true during testing. + return "" + } + return pfx.Name +} + +// getPrefix returns the local prefix of the module used to refer to itself. 
+func (s *Module) getPrefix() *Value { + switch { + case s == nil: + return nil + case s.Kind() == "module" && s.Prefix != nil: + return s.Prefix + case s.Kind() == "submodule" && s.BelongsTo != nil: + return s.BelongsTo.Prefix + default: + return nil + } +} + +// An Import is defined in: http://tools.ietf.org/html/rfc6020#section-7.1.5 +type Import struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext"` + + Prefix *Value `yang:"prefix,required"` + RevisionDate *Value `yang:"revision-date"` + Reference *Value `yang:"reference,nomerge"` + Description *Value `yang:"description,nomerge"` + + // Module is the imported module. The types and groupings are + // available to the importer with the defined prefix. + Module *Module +} + +func (Import) Kind() string { return "import" } +func (s *Import) ParentNode() Node { return s.Parent } +func (s *Import) NName() string { return s.Name } +func (s *Import) Statement() *Statement { return s.Source } +func (s *Import) Exts() []*Statement { return s.Extensions } + +// An Include is defined in: http://tools.ietf.org/html/rfc6020#section-7.1.6 +type Include struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + RevisionDate *Value `yang:"revision-date"` + + // Module is the included module. The types and groupings are + // available to the importer with the defined prefix. 
+ Module *Module +} + +func (Include) Kind() string { return "include" } +func (s *Include) ParentNode() Node { return s.Parent } +func (s *Include) NName() string { return s.Name } +func (s *Include) Statement() *Statement { return s.Source } +func (s *Include) Exts() []*Statement { return s.Extensions } + +// A Revision is defined in: http://tools.ietf.org/html/rfc6020#section-7.1.9 +type Revision struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + Description *Value `yang:"description"` + Reference *Value `yang:"reference"` +} + +func (Revision) Kind() string { return "revision" } +func (s *Revision) ParentNode() Node { return s.Parent } +func (s *Revision) NName() string { return s.Name } +func (s *Revision) Statement() *Statement { return s.Source } +func (s *Revision) Exts() []*Statement { return s.Extensions } + +// A BelongsTo is defined in: http://tools.ietf.org/html/rfc6020#section-7.2.2 +type BelongsTo struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + Prefix *Value `yang:"prefix,required"` +} + +func (BelongsTo) Kind() string { return "belongs-to" } +func (s *BelongsTo) ParentNode() Node { return s.Parent } +func (s *BelongsTo) NName() string { return s.Name } +func (s *BelongsTo) Statement() *Statement { return s.Source } +func (s *BelongsTo) Exts() []*Statement { return s.Extensions } + +// A Typedef is defined in: http://tools.ietf.org/html/rfc6020#section-7.3 +type Typedef struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Default *Value `yang:"default"` + Description *Value `yang:"description"` + Reference *Value 
`yang:"reference"` + Status *Value `yang:"status"` + Type *Type `yang:"type,required"` + Units *Value `yang:"units"` + + YangType *YangType `json:"-"` +} + +func (Typedef) Kind() string { return "typedef" } +func (s *Typedef) ParentNode() Node { return s.Parent } +func (s *Typedef) NName() string { return s.Name } +func (s *Typedef) Statement() *Statement { return s.Source } +func (s *Typedef) Exts() []*Statement { return s.Extensions } + +// A Type is defined in: http://tools.ietf.org/html/rfc6020#section-7.4 +// Note that Name is the name of the type we want, it is what must +// be looked up and resolved. +type Type struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + IdentityBase *Value `yang:"base"` // Name == identityref + Bit []*Bit `yang:"bit"` + Enum []*Enum `yang:"enum"` + FractionDigits *Value `yang:"fraction-digits"` // Name == decimal64 + Length *Length `yang:"length"` + Path *Value `yang:"path"` + Pattern []*Pattern `yang:"pattern"` + Range *Range `yang:"range"` + RequireInstance *Value `yang:"require-instance"` + Type []*Type `yang:"type"` // len > 1 only when Name is "union" + + YangType *YangType +} + +func (Type) Kind() string { return "type" } +func (s *Type) ParentNode() Node { return s.Parent } +func (s *Type) NName() string { return s.Name } +func (s *Type) Statement() *Statement { return s.Source } +func (s *Type) Exts() []*Statement { return s.Extensions } + +// A Container is defined in: http://tools.ietf.org/html/rfc6020#section-7.5 +// and http://tools.ietf.org/html/rfc7950#section-7.5 ("container" sub-statement) +type Container struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Action []*Action `yang:"action"` + Anyxml []*AnyXML `yang:"anyxml"` + Choice []*Choice 
`yang:"choice"` + Config *Value `yang:"config"` + Container []*Container `yang:"container"` + Description *Value `yang:"description"` + Grouping []*Grouping `yang:"grouping"` + IfFeature []*Value `yang:"if-feature"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Must []*Must `yang:"must"` + Notification []*Notification `yang:"notification"` + Presence *Value `yang:"presence"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Typedef []*Typedef `yang:"typedef"` + Uses []*Uses `yang:"uses"` + When *Value `yang:"when"` +} + +func (Container) Kind() string { return "container" } +func (s *Container) ParentNode() Node { return s.Parent } +func (s *Container) NName() string { return s.Name } +func (s *Container) Statement() *Statement { return s.Source } +func (s *Container) Exts() []*Statement { return s.Extensions } +func (s *Container) Groupings() []*Grouping { return s.Grouping } +func (s *Container) Typedefs() []*Typedef { return s.Typedef } + +// A Must is defined in: http://tools.ietf.org/html/rfc6020#section-7.5.3 +type Must struct { + Name string `yang:"Name,nomerge" json:",omitempty"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + Description *Value `yang:"description" json:",omitempty"` + ErrorAppTag *Value `yang:"error-app-tag" json:",omitempty"` + ErrorMessage *Value `yang:"error-message" json:",omitempty"` + Reference *Value `yang:"reference" json:",omitempty"` +} + +func (Must) Kind() string { return "must" } +func (s *Must) ParentNode() Node { return s.Parent } +func (s *Must) NName() string { return s.Name } +func (s *Must) Statement() *Statement { return s.Source } +func (s *Must) Exts() []*Statement { return s.Extensions } + +// A Leaf is defined in: http://tools.ietf.org/html/rfc6020#section-7.6 +type Leaf struct { + Name string `yang:"Name,nomerge"` + Source 
*Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Config *Value `yang:"config"` + Default *Value `yang:"default"` + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + Mandatory *Value `yang:"mandatory"` + Must []*Must `yang:"must"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Type *Type `yang:"type,required"` + Units *Value `yang:"units"` + When *Value `yang:"when"` +} + +func (Leaf) Kind() string { return "leaf" } +func (s *Leaf) ParentNode() Node { return s.Parent } +func (s *Leaf) NName() string { return s.Name } +func (s *Leaf) Statement() *Statement { return s.Source } +func (s *Leaf) Exts() []*Statement { return s.Extensions } + +// A LeafList is defined in: +// YANG 1: http://tools.ietf.org/html/rfc6020#section-7.7 +// YANG 1.1: https://tools.ietf.org/html/rfc7950#section-7.7 +// It this is supposed to be an array of nodes.. +type LeafList struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Config *Value `yang:"config"` + Default []*Value `yang:"default"` + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + MaxElements *Value `yang:"max-elements"` + MinElements *Value `yang:"min-elements"` + Must []*Must `yang:"must"` + OrderedBy *Value `yang:"ordered-by"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Type *Type `yang:"type,required"` + Units *Value `yang:"units"` + When *Value `yang:"when"` +} + +func (LeafList) Kind() string { return "leaf-list" } +func (s *LeafList) ParentNode() Node { return s.Parent } +func (s *LeafList) NName() string { return s.Name } +func (s *LeafList) Statement() *Statement { return s.Source } +func (s *LeafList) Exts() []*Statement { return s.Extensions } + +// A List is defined in: http://tools.ietf.org/html/rfc6020#section-7.8 
+// and http://tools.ietf.org/html/rfc7950#section-7.8 ("list" sub-statement) +type List struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Action []*Action `yang:"action"` + Anyxml []*AnyXML `yang:"anyxml"` + Choice []*Choice `yang:"choice"` + Config *Value `yang:"config"` + Container []*Container `yang:"container"` + Description *Value `yang:"description"` + Grouping []*Grouping `yang:"grouping"` + IfFeature []*Value `yang:"if-feature"` + Key *Value `yang:"key"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + MaxElements *Value `yang:"max-elements"` + MinElements *Value `yang:"min-elements"` + Must []*Must `yang:"must"` + Notification []*Notification `yang:"notification"` + OrderedBy *Value `yang:"ordered-by"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Typedef []*Typedef `yang:"typedef"` + Unique []*Value `yang:"unique"` + Uses []*Uses `yang:"uses"` + When *Value `yang:"when"` +} + +func (List) Kind() string { return "list" } +func (s *List) ParentNode() Node { return s.Parent } +func (s *List) NName() string { return s.Name } +func (s *List) Statement() *Statement { return s.Source } +func (s *List) Exts() []*Statement { return s.Extensions } +func (s *List) Groupings() []*Grouping { return s.Grouping } +func (s *List) Typedefs() []*Typedef { return s.Typedef } + +// A Choice is defined in: http://tools.ietf.org/html/rfc6020#section-7.9 +type Choice struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Anyxml []*AnyXML `yang:"anyxml"` + Case []*Case `yang:"case"` + Config *Value `yang:"config"` + Container []*Container `yang:"container"` + Default *Value 
`yang:"default"` + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Mandatory *Value `yang:"mandatory"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + When *Value `yang:"when"` +} + +func (Choice) Kind() string { return "choice" } +func (s *Choice) ParentNode() Node { return s.Parent } +func (s *Choice) NName() string { return s.Name } +func (s *Choice) Statement() *Statement { return s.Source } +func (s *Choice) Exts() []*Statement { return s.Extensions } + +// A Case is defined in: http://tools.ietf.org/html/rfc6020#section-7.9.2 +type Case struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Anyxml []*AnyXML `yang:"anyxml"` + Choice []*Choice `yang:"choice"` + Container []*Container `yang:"container"` + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Uses []*Uses `yang:"uses"` + When *Value `yang:"when"` +} + +func (Case) Kind() string { return "case" } +func (s *Case) ParentNode() Node { return s.Parent } +func (s *Case) NName() string { return s.Name } +func (s *Case) Statement() *Statement { return s.Source } +func (s *Case) Exts() []*Statement { return s.Extensions } + +// An AnyXML is defined in: http://tools.ietf.org/html/rfc6020#section-7.10 +type AnyXML struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Config *Value `yang:"config"` + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + 
Mandatory *Value `yang:"mandatory"` + Must []*Must `yang:"must"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + When *Value `yang:"when"` +} + +func (AnyXML) Kind() string { return "anyxml" } +func (s *AnyXML) ParentNode() Node { return s.Parent } +func (s *AnyXML) NName() string { return s.Name } +func (s *AnyXML) Statement() *Statement { return s.Source } +func (s *AnyXML) Exts() []*Statement { return s.Extensions } + +// An AnyData is defined in: http://tools.ietf.org/html/rfc7950#section-7.10 +// +// AnyData are only expected in YANG 1.1 modules (those with a +// "yang-version 1.1;" statement in the module). +type AnyData struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Config *Value `yang:"config"` + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + Mandatory *Value `yang:"mandatory"` + Must []*Must `yang:"must"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + When *Value `yang:"when"` +} + +func (AnyData) Kind() string { return "anydata" } +func (s *AnyData) ParentNode() Node { return s.Parent } +func (s *AnyData) NName() string { return s.Name } +func (s *AnyData) Statement() *Statement { return s.Source } +func (s *AnyData) Exts() []*Statement { return s.Extensions } + +// A Grouping is defined in: http://tools.ietf.org/html/rfc6020#section-7.11 +// and http://tools.ietf.org/html/rfc7950#section-7.12 ("grouping" sub-statement) +type Grouping struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Action []*Action `yang:"action"` + Anyxml []*AnyXML `yang:"anyxml"` + Choice []*Choice `yang:"choice"` + Container []*Container `yang:"container"` + Description *Value `yang:"description"` + Grouping 
[]*Grouping `yang:"grouping"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Notification []*Notification `yang:"notification"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Typedef []*Typedef `yang:"typedef"` + Uses []*Uses `yang:"uses"` +} + +func (Grouping) Kind() string { return "grouping" } +func (s *Grouping) ParentNode() Node { return s.Parent } +func (s *Grouping) NName() string { return s.Name } +func (s *Grouping) Statement() *Statement { return s.Source } +func (s *Grouping) Exts() []*Statement { return s.Extensions } +func (s *Grouping) Groupings() []*Grouping { return s.Grouping } +func (s *Grouping) Typedefs() []*Typedef { return s.Typedef } + +// A Uses is defined in: http://tools.ietf.org/html/rfc6020#section-7.12 +type Uses struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:"-"` + + Augment []*Augment `yang:"augment" json:",omitempty"` + Description *Value `yang:"description" json:",omitempty"` + IfFeature []*Value `yang:"if-feature" json:"-"` + Refine []*Refine `yang:"refine" json:"-"` + Reference *Value `yang:"reference" json:"-"` + Status *Value `yang:"status" json:"-"` + When *Value `yang:"when" json:",omitempty"` +} + +func (Uses) Kind() string { return "uses" } +func (s *Uses) ParentNode() Node { return s.Parent } +func (s *Uses) NName() string { return s.Name } +func (s *Uses) Statement() *Statement { return s.Source } +func (s *Uses) Exts() []*Statement { return s.Extensions } + +// A Refine is defined in: http://tools.ietf.org/html/rfc6020#section-7.12.2 +type Refine struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Default *Value `yang:"default"` + Description *Value `yang:"description"` + IfFeature 
[]*Value `yang:"if-feature"` + Reference *Value `yang:"reference"` + Config *Value `yang:"config"` + Mandatory *Value `yang:"mandatory"` + Presence *Value `yang:"presence"` + Must []*Must `yang:"must"` + MaxElements *Value `yang:"max-elements"` + MinElements *Value `yang:"min-elements"` +} + +func (Refine) Kind() string { return "refine" } +func (s *Refine) ParentNode() Node { return s.Parent } +func (s *Refine) NName() string { return s.Name } +func (s *Refine) Statement() *Statement { return s.Source } +func (s *Refine) Exts() []*Statement { return s.Extensions } + +// An RPC is defined in: http://tools.ietf.org/html/rfc6020#section-7.13 +type RPC struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + Grouping []*Grouping `yang:"grouping"` + IfFeature []*Value `yang:"if-feature"` + Input *Input `yang:"input"` + Output *Output `yang:"output"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Typedef []*Typedef `yang:"typedef"` +} + +func (RPC) Kind() string { return "rpc" } +func (s *RPC) ParentNode() Node { return s.Parent } +func (s *RPC) NName() string { return s.Name } +func (s *RPC) Statement() *Statement { return s.Source } +func (s *RPC) Exts() []*Statement { return s.Extensions } +func (s *RPC) Groupings() []*Grouping { return s.Grouping } +func (s *RPC) Typedefs() []*Typedef { return s.Typedef } + +// An Input is defined in: http://tools.ietf.org/html/rfc6020#section-7.13.2 +type Input struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Anyxml []*AnyXML `yang:"anyxml"` + Choice []*Choice `yang:"choice"` + Container []*Container `yang:"container"` + Grouping []*Grouping `yang:"grouping"` + Leaf []*Leaf `yang:"leaf"` + 
LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Must []*Must `yang:"must"` + Typedef []*Typedef `yang:"typedef"` + Uses []*Uses `yang:"uses"` +} + +func (Input) Kind() string { return "input" } +func (s *Input) ParentNode() Node { return s.Parent } +func (s *Input) NName() string { return s.Name } +func (s *Input) Statement() *Statement { return s.Source } +func (s *Input) Exts() []*Statement { return s.Extensions } +func (s *Input) Groupings() []*Grouping { return s.Grouping } +func (s *Input) Typedefs() []*Typedef { return s.Typedef } + +// An Output is defined in: http://tools.ietf.org/html/rfc6020#section-7.13.3 +type Output struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Anyxml []*AnyXML `yang:"anyxml"` + Choice []*Choice `yang:"choice"` + Container []*Container `yang:"container"` + Grouping []*Grouping `yang:"grouping"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Must []*Must `yang:"must"` + Typedef []*Typedef `yang:"typedef"` + Uses []*Uses `yang:"uses"` +} + +func (Output) Kind() string { return "output" } +func (s *Output) ParentNode() Node { return s.Parent } +func (s *Output) NName() string { return s.Name } +func (s *Output) Statement() *Statement { return s.Source } +func (s *Output) Exts() []*Statement { return s.Extensions } +func (s *Output) Groupings() []*Grouping { return s.Grouping } +func (s *Output) Typedefs() []*Typedef { return s.Typedef } + +// A Notification is defined in: http://tools.ietf.org/html/rfc6020#section-7.14 +type Notification struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Anyxml []*AnyXML `yang:"anyxml"` + Choice []*Choice 
`yang:"choice"` + Container []*Container `yang:"container"` + Description *Value `yang:"description"` + Grouping []*Grouping `yang:"grouping"` + IfFeature []*Value `yang:"if-feature"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Typedef []*Typedef `yang:"typedef"` + Uses []*Uses `yang:"uses"` +} + +func (Notification) Kind() string { return "notification" } +func (s *Notification) ParentNode() Node { return s.Parent } +func (s *Notification) NName() string { return s.Name } +func (s *Notification) Statement() *Statement { return s.Source } +func (s *Notification) Exts() []*Statement { return s.Extensions } +func (s *Notification) Groupings() []*Grouping { return s.Grouping } +func (s *Notification) Typedefs() []*Typedef { return s.Typedef } + +// An Augment is defined in: http://tools.ietf.org/html/rfc6020#section-7.15 +// and http://tools.ietf.org/html/rfc7950#section-7.17 ("augment" sub-statement) +type Augment struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Anydata []*AnyData `yang:"anydata"` + Action []*Action `yang:"action"` + Anyxml []*AnyXML `yang:"anyxml"` + Case []*Case `yang:"case"` + Choice []*Choice `yang:"choice"` + Container []*Container `yang:"container"` + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + Leaf []*Leaf `yang:"leaf"` + LeafList []*LeafList `yang:"leaf-list"` + List []*List `yang:"list"` + Notification []*Notification `yang:"notification"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Uses []*Uses `yang:"uses"` + When *Value `yang:"when"` +} + +func (Augment) Kind() string { return "augment" } +func (s *Augment) ParentNode() Node { return s.Parent } +func (s *Augment) NName() string { return s.Name } +func (s *Augment) Statement() 
*Statement { return s.Source } +func (s *Augment) Exts() []*Statement { return s.Extensions } + +// An Identity is defined in: http://tools.ietf.org/html/rfc6020#section-7.16 +type Identity struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:"-"` + + Base []*Value `yang:"base" json:"-"` + Description *Value `yang:"description" json:"-"` + IfFeature []*Value `yang:"if-feature" json:"-"` + Reference *Value `yang:"reference" json:"-"` + Status *Value `yang:"status" json:"-"` + Values []*Identity `json:",omitempty"` +} + +func (Identity) Kind() string { return "identity" } +func (s *Identity) ParentNode() Node { return s.Parent } +func (s *Identity) NName() string { return s.Name } +func (s *Identity) Statement() *Statement { return s.Source } +func (s *Identity) Exts() []*Statement { return s.Extensions } + +// PrefixedName returns the prefix-qualified name for the identity +func (s *Identity) PrefixedName() string { + return fmt.Sprintf("%s:%s", RootNode(s).GetPrefix(), s.Name) +} + +// modulePrefixedName returns the module-qualified name for the identity. 
+func (s *Identity) modulePrefixedName() string { + return fmt.Sprintf("%s:%s", module(s).Name, s.Name) +} + +// IsDefined behaves the same as the implementation for Enum - it returns +// true if an identity with the name is defined within the Values of the +// identity +func (s *Identity) IsDefined(name string) bool { + return s.GetValue(name) != nil +} + +// GetValue returns a pointer to the identity with name "name" that is within +// the values of the identity +func (s *Identity) GetValue(name string) *Identity { + for _, v := range s.Values { + if v.Name == name { + return v + } + } + return nil +} + +// An Extension is defined in: http://tools.ietf.org/html/rfc6020#section-7.17 +type Extension struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + Argument *Argument `yang:"argument" json:",omitempty"` + Description *Value `yang:"description" json:",omitempty"` + Reference *Value `yang:"reference" json:",omitempty"` + Status *Value `yang:"status" json:",omitempty"` +} + +func (Extension) Kind() string { return "extension" } +func (s *Extension) ParentNode() Node { return s.Parent } +func (s *Extension) NName() string { return s.Name } +func (s *Extension) Statement() *Statement { return s.Source } +func (s *Extension) Exts() []*Statement { return s.Extensions } + +// An Argument is defined in: http://tools.ietf.org/html/rfc6020#section-7.17.2 +type Argument struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + YinElement *Value `yang:"yin-element" json:",omitempty"` +} + +func (Argument) Kind() string { return "argument" } +func (s *Argument) ParentNode() Node { return s.Parent } +func (s *Argument) NName() string { return s.Name } +func (s *Argument) 
Statement() *Statement { return s.Source } +func (s *Argument) Exts() []*Statement { return s.Extensions } + +// An Element is defined in: http://tools.ietf.org/html/rfc6020#section-7.17.2.2 +type Element struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + YinElement *Value `yang:"yin-element"` +} + +func (Element) Kind() string { return "element" } +func (s *Element) ParentNode() Node { return s.Parent } +func (s *Element) NName() string { return s.Name } +func (s *Element) Statement() *Statement { return s.Source } +func (s *Element) Exts() []*Statement { return s.Extensions } + +// A Feature is defined in: http://tools.ietf.org/html/rfc6020#section-7.18.1 +type Feature struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge" json:"-"` + Parent Node `yang:"Parent,nomerge" json:"-"` + Extensions []*Statement `yang:"Ext" json:",omitempty"` + + Description *Value `yang:"description" json:",omitempty"` + IfFeature []*Value `yang:"if-feature" json:",omitempty"` + Status *Value `yang:"status" json:",omitempty"` + Reference *Value `yang:"reference" json:",omitempty"` +} + +func (Feature) Kind() string { return "feature" } +func (s *Feature) ParentNode() Node { return s.Parent } +func (s *Feature) NName() string { return s.Name } +func (s *Feature) Statement() *Statement { return s.Source } +func (s *Feature) Exts() []*Statement { return s.Extensions } + +// A Deviation is defined in: http://tools.ietf.org/html/rfc6020#section-7.18.3 +type Deviation struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + Deviate []*Deviate `yang:"deviate,required"` + Reference *Value `yang:"reference"` +} + +func (Deviation) Kind() string { return "deviation" } +func (s 
*Deviation) ParentNode() Node { return s.Parent } +func (s *Deviation) NName() string { return s.Name } +func (s *Deviation) Statement() *Statement { return s.Source } +func (s *Deviation) Exts() []*Statement { return s.Extensions } + +// A Deviate is defined in: http://tools.ietf.org/html/rfc6020#section-7.18.3.2 +type Deviate struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Config *Value `yang:"config"` + Default *Value `yang:"default"` + Mandatory *Value `yang:"mandatory"` + MaxElements *Value `yang:"max-elements"` + MinElements *Value `yang:"min-elements"` + Must []*Must `yang:"must"` + Type *Type `yang:"type"` + Unique []*Value `yang:"unique"` + Units *Value `yang:"units"` +} + +func (Deviate) Kind() string { return "deviate" } +func (s *Deviate) ParentNode() Node { return s.Parent } +func (s *Deviate) NName() string { return s.Name } +func (s *Deviate) Statement() *Statement { return s.Source } +func (s *Deviate) Exts() []*Statement { return s.Extensions } + +// An Enum is defined in: http://tools.ietf.org/html/rfc6020#section-9.6.4 +type Enum struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Value *Value `yang:"value"` +} + +func (Enum) Kind() string { return "enum" } +func (s *Enum) ParentNode() Node { return s.Parent } +func (s *Enum) NName() string { return s.Name } +func (s *Enum) Statement() *Statement { return s.Source } +func (s *Enum) Exts() []*Statement { return s.Extensions } + +// A Bit is defined in: http://tools.ietf.org/html/rfc6020#section-9.7.4 +type Bit struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + 
Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + IfFeature []*Value `yang:"if-feature"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Position *Value `yang:"position"` +} + +func (Bit) Kind() string { return "bit" } +func (s *Bit) ParentNode() Node { return s.Parent } +func (s *Bit) NName() string { return s.Name } +func (s *Bit) Statement() *Statement { return s.Source } +func (s *Bit) Exts() []*Statement { return s.Extensions } + +// A Range is defined in: http://tools.ietf.org/html/rfc6020#section-9.2.4 +type Range struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + ErrorAppTag *Value `yang:"error-app-tag"` + ErrorMessage *Value `yang:"error-message"` + Reference *Value `yang:"reference"` +} + +func (Range) Kind() string { return "range" } +func (s *Range) ParentNode() Node { return s.Parent } +func (s *Range) NName() string { return s.Name } +func (s *Range) Statement() *Statement { return s.Source } +func (s *Range) Exts() []*Statement { return s.Extensions } + +// A Length is defined in: http://tools.ietf.org/html/rfc6020#section-9.4.4 +type Length struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + ErrorAppTag *Value `yang:"error-app-tag"` + ErrorMessage *Value `yang:"error-message"` + Reference *Value `yang:"reference"` +} + +func (Length) Kind() string { return "length" } +func (s *Length) ParentNode() Node { return s.Parent } +func (s *Length) NName() string { return s.Name } +func (s *Length) Statement() *Statement { return s.Source } +func (s *Length) Exts() []*Statement { return s.Extensions } + +// A Pattern is defined in: 
http://tools.ietf.org/html/rfc6020#section-9.4.6 +// and http://tools.ietf.org/html/rfc7950#section-9.4.5.1 ("modifier" sub-statement) +type Pattern struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + ErrorAppTag *Value `yang:"error-app-tag"` + ErrorMessage *Value `yang:"error-message"` + Reference *Value `yang:"reference"` + Modifier *Value `yang:"modifier"` +} + +func (Pattern) Kind() string { return "pattern" } +func (s *Pattern) ParentNode() Node { return s.Parent } +func (s *Pattern) NName() string { return s.Name } +func (s *Pattern) Statement() *Statement { return s.Source } +func (s *Pattern) Exts() []*Statement { return s.Extensions } + +// An Action is defined in http://tools.ietf.org/html/rfc7950#section-7.15 +// +// Action define an RPC operation connected to a specific container or list data +// node in the schema. In the schema tree, Action differ from RPC only in where +// in the tree they are found. RPC nodes are only found as sub-statements of a +// Module, while Action are found only as sub-statements of Container, List, +// Grouping and Augment nodes. 
+type Action struct { + Name string `yang:"Name,nomerge"` + Source *Statement `yang:"Statement,nomerge"` + Parent Node `yang:"Parent,nomerge"` + Extensions []*Statement `yang:"Ext"` + + Description *Value `yang:"description"` + Grouping []*Grouping `yang:"grouping"` + IfFeature []*Value `yang:"if-feature"` + Input *Input `yang:"input"` + Output *Output `yang:"output"` + Reference *Value `yang:"reference"` + Status *Value `yang:"status"` + Typedef []*Typedef `yang:"typedef"` +} + +func (Action) Kind() string { return "action" } +func (s *Action) ParentNode() Node { return s.Parent } +func (s *Action) NName() string { return s.Name } +func (s *Action) Statement() *Statement { return s.Source } +func (s *Action) Exts() []*Statement { return s.Extensions } +func (s *Action) Groupings() []*Grouping { return s.Grouping } +func (s *Action) Typedefs() []*Typedef { return s.Typedef } diff --git a/src/webui/internal/goyang/pkg/yang/yangtype.go b/src/webui/internal/goyang/pkg/yang/yangtype.go new file mode 100644 index 000000000..cc1e33fd9 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/yangtype.go @@ -0,0 +1,330 @@ +// Copyright 2021 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yang + +import ( + "fmt" + + "github.com/google/go-cmp/cmp" +) + +var ( + // TypeKindFromName maps the string name used in a YANG file to the enumerated + // TypeKind used in this library. 
+ TypeKindFromName = map[string]TypeKind{ + "none": Ynone, + "int8": Yint8, + "int16": Yint16, + "int32": Yint32, + "int64": Yint64, + "uint8": Yuint8, + "uint16": Yuint16, + "uint32": Yuint32, + "uint64": Yuint64, + "binary": Ybinary, + "bits": Ybits, + "boolean": Ybool, + "decimal64": Ydecimal64, + "empty": Yempty, + "enumeration": Yenum, + "identityref": Yidentityref, + "instance-identifier": YinstanceIdentifier, + "leafref": Yleafref, + "string": Ystring, + "union": Yunion, + } + + // TypeKindToName maps the enumerated type used in this library to the string + // used in a YANG file. + TypeKindToName = map[TypeKind]string{ + Ynone: "none", + Yint8: "int8", + Yint16: "int16", + Yint32: "int32", + Yint64: "int64", + Yuint8: "uint8", + Yuint16: "uint16", + Yuint32: "uint32", + Yuint64: "uint64", + Ybinary: "binary", + Ybits: "bits", + Ybool: "boolean", + Ydecimal64: "decimal64", + Yempty: "empty", + Yenum: "enumeration", + Yidentityref: "identityref", + YinstanceIdentifier: "instance-identifier", + Yleafref: "leafref", + Ystring: "string", + Yunion: "union", + } + + // BaseTypedefs is a map of all base types to the Typedef structure manufactured + // for the type. 
+ BaseTypedefs = map[string]*Typedef{} + + baseTypes = map[string]*YangType{ + "int8": { + Name: "int8", + Kind: Yint8, + Range: Int8Range, + }, + "int16": { + Name: "int16", + Kind: Yint16, + Range: Int16Range, + }, + "int32": { + Name: "int32", + Kind: Yint32, + Range: Int32Range, + }, + "int64": { + Name: "int64", + Kind: Yint64, + Range: Int64Range, + }, + "uint8": { + Name: "uint8", + Kind: Yuint8, + Range: Uint8Range, + }, + "uint16": { + Name: "uint16", + Kind: Yuint16, + Range: Uint16Range, + }, + "uint32": { + Name: "uint32", + Kind: Yuint32, + Range: Uint32Range, + }, + "uint64": { + Name: "uint64", + Kind: Yuint64, + Range: Uint64Range, + }, + + "decimal64": { + Name: "decimal64", + Kind: Ydecimal64, + }, + "string": { + Name: "string", + Kind: Ystring, + }, + "boolean": { + Name: "boolean", + Kind: Ybool, + }, + "enumeration": { + Name: "enumeration", + Kind: Yenum, + }, + "bits": { + Name: "bits", + Kind: Ybits, + }, + "binary": { + Name: "binary", + Kind: Ybinary, + }, + "leafref": { + Name: "leafref", + Kind: Yleafref, + }, + "identityref": { + Name: "identityref", + Kind: Yidentityref, + }, + "empty": { + Name: "empty", + Kind: Yempty, + }, + "union": { + Name: "union", + Kind: Yunion, + }, + "instance-identifier": { + Name: "instance-identifier", + Kind: YinstanceIdentifier, + }, + } +) + +// Install builtin types as know types +func init() { + for k, v := range baseTypes { + // Base types are always their own root + v.Root = v + BaseTypedefs[k] = v.typedef() + } +} + +// TypeKind is the enumeration of the base types available in YANG. It +// is analogous to reflect.Kind. +type TypeKind uint + +func (k TypeKind) String() string { + if s := TypeKindToName[k]; s != "" { + return s + } + return fmt.Sprintf("unknown-type-%d", k) +} + +const ( + // Ynone represents the invalid (unset) type. + Ynone = TypeKind(iota) + // Yint8 is an int in the range [-128, 127]. + Yint8 + // Yint16 is an int in the range [-32768, 32767]. 
+ Yint16 + // Yint32 is an int in the range [-2147483648, 2147483647]. + Yint32 + // Yint64 is an int in the range [-9223372036854775808, 9223372036854775807] + Yint64 + // Yuint8 is an int in the range [0, 255] + Yuint8 + // Yuint16 is an int in the range [0, 65535] + Yuint16 + // Yuint32 is an int in the range [0, 4294967295] + Yuint32 + // Yuint64 is an int in the range [0, 18446744073709551615] + Yuint64 + + // Ybinary stores arbitrary data. + Ybinary + // Ybits is a named set of bits or flags. + Ybits + // Ybool is true or false. + Ybool + // Ydecimal64 is a signed decimal number. + Ydecimal64 + // Yempty has no associated value. + Yempty + // Yenum stores enumerated strings. + Yenum + // Yidentityref stores an extensible enumeration. + Yidentityref + // YinstanceIdentifier stores a reference to a data tree node. + YinstanceIdentifier + // Yleafref stores a reference to a leaf instance. + Yleafref + // Ystring is a human readable string. + Ystring + // Yunion is a choice of types. + Yunion +) + +// A YangType is the internal representation of a type in YANG. It may +// refer to either a builtin type or type specified with typedef. Not +// all fields in YangType are used for all types. +type YangType struct { + Name string + Kind TypeKind // Ynone if not a base type + Base *Type `json:"-"` // Base type for non-builtin types + IdentityBase *Identity `json:",omitempty"` // Base statement for a type using identityref + Root *YangType `json:"-"` // root of this type that is the same + Bit *EnumType `json:",omitempty"` // bit position, "status" is lost + Enum *EnumType `json:",omitempty"` // enum name to value, "status" is lost + Units string `json:",omitempty"` // units to be used for this type + Default string `json:",omitempty"` // default value, if any + HasDefault bool `json:",omitempty"` // whether the type has a default. 
+ FractionDigits int `json:",omitempty"` // decimal64 fixed point precision + Length YangRange `json:",omitempty"` // this should be processed by section 12 + OptionalInstance bool `json:",omitempty"` // !require-instances which defaults to true + Path string `json:",omitempty"` // the path in a leafref + Pattern []string `json:",omitempty"` // limiting XSD-TYPES expressions on strings + POSIXPattern []string `json:",omitempty"` // limiting POSIX ERE on strings (specified by openconfig-extensions:posix-pattern) + Range YangRange `json:",omitempty"` // range for integers + Type []*YangType `json:",omitempty"` // for unions +} + +// Equal returns true if y and t describe the same type. +func (y *YangType) Equal(t *YangType) bool { + switch { + case y == t: + return true + case y == nil || t == nil: + return false + case + // Don't check the Name, it contains no information + y.Kind != t.Kind, + y.Units != t.Units, + y.Default != t.Default, + y.HasDefault != t.HasDefault, + y.FractionDigits != t.FractionDigits, + y.IdentityBase != t.IdentityBase, + len(y.Length) != len(t.Length), + !y.Length.Equal(t.Length), + y.OptionalInstance != t.OptionalInstance, + y.Path != t.Path, + !ssEqual(y.Pattern, t.Pattern), + !ssEqual(y.POSIXPattern, t.POSIXPattern), + len(y.Range) != len(t.Range), + !y.Range.Equal(t.Range), + !tsEqual(y.Type, t.Type), + !cmp.Equal(y.Enum, t.Enum, cmp.Comparer(func(t, u EnumType) bool { + return cmp.Equal(t.unique, u.unique) && cmp.Equal(t.ToInt, u.ToInt) && cmp.Equal(t.ToString, u.ToString) + })): + + return false + } + // TODO(borman): Base, Bit + return true +} + +// typedef returns a Typedef created from y for insertion into the BaseTypedefs +// map. +func (y *YangType) typedef() *Typedef { + return &Typedef{ + Name: y.Name, + Source: &Statement{}, + Type: &Type{ + Name: y.Name, + Source: &Statement{}, + YangType: y, + }, + YangType: y, + } +} + +// ssEqual returns true if the two slices are equivalent. 
+func ssEqual(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + for x, s := range s1 { + if s != s2[x] { + return false + } + } + return true +} + +// tsEqual returns true if the two Type slices are identical. +func tsEqual(t1, t2 []*YangType) bool { + if len(t1) != len(t2) { + return false + } + // For now we compare absolute pointers. + // This may be wrong. + for x, t := range t1 { + if !t.Equal(t2[x]) { + return false + } + } + return true +} diff --git a/src/webui/internal/goyang/pkg/yang/yangtype_test.go b/src/webui/internal/goyang/pkg/yang/yangtype_test.go new file mode 100644 index 000000000..f02e66e2c --- /dev/null +++ b/src/webui/internal/goyang/pkg/yang/yangtype_test.go @@ -0,0 +1,136 @@ +// Copyright 2021 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yang + +import ( + "testing" +) + +func TestYangTypeEqual(t *testing.T) { + + tests := []struct { + name string + inLeft *YangType + inRight *YangType + wantEqual bool + }{{ + name: "both-nil", + inLeft: nil, + inRight: nil, + wantEqual: true, + }, { + name: "one-nil", + inLeft: &YangType{ + Kind: Ydecimal64, + FractionDigits: 5, + }, + inRight: nil, + wantEqual: false, + }, { + name: "name-unequal", + inLeft: &YangType{ + Name: "foo", + Kind: Ydecimal64, + FractionDigits: 5, + }, + inRight: &YangType{ + Name: "bar", + Kind: Ydecimal64, + FractionDigits: 5, + }, + wantEqual: true, + }, { + name: "fraction-digits-unequal", + inLeft: &YangType{ + Name: "foo", + Kind: Ydecimal64, + FractionDigits: 5, + }, + inRight: &YangType{ + Name: "foo", + Kind: Ydecimal64, + FractionDigits: 4, + }, + wantEqual: false, + }, { + name: "types-unequal", + inLeft: &YangType{ + Name: "foo", + Kind: Ydecimal64, + FractionDigits: 5, + }, + inRight: &YangType{ + Name: "foo", + Kind: Yint64, + }, + wantEqual: false, + }, { + name: "defaults-equal", + inLeft: &YangType{ + Name: "foo", + Kind: Ystring, + Default: "bar", + HasDefault: true, + }, + inRight: &YangType{ + Name: "foo", + Kind: Ystring, + Default: "bar", + HasDefault: true, + }, + wantEqual: true, + }, { + name: "defaults-unequal", + inLeft: &YangType{ + Name: "foo", + Kind: Ystring, + Default: "bar", + HasDefault: true, + }, + inRight: &YangType{ + Name: "foo", + Kind: Ystring, + Default: "baz", + HasDefault: true, + }, + wantEqual: false, + }, { + name: "has-default-unequal", + inLeft: &YangType{ + Name: "foo", + Kind: Ystring, + Default: "", + }, + inRight: &YangType{ + Name: "foo", + Kind: Ystring, + Default: "", + HasDefault: true, + }, + wantEqual: false, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotEqual := tt.inLeft.Equal(tt.inRight); gotEqual != tt.wantEqual { + t.Errorf("gotEqual: %v, wantEqual: %v", gotEqual, tt.wantEqual) + } + // Must be symmetric + if reverseEqual := 
tt.inRight.Equal(tt.inLeft); reverseEqual != tt.wantEqual { + t.Errorf("got reverseEqual: %v, wantEqual: %v", reverseEqual, tt.wantEqual) + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/build_yang.go b/src/webui/internal/goyang/pkg/yangentry/build_yang.go new file mode 100644 index 000000000..486f7299d --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/build_yang.go @@ -0,0 +1,78 @@ +// Copyright 2020 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yangentry contains high-level helpers for using yang.Entry objects. +package yangentry + +import ( + "fmt" + + "github.com/openconfig/goyang/pkg/yang" +) + +// Parse takes a list of either module/submodule names or .yang file +// paths, and a list of include paths. It runs the yang parser on the YANG +// files by searching for them in the include paths or in the current +// directory, returning a slice of yang.Entry pointers which represent the +// parsed top level modules. It also returns a list of errors encountered while +// parsing, if any. +func Parse(yangfiles, path []string) (map[string]*yang.Entry, []error) { + return parse(yangfiles, path, yang.NewModules()) +} + +// ParseWithOptions takes a list of either module/submodule names or .yang file +// paths, a list of include paths, and a set of parse options. 
It configures the +// yang parser with the specified parse options and runs it on the YANG +// files by searching for them in the include paths or in the current +// directory, returning a slice of yang.Entry pointers which represent the +// parsed top level modules. It also returns a list of errors encountered while +// parsing, if any. +func ParseWithOptions(yangfiles, path []string, parseOptions yang.Options) (map[string]*yang.Entry, []error) { + ms := yang.NewModules() + ms.ParseOptions = parseOptions + + return parse(yangfiles, path, ms) +} + +func parse(yangfiles, path []string, ms *yang.Modules) (map[string]*yang.Entry, []error) { + for _, p := range path { + ms.AddPath(fmt.Sprintf("%s/...", p)) + } + + var processErr []error + for _, name := range yangfiles { + if name == "" { + continue + } + if err := ms.Read(name); err != nil { + processErr = append(processErr, err) + } + } + + if len(processErr) > 0 { + return nil, processErr + } + + if errs := ms.Process(); len(errs) != 0 { + return nil, errs + } + + entries := make(map[string]*yang.Entry) + for _, m := range ms.Modules { + e := yang.ToEntry(m) + entries[e.Name] = e + } + + return entries, nil +} diff --git a/src/webui/internal/goyang/pkg/yangentry/build_yang_test.go b/src/webui/internal/goyang/pkg/yangentry/build_yang_test.go new file mode 100644 index 000000000..266df9cd0 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/build_yang_test.go @@ -0,0 +1,129 @@ +// Copyright 2020 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package yangentry + +import ( + "testing" + + "github.com/openconfig/goyang/pkg/yang" +) + +// TestParse tests the Parse function - which takes an input +// set of modules and processes them using the goyang compiler into a set of +// yang.Entry pointers. +func TestParse(t *testing.T) { + tests := []struct { + name string + inFiles []string + inPath []string + wantErr bool + wantMods []string + }{{ + name: "simple valid module", + inFiles: []string{"testdata/00-valid-module.yang"}, + inPath: []string{"testdata"}, + wantMods: []string{"test-module"}, + }, { + name: "simple valid module without .yang extension", + inFiles: []string{"00-valid-module"}, + inPath: []string{"testdata"}, + wantMods: []string{"test-module"}, + }, { + name: "simple invalid module", + inFiles: []string{"testdata/01-invalid-module.yang"}, + inPath: []string{"testdata"}, + wantErr: true, + }, { + name: "valid import", + inFiles: []string{"testdata/02-valid-import.yang"}, + inPath: []string{"testdata/subdir"}, + wantMods: []string{"test-module"}, + }, { + name: "invalid import", + inFiles: []string{"testdata/03-invalid-import.yang"}, + inPath: []string{}, + wantErr: true, + }, { + name: "two modules", + inFiles: []string{"testdata/04-valid-module-one.yang", "testdata/04-valid-module-two.yang"}, + inPath: []string{}, + wantMods: []string{"module-one", "module-two"}, + }, { + name: "circular submodule dependency", + inFiles: []string{"testdata/05-circular-main.yang"}, + inPath: []string{"testdata/subdir"}, + wantErr: true, + }} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + entries, errs := Parse(tt.inFiles, tt.inPath) + if len(errs) != 0 && !tt.wantErr { + t.Fatalf("%s: unexpected error processing modules: %v", tt.name, errs) + } + + for _, m := range tt.wantMods { + if _, ok := entries[m]; !ok { + t.Fatalf("%s: could not find module %s", tt.name, m) + } + } + }) 
+ } +} + +// TestParseWithOptions tests the ParseWithOptions function - which takes an input +// set of modules along with a set of parse options, and processes them using the goyang +// compiler into a set of yang.Entry pointers. +func TestParseWithOptions(t *testing.T) { + tests := []struct { + name string + inFiles []string + inPath []string + parseOptions yang.Options + wantErr bool + wantMods []string + }{ + { + name: "circular submodule dependency with default options", + inFiles: []string{"testdata/05-circular-main.yang"}, + inPath: []string{"testdata/subdir"}, + parseOptions: yang.Options{}, + wantErr: true, + }, + { + name: "circular submodule dependency with IgnoreSubmoduleCircularDependencies", + inFiles: []string{"testdata/05-circular-main.yang"}, + inPath: []string{"testdata/subdir"}, + parseOptions: yang.Options{IgnoreSubmoduleCircularDependencies: true}, + wantMods: []string{"circular-main"}, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + entries, errs := ParseWithOptions(tt.inFiles, tt.inPath, tt.parseOptions) + if len(errs) != 0 && !tt.wantErr { + t.Fatalf("%s: unexpected error processing modules: %v", tt.name, errs) + } + + for _, m := range tt.wantMods { + if _, ok := entries[m]; !ok { + t.Fatalf("%s: could not find module %s", tt.name, m) + } + } + }) + } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/00-valid-module.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/00-valid-module.yang new file mode 100644 index 000000000..b216af121 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/00-valid-module.yang @@ -0,0 +1,6 @@ +module test-module { + prefix "t"; + namespace "urn:t"; + + leaf valid-leaf { type string; } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/01-invalid-module.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/01-invalid-module.yang new file mode 100644 index 000000000..7e28c9048 --- /dev/null +++ 
b/src/webui/internal/goyang/pkg/yangentry/testdata/01-invalid-module.yang @@ -0,0 +1,6 @@ +module test-module { + prefix "t"; + namespace "urn:t"; + + leaf invalid-leaf { } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/02-valid-import.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/02-valid-import.yang new file mode 100644 index 000000000..621a1070c --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/02-valid-import.yang @@ -0,0 +1,6 @@ +module test-module { + prefix "t"; + namespace "urn:t"; + + import imported { prefix "i"; } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/03-invalid-import.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/03-invalid-import.yang new file mode 100644 index 000000000..65ba27a53 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/03-invalid-import.yang @@ -0,0 +1,6 @@ +module test-module { + prefix "t"; + namespace "urn:t"; + + import import-not-found { prefix "i"; } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/04-valid-module-one.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/04-valid-module-one.yang new file mode 100644 index 000000000..28ed7702e --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/04-valid-module-one.yang @@ -0,0 +1,6 @@ +module module-one { + prefix "t"; + namespace "urn:t"; + + leaf one { type int8; } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/04-valid-module-two.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/04-valid-module-two.yang new file mode 100644 index 000000000..d17cbccdd --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/04-valid-module-two.yang @@ -0,0 +1,6 @@ +module module-two { + prefix "t"; + namespace "urn:t"; + + leaf two { type int8; } +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/05-circular-main.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/05-circular-main.yang new file mode 100644 index 
000000000..d27bfcf6d --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/05-circular-main.yang @@ -0,0 +1,12 @@ +module circular-main { + yang-version "1.1"; + + namespace "urn:test:circular:main"; + + prefix "main"; + + include circular-sub-one; + include circular-sub-two; + + revision "2025-02-15"; +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/circular-sub-one.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/circular-sub-one.yang new file mode 100644 index 000000000..cf50a8b3f --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/circular-sub-one.yang @@ -0,0 +1,9 @@ +submodule circular-sub-one { + yang-version "1.1"; + + belongs-to circular-main { prefix "main"; } + + include circular-sub-two; + + revision "2025-02-15"; +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/circular-sub-two.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/circular-sub-two.yang new file mode 100644 index 000000000..afaf4c4ef --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/circular-sub-two.yang @@ -0,0 +1,9 @@ +submodule circular-sub-two { + yang-version "1.1"; + + belongs-to circular-main { prefix "main"; } + + include circular-sub-one; + + revision "2025-02-15"; +} diff --git a/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/imported.yang b/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/imported.yang new file mode 100644 index 000000000..71ddd9330 --- /dev/null +++ b/src/webui/internal/goyang/pkg/yangentry/testdata/subdir/imported.yang @@ -0,0 +1,5 @@ +module imported { + prefix "imported"; + namespace "urn:i"; + +} diff --git a/src/webui/internal/goyang/testdata/aug.yang b/src/webui/internal/goyang/testdata/aug.yang new file mode 100644 index 000000000..785a1a1cd --- /dev/null +++ b/src/webui/internal/goyang/testdata/aug.yang @@ -0,0 +1,28 @@ +module aug { + namespace "yang-sucks"; + prefix "yang"; + grouping 
bgp-neighbor_config { + leaf peer-as { type string; } + } + grouping bgp-neighbors { + list neighbor { + uses bgp-neighbor-group; + } + } + grouping bgp-neighbor-group { + container config { + uses bgp-neighbor_config; + } + } + grouping bgp-neighbor-peer-group_config { + leaf peer-group { type string; } + } + augment /bgp/neighbors/neighbor/config { + uses bgp-neighbor-peer-group_config; + } + container bgp { + container neighbors { + uses bgp-neighbors; + } + } +} diff --git a/src/webui/internal/goyang/testdata/base.yang b/src/webui/internal/goyang/testdata/base.yang new file mode 100644 index 000000000..915631984 --- /dev/null +++ b/src/webui/internal/goyang/testdata/base.yang @@ -0,0 +1,93 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Base test yang module. +module base { + namespace "urn:mod"; + prefix "base"; + + include sub; + import other { + prefix bother; + } + + // basic type tests + typedef base-type { type int32; } + leaf base-leaf1 { type base-type; } + leaf base-leaf2 { type base:base-type; } + leaf base-leaf3 { type bother:other-type; } + leaf base-leaf4 { type sub-type; } + + grouping base-group { + description + "The base-group is used to test the 'uses' statement below. 
+ This description is here to simply include a multi-line string + as an example of multi-line strings"; + leaf base-group-leaf { + config false; + type string; + } + } + + // test uses and leaf ref + container base-container-1 { + uses base-group; + uses bother:other-group; + uses base:sub-group; + choice base-choice { + case choice-a { + leaf base-choice-a1 { type string; } + leaf base-choice-a2 { + type leafref { path ../base-container-1-leaf; } + } + } + case choice-b { + leaf base-choice-b1 { type string; } + leaf base-choice-b2 { + type leafref { path ../../base-container-2/base-container-2a/base-container-2a-leaf; } + } + } + } + leaf base-container-1-leaf { type string; } + } + + // container referenced by a leafref above + container base-container-2 { + container base-container-2a { + leaf base-container-2a-leaf { type string; } + } + } + + // test basic augmenting + augment /base-container-1/base-choice/choice-a { + leaf base-choice-a3 { type string; } + } + augment /base-container-1/base-choice { + case choice-c { + leaf base-choice-c1 { type string; } + } + } + + // simple extension test + extension base-ext { + argument base-arg; + } + container ext-container { + config false; + leaf ext-container-leaf { type string; } + base:base-ext "EXTENSION" { + leaf base-ext-leaf { type string; } + } + } +} diff --git a/src/webui/internal/goyang/testdata/other.yang b/src/webui/internal/goyang/testdata/other.yang new file mode 100644 index 000000000..acbe95a44 --- /dev/null +++ b/src/webui/internal/goyang/testdata/other.yang @@ -0,0 +1,30 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// imported by base.yang. +module other { + namespace "uri:empty"; + prefix "otherp"; + typedef other-type { type string; } + + // This container should not appear in base, even though this file is + // imported by base. That is just the YANG is defined. + container other-container { + leaf other-container-leaf1 { type other-type; } + leaf other-container-leaf2 { type otherp:other-type; } + } + grouping other-group { + leaf other-group-leaf { type string; } + } +} diff --git a/src/webui/internal/goyang/testdata/sub.yang b/src/webui/internal/goyang/testdata/sub.yang new file mode 100644 index 000000000..940920c67 --- /dev/null +++ b/src/webui/internal/goyang/testdata/sub.yang @@ -0,0 +1,26 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// included by base.yang. 
+submodule sub { + belongs-to base { prefix "sbase"; } + typedef sub-type { type sub-type2; } + typedef sub-type2 { type int8; } + container sub-container { + leaf sub-container-leaf { type sub-type; } + } + grouping sub-group { + leaf sub-group-leaf { type string; } + } +} diff --git a/src/webui/internal/goyang/testdata/subdir/subdir1.yang b/src/webui/internal/goyang/testdata/subdir/subdir1.yang new file mode 100644 index 000000000..c6a8523b8 --- /dev/null +++ b/src/webui/internal/goyang/testdata/subdir/subdir1.yang @@ -0,0 +1,12 @@ +// A YANG module located in a subdirectory, to test the AddYANGPaths +// helper function. + +module subdir1 { + yang-version "1"; + +namespace "namespace:goes:here"; + prefix "subdir1"; + + description + "This module is to be found by test cases."; +} diff --git a/src/webui/internal/goyang/tree.go b/src/webui/internal/goyang/tree.go new file mode 100644 index 000000000..7ba17c8b9 --- /dev/null +++ b/src/webui/internal/goyang/tree.go @@ -0,0 +1,112 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "fmt" + "io" + "sort" + + "github.com/openconfig/goyang/pkg/indent" + "github.com/openconfig/goyang/pkg/yang" +) + +func init() { + register(&formatter{ + name: "tree", + f: doTree, + help: "display in a tree format", + }) +} + +func doTree(w io.Writer, entries []*yang.Entry) { + for _, e := range entries { + Write(w, e) + } +} + +// Write writes e, formatted, and all of its children, to w. +func Write(w io.Writer, e *yang.Entry) { + if e.Description != "" { + fmt.Fprintln(w) + fmt.Fprintln(indent.NewWriter(w, "// "), e.Description) + } + if len(e.Exts) > 0 { + fmt.Fprintf(w, "extensions: {\n") + for _, ext := range e.Exts { + if n := ext.NName(); n != "" { + fmt.Fprintf(w, " %s %s;\n", ext.Kind(), n) + } else { + fmt.Fprintf(w, " %s;\n", ext.Kind()) + } + } + fmt.Fprintln(w, "}") + } + switch { + case e.RPC != nil: + fmt.Fprintf(w, "RPC: ") + case e.ReadOnly(): + fmt.Fprintf(w, "RO: ") + default: + fmt.Fprintf(w, "rw: ") + } + if e.Type != nil { + fmt.Fprintf(w, "%s ", getTypeName(e)) + } + name := e.Name + if e.Prefix != nil { + name = e.Prefix.Name + ":" + name + } + switch { + case e.Dir == nil && e.ListAttr != nil: + fmt.Fprintf(w, "[]%s\n", name) + return + case e.Dir == nil: + fmt.Fprintf(w, "%s\n", name) + return + case e.ListAttr != nil: + fmt.Fprintf(w, "[%s]%s {\n", e.Key, name) //} + default: + fmt.Fprintf(w, "%s {\n", name) //} + } + if r := e.RPC; r != nil { + if r.Input != nil { + Write(indent.NewWriter(w, " "), r.Input) + } + if r.Output != nil { + Write(indent.NewWriter(w, " "), r.Output) + } + } + var names []string + for k := range e.Dir { + names = append(names, k) + } + sort.Strings(names) + for _, k := range names { + Write(indent.NewWriter(w, " "), e.Dir[k]) + } + // { to match the brace below to keep brace matching working + fmt.Fprintln(w, "}") +} + +func getTypeName(e *yang.Entry) string { + if e == nil || e.Type == nil { + return "" + } + // Return our root's type name. 
+ // This should be the builtin type-name + // for this entry. + return e.Type.Root.Name +} diff --git a/src/webui/internal/goyang/types.go b/src/webui/internal/goyang/types.go new file mode 100644 index 000000000..864e59ed8 --- /dev/null +++ b/src/webui/internal/goyang/types.go @@ -0,0 +1,135 @@ +// Copyright 2015 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "io" + "strings" + + "github.com/openconfig/goyang/pkg/indent" + "github.com/openconfig/goyang/pkg/yang" + "github.com/pborman/getopt" +) + +var ( + typesDebug bool + typesVerbose bool +) + +func init() { + flags := getopt.New() + register(&formatter{ + name: "types", + f: doTypes, + help: "display found types", + flags: flags, + }) + flags.BoolVarLong(&typesDebug, "types_debug", 0, "display debug information") + flags.BoolVarLong(&typesVerbose, "types_verbose", 0, "include base information") +} + +func doTypes(w io.Writer, entries []*yang.Entry) { + types := Types{} + for _, e := range entries { + types.AddEntry(e) + } + + for t := range types { + printType(w, t, typesVerbose) + } + if typesDebug { + for _, e := range entries { + showall(w, e) + } + } +} + +// Types keeps track of all the YangTypes defined. +type Types map[*yang.YangType]struct{} + +// AddEntry adds all types defined in e and its descendants to t.
+func (t Types) AddEntry(e *yang.Entry) { + if e == nil { + return + } + if e.Type != nil { + t[e.Type.Root] = struct{}{} + } + for _, d := range e.Dir { + t.AddEntry(d) + } +} + +// printType prints type t in a moderately human readable format to w. +func printType(w io.Writer, t *yang.YangType, verbose bool) { + if verbose && t.Base != nil { + base := yang.Source(t.Base) + if base == "unknown" { + base = "unnamed type" + } + fmt.Fprintf(w, "%s: ", base) + } + fmt.Fprintf(w, "%s", t.Root.Name) + if t.Kind.String() != t.Root.Name { + fmt.Fprintf(w, "(%s)", t.Kind) + } + if t.Units != "" { + fmt.Fprintf(w, " units=%s", t.Units) + } + if t.Default != "" { + fmt.Fprintf(w, " default=%q", t.Default) + } + if t.FractionDigits != 0 { + fmt.Fprintf(w, " fraction-digits=%d", t.FractionDigits) + } + if len(t.Length) > 0 { + fmt.Fprintf(w, " length=%s", t.Length) + } + if t.Kind == yang.YinstanceIdentifier && !t.OptionalInstance { + fmt.Fprintf(w, " required") + } + if t.Kind == yang.Yleafref && t.Path != "" { + fmt.Fprintf(w, " path=%q", t.Path) + } + if len(t.Pattern) > 0 { + fmt.Fprintf(w, " pattern=%s", strings.Join(t.Pattern, "|")) + } + b := yang.BaseTypedefs[t.Kind.String()].YangType + if len(t.Range) > 0 && !t.Range.Equal(b.Range) { + fmt.Fprintf(w, " range=%s", t.Range) + } + if len(t.Type) > 0 { + fmt.Fprintf(w, "{\n") + for _, t := range t.Type { + printType(indent.NewWriter(w, " "), t, verbose) + } + fmt.Fprintf(w, "}") + } + fmt.Fprintf(w, ";\n") +} + +func showall(w io.Writer, e *yang.Entry) { + if e == nil { + return + } + if e.Type != nil { + fmt.Fprintf(w, "\n%s\n ", e.Node.Statement().Location()) + printType(w, e.Type.Root, false) + } + for _, d := range e.Dir { + showall(w, d) + } +} diff --git a/src/webui/internal/goyang/yang.go b/src/webui/internal/goyang/yang.go new file mode 100644 index 000000000..548263279 --- /dev/null +++ b/src/webui/internal/goyang/yang.go @@ -0,0 +1,214 @@ +// Copyright 2015 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Program yang parses YANG files, displays errors, and possibly writes +// something related to the input on output. +// +// Usage: yang [--path DIR] [--format FORMAT] [FORMAT OPTIONS] [MODULE] [FILE ...] +// +// If MODULE is specified (an argument that does not end in .yang), it is taken +// as the name of the module to display. Any FILEs specified are read, and the +// tree for MODULE is displayed. If MODULE was not defined in FILEs (or no +// files were specified), then the file MODULES.yang is read as well. An error +// is displayed if no definition for MODULE was found. +// +// If MODULE is missing, then all base modules read from the FILEs are +// displayed. If there are no arguments then standard input is parsed. +// +// If DIR is specified, it is considered a comma separated list of paths +// to append to the search directory. If DIR appears as DIR/... then +// DIR and all direct and indirect subdirectories are checked. +// +// FORMAT, which defaults to "tree", specifies the format of output to produce. +// Use "goyang --help" for a list of available formats. +// +// FORMAT OPTIONS are flags that apply to a specific format. They must follow +// --format. +// +// THIS PROGRAM IS STILL JUST A DEVELOPMENT TOOL. 
+package main + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "runtime/trace" + "sort" + "strings" + + "github.com/openconfig/goyang/pkg/indent" + "github.com/openconfig/goyang/pkg/yang" + "github.com/pborman/getopt" +) + +// Each format must register a formatter with register. The function f will +// be called once with the set of yang Entry trees generated. +type formatter struct { + name string + f func(io.Writer, []*yang.Entry) + help string + flags *getopt.Set +} + +var formatters = map[string]*formatter{} + +func register(f *formatter) { + formatters[f.name] = f +} + +// exitIfError writes errs to standard error and exits with an exit status of 1. +// If errs is empty then exitIfError does nothing and simply returns. +func exitIfError(errs []error) { + if len(errs) > 0 { + for _, err := range errs { + fmt.Fprintln(os.Stderr, err) + } + stop(1) + } +} + +var stop = os.Exit + +func main() { + var format string + formats := make([]string, 0, len(formatters)) + for k := range formatters { + formats = append(formats, k) + } + sort.Strings(formats) + + var traceP string + var help bool + var paths []string + var ignoreSubmoduleCircularDependencies bool + getopt.ListVarLong(&paths, "path", 'p', "comma separated list of directories to add to search path", "DIR[,DIR...]") + getopt.StringVarLong(&format, "format", 'f', "format to display: "+strings.Join(formats, ", "), "FORMAT") + getopt.StringVarLong(&traceP, "trace", 't', "write trace into to TRACEFILE", "TRACEFILE") + getopt.BoolVarLong(&help, "help", 'h', "display help") + getopt.BoolVarLong(&ignoreSubmoduleCircularDependencies, "ignore-circdep", 'g', "ignore circular dependencies between submodules") + getopt.SetParameters("[FORMAT OPTIONS] [SOURCE] [...]") + + if err := getopt.Getopt(func(o getopt.Option) bool { + if o.Name() == "--format" { + f, ok := formatters[format] + if !ok { + fmt.Fprintf(os.Stderr, "%s: invalid format. 
Choices are %s\n", format, strings.Join(formats, ", ")) + stop(1) + } + if f.flags != nil { + f.flags.VisitAll(func(o getopt.Option) { + getopt.AddOption(o) + }) + } + } + return true + }); err != nil { + fmt.Fprintln(os.Stderr, err) + getopt.PrintUsage(os.Stderr) + os.Exit(1) + } + + if traceP != "" { + fp, err := os.Create(traceP) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + trace.Start(fp) + stop = func(c int) { trace.Stop(); os.Exit(c) } + defer func() { trace.Stop() }() + } + + if help { + getopt.CommandLine.PrintUsage(os.Stderr) + fmt.Fprintf(os.Stderr, ` +SOURCE may be a module name or a .yang file. + +Formats: +`) + for _, fn := range formats { + f := formatters[fn] + fmt.Fprintf(os.Stderr, " %s - %s\n", f.name, f.help) + if f.flags != nil { + f.flags.PrintOptions(indent.NewWriter(os.Stderr, " ")) + } + fmt.Fprintln(os.Stderr) + } + stop(0) + } + + ms := yang.NewModules() + ms.ParseOptions.IgnoreSubmoduleCircularDependencies = ignoreSubmoduleCircularDependencies + + for _, path := range paths { + expanded, err := yang.PathsWithModules(path) + if err != nil { + fmt.Fprintln(os.Stderr, err) + continue + } + ms.AddPath(expanded...) + } + + if format == "" { + format = "tree" + } + if _, ok := formatters[format]; !ok { + fmt.Fprintf(os.Stderr, "%s: invalid format. Choices are %s\n", format, strings.Join(formats, ", ")) + stop(1) + + } + + files := getopt.Args() + + if len(files) == 0 { + data, err := ioutil.ReadAll(os.Stdin) + if err == nil { + err = ms.Parse(string(data), "") + } + if err != nil { + fmt.Fprintln(os.Stderr, err) + stop(1) + } + } + + for _, name := range files { + if err := ms.Read(name); err != nil { + fmt.Fprintln(os.Stderr, err) + continue + } + } + + // Process the read files, exiting if any errors were found. + exitIfError(ms.Process()) + + // Keep track of the top level modules we read in. + // Those are the only modules we want to print below. 
+ mods := map[string]*yang.Module{} + var names []string + + for _, m := range ms.Modules { + if mods[m.Name] == nil { + mods[m.Name] = m + names = append(names, m.Name) + } + } + sort.Strings(names) + entries := make([]*yang.Entry, len(names)) + for x, n := range names { + entries[x] = yang.ToEntry(mods[n]) + } + + formatters[format].f(os.Stdout, entries) +} diff --git a/src/webui/internal/handlers/capabilities.go b/src/webui/internal/handlers/capabilities.go new file mode 100644 index 000000000..5472894be --- /dev/null +++ b/src/webui/internal/handlers/capabilities.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "log" + + "github.com/kernelkit/webui/internal/restconf" +) + +type feature struct { + Name string // key used in Has() and session cookie + Module string // YANG module that carries the feature + Feature string // YANG feature name in the module's feature array +} + +// optionalFeatures maps UI capabilities to YANG module features. Extend here. +var optionalFeatures = []feature{ + {Name: "wifi", Module: "infix-interfaces", Feature: "wifi"}, + {Name: "containers", Module: "infix-interfaces", Feature: "containers"}, +} + +// Capabilities tracks which optional features are present on the device. +// Use Has("feature-name") in templates and Go code. 
+type Capabilities struct { + features map[string]bool +} + +func NewCapabilities(features map[string]bool) *Capabilities { + if features == nil { + features = make(map[string]bool) + } + return &Capabilities{features: features} +} + +func (c *Capabilities) Has(name string) bool { + return c != nil && c.features[name] +} + +func (c *Capabilities) Features() map[string]bool { + if c == nil { + return nil + } + return c.features +} + +type capsCtxKey struct{} + +func ContextWithCapabilities(ctx context.Context, caps *Capabilities) context.Context { + return context.WithValue(ctx, capsCtxKey{}, caps) +} + +func CapabilitiesFromContext(ctx context.Context) *Capabilities { + caps, _ := ctx.Value(capsCtxKey{}).(*Capabilities) + if caps == nil { + return NewCapabilities(nil) + } + return caps +} + +type yangLibrary struct { + YangLibrary struct { + ModuleSet []struct { + Module []struct { + Name string `json:"name"` + Feature []string `json:"feature"` + } `json:"module"` + } `json:"module-set"` + } `json:"ietf-yang-library:yang-library"` +} + +func DetectCapabilities(ctx context.Context, rc restconf.Fetcher) *Capabilities { + var lib yangLibrary + if err := rc.Get(ctx, "/data/ietf-yang-library:yang-library", &lib); err != nil { + log.Printf("yang-library: %v (ignored, no optional features)", err) + return NewCapabilities(nil) + } + + // Build index: module name → set of YANG features advertised. 
+ modFeatures := make(map[string]map[string]bool) + for _, ms := range lib.YangLibrary.ModuleSet { + for _, m := range ms.Module { + fs := make(map[string]bool, len(m.Feature)) + for _, f := range m.Feature { + fs[f] = true + } + modFeatures[m.Name] = fs + } + } + + result := make(map[string]bool, len(optionalFeatures)) + for _, f := range optionalFeatures { + result[f.Name] = modFeatures[f.Module][f.Feature] + } + + return NewCapabilities(result) +} diff --git a/src/webui/internal/handlers/capabilities_test.go b/src/webui/internal/handlers/capabilities_test.go new file mode 100644 index 000000000..188414929 --- /dev/null +++ b/src/webui/internal/handlers/capabilities_test.go @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "errors" + "testing" + + "github.com/kernelkit/webui/internal/testutil" +) + +func yangLibraryResponse(modules ...map[string]interface{}) map[string]interface{} { + mods := make([]interface{}, len(modules)) + for i, m := range modules { + mods[i] = m + } + return map[string]interface{}{ + "ietf-yang-library:yang-library": map[string]interface{}{ + "module-set": []interface{}{ + map[string]interface{}{"module": mods}, + }, + }, + } +} + +func module(name string, features ...string) map[string]interface{} { + m := map[string]interface{}{"name": name} + if len(features) > 0 { + m["feature"] = features + } + return m +} + +func TestDetectCapabilities_NoModules(t *testing.T) { + mock := testutil.NewMockFetcher() + mock.SetResponse("/data/ietf-yang-library:yang-library", yangLibraryResponse()) + + caps := DetectCapabilities(context.Background(), mock) + if caps.Has("wifi") || caps.Has("containers") { + t.Errorf("expected no features, got wifi=%v containers=%v", + caps.Has("wifi"), caps.Has("containers")) + } +} + +func TestDetectCapabilities_YangLibraryError(t *testing.T) { + mock := testutil.NewMockFetcher() + mock.SetError("/data/ietf-yang-library:yang-library", errors.New("unreachable")) + + caps := 
DetectCapabilities(context.Background(), mock) + if caps.Has("wifi") || caps.Has("containers") { + t.Errorf("expected no features on error, got wifi=%v containers=%v", + caps.Has("wifi"), caps.Has("containers")) + } +} + +func TestDetectCapabilities_ContainersModule(t *testing.T) { + mock := testutil.NewMockFetcher() + mock.SetResponse("/data/ietf-yang-library:yang-library", + yangLibraryResponse( + module("ietf-interfaces"), + module("infix-interfaces", "vlan-filtering", "containers"), + )) + + caps := DetectCapabilities(context.Background(), mock) + if !caps.Has("containers") { + t.Error("expected containers=true") + } + if caps.Has("wifi") { + t.Error("expected wifi=false") + } +} + +func TestDetectCapabilities_WiFiModule(t *testing.T) { + mock := testutil.NewMockFetcher() + mock.SetResponse("/data/ietf-yang-library:yang-library", + yangLibraryResponse( + module("ietf-interfaces"), + module("infix-interfaces", "vlan-filtering", "wifi"), + )) + + caps := DetectCapabilities(context.Background(), mock) + if !caps.Has("wifi") { + t.Error("expected wifi=true") + } + if caps.Has("containers") { + t.Error("expected containers=false") + } +} + +func TestDetectCapabilities_BothModules(t *testing.T) { + mock := testutil.NewMockFetcher() + mock.SetResponse("/data/ietf-yang-library:yang-library", + yangLibraryResponse( + module("ietf-interfaces"), + module("infix-interfaces", "vlan-filtering", "containers", "wifi"), + )) + + caps := DetectCapabilities(context.Background(), mock) + if !caps.Has("wifi") || !caps.Has("containers") { + t.Errorf("expected both features, got wifi=%v containers=%v", + caps.Has("wifi"), caps.Has("containers")) + } +} + +func TestCapabilitiesFromContext_NilReturnsEmpty(t *testing.T) { + caps := CapabilitiesFromContext(context.Background()) + if caps == nil { + t.Fatal("expected non-nil Capabilities") + } + if caps.Has("wifi") || caps.Has("containers") { + t.Error("expected no features for empty context") + } +} + +func 
TestCapabilitiesFromContext_RoundTrip(t *testing.T) { + orig := NewCapabilities(map[string]bool{"wifi": true, "containers": true}) + ctx := ContextWithCapabilities(context.Background(), orig) + got := CapabilitiesFromContext(ctx) + if !got.Has("wifi") || !got.Has("containers") { + t.Errorf("expected wifi=true containers=true, got wifi=%v containers=%v", + got.Has("wifi"), got.Has("containers")) + } +} diff --git a/src/webui/internal/handlers/common.go b/src/webui/internal/handlers/common.go new file mode 100644 index 000000000..3af4aa258 --- /dev/null +++ b/src/webui/internal/handlers/common.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "net/http" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +// PageData is the base template data passed to every page. +type PageData struct { + Username string + CsrfToken string + PageTitle string + ActivePage string + Capabilities *Capabilities + CfgUnsaved bool // running config differs from startup (Apply was used without ApplyAndSave) +} + +func csrfToken(ctx context.Context) string { + return security.TokenFromContext(ctx) +} + +func newPageData(r *http.Request, page, title string) PageData { + _, cookieErr := r.Cookie(cfgUnsavedCookie) + return PageData{ + Username: restconf.CredentialsFromContext(r.Context()).Username, + CsrfToken: csrfToken(r.Context()), + PageTitle: title, + ActivePage: page, + Capabilities: CapabilitiesFromContext(r.Context()), + CfgUnsaved: cookieErr == nil, + } +} diff --git a/src/webui/internal/handlers/configure.go b/src/webui/internal/handlers/configure.go new file mode 100644 index 000000000..0d83e8aaa --- /dev/null +++ b/src/webui/internal/handlers/configure.go @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "log" + "net/http" + + "github.com/kernelkit/webui/internal/restconf" +) + +const cfgUnsavedCookie = "cfg-unsaved" + +// ConfigureHandler manages 
the candidate datastore lifecycle. +type ConfigureHandler struct { + RC restconf.Fetcher +} + +func setCfgUnsaved(w http.ResponseWriter) { + http.SetCookie(w, &http.Cookie{Name: cfgUnsavedCookie, Value: "1", Path: "/", MaxAge: 86400, SameSite: http.SameSiteLaxMode}) +} + +func clearCfgUnsaved(w http.ResponseWriter) { + http.SetCookie(w, &http.Cookie{Name: cfgUnsavedCookie, Value: "", Path: "/", MaxAge: -1, SameSite: http.SameSiteLaxMode}) +} + +// Enter copies running → candidate, initialising a fresh edit session. +// Called when the user opens the Configure accordion. +// POST /configure/enter +func (h *ConfigureHandler) Enter(w http.ResponseWriter, r *http.Request) { + if err := h.RC.CopyDatastore(r.Context(), "running", "candidate"); err != nil { + log.Printf("configure enter: %v", err) + http.Error(w, "Could not initialise candidate datastore", http.StatusBadGateway) + return + } + w.WriteHeader(http.StatusNoContent) +} + +// Apply copies candidate → running, activating all staged changes atomically. +// Sets the cfg-unsaved cookie so the persistent banner appears until startup is saved. +// POST /configure/apply +func (h *ConfigureHandler) Apply(w http.ResponseWriter, r *http.Request) { + if err := h.RC.CopyDatastore(r.Context(), "candidate", "running"); err != nil { + log.Printf("configure apply: %v", err) + http.Error(w, "Could not apply configuration: "+err.Error(), http.StatusBadGateway) + return + } + setCfgUnsaved(w) + w.Header().Set("HX-Refresh", "true") + w.WriteHeader(http.StatusNoContent) +} + +// Abort copies running → candidate, discarding all staged changes. +// POST /configure/abort +func (h *ConfigureHandler) Abort(w http.ResponseWriter, r *http.Request) { + if err := h.RC.CopyDatastore(r.Context(), "running", "candidate"); err != nil { + log.Printf("configure abort: %v", err) + // Best-effort reset; refresh regardless. 
+ } + w.Header().Set("HX-Refresh", "true") + w.WriteHeader(http.StatusNoContent) +} + +// ApplyAndSave copies candidate → running then running → startup in one step. +// Clears the cfg-unsaved cookie. +// POST /configure/apply-and-save +func (h *ConfigureHandler) ApplyAndSave(w http.ResponseWriter, r *http.Request) { + if err := h.RC.CopyDatastore(r.Context(), "candidate", "running"); err != nil { + log.Printf("configure apply-and-save: %v", err) + http.Error(w, "Could not apply configuration: "+err.Error(), http.StatusBadGateway) + return + } + if err := h.RC.CopyDatastore(r.Context(), "running", "startup"); err != nil { + log.Printf("configure apply-and-save (save): %v", err) + http.Error(w, "Could not save configuration: "+err.Error(), http.StatusBadGateway) + return + } + clearCfgUnsaved(w) + w.Header().Set("HX-Refresh", "true") + w.WriteHeader(http.StatusNoContent) +} + +// DeleteLeaf removes a single leaf from the candidate datastore so the YANG +// default takes effect. Used by curated-page ↺ reset buttons. +// DELETE /configure/leaf?path=...&redirect=... +func (h *ConfigureHandler) DeleteLeaf(w http.ResponseWriter, r *http.Request) { + path := r.URL.Query().Get("path") + redirect := r.URL.Query().Get("redirect") + if path == "" || redirect == "" { + http.Error(w, "path and redirect required", http.StatusBadRequest) + return + } + if err := h.RC.Delete(r.Context(), candidatePath+path); err != nil { + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Reset to default", redirect) +} + +// Save copies running → startup, persisting the active configuration. +// Clears the cfg-unsaved cookie and does a full-page refresh so the banner disappears. 
+// POST /configure/save +func (h *ConfigureHandler) Save(w http.ResponseWriter, r *http.Request) { + if err := h.RC.CopyDatastore(r.Context(), "running", "startup"); err != nil { + log.Printf("configure save: %v", err) + http.Error(w, "Could not save configuration: "+err.Error(), http.StatusBadGateway) + return + } + clearCfgUnsaved(w) + w.Header().Set("HX-Refresh", "true") + w.WriteHeader(http.StatusNoContent) +} diff --git a/src/webui/internal/handlers/configure_firewall.go b/src/webui/internal/handlers/configure_firewall.go new file mode 100644 index 000000000..c212e7263 --- /dev/null +++ b/src/webui/internal/handlers/configure_firewall.go @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "errors" + "fmt" + "html/template" + "log" + "net/http" + "net/url" + "strings" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +const fwConfigPath = candidatePath + "/infix-firewall:firewall" + +// ─── RESTCONF read wrappers ─────────────────────────────────────────────────── + +// cfgFwWrapper wraps the firewall presence container for GET responses. +// Using a pointer distinguishes "present" from "absent". +type cfgFwWrapper struct { + Firewall *firewallJSON `json:"infix-firewall:firewall,omitempty"` +} + +// cfgFwZoneWrapper is used when reading a single zone by path. 
+type cfgFwZoneWrapper struct { + Zone []zoneJSON `json:"infix-firewall:zone"` +} + +// ─── Template display rows ──────────────────────────────────────────────────── + +type cfgZoneRow struct { + zoneJSON + IfaceCount int + IfaceSet map[string]bool + ServiceSet map[string]bool + ServicesTxt string // fallback when ServiceOptions unavailable + NetworksTxt string // comma-separated, shown read-only when zone uses networks +} + +type cfgPolicyRow struct { + policyJSON + IngressDisplay string + EgressDisplay string + MasqDisplay string +} + +// ─── Template data ──────────────────────────────────────────────────────────── + +type cfgFirewallPageData struct { + PageData + Loading bool + Active bool // firewall presence container exists + Enabled bool + Default string + Logging string + Zones []cfgZoneRow + Policies []cfgPolicyRow + Desc map[string]string + LoggingOptions []schema.IdentityOption + ActionOptions []schema.IdentityOption + PolicyActionOptions []schema.IdentityOption + ServiceOptions []schema.IdentityOption + AllInterfaces []string + Error string +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// ConfigureFirewallHandler serves the Configure > Firewall page. +type ConfigureFirewallHandler struct { + Template *template.Template + RC restconf.Fetcher + Schema *schema.Cache +} + +// Overview renders the Configure > Firewall page. 
+// GET /configure/firewall +func (h *ConfigureFirewallHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := cfgFirewallPageData{ + PageData: newPageData(r, "configure-firewall", "Configure: Firewall"), + } + + mgr := h.Schema.Manager() + data.Loading = mgr == nil + if mgr != nil { + fwPath := "/infix-firewall:firewall" + zPath := fwPath + "/zone" + pPath := fwPath + "/policy" + data.Desc = map[string]string{ + "enabled": schema.DescriptionOf(mgr, fwPath+"/enabled"), + "default": schema.DescriptionOf(mgr, fwPath+"/default"), + "logging": schema.DescriptionOf(mgr, fwPath+"/logging"), + "zone-name": schema.DescriptionOf(mgr, zPath+"/name"), + "zone-action": schema.DescriptionOf(mgr, zPath+"/action"), + "zone-description": schema.DescriptionOf(mgr, zPath+"/description"), + "zone-interface": schema.DescriptionOf(mgr, zPath+"/interface"), + "zone-service": schema.DescriptionOf(mgr, zPath+"/service"), + "policy-name": schema.DescriptionOf(mgr, pPath+"/name"), + "policy-action": schema.DescriptionOf(mgr, pPath+"/action"), + "policy-ingress": schema.DescriptionOf(mgr, pPath+"/ingress"), + "policy-egress": schema.DescriptionOf(mgr, pPath+"/egress"), + "policy-masquerade": schema.DescriptionOf(mgr, pPath+"/masquerade"), + } + data.LoggingOptions = schema.OptionsFor(mgr, fwPath+"/logging") + data.ActionOptions = schema.OptionsFor(mgr, zPath+"/action") + data.PolicyActionOptions = schema.OptionsFor(mgr, pPath+"/action") + data.ServiceOptions = schema.OptionsFor(mgr, zPath+"/service") + } + + fw, active, err := h.fetchFirewall(r.Context()) + if err != nil { + log.Printf("configure firewall: %v", err) + data.Error = "Could not read firewall configuration" + } + data.Active = active + if active && fw != nil { + if fw.Enabled == nil { + data.Enabled = true // YANG default + } else { + data.Enabled = bool(*fw.Enabled) + } + data.Default = fw.Default + data.Logging = fw.Logging + for _, z := range fw.Zone { + ifaceSet := make(map[string]bool, len(z.Interface)) + for 
_, iface := range z.Interface { + ifaceSet[iface] = true + } + svcSet := make(map[string]bool, len(z.Service)) + for _, svc := range z.Service { + svcSet[svc] = true + } + data.Zones = append(data.Zones, cfgZoneRow{ + zoneJSON: z, + IfaceCount: len(z.Interface), + IfaceSet: ifaceSet, + ServiceSet: svcSet, + ServicesTxt: strings.Join(z.Service, "\n"), + NetworksTxt: strings.Join(z.Network, ", "), + }) + } + for _, p := range fw.Policy { + masq := "—" + if p.Masquerade { + masq = "Yes" + } + data.Policies = append(data.Policies, cfgPolicyRow{ + policyJSON: p, + IngressDisplay: strings.Join(p.Ingress, ", "), + EgressDisplay: strings.Join(p.Egress, ", "), + MasqDisplay: masq, + }) + } + } + + data.AllInterfaces = h.fetchInterfaceNames(r.Context()) + + tmplName := "configure-firewall.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// Enable creates the firewall presence container with an initial "trusted" zone. +// POST /configure/firewall/enable +func (h *ConfigureFirewallHandler) Enable(w http.ResponseWriter, r *http.Request) { + body := map[string]any{ + "infix-firewall:firewall": map[string]any{ + "enabled": true, + "logging": "off", + "default": "trusted", + "zone": []map[string]any{{ + "name": "trusted", + "action": "accept", + }}, + }, + } + if err := h.RC.Put(r.Context(), fwConfigPath, body); err != nil { + log.Printf("configure firewall enable: %v", err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Firewall enabled", "/configure/firewall") +} + +// SaveSettings patches the global firewall settings (enabled, logging, default zone). 
+// POST /configure/firewall/settings +func (h *ConfigureFirewallHandler) SaveSettings(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + body := map[string]any{ + "infix-firewall:firewall": map[string]any{ + "enabled": r.FormValue("enabled") == "true", + "logging": r.FormValue("logging"), + "default": r.FormValue("default"), + }, + } + if err := h.RC.Patch(r.Context(), fwConfigPath, body); err != nil { + log.Printf("configure firewall settings: %v", err) + renderSaveError(w, err) + return + } + renderSaved(w, "Settings saved") +} + +// AddZone creates a new zone in the firewall candidate. +// POST /configure/firewall/zones +func (h *ConfigureFirewallHandler) AddZone(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := strings.TrimSpace(r.FormValue("name")) + if name == "" { + renderSaveError(w, fmt.Errorf("zone name is required")) + return + } + zone := map[string]any{ + "name": name, + "action": r.FormValue("action"), + } + if desc := strings.TrimSpace(r.FormValue("description")); desc != "" { + zone["description"] = desc + } + body := map[string]any{"infix-firewall:zone": []map[string]any{zone}} + if err := h.RC.Put(r.Context(), fwConfigPath+"/zone="+url.PathEscape(name), body); err != nil { + log.Printf("configure firewall zone add %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Zone added", "/configure/firewall") +} + +// DeleteZone removes a zone from the firewall. 
+// DELETE /configure/firewall/zones/{name} +func (h *ConfigureFirewallHandler) DeleteZone(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + if err := h.RC.Delete(r.Context(), fwConfigPath+"/zone="+url.PathEscape(name)); err != nil { + log.Printf("configure firewall zone delete %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Zone deleted", "/configure/firewall") +} + +// SaveZone updates a zone's action, description, interfaces, and services. +// Uses read-modify-write to preserve fields not managed by this UI (network, +// port-forward). Note: port-forward entries are lost on save (Phase 3 limitation). +// POST /configure/firewall/zones/{name} +func (h *ConfigureFirewallHandler) SaveZone(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + + var wrap cfgFwZoneWrapper + if err := h.RC.Get(r.Context(), fwConfigPath+"/zone="+url.PathEscape(name), &wrap); err != nil { + log.Printf("configure firewall zone save %q: GET: %v", name, err) + renderSaveError(w, err) + return + } + cur := zoneJSON{Name: name} + if len(wrap.Zone) > 0 { + cur = wrap.Zone[0] + } + + cur.Action = r.FormValue("action") + cur.Description = strings.TrimSpace(r.FormValue("description")) + + if len(cur.Network) == 0 { + ifaces := r.Form["interfaces"] + if ifaces == nil { + ifaces = []string{} + } + cur.Interface = ifaces + } + svcs := r.Form["services"] + if svcs == nil { + svcs = []string{} + } + cur.Service = svcs + + zone := map[string]any{ + "name": cur.Name, + "action": cur.Action, + "interface": cur.Interface, + "service": cur.Service, + } + if cur.Description != "" { + zone["description"] = cur.Description + } + if len(cur.Network) > 0 { + zone["network"] = cur.Network + } + body := map[string]any{"infix-firewall:zone": []map[string]any{zone}} + if err := h.RC.Put(r.Context(), 
fwConfigPath+"/zone="+url.PathEscape(name), body); err != nil { + log.Printf("configure firewall zone save %q: PUT: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Zone saved", "/configure/firewall") +} + +// AddPolicy creates a new inter-zone forwarding policy. +// POST /configure/firewall/policies +func (h *ConfigureFirewallHandler) AddPolicy(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := strings.TrimSpace(r.FormValue("name")) + ingress := strings.Fields(r.FormValue("ingress")) + egress := strings.Fields(r.FormValue("egress")) + if name == "" { + renderSaveError(w, fmt.Errorf("policy name is required")) + return + } + if len(ingress) == 0 || len(egress) == 0 { + renderSaveError(w, fmt.Errorf("policy requires at least one ingress and one egress zone")) + return + } + policy := map[string]any{ + "name": name, + "action": r.FormValue("action"), + "ingress": ingress, + "egress": egress, + } + if r.FormValue("masquerade") == "on" { + policy["masquerade"] = true + } + body := map[string]any{"infix-firewall:policy": []map[string]any{policy}} + if err := h.RC.Put(r.Context(), fwConfigPath+"/policy="+url.PathEscape(name), body); err != nil { + log.Printf("configure firewall policy add %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Policy added", "/configure/firewall") +} + +// DeletePolicy removes an inter-zone forwarding policy. 
+// DELETE /configure/firewall/policies/{name} +func (h *ConfigureFirewallHandler) DeletePolicy(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + if err := h.RC.Delete(r.Context(), fwConfigPath+"/policy="+url.PathEscape(name)); err != nil { + log.Printf("configure firewall policy delete %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Policy deleted", "/configure/firewall") +} + +// SavePolicy updates an existing policy's action, ingress, egress, masquerade. +// POST /configure/firewall/policies/{name} +func (h *ConfigureFirewallHandler) SavePolicy(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + ingress := strings.Fields(r.FormValue("ingress")) + egress := strings.Fields(r.FormValue("egress")) + if len(ingress) == 0 || len(egress) == 0 { + renderSaveError(w, fmt.Errorf("policy requires at least one ingress and one egress zone")) + return + } + policy := map[string]any{ + "name": name, + "action": r.FormValue("action"), + "ingress": ingress, + "egress": egress, + "masquerade": r.FormValue("masquerade") == "true", + } + body := map[string]any{"infix-firewall:policy": []map[string]any{policy}} + if err := h.RC.Put(r.Context(), fwConfigPath+"/policy="+url.PathEscape(name), body); err != nil { + log.Printf("configure firewall policy save %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Policy saved", "/configure/firewall") +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +// fetchInterfaceNames returns configured interface names from candidate (fallback running). 
// fetchInterfaceNames returns interface names for UI pick-lists. It prefers
// the candidate datastore; on any error it falls back to running, best-effort
// (a failure there simply yields an empty list).
func (h *ConfigureFirewallHandler) fetchInterfaceNames(ctx context.Context) []string {
	var wrap interfacesWrapper
	if err := h.RC.Get(ctx, candidatePath+"/ietf-interfaces:interfaces", &wrap); err != nil {
		h.RC.Get(ctx, "/data/ietf-interfaces:interfaces", &wrap) //nolint:errcheck
	}
	names := make([]string, 0, len(wrap.Interfaces.Interface))
	for _, iface := range wrap.Interfaces.Interface {
		names = append(names, iface.Name)
	}
	return names
}

// fetchFirewall reads the firewall presence container from candidate,
// falling back to running. Returns (nil, false, nil) when absent everywhere.
func (h *ConfigureFirewallHandler) fetchFirewall(ctx context.Context) (*firewallJSON, bool, error) {
	var wrap cfgFwWrapper
	err := h.RC.Get(ctx, fwConfigPath, &wrap)
	if err == nil {
		return wrap.Firewall, wrap.Firewall != nil, nil
	}
	// 404 on candidate means "not configured there" — try running next.
	var rcErr *restconf.Error
	if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound {
		runErr := h.RC.Get(ctx, "/data/infix-firewall:firewall", &wrap)
		if runErr == nil {
			return wrap.Firewall, wrap.Firewall != nil, nil
		}
		// 404 in running too: firewall simply not configured anywhere.
		var rcRun *restconf.Error
		if errors.As(runErr, &rcRun) && rcRun.StatusCode == http.StatusNotFound {
			return nil, false, nil
		}
		return nil, false, runErr
	}
	return nil, false, err
}
diff --git a/src/webui/internal/handlers/configure_interfaces.go b/src/webui/internal/handlers/configure_interfaces.go
new file mode 100644
index 000000000..b5831d682
--- /dev/null
+++ b/src/webui/internal/handlers/configure_interfaces.go
@@ -0,0 +1,951 @@
// SPDX-License-Identifier: MIT

package handlers

import (
	"context"
	"errors"
	"fmt"
	"html/template"
	"log"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"

	"github.com/kernelkit/webui/internal/restconf"
	"github.com/kernelkit/webui/internal/schema"
)

// ifaceCandPath is the candidate-datastore root for interface configuration.
const ifaceCandPath = candidatePath + "/ietf-interfaces:interfaces"

// ─── RESTCONF JSON structs (configure-only fields) ───────────────────────────

type bridgeCfgJSON struct {
	IEEEGroupFwd []string           `json:"ieee-group-forward"`
	VLANs        *vlanFilterCfgJSON `json:"vlans"` // non-nil means 802.1Q mode
	STP          bridgeSTPCfgJSON   `json:"stp"`
}

type vlanFilterCfgJSON struct {
	Proto string       `json:"proto"`
	VLANs []bridgeVLAN `json:"vlan"`
}

type bridgeVLAN struct {
	VID      int      `json:"vid"`
	Untagged []string `json:"untagged"`
	Tagged   []string `json:"tagged"`
}

// Pointer fields distinguish "leaf absent" from a legitimate zero value.
type bridgeSTPCfgJSON struct {
	ForceProtocol     string `json:"force-protocol"`
	HelloTime         *int   `json:"hello-time"`
	ForwardDelay      *int   `json:"forward-delay"`
	MaxAge            *int   `json:"max-age"`
	TransmitHoldCount *int   `json:"transmit-hold-count"`
	MaxHops           *int   `json:"max-hops"`
}

type lagCfgJSON struct {
	Mode string      `json:"mode"`
	Hash string      `json:"hash"`
	LACP lagLACPJSON `json:"lacp"`
}

type lagLACPJSON struct {
	Mode           string `json:"mode"`
	Rate           string `json:"rate"`
	SystemPriority *int   `json:"system-priority"`
}

type lagPortCfgJSON struct {
	LAG string `json:"lag"`
}

// ─── Template display types ───────────────────────────────────────────────────

type cfgVLANRow struct {
	VID         int
	UntaggedTxt string
	TaggedTxt   string
	UntaggedSet map[string]bool
	TaggedSet   map[string]bool
}

type cfgIfaceRow struct {
	ifaceJSON
	TypeSlug        string
	TypeDisplay     string
	AdminEnabled    bool   // true when enabled leaf absent (YANG default) or explicitly true
	MemberOf        string // bridge or lag name this interface belongs to
	AddrSummary     string
	BridgeMembers   []string        // interface names that are ports of this bridge/lag
	BridgeMemberSet map[string]bool // for checkbox pre-selection
	PortCandidates  []string        // free ports + current members of this bridge/lag
	BridgeIs8021Q   bool
	VLANRows        []cfgVLANRow
	IsBridge        bool
	IsBridgePort    bool
	IsLag           bool
	IsLagPort       bool
	IsVlan          bool
	HasIP           bool // can carry IP addresses
}

type cfgIfacePageData struct {
	PageData
	Loading         bool
	Interfaces      []cfgIfaceRow
	AllNames        []string // every interface name
	BridgeNames     []string // type=bridge only
	LagNames        []string // type=lag only
	Desc            map[string]string
	STPProtoOptions []schema.IdentityOption
	LagModeOptions  []schema.IdentityOption
	LagHashOptions  []schema.IdentityOption
	Error           string
}

// ─── Handler ─────────────────────────────────────────────────────────────────

// ConfigureInterfacesHandler serves the Configure > Interfaces page.
type ConfigureInterfacesHandler struct {
	Template *template.Template
	RC       restconf.Fetcher
	Schema   *schema.Cache
}

// Overview renders the Configure > Interfaces page.
// GET /configure/interfaces
func (h *ConfigureInterfacesHandler) Overview(w http.ResponseWriter, r *http.Request) {
	data := cfgIfacePageData{
		PageData: newPageData(r, "configure-interfaces", "Configure: Interfaces"),
	}

	// Schema manager may still be warming up at boot; render a loading
	// state instead of failing when no YANG metadata is available yet.
	mgr := h.Schema.Manager()
	data.Loading = mgr == nil
	if mgr != nil {
		ifPath := "/ietf-interfaces:interface"
		bPath := "/infix-interfaces:bridge"
		lPath := "/infix-interfaces:lag"
		ip4 := "/ietf-ip:ipv4"
		ip6 := "/ietf-ip:ipv6"
		// Tooltip text for form fields, pulled from the YANG descriptions.
		data.Desc = map[string]string{
			"description":    schema.DescriptionOf(mgr, ifPath+"/description"),
			"enabled":        schema.DescriptionOf(mgr, ifPath+"/enabled"),
			"bridge-type":    schema.DescriptionOf(mgr, ifPath+bPath+"/vlans"),
			"stp-force":      schema.DescriptionOf(mgr, ifPath+bPath+"/stp/force-protocol"),
			"stp-hello":      schema.DescriptionOf(mgr, ifPath+bPath+"/stp/hello-time"),
			"stp-fwd-delay":  schema.DescriptionOf(mgr, ifPath+bPath+"/stp/forward-delay"),
			"stp-max-age":    schema.DescriptionOf(mgr, ifPath+bPath+"/stp/max-age"),
			"stp-hold-count": schema.DescriptionOf(mgr, ifPath+bPath+"/stp/transmit-hold-count"),
			"stp-max-hops":   schema.DescriptionOf(mgr, ifPath+bPath+"/stp/max-hops"),
			"lag-mode":       schema.DescriptionOf(mgr, ifPath+lPath+"/mode"),
			"lag-hash":       schema.DescriptionOf(mgr, ifPath+lPath+"/hash"),
			"lacp-mode":      schema.DescriptionOf(mgr, ifPath+lPath+"/lacp/mode"),
			"lacp-rate":      schema.DescriptionOf(mgr, ifPath+lPath+"/lacp/rate"),
			"lacp-sysprio":   schema.DescriptionOf(mgr, ifPath+lPath+"/lacp/system-priority"),
			"vlan-id":        schema.DescriptionOf(mgr, ifPath+"/infix-interfaces:vlan/id"),
			"vlan-lower":     schema.DescriptionOf(mgr, ifPath+"/infix-interfaces:vlan/lower-layer-if"),
			"ipv4-address":   schema.DescriptionOf(mgr, ifPath+ip4+"/address/ip"),
			"ipv4-prefix":    schema.DescriptionOf(mgr, ifPath+ip4+"/address/prefix-length"),
			"ipv4-dhcp":      schema.DescriptionOf(mgr, ifPath+ip4+"/infix-dhcp-client:dhcp"),
			"ipv4-autoconf":  schema.DescriptionOf(mgr, ifPath+ip4+"/infix-ip:autoconf"),
			"ipv6-address":   schema.DescriptionOf(mgr, ifPath+ip6+"/address/ip"),
			"ipv6-prefix":    schema.DescriptionOf(mgr, ifPath+ip6+"/address/prefix-length"),
			"ipv6-slaac":     schema.DescriptionOf(mgr, ifPath+ip6+"/autoconf"),
			"ipv6-dhcp":      schema.DescriptionOf(mgr, ifPath+ip6+"/infix-dhcpv6-client:dhcp"),
		}
		data.STPProtoOptions = schema.OptionsFor(mgr, ifPath+bPath+"/stp/force-protocol")
		data.LagModeOptions = schema.OptionsFor(mgr, ifPath+lPath+"/mode")
		data.LagHashOptions = schema.OptionsFor(mgr, ifPath+lPath+"/hash")
	}

	ifaces, err := h.fetchAllInterfaces(r.Context())
	if err != nil {
		log.Printf("configure interfaces: %v", err)
		data.Error = "Could not read interface configuration"
	}

	data.Interfaces = h.buildRows(ifaces)

	// Collect name lists used by the "add member / assign port" selectors.
	for _, iface := range ifaces {
		slug := typeSlug(iface.Type)
		data.AllNames = append(data.AllNames, iface.Name)
		switch slug {
		case "bridge":
			data.BridgeNames = append(data.BridgeNames, iface.Name)
		case "lag":
			data.LagNames = append(data.LagNames, iface.Name)
		}
	}

	// HTMX partial requests only get the content fragment, not the full page.
	tmplName := "configure-interfaces.html"
	if r.Header.Get("HX-Request") == "true" {
		tmplName = "content"
	}
	if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil {
		log.Printf("template error: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
	}
}

// CreateInterface creates a new interface of the chosen type.
+// POST /configure/interfaces +func (h *ConfigureInterfacesHandler) CreateInterface(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := strings.TrimSpace(r.FormValue("name")) + ifType := r.FormValue("type") + if name == "" || ifType == "" { + renderSaveError(w, fmt.Errorf("name and type are required")) + return + } + + iface := map[string]any{ + "name": name, + "type": ifType, + "enabled": true, + } + + switch typeSlug(ifType) { + case "bridge": + iface["infix-interfaces:bridge"] = map[string]any{} + case "lag": + iface["infix-interfaces:lag"] = map[string]any{"mode": "static"} + case "vlan": + vid, err := strconv.Atoi(r.FormValue("vid")) + if err != nil || vid < 1 || vid > 4094 { + renderSaveError(w, fmt.Errorf("VID must be 1–4094")) + return + } + lowerIf := strings.TrimSpace(r.FormValue("lower-layer-if")) + if lowerIf == "" { + renderSaveError(w, fmt.Errorf("lower-layer interface is required for VLAN")) + return + } + iface["infix-interfaces:vlan"] = map[string]any{ + "id": vid, + "lower-layer-if": lowerIf, + } + } + + body := map[string]any{"ietf-interfaces:interface": []map[string]any{iface}} + if err := h.RC.Put(r.Context(), ifacePath(name), body); err != nil { + log.Printf("configure interfaces create %s: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, fmt.Sprintf("%s created", name), "/configure/interfaces") +} + +// SaveGeneral saves description and enabled for any interface. 
+// POST /configure/interfaces/{name} +func (h *ConfigureInterfacesHandler) SaveGeneral(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + enabled := r.FormValue("enabled") != "false" + body := map[string]any{ + "ietf-interfaces:interface": map[string]any{ + "name": name, + "enabled": enabled, + "description": strings.TrimSpace(r.FormValue("description")), + }, + } + if err := h.RC.Patch(r.Context(), ifacePath(name), body); err != nil { + log.Printf("configure interfaces %s general: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Saved", "/configure/interfaces") +} + +// AddIPv4 adds an IPv4 address to an interface. +// POST /configure/interfaces/{name}/ipv4 +func (h *ConfigureInterfacesHandler) AddIPv4(w http.ResponseWriter, r *http.Request) { + h.addAddr(w, r, "ipv4") +} + +// DeleteIPv4 removes an IPv4 address from an interface. +// DELETE /configure/interfaces/{name}/ipv4/{ip} +func (h *ConfigureInterfacesHandler) DeleteIPv4(w http.ResponseWriter, r *http.Request) { + h.deleteAddr(w, r, "ipv4") +} + +// AddIPv6 adds an IPv6 address to an interface. +// POST /configure/interfaces/{name}/ipv6 +func (h *ConfigureInterfacesHandler) AddIPv6(w http.ResponseWriter, r *http.Request) { + h.addAddr(w, r, "ipv6") +} + +// DeleteIPv6 removes an IPv6 address from an interface. +// DELETE /configure/interfaces/{name}/ipv6/{ip} +func (h *ConfigureInterfacesHandler) DeleteIPv6(w http.ResponseWriter, r *http.Request) { + h.deleteAddr(w, r, "ipv6") +} + +// SaveBridgePort assigns or updates an interface's bridge membership. 
+// POST /configure/interfaces/{name}/bridge-port +func (h *ConfigureInterfacesHandler) SaveBridgePort(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + bridge := strings.TrimSpace(r.FormValue("bridge")) + if bridge == "" { + renderSaveError(w, fmt.Errorf("bridge name is required")) + return + } + body := map[string]any{ + "infix-interfaces:bridge-port": map[string]any{ + "bridge": bridge, + }, + } + if pvid := r.FormValue("pvid"); pvid != "" { + if v, err := strconv.Atoi(pvid); err == nil && v > 0 { + body["infix-interfaces:bridge-port"].(map[string]any)["pvid"] = v + } + } + if err := h.RC.Put(r.Context(), ifacePath(name)+"/infix-interfaces:bridge-port", body); err != nil { + log.Printf("configure interfaces %s bridge-port: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Bridge port saved", "/configure/interfaces") +} + +// DeleteBridgePort removes an interface from its bridge. +// DELETE /configure/interfaces/{name}/bridge-port +func (h *ConfigureInterfacesHandler) DeleteBridgePort(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + if err := h.RC.Delete(r.Context(), ifacePath(name)+"/infix-interfaces:bridge-port"); err != nil { + log.Printf("configure interfaces %s bridge-port delete: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Removed from bridge", "/configure/interfaces") +} + +// SaveBridgeMembers performs a diff-and-write to set the bridge's member ports. 
+// POST /configure/interfaces/{name}/bridge/members +func (h *ConfigureInterfacesHandler) SaveBridgeMembers(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + h.saveMembersDiff(w, r, r.PathValue("name"), "bridge", + func(iface ifaceJSON, master string) bool { + return iface.BridgePort != nil && iface.BridgePort.Bridge == master + }, "Bridge members saved") +} + +// SaveBridge saves bridge STP settings and bridge type. +// POST /configure/interfaces/{name}/bridge +func (h *ConfigureInterfacesHandler) SaveBridge(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + bridge := map[string]any{} + + stp := map[string]any{} + if v := r.FormValue("stp-force-protocol"); v != "" { + stp["force-protocol"] = v + } + for _, pair := range []struct{ form, yang string }{ + {"stp-hello-time", "hello-time"}, + {"stp-forward-delay", "forward-delay"}, + {"stp-max-age", "max-age"}, + {"stp-transmit-hold-count", "transmit-hold-count"}, + {"stp-max-hops", "max-hops"}, + } { + if v, err := strconv.Atoi(r.FormValue(pair.form)); err == nil { + stp[pair.yang] = v + } + } + if len(stp) > 0 { + bridge["stp"] = stp + } + + if len(bridge) > 0 { + body := map[string]any{"infix-interfaces:bridge": bridge} + if err := h.RC.Patch(r.Context(), ifacePath(name)+"/infix-interfaces:bridge", body); err != nil { + log.Printf("configure interfaces %s bridge: %v", name, err) + renderSaveError(w, err) + return + } + } + + // The bridge type choice is expressed via the vlans presence container: + // 802.1Q = vlans container present; 802.1D = vlans container absent. 
+ vlansPath := ifacePath(name) + "/infix-interfaces:bridge/vlans" + if r.FormValue("bridge-type") == "ieee8021q" { + body := map[string]any{"vlans": map[string]any{}} + if err := h.RC.Put(r.Context(), vlansPath, body); err != nil { + log.Printf("configure interfaces %s bridge type 8021q: %v", name, err) + renderSaveError(w, err) + return + } + } else { + if err := h.RC.Delete(r.Context(), vlansPath); err != nil { + // 404 is fine — vlans already absent (802.1D) + log.Printf("configure interfaces %s bridge type 8021d (delete vlans): %v", name, err) + } + } + + renderSavedRedirect(w, "Bridge saved", "/configure/interfaces") +} + +// AddVLAN creates a new VLAN on an ieee8021q bridge. +// POST /configure/interfaces/{name}/bridge/vlans +func (h *ConfigureInterfacesHandler) AddVLAN(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + vid, err := strconv.Atoi(r.FormValue("vid")) + if err != nil || vid < 1 || vid > 4094 { + renderSaveError(w, fmt.Errorf("VID must be 1–4094")) + return + } + vlan := map[string]any{"vid": vid} + if untagged := r.Form["untagged"]; len(untagged) > 0 { + vlan["untagged"] = untagged + } + if tagged := r.Form["tagged"]; len(tagged) > 0 { + vlan["tagged"] = tagged + } + body := map[string]any{"infix-interfaces:vlan": []map[string]any{vlan}} + path := ifacePath(name) + "/infix-interfaces:bridge/vlans/vlan=" + strconv.Itoa(vid) + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure interfaces %s vlan add %d: %v", name, vid, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "VLAN added", "/configure/interfaces") +} + +// SaveVLAN updates the untagged/tagged port sets for an existing VLAN. 
+// POST /configure/interfaces/{name}/bridge/vlans/{vid} +func (h *ConfigureInterfacesHandler) SaveVLAN(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + vidStr := r.PathValue("vid") + vid, err := strconv.Atoi(vidStr) + if err != nil { + http.Error(w, "invalid vid", http.StatusBadRequest) + return + } + untagged := r.Form["untagged"] + if untagged == nil { + untagged = []string{} + } + tagged := r.Form["tagged"] + if tagged == nil { + tagged = []string{} + } + vlan := map[string]any{ + "vid": vid, + "untagged": untagged, + "tagged": tagged, + } + body := map[string]any{"infix-interfaces:vlan": []map[string]any{vlan}} + path := ifacePath(name) + "/infix-interfaces:bridge/vlans/vlan=" + vidStr + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure interfaces %s vlan save %d: %v", name, vid, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "VLAN saved", "/configure/interfaces") +} + +// DeleteVLAN removes a VLAN from an ieee8021q bridge. +// DELETE /configure/interfaces/{name}/bridge/vlans/{vid} +func (h *ConfigureInterfacesHandler) DeleteVLAN(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + vidStr := r.PathValue("vid") + path := ifacePath(name) + "/infix-interfaces:bridge/vlans/vlan=" + vidStr + if err := h.RC.Delete(r.Context(), path); err != nil { + log.Printf("configure interfaces %s vlan delete %s: %v", name, vidStr, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "VLAN deleted", "/configure/interfaces") +} + +// SaveLagPort assigns an interface to a LAG. 
+// POST /configure/interfaces/{name}/lag-port +func (h *ConfigureInterfacesHandler) SaveLagPort(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + lagName := strings.TrimSpace(r.FormValue("lag")) + if lagName == "" { + renderSaveError(w, fmt.Errorf("LAG name is required")) + return + } + body := map[string]any{ + "infix-interfaces:lag-port": map[string]any{"lag": lagName}, + } + if err := h.RC.Put(r.Context(), ifacePath(name)+"/infix-interfaces:lag-port", body); err != nil { + log.Printf("configure interfaces %s lag-port: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "LAG port saved", "/configure/interfaces") +} + +// DeleteLagPort removes an interface from its LAG. +// DELETE /configure/interfaces/{name}/lag-port +func (h *ConfigureInterfacesHandler) DeleteLagPort(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + if err := h.RC.Delete(r.Context(), ifacePath(name)+"/infix-interfaces:lag-port"); err != nil { + log.Printf("configure interfaces %s lag-port delete: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Removed from LAG", "/configure/interfaces") +} + +// SaveLAG saves LAG mode and LACP settings. 
+// POST /configure/interfaces/{name}/lag +func (h *ConfigureInterfacesHandler) SaveLAG(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + lag := map[string]any{ + "mode": r.FormValue("mode"), + } + if hash := r.FormValue("hash"); hash != "" { + lag["hash"] = hash + } + if r.FormValue("mode") == "lacp" { + lacp := map[string]any{ + "mode": r.FormValue("lacp-mode"), + "rate": r.FormValue("lacp-rate"), + } + if v, err := strconv.Atoi(r.FormValue("lacp-system-priority")); err == nil { + lacp["system-priority"] = v + } + lag["lacp"] = lacp + } + body := map[string]any{"infix-interfaces:lag": lag} + if err := h.RC.Patch(r.Context(), ifacePath(name)+"/infix-interfaces:lag", body); err != nil { + log.Printf("configure interfaces %s lag: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "LAG saved", "/configure/interfaces") +} + +// SaveLAGMembers performs a diff-and-write to set the LAG's member ports. +// POST /configure/interfaces/{name}/lag/members +func (h *ConfigureInterfacesHandler) SaveLAGMembers(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + h.saveMembersDiff(w, r, r.PathValue("name"), "lag", + func(iface ifaceJSON, master string) bool { + return iface.LagPort != nil && iface.LagPort.LAG == master + }, "LAG members saved") +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +func ifacePath(name string) string { + return ifaceCandPath + "/interface=" + url.PathEscape(name) +} + +// saveMembersDiff syncs bridge/lag membership by diffing submitted form values +// against the current state: add port-kind for new members, remove for ex-members. +// kind is "bridge" or "lag"; it determines the YANG augment path and body key. 
func (h *ConfigureInterfacesHandler) saveMembersDiff(w http.ResponseWriter, r *http.Request,
	masterName, kind string, isMember func(ifaceJSON, string) bool, successMsg string) {

	ifaces, err := h.fetchAllInterfaces(r.Context())
	if err != nil {
		renderSaveError(w, err)
		return
	}

	// Desired membership set, straight from the submitted checkboxes.
	submitted := make(map[string]bool)
	for _, m := range r.Form["members"] {
		submitted[m] = true
	}

	portKey := "infix-interfaces:" + kind + "-port"
	for _, iface := range ifaces {
		// Interfaces of the master's own kind (all bridges / all lags) can
		// never be member ports, so skip them.
		if typeSlug(iface.Type) == kind {
			continue // skip master interface itself
		}
		currentlyMember := isMember(iface, masterName)
		wantMember := submitted[iface.Name]
		portPath := ifacePath(iface.Name) + "/" + portKey

		// First failed write aborts the sync; earlier writes remain in the
		// candidate and can be retried or discarded from there.
		if wantMember && !currentlyMember {
			body := map[string]any{portKey: map[string]any{kind: masterName}}
			if err := h.RC.Put(r.Context(), portPath, body); err != nil {
				log.Printf("configure interfaces %s members add %s→%s: %v", kind, iface.Name, masterName, err)
				renderSaveError(w, err)
				return
			}
		} else if !wantMember && currentlyMember {
			if err := h.RC.Delete(r.Context(), portPath); err != nil {
				log.Printf("configure interfaces %s members remove %s from %s: %v", kind, iface.Name, masterName, err)
				renderSaveError(w, err)
				return
			}
		}
	}
	renderSavedRedirect(w, successMsg, "/configure/interfaces")
}

// fetchAllInterfaces returns all configured interfaces from the candidate
// datastore, falling back to running only when the candidate has none (404).
func (h *ConfigureInterfacesHandler) fetchAllInterfaces(ctx context.Context) ([]ifaceJSON, error) {
	var wrap interfacesWrapper
	if err := h.RC.Get(ctx, ifaceCandPath, &wrap); err != nil {
		// Fall back to running only on 404 (candidate has no interfaces configured yet).
		// Any other error (validation failure, server error) is surfaced directly to
		// avoid silently showing stale running data while the candidate is in bad shape.
		var rcErr *restconf.Error
		if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound {
			log.Printf("configure interfaces: candidate returned 404, using running datastore")
			if err2 := h.RC.Get(ctx, "/data/ietf-interfaces:interfaces", &wrap); err2 != nil {
				return nil, err2
			}
		} else {
			return nil, err
		}
	}
	return wrap.Interfaces.Interface, nil
}

// membership records which bridge/lag (master) an interface is enslaved to.
type membership struct{ kind, master string }

// buildRows converts raw interface JSON into sorted display rows, resolving
// bridge/LAG membership, VLAN tables, and port candidates for each row.
func (h *ConfigureInterfacesHandler) buildRows(ifaces []ifaceJSON) []cfgIfaceRow {
	// Build a set of current bridge/lag members for fast lookup.
	memberOf := make(map[string]membership, len(ifaces))
	for _, iface := range ifaces {
		if iface.BridgePort != nil && iface.BridgePort.Bridge != "" {
			memberOf[iface.Name] = membership{"bridge", iface.BridgePort.Bridge}
		} else if iface.LagPort != nil && iface.LagPort.LAG != "" {
			memberOf[iface.Name] = membership{"lag", iface.LagPort.LAG}
		}
	}

	// Pre-compute bridge member sets.
	bridgeMembers := make(map[string][]string)
	for _, iface := range ifaces {
		if m, ok := memberOf[iface.Name]; ok && m.kind == "bridge" {
			bridgeMembers[m.master] = append(bridgeMembers[m.master], iface.Name)
		}
	}
	lagMembers := make(map[string][]string)
	for _, iface := range ifaces {
		if m, ok := memberOf[iface.Name]; ok && m.kind == "lag" {
			lagMembers[m.master] = append(lagMembers[m.master], iface.Name)
		}
	}

	rows := make([]cfgIfaceRow, 0, len(ifaces))
	for _, iface := range ifaces {
		slug := typeSlug(iface.Type)
		row := cfgIfaceRow{
			ifaceJSON:    iface,
			TypeSlug:     slug,
			TypeDisplay:  typeDisplay(slug),
			AdminEnabled: iface.Enabled == nil || *iface.Enabled,
			IsBridge:     slug == "bridge",
			IsBridgePort: iface.BridgePort != nil,
			IsLag:        slug == "lag",
			IsLagPort:    iface.LagPort != nil,
			IsVlan:       slug == "vlan",
		}
		if m, ok := memberOf[iface.Name]; ok {
			row.MemberOf = m.master
		}
		// Enslaved ports cannot carry IP addresses of their own.
		row.HasIP = !row.IsBridgePort && !row.IsLagPort
		row.AddrSummary = addrSummary(iface)

		if row.IsBridge {
			members := bridgeMembers[iface.Name]
			sort.Strings(members)
			row.BridgeMembers = members
			row.BridgeMemberSet = make(map[string]bool, len(members))
			for _, m := range members {
				row.BridgeMemberSet[m] = true
			}
			row.PortCandidates = portCandidatesFor(iface.Name, ifaces, memberOf)
			// A present vlans container marks the bridge as 802.1Q.
			if iface.Bridge != nil && iface.Bridge.VLANs != nil {
				row.BridgeIs8021Q = true
				if q := iface.Bridge.VLANs; q != nil {
					for _, v := range q.VLANs {
						untaggedSet := make(map[string]bool, len(v.Untagged))
						for _, u := range v.Untagged {
							untaggedSet[u] = true
						}
						taggedSet := make(map[string]bool, len(v.Tagged))
						for _, t := range v.Tagged {
							taggedSet[t] = true
						}
						row.VLANRows = append(row.VLANRows, cfgVLANRow{
							VID:         v.VID,
							UntaggedTxt: strings.Join(v.Untagged, ", "),
							TaggedTxt:   strings.Join(v.Tagged, ", "),
							UntaggedSet: untaggedSet,
							TaggedSet:   taggedSet,
						})
					}
				}
			}
		}

		if row.IsLag {
			members := lagMembers[iface.Name]
			sort.Strings(members)
			row.BridgeMembers = members // reuse field — LAG members shown same way
			row.BridgeMemberSet = make(map[string]bool, len(members))
			for _, m := range members {
				row.BridgeMemberSet[m] = true
			}
			row.PortCandidates = portCandidatesFor(iface.Name, ifaces, memberOf)
		}

		rows = append(rows, row)
	}

	sort.Slice(rows, func(i, j int) bool {
		ri, rj := rows[i], rows[j]
		// Sort order: bridge/lag first, then by type, then by name.
		orderI, orderJ := typeOrder(ri.TypeSlug), typeOrder(rj.TypeSlug)
		if orderI != orderJ {
			return orderI < orderJ
		}
		return ri.Name < rj.Name
	})
	return rows
}

// portCandidatesFor returns sorted candidate port names for a given bridge or LAG.
// Included: ports that are free (no master) or already a member of masterName.
// Excluded: bridge, lag, loopback, dummy, wireguard, tunnel types, and ports
// enslaved to a different master.
+func portCandidatesFor(masterName string, ifaces []ifaceJSON, memberOf map[string]membership) []string { + var out []string + for _, iface := range ifaces { + switch typeSlug(iface.Type) { + case "bridge", "lag", "loopback", "dummy", "wireguard", "gre", "gretap", "vxlan": + continue + } + m := memberOf[iface.Name] + if m.master == "" || m.master == masterName { + out = append(out, iface.Name) + } + } + sort.Strings(out) + return out +} + +func (h *ConfigureInterfacesHandler) addAddr(w http.ResponseWriter, r *http.Request, family string) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + ip := strings.TrimSpace(r.FormValue("ip")) + prefixStr := r.FormValue("prefix-length") + prefix, err := strconv.Atoi(prefixStr) + if err != nil || ip == "" { + renderSaveError(w, fmt.Errorf("valid IP address and prefix length required")) + return + } + body := map[string]any{ + "ietf-ip:address": map[string]any{ + "ip": ip, + "prefix-length": prefix, + }, + } + path := ifacePath(name) + "/ietf-ip:" + family + "/address=" + url.PathEscape(ip) + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure interfaces %s add %s addr: %v", name, family, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Address added", "/configure/interfaces") +} + +func (h *ConfigureInterfacesHandler) deleteAddr(w http.ResponseWriter, r *http.Request, family string) { + name := r.PathValue("name") + ip := r.PathValue("ip") + path := ifacePath(name) + "/ietf-ip:" + family + "/address=" + url.PathEscape(ip) + if err := h.RC.Delete(r.Context(), path); err != nil { + log.Printf("configure interfaces %s delete %s addr %s: %v", name, family, ip, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Address removed", "/configure/interfaces") +} + +// SaveIPv4DHCP enables or disables the DHCPv4 client presence container. 
+// POST /configure/interfaces/{name}/ipv4/dhcp +func (h *ConfigureInterfacesHandler) SaveIPv4DHCP(w http.ResponseWriter, r *http.Request) { + h.togglePresence(w, r, + ifacePath(r.PathValue("name"))+"/ietf-ip:ipv4/infix-dhcp-client:dhcp", + "infix-dhcp-client:dhcp", + "DHCP client") +} + +// SaveIPv4Autoconf enables or disables IPv4 link-local autoconfiguration. +// POST /configure/interfaces/{name}/ipv4/autoconf +func (h *ConfigureInterfacesHandler) SaveIPv4Autoconf(w http.ResponseWriter, r *http.Request) { + h.togglePresence(w, r, + ifacePath(r.PathValue("name"))+"/ietf-ip:ipv4/infix-ip:autoconf", + "infix-ip:autoconf", + "IPv4 link-local") +} + +// SaveIPv6SLAAC enables or disables IPv6 SLAAC (autoconf). +// POST /configure/interfaces/{name}/ipv6/autoconf +func (h *ConfigureInterfacesHandler) SaveIPv6SLAAC(w http.ResponseWriter, r *http.Request) { + h.togglePresence(w, r, + ifacePath(r.PathValue("name"))+"/ietf-ip:ipv6/autoconf", + "autoconf", + "IPv6 SLAAC") +} + +// SaveIPv6DHCP enables or disables the DHCPv6 client presence container. 
+// POST /configure/interfaces/{name}/ipv6/dhcp +func (h *ConfigureInterfacesHandler) SaveIPv6DHCP(w http.ResponseWriter, r *http.Request) { + h.togglePresence(w, r, + ifacePath(r.PathValue("name"))+"/ietf-ip:ipv6/infix-dhcpv6-client:dhcp", + "infix-dhcpv6-client:dhcp", + "DHCPv6 client") +} + +func (h *ConfigureInterfacesHandler) togglePresence(w http.ResponseWriter, r *http.Request, path, bodyKey, label string) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + if r.FormValue("enabled") == "true" { + body := map[string]any{bodyKey: map[string]any{}} + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure interfaces %s enable %s: %v", name, label, err) + renderSaveError(w, err) + return + } + } else { + if err := h.RC.Delete(r.Context(), path); err != nil { + var rcErr *restconf.Error + if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound { + // already absent — desired state achieved + } else { + log.Printf("configure interfaces %s disable %s: %v", name, label, err) + renderSaveError(w, err) + return + } + } + } + renderSavedRedirect(w, label+" updated", "/configure/interfaces") +} + +func typeSlug(yangType string) string { + s := schema.StripModulePrefix(yangType) + // Normalise iana-if-type identities to infix slugs where relevant. 
+ switch s { + case "bridge": + return "bridge" + case "ieee8023adLag": + return "lag" + case "ethernetCsmacd": + return "ethernet" + case "l2vlan": + return "vlan" + case "softwareLoopback": + return "loopback" + } + return s +} + +func typeDisplay(slug string) string { + switch slug { + case "bridge": + return "Bridge" + case "lag": + return "LAG" + case "ethernet": + return "Ethernet" + case "vlan": + return "VLAN" + case "loopback": + return "Loopback" + case "dummy": + return "Dummy" + case "wireguard": + return "WireGuard" + case "veth": + return "veth" + case "gre", "gretap": + return strings.ToUpper(slug) + case "vxlan": + return "VXLAN" + } + return slug +} + +func typeOrder(slug string) int { + switch slug { + case "bridge": + return 0 + case "lag": + return 1 + case "ethernet": + return 2 + case "vlan": + return 3 + case "loopback": + return 4 + default: + return 5 + } +} + +func addrSummary(iface ifaceJSON) string { + var addrs []string + if iface.IPv4 != nil { + for _, a := range iface.IPv4.Address { + addrs = append(addrs, fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength))) + } + } + if iface.IPv6 != nil { + for _, a := range iface.IPv6.Address { + addrs = append(addrs, fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength))) + } + } + switch len(addrs) { + case 0: + return "" + case 1: + return addrs[0] + default: + return fmt.Sprintf("%d addresses", len(addrs)) + } +} diff --git a/src/webui/internal/handlers/configure_keystore.go b/src/webui/internal/handlers/configure_keystore.go new file mode 100644 index 000000000..ec1bb5c19 --- /dev/null +++ b/src/webui/internal/handlers/configure_keystore.go @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "html/template" + "log" + "net/http" + "net/url" + "strings" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +const 
keystorePath = candidatePath + "/ietf-keystore:keystore" + +// ─── Template data ──────────────────────────────────────────────────────────── + +type cfgKeystorePageData struct { + PageData + Loading bool + SymmetricKeys []cfgSymKeyEntry + AsymmetricKeys []cfgAsymKeyEntry + SymKeyFormats []schema.IdentityOption + Error string +} + +type cfgSymKeyEntry struct { + Name string + Format string + Value string +} + +type cfgCertEntry struct { + Name string + PEM string // DER re-encoded as PEM for display +} + +type cfgAsymKeyEntry struct { + Name string + Algorithm string + PublicKeyPEM string + PrivateKeyPEM string + Certificates []cfgCertEntry +} + +// ─── Handler ────────────────────────────────────────────────────────────────── + +type ConfigureKeystoreHandler struct { + Template *template.Template + RC restconf.Fetcher + Schema *schema.Cache +} + +// Overview renders the Configure > Keystore page reading from the candidate. +// GET /configure/keystore +func (h *ConfigureKeystoreHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := cfgKeystorePageData{ + PageData: newPageData(r, "configure-keystore", "Configure: Keystore"), + } + + var ks keystoreWrapper + if err := h.RC.Get(r.Context(), keystorePath, &ks); err != nil { + var rcErr *restconf.Error + if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound { + if fallErr := h.RC.Get(r.Context(), "/data/ietf-keystore:keystore", &ks); fallErr != nil { + var rcFall *restconf.Error + if !errors.As(fallErr, &rcFall) || rcFall.StatusCode != http.StatusNotFound { + log.Printf("configure keystore (running fallback): %v", fallErr) + data.Error = "Could not read keystore" + } + } + } else { + log.Printf("configure keystore: %v", err) + data.Error = "Could not read keystore" + } + } + + for _, k := range ks.Keystore.SymmetricKeys.SymmetricKey { + data.SymmetricKeys = append(data.SymmetricKeys, cfgSymKeyEntry{ + Name: k.Name, + Format: shortFormat(k.KeyFormat), + Value: decodeSymmetricValue(k), + }) + 
} + for _, k := range ks.Keystore.AsymmetricKeys.AsymmetricKey { + entry := cfgAsymKeyEntry{ + Name: k.Name, + Algorithm: asymAlgorithm(k), + PublicKeyPEM: derBase64ToPEM(k.PublicKey, pemBlockType(k.PublicKeyFormat)), + PrivateKeyPEM: derBase64ToPEM(k.CleartextPrivateKey, pemBlockType(k.PrivateKeyFormat)), + } + for _, c := range k.Certificates.Certificate { + entry.Certificates = append(entry.Certificates, cfgCertEntry{ + Name: c.Name, + PEM: derBase64ToPEM(c.CertData, "CERTIFICATE"), + }) + } + data.AsymmetricKeys = append(data.AsymmetricKeys, entry) + } + + mgr := h.Schema.Manager() + data.Loading = mgr == nil + if mgr != nil { + data.SymKeyFormats = schema.OptionsFor(mgr, "/ietf-keystore:keystore/symmetric-keys/symmetric-key/key-format") + } + + tmplName := "configure-keystore.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// AddSymKey adds a symmetric key to the candidate. +// POST /configure/keystore/symmetric +func (h *ConfigureKeystoreHandler) AddSymKey(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := strings.TrimSpace(r.FormValue("name")) + value := r.FormValue("value") + format := r.FormValue("format") + if format == "" { + format = "infix-crypto-types:passphrase-key-format" + } + if name == "" { + renderSaveError(w, fmt.Errorf("name is required")) + return + } + + // Passphrase values are base64-encoded plaintext in the YANG model. 
+ keyB64 := base64.StdEncoding.EncodeToString([]byte(value)) + + body := map[string]any{ + "ietf-keystore:symmetric-key": []map[string]any{{ + "name": name, + "key-format": format, + "cleartext-symmetric-key": keyB64, + }}, + } + path := keystorePath + "/symmetric-keys/symmetric-key=" + url.PathEscape(name) + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure keystore add sym %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Symmetric key added", "/configure/keystore") +} + +// DeleteSymKey removes a symmetric key from the candidate. +// DELETE /configure/keystore/symmetric/{name} +func (h *ConfigureKeystoreHandler) DeleteSymKey(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + path := keystorePath + "/symmetric-keys/symmetric-key=" + url.PathEscape(name) + if err := h.RC.Delete(r.Context(), path); err != nil { + log.Printf("configure keystore delete sym %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Symmetric key deleted", "/configure/keystore") +} + +// AddAsymKey adds an asymmetric key from a PEM-encoded private key. 
+// POST /configure/keystore/asymmetric +func (h *ConfigureKeystoreHandler) AddAsymKey(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := strings.TrimSpace(r.FormValue("name")) + privPEM := r.FormValue("private_key") + pubPEM := strings.TrimSpace(r.FormValue("public_key")) + if name == "" { + renderSaveError(w, fmt.Errorf("name is required")) + return + } + if privPEM == "" { + renderSaveError(w, fmt.Errorf("private key is required")) + return + } + + block, _ := pem.Decode([]byte(privPEM)) + if block == nil { + renderSaveError(w, fmt.Errorf("invalid private key PEM: no PEM block found")) + return + } + privB64 := base64.StdEncoding.EncodeToString(block.Bytes) + + keyBody := map[string]any{ + "name": name, + "private-key-format": pemTypeToKeyFormat(block.Type), + "cleartext-private-key": privB64, + } + + // Use explicitly provided public key, or derive from private key (avoids re-parsing PEM). + if pubPEM == "" { + pubPEM = derivePublicKeyFromDER(block.Bytes, block.Type) + } + if pubPEM != "" { + if err := applyPublicKey(keyBody, pubPEM); err != nil { + renderSaveError(w, err) + return + } + } + + body := map[string]any{"ietf-keystore:asymmetric-key": []map[string]any{keyBody}} + path := keystorePath + "/asymmetric-keys/asymmetric-key=" + url.PathEscape(name) + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure keystore add asym %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Asymmetric key added", "/configure/keystore") +} + +// derivePublicKeyFromDER extracts the public key from DER-encoded private key bytes +// and returns it as a PKIX PEM block. Returns empty string if derivation fails. 
+func derivePublicKeyFromDER(der []byte, pemType string) string { + var pubKey any + switch strings.ToUpper(pemType) { + case "EC PRIVATE KEY": + k, err := x509.ParseECPrivateKey(der) + if err != nil { + return "" + } + pubKey = &k.PublicKey + case "RSA PRIVATE KEY": + k, err := x509.ParsePKCS1PrivateKey(der) + if err != nil { + return "" + } + pubKey = &k.PublicKey + default: // PKCS#8 / "PRIVATE KEY" + k, err := x509.ParsePKCS8PrivateKey(der) + if err != nil { + return "" + } + switch v := k.(type) { + case *ecdsa.PrivateKey: + pubKey = &v.PublicKey + case *rsa.PrivateKey: + pubKey = &v.PublicKey + default: + return "" + } + } + + pubDER, err := x509.MarshalPKIXPublicKey(pubKey) + if err != nil { + return "" + } + return string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubDER})) +} + +// applyPublicKey parses pubPEM and sets public-key and public-key-format in keyBody. +func applyPublicKey(keyBody map[string]any, pubPEM string) error { + b64, pemType, err := parsePEMBody(pubPEM) + if err != nil { + return fmt.Errorf("invalid public key PEM: %w", err) + } + keyBody["public-key-format"] = pemTypeToKeyFormat(pemType) + keyBody["public-key"] = b64 + return nil +} + +// DeleteAsymKey removes an asymmetric key from the candidate. +// DELETE /configure/keystore/asymmetric/{name} +func (h *ConfigureKeystoreHandler) DeleteAsymKey(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + path := keystorePath + "/asymmetric-keys/asymmetric-key=" + url.PathEscape(name) + if err := h.RC.Delete(r.Context(), path); err != nil { + log.Printf("configure keystore delete asym %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Asymmetric key deleted", "/configure/keystore") +} + +// AddCert adds a certificate to an asymmetric key entry. 
+// POST /configure/keystore/asymmetric/{name}/certs +func (h *ConfigureKeystoreHandler) AddCert(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + keyName := r.PathValue("name") + certName := strings.TrimSpace(r.FormValue("cert_name")) + pemData := r.FormValue("cert_data") + if certName == "" || pemData == "" { + renderSaveError(w, fmt.Errorf("certificate name and data are required")) + return + } + + certB64, _, err := parsePEMBody(pemData) + if err != nil { + renderSaveError(w, fmt.Errorf("invalid PEM: %w", err)) + return + } + + body := map[string]any{ + "ietf-keystore:certificate": []map[string]any{{ + "name": certName, + "cert-data": certB64, + }}, + } + path := keystorePath + "/asymmetric-keys/asymmetric-key=" + url.PathEscape(keyName) + + "/certificates/certificate=" + url.PathEscape(certName) + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure keystore add cert %q/%q: %v", keyName, certName, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Certificate added", "/configure/keystore") +} + +// UpdateCert replaces the PEM data of an existing certificate. 
+// POST /configure/keystore/asymmetric/{name}/certs/{certname} +func (h *ConfigureKeystoreHandler) UpdateCert(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + keyName := r.PathValue("name") + certName := r.PathValue("certname") + pemData := r.FormValue("cert_data") + if pemData == "" { + renderSaveError(w, fmt.Errorf("certificate PEM data is required")) + return + } + + certB64, _, err := parsePEMBody(pemData) + if err != nil { + renderSaveError(w, fmt.Errorf("invalid PEM: %w", err)) + return + } + + body := map[string]any{ + "ietf-keystore:certificate": []map[string]any{{ + "name": certName, + "cert-data": certB64, + }}, + } + path := keystorePath + "/asymmetric-keys/asymmetric-key=" + url.PathEscape(keyName) + + "/certificates/certificate=" + url.PathEscape(certName) + if err := h.RC.Patch(r.Context(), path, body); err != nil { + log.Printf("configure keystore update cert %q/%q: %v", keyName, certName, err) + renderSaveError(w, err) + return + } + renderSaved(w, "Certificate updated") +} + +// DeleteCert removes a certificate from an asymmetric key entry. +// DELETE /configure/keystore/asymmetric/{name}/certs/{certname} +func (h *ConfigureKeystoreHandler) DeleteCert(w http.ResponseWriter, r *http.Request) { + keyName := r.PathValue("name") + certName := r.PathValue("certname") + path := keystorePath + "/asymmetric-keys/asymmetric-key=" + url.PathEscape(keyName) + + "/certificates/certificate=" + url.PathEscape(certName) + if err := h.RC.Delete(r.Context(), path); err != nil { + log.Printf("configure keystore delete cert %q/%q: %v", keyName, certName, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Certificate deleted", "/configure/keystore") +} + +// derBase64ToPEM converts a base64-encoded DER blob (as returned by RESTCONF +// for YANG binary fields) back into a PEM-encoded string for display. 
+func derBase64ToPEM(b64, blockType string) string { + b64 = strings.ReplaceAll(b64, "\n", "") + der, err := base64.StdEncoding.DecodeString(b64) + if err != nil || len(der) == 0 { + return "" + } + return string(pem.EncodeToMemory(&pem.Block{Type: blockType, Bytes: der})) +} + +// UpdateSymKey changes the value of an existing symmetric key in the candidate. +// POST /configure/keystore/symmetric/{name} +func (h *ConfigureKeystoreHandler) UpdateSymKey(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + value := r.FormValue("value") + format := r.FormValue("format") + if format == "" { + format = "infix-crypto-types:passphrase-key-format" + } + keyB64 := base64.StdEncoding.EncodeToString([]byte(value)) + body := map[string]any{ + "ietf-keystore:symmetric-key": []map[string]any{{ + "name": name, + "key-format": format, + "cleartext-symmetric-key": keyB64, + }}, + } + path := keystorePath + "/symmetric-keys/symmetric-key=" + url.PathEscape(name) + if err := h.RC.Patch(r.Context(), path, body); err != nil { + log.Printf("configure keystore update sym %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSaved(w, "Key updated") +} + +// UpdateAsymKey updates the private and/or public key of an asymmetric key entry. 
+// POST /configure/keystore/asymmetric/{name} +func (h *ConfigureKeystoreHandler) UpdateAsymKey(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + privPEM := strings.TrimSpace(r.FormValue("private_key")) + pubPEM := strings.TrimSpace(r.FormValue("public_key")) + if privPEM == "" && pubPEM == "" { + renderSaveError(w, fmt.Errorf("at least one key field is required")) + return + } + + keyBody := map[string]any{"name": name} + if privPEM != "" { + block, _ := pem.Decode([]byte(privPEM)) + if block == nil { + renderSaveError(w, fmt.Errorf("invalid private key PEM: no PEM block found")) + return + } + keyBody["private-key-format"] = pemTypeToKeyFormat(block.Type) + keyBody["cleartext-private-key"] = base64.StdEncoding.EncodeToString(block.Bytes) + if pubPEM == "" { + pubPEM = derivePublicKeyFromDER(block.Bytes, block.Type) + } + } + if pubPEM != "" { + if err := applyPublicKey(keyBody, pubPEM); err != nil { + renderSaveError(w, err) + return + } + } + + body := map[string]any{"ietf-keystore:asymmetric-key": []map[string]any{keyBody}} + path := keystorePath + "/asymmetric-keys/asymmetric-key=" + url.PathEscape(name) + if err := h.RC.Patch(r.Context(), path, body); err != nil { + log.Printf("configure keystore update asym %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSaved(w, "Key updated") +} + +// pemBlockType maps a YANG key-format identity string to the corresponding PEM block type. 
+func pemBlockType(format string) string { + switch shortFormat(format) { + case "ec": + return "EC PRIVATE KEY" + case "rsa": + return "RSA PRIVATE KEY" + case "subject-public-key-info": + return "PUBLIC KEY" + default: // one-asymmetric (PKCS#8) and anything else + return "PRIVATE KEY" + } +} + +// parsePEMBody decodes a PEM block and returns the DER content re-encoded as +// standard base64 (no line breaks), plus the PEM block type string. +func parsePEMBody(s string) (b64 string, pemType string, err error) { + block, _ := pem.Decode([]byte(s)) + if block == nil { + return "", "", fmt.Errorf("no PEM block found") + } + return base64.StdEncoding.EncodeToString(block.Bytes), block.Type, nil +} + +// pemTypeToKeyFormat maps a PEM block type to the appropriate ietf-crypto-types identity. +func pemTypeToKeyFormat(pemType string) string { + switch strings.ToUpper(pemType) { + case "EC PRIVATE KEY": + return "ietf-crypto-types:ec-private-key-format" + case "RSA PRIVATE KEY": + return "ietf-crypto-types:rsa-private-key-format" + case "PUBLIC KEY": + return "ietf-crypto-types:subject-public-key-info-format" + default: // "PRIVATE KEY" (PKCS#8 / one-asymmetric-key) and anything else + return "ietf-crypto-types:one-asymmetric-key-format" + } +} diff --git a/src/webui/internal/handlers/configure_routes.go b/src/webui/internal/handlers/configure_routes.go new file mode 100644 index 000000000..3e7d771d1 --- /dev/null +++ b/src/webui/internal/handlers/configure_routes.go @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "errors" + "fmt" + "html/template" + "log" + "net/http" + "net/url" + "strings" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +// ─── RESTCONF paths ────────────────────────────────────────────────────────── + +const ( + staticCPPSuffix = "/ietf-routing:routing/control-plane-protocols/control-plane-protocol=infix-routing%3Astatic,static" + staticRtSuffix = 
staticCPPSuffix + "/static-routes" + staticIPv4Suffix = staticRtSuffix + "/ietf-ipv4-unicast-routing:ipv4" + staticIPv6Suffix = staticRtSuffix + "/ietf-ipv6-unicast-routing:ipv6" +) + +// ─── RESTCONF JSON types ────────────────────────────────────────────────────── + +type staticCPPWrapper struct { + Routing struct { + CPPs struct { + CPP []staticCPPJSON `json:"control-plane-protocol"` + } `json:"control-plane-protocols"` + } `json:"ietf-routing:routing"` +} + +// staticCPPJSON mirrors control-plane-protocol. The ipv4/ipv6 containers are +// augmented into the child static-routes container, not directly into the CPP. +type staticCPPJSON struct { + Type string `json:"type"` + Name string `json:"name"` + StaticRoutes staticRoutesJSON `json:"static-routes"` +} + +type staticRoutesJSON struct { + IPv4 *staticIPJSON `json:"ietf-ipv4-unicast-routing:ipv4,omitempty"` + IPv6 *staticIPJSON `json:"ietf-ipv6-unicast-routing:ipv6,omitempty"` +} + +type staticIPJSON struct { + Routes []staticRouteJSON `json:"route"` +} + +type staticRouteJSON struct { + Prefix string `json:"destination-prefix"` + NextHop staticNextHopJSON `json:"next-hop"` +} + +type staticNextHopJSON struct { + Address string `json:"next-hop-address,omitempty"` + Interface string `json:"outgoing-interface,omitempty"` +} + +// ─── Template data ──────────────────────────────────────────────────────────── + +type cfgRouteEntry struct { + Prefix string + NextHop string + Interface string +} + +type cfgRoutesPageData struct { + PageData + Loading bool + IPv4Routes []cfgRouteEntry + IPv6Routes []cfgRouteEntry + Desc map[string]string + Error string +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// ConfigureRoutesHandler serves the Configure > Routes page. +type ConfigureRoutesHandler struct { + Template *template.Template + RC restconf.Fetcher + Schema *schema.Cache +} + +// Overview renders the Configure > Routes page reading from the candidate. 
+// GET /configure/routes +func (h *ConfigureRoutesHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := cfgRoutesPageData{ + PageData: newPageData(r, "configure-routes", "Configure: Routes"), + } + + mgr := h.Schema.Manager() + data.Loading = mgr == nil + if mgr != nil { + rt := "/ietf-routing:routing/control-plane-protocols/control-plane-protocol/static-routes/ietf-ipv4-unicast-routing:ipv4/route" + data.Desc = map[string]string{ + "prefix": schema.DescriptionOf(mgr, rt+"/destination-prefix"), + "nexthop": schema.DescriptionOf(mgr, rt+"/next-hop/next-hop-address"), + "interface": schema.DescriptionOf(mgr, rt+"/next-hop/outgoing-interface"), + } + } + + cpp, err := h.fetchStaticCPP(r.Context()) + if err != nil { + log.Printf("configure routes: %v", err) + data.Error = "Could not read static routes" + } + if len(cpp.Routing.CPPs.CPP) > 0 { + entry := cpp.Routing.CPPs.CPP[0] + if entry.StaticRoutes.IPv4 != nil { + for _, rt := range entry.StaticRoutes.IPv4.Routes { + data.IPv4Routes = append(data.IPv4Routes, cfgRouteEntry{ + Prefix: rt.Prefix, + NextHop: rt.NextHop.Address, + Interface: rt.NextHop.Interface, + }) + } + } + if entry.StaticRoutes.IPv6 != nil { + for _, rt := range entry.StaticRoutes.IPv6.Routes { + data.IPv6Routes = append(data.IPv6Routes, cfgRouteEntry{ + Prefix: rt.Prefix, + NextHop: rt.NextHop.Address, + Interface: rt.NextHop.Interface, + }) + } + } + } + + tmplName := "configure-routes.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// AddRoute adds a static route to the candidate datastore. 
+// POST /configure/routes +func (h *ConfigureRoutesHandler) AddRoute(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + prefix := strings.TrimSpace(r.FormValue("prefix")) + nexthop := strings.TrimSpace(r.FormValue("nexthop")) + iface := strings.TrimSpace(r.FormValue("interface")) + + ipKey, _, ok := familyKeys(r.FormValue("family")) + if !ok { + renderSaveError(w, fmt.Errorf("invalid address family")) + return + } + if prefix == "" { + renderSaveError(w, fmt.Errorf("destination prefix is required")) + return + } + if nexthop == "" && iface == "" { + renderSaveError(w, fmt.Errorf("next-hop address or outgoing interface is required")) + return + } + + nhMap := map[string]any{} + if nexthop != "" { + nhMap["next-hop-address"] = nexthop + } + if iface != "" { + nhMap["outgoing-interface"] = iface + } + + // PATCH at the routing root so the CPP is created if absent (merge semantics). + // ipv4/ipv6 containers live under the static-routes intermediate container. + body := map[string]any{ + "ietf-routing:routing": map[string]any{ + "control-plane-protocols": map[string]any{ + "control-plane-protocol": []map[string]any{{ + "type": "infix-routing:static", + "name": "static", + "static-routes": map[string]any{ + ipKey: map[string]any{ + "route": []map[string]any{{ + "destination-prefix": prefix, + "next-hop": nhMap, + }}, + }, + }, + }}, + }, + }, + } + if err := h.RC.Patch(r.Context(), candidatePath+"/ietf-routing:routing", body); err != nil { + log.Printf("configure routes add %q: %v", prefix, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Route added", "/configure/routes") +} + +// UpdateRoute replaces the next-hop of an existing static route. 
+// PUT /configure/routes +func (h *ConfigureRoutesHandler) UpdateRoute(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + prefix := strings.TrimSpace(r.FormValue("prefix")) + nexthop := strings.TrimSpace(r.FormValue("nexthop")) + iface := strings.TrimSpace(r.FormValue("interface")) + + _, suffix, ok := familyKeys(r.FormValue("family")) + if !ok || prefix == "" { + renderSaveError(w, fmt.Errorf("invalid address family or prefix")) + return + } + if nexthop == "" && iface == "" { + renderSaveError(w, fmt.Errorf("next-hop address or outgoing interface is required")) + return + } + + nhMap := map[string]any{} + if nexthop != "" { + nhMap["next-hop-address"] = nexthop + } + if iface != "" { + nhMap["outgoing-interface"] = iface + } + + var routeKey string + switch r.FormValue("family") { + case "ipv4": + routeKey = "ietf-ipv4-unicast-routing:route" + case "ipv6": + routeKey = "ietf-ipv6-unicast-routing:route" + } + + path := candidatePath + suffix + "/route=" + url.PathEscape(prefix) + body := map[string]any{ + routeKey: []map[string]any{{ + "destination-prefix": prefix, + "next-hop": nhMap, + }}, + } + if err := h.RC.Put(r.Context(), path, body); err != nil { + log.Printf("configure routes update %q: %v", prefix, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Route updated", "/configure/routes") +} + +// DeleteRoute removes a static route from the candidate datastore. 
+// DELETE /configure/routes?family=ipv4&prefix=10.0.0.0/24 +func (h *ConfigureRoutesHandler) DeleteRoute(w http.ResponseWriter, r *http.Request) { + prefix := r.URL.Query().Get("prefix") + _, suffix, ok := familyKeys(r.URL.Query().Get("family")) + if !ok || prefix == "" { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + path := candidatePath + suffix + "/route=" + url.PathEscape(prefix) + if err := h.RC.Delete(r.Context(), path); err != nil { + log.Printf("configure routes delete %q: %v", prefix, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "Route deleted", "/configure/routes") +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +// familyKeys maps an address family string ("ipv4" or "ipv6") to the +// YANG module-qualified ipv4/ipv6 key and the RESTCONF path suffix for that +// family's route list. ok is false for unknown family values. +func familyKeys(family string) (ipKey, suffix string, ok bool) { + switch family { + case "ipv4": + return "ietf-ipv4-unicast-routing:ipv4", staticIPv4Suffix, true + case "ipv6": + return "ietf-ipv6-unicast-routing:ipv6", staticIPv6Suffix, true + } + return "", "", false +} + +// fetchStaticCPP reads the static control-plane-protocol entry from the +// candidate datastore, falling back to running on 404. 
+func (h *ConfigureRoutesHandler) fetchStaticCPP(ctx context.Context) (staticCPPWrapper, error) { + var cpp staticCPPWrapper + err := h.RC.Get(ctx, candidatePath+staticCPPSuffix, &cpp) + if err == nil { + return cpp, nil + } + var rcErr *restconf.Error + if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound { + runErr := h.RC.Get(ctx, "/data"+staticCPPSuffix, &cpp) + if runErr == nil { + return cpp, nil + } + var rcRun *restconf.Error + if errors.As(runErr, &rcRun) && rcRun.StatusCode == http.StatusNotFound { + return cpp, nil // no static routes configured — not an error + } + return cpp, runErr + } + return cpp, err +} diff --git a/src/webui/internal/handlers/configure_system.go b/src/webui/internal/handlers/configure_system.go new file mode 100644 index 000000000..95fae4186 --- /dev/null +++ b/src/webui/internal/handlers/configure_system.go @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "encoding/json" + "errors" + "html/template" + "log" + "net/http" + "strconv" + "strings" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +// ─── RESTCONF JSON types (candidate datastore) ──────────────────────────────── + +type cfgSystemWrapper struct { + System cfgSystemJSON `json:"ietf-system:system"` +} + +type cfgSystemJSON struct { + Contact string `json:"contact"` + Hostname string `json:"hostname"` + Location string `json:"location"` + Clock cfgClockJSON `json:"clock"` + NTP cfgNTPJSON `json:"ntp"` + DNS cfgDNSJSON `json:"dns-resolver"` + MotdBanner []byte `json:"infix-system:motd-banner,omitempty"` + TextEditor string `json:"infix-system:text-editor,omitempty"` +} + +type cfgClockJSON struct { + TimezoneName string `json:"timezone-name"` +} + +type cfgNTPJSON struct { + Enabled bool `json:"enabled"` + Servers []cfgNTPServerJSON `json:"server"` +} + +type cfgNTPServerJSON struct { + Name string `json:"name"` + UDP cfgNTPUDPJSON `json:"udp"` + Prefer bool 
`json:"prefer"` +} + +type cfgNTPUDPJSON struct { + Address string `json:"address"` + Port uint16 `json:"port,omitempty"` +} + +type cfgDNSJSON struct { + Search []string `json:"search"` + Servers []cfgDNSServerJSON `json:"server"` +} + +type cfgDNSServerJSON struct { + Name string `json:"name"` + UDPAndTCP cfgDNSAddrJSON `json:"udp-and-tcp"` +} + +type cfgDNSAddrJSON struct { + Address string `json:"address"` + Port uint16 `json:"port,omitempty"` +} + +// ─── Template data ──────────────────────────────────────────────────────────── + +type cfgSystemPageData struct { + PageData + Loading bool // true while YANG schema is still downloading + Error string + Hostname string + Contact string + Location string + Timezone string + NTP cfgNTPJSON + DNS cfgDNSJSON + MotdBanner string // decoded from YANG binary + TextEditor string // e.g. "infix-system:emacs" + + // Schema-enriched fields — only populated when Loading is false. + TextEditorOptions []schema.IdentityOption + TimezoneOptions []string // bare timezone names for select + Desc map[string]string // leaf name → YANG description +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// ConfigureSystemHandler serves the Configure > System page. +type ConfigureSystemHandler struct { + Template *template.Template + RC restconf.Fetcher + Schema *schema.Cache +} + +const candidatePath = "/ds/ietf-datastores:candidate" + +// Overview renders the Configure > System page reading from the candidate datastore. 
+// GET /configure/system +func (h *ConfigureSystemHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := cfgSystemPageData{ + PageData: newPageData(r, "configure-system", "Configure: System"), + } + + var raw cfgSystemWrapper + if err := h.RC.Get(r.Context(), candidatePath+"/ietf-system:system", &raw); err != nil { + var rcErr *restconf.Error + if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound { + // Candidate not initialised — read from running as fallback. + if fallErr := h.RC.Get(r.Context(), "/data/ietf-system:system", &raw); fallErr != nil { + var rcFall *restconf.Error + if !errors.As(fallErr, &rcFall) || rcFall.StatusCode != http.StatusNotFound { + log.Printf("configure system (running fallback): %v", fallErr) + data.Error = "Could not read system configuration" + } + } + } else { + log.Printf("configure system: %v", err) + data.Error = "Could not read candidate configuration" + } + } + if data.Error == "" { + s := raw.System + data.Hostname = s.Hostname + data.Contact = s.Contact + data.Location = s.Location + data.Timezone = s.Clock.TimezoneName + data.NTP = s.NTP + data.DNS = s.DNS + data.MotdBanner = string(s.MotdBanner) + data.TextEditor = s.TextEditor + } + + mgr := h.Schema.Manager() + data.Loading = mgr == nil + if mgr != nil { + const sys = "/ietf-system:system" + data.TextEditorOptions = schema.OptionsFor(mgr, sys+"/infix-system:text-editor") + if data.TextEditor == "" { + for _, opt := range data.TextEditorOptions { + if opt.IsDefault { + data.TextEditor = opt.Value + break + } + } + } + data.Desc = map[string]string{ + "hostname": schema.DescriptionOf(mgr, sys+"/hostname"), + "contact": schema.DescriptionOf(mgr, sys+"/contact"), + "location": schema.DescriptionOf(mgr, sys+"/location"), + "timezone": schema.DescriptionOf(mgr, sys+"/clock/timezone-name"), + "text-editor": schema.DescriptionOf(mgr, sys+"/infix-system:text-editor"), + "motd-banner": schema.DescriptionOf(mgr, sys+"/infix-system:motd-banner"), + } + 
for _, opt := range schema.OptionsFor(mgr, sys+"/clock/timezone-name") { + data.TimezoneOptions = append(data.TimezoneOptions, schema.StripModulePrefix(opt.Value)) + } + } + + tmplName := "configure-system.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// SaveIdentity patches hostname / contact / location to the candidate datastore. +// POST /configure/system/identity +func (h *ConfigureSystemHandler) SaveIdentity(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + body := map[string]any{ + "ietf-system:system": map[string]any{ + "hostname": r.FormValue("hostname"), + "contact": r.FormValue("contact"), + "location": r.FormValue("location"), + }, + } + if err := h.RC.Patch(r.Context(), candidatePath+"/ietf-system:system", body); err != nil { + log.Printf("configure system identity: %v", err) + renderSaveError(w, err) + return + } + renderSaved(w, "Identity saved") +} + +// SaveClock patches the timezone to the candidate datastore. +// POST /configure/system/clock +func (h *ConfigureSystemHandler) SaveClock(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + body := map[string]any{ + "ietf-system:system": map[string]any{ + "clock": map[string]any{ + "timezone-name": r.FormValue("timezone"), + }, + }, + } + if err := h.RC.Patch(r.Context(), candidatePath+"/ietf-system:system", body); err != nil { + log.Printf("configure system clock: %v", err) + renderSaveError(w, err) + return + } + renderSaved(w, "Clock saved") +} + +// SaveNTP replaces the NTP server list in the candidate datastore. 
+// PUT /configure/system/ntp +func (h *ConfigureSystemHandler) SaveNTP(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + servers := parseNTPServers(r) + ntp := map[string]any{"enabled": true} + if len(servers) > 0 { + ntp["server"] = servers + } + body := map[string]any{"ietf-system:ntp": ntp} + if err := h.RC.Put(r.Context(), candidatePath+"/ietf-system:system/ntp", body); err != nil { + log.Printf("configure system ntp: %v", err) + renderSaveError(w, err) + return + } + renderSaved(w, "NTP saved") +} + +// SaveDNS replaces the DNS resolver config in the candidate datastore. +// PUT /configure/system/dns +func (h *ConfigureSystemHandler) SaveDNS(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + search := parseSearchList(r) + servers := parseDNSServers(r) + + // Omit empty lists entirely — sending null for a YANG list is invalid. + dnsResolver := map[string]any{} + if len(search) > 0 { + dnsResolver["search"] = search + } + if len(servers) > 0 { + dnsResolver["server"] = servers + } + body := map[string]any{ + "ietf-system:dns-resolver": dnsResolver, + } + if err := h.RC.Put(r.Context(), candidatePath+"/ietf-system:system/dns-resolver", body); err != nil { + log.Printf("configure system dns: %v", err) + renderSaveError(w, err) + return + } + renderSaved(w, "DNS saved") +} + +// ─── Form parsing helpers ───────────────────────────────────────────────────── + +// parseNTPServers extracts NTP server entries from form values. +// Fields: ntp_name_N, ntp_addr_N, ntp_port_N, ntp_prefer_N (checkbox). 
+func parseNTPServers(r *http.Request) []cfgNTPServerJSON { + var servers []cfgNTPServerJSON + for i := 0; ; i++ { + name := strings.TrimSpace(r.FormValue("ntp_name_" + strconv.Itoa(i))) + if name == "" { + break + } + addr := strings.TrimSpace(r.FormValue("ntp_addr_" + strconv.Itoa(i))) + port, _ := strconv.ParseUint(r.FormValue("ntp_port_"+strconv.Itoa(i)), 10, 16) + prefer := r.FormValue("ntp_prefer_"+strconv.Itoa(i)) == "on" + srv := cfgNTPServerJSON{ + Name: name, + UDP: cfgNTPUDPJSON{Address: addr, Port: uint16(port)}, + Prefer: prefer, + } + servers = append(servers, srv) + } + return servers +} + +// parseSearchList extracts DNS search domains from form values. +// Fields: dns_search_N (one per domain). +func parseSearchList(r *http.Request) []string { + var search []string + for i := 0; ; i++ { + v := strings.TrimSpace(r.FormValue("dns_search_" + strconv.Itoa(i))) + if v == "" { + break + } + search = append(search, v) + } + return search +} + +// parseDNSServers extracts DNS server entries from form values. +// Fields: dns_name_N, dns_addr_N, dns_port_N. +func parseDNSServers(r *http.Request) []cfgDNSServerJSON { + var servers []cfgDNSServerJSON + for i := 0; ; i++ { + name := strings.TrimSpace(r.FormValue("dns_name_" + strconv.Itoa(i))) + if name == "" { + break + } + addr := strings.TrimSpace(r.FormValue("dns_addr_" + strconv.Itoa(i))) + port, _ := strconv.ParseUint(r.FormValue("dns_port_"+strconv.Itoa(i)), 10, 16) + srv := cfgDNSServerJSON{ + Name: name, + UDPAndTCP: cfgDNSAddrJSON{Address: addr, Port: uint16(port)}, + } + servers = append(servers, srv) + } + return servers +} + +// SavePreferences patches infix-system augmented fields (motd-banner, text-editor). 
+// POST /configure/system/preferences +func (h *ConfigureSystemHandler) SavePreferences(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + sysPatch := map[string]any{} + if motd := r.FormValue("motd_banner"); motd != "" { + sysPatch["infix-system:motd-banner"] = []byte(motd) + } + if editor := r.FormValue("text_editor"); editor != "" { + sysPatch["infix-system:text-editor"] = editor + } + if len(sysPatch) == 0 { + renderSaved(w, "Preferences saved") + return + } + + body := map[string]any{"ietf-system:system": sysPatch} + if err := h.RC.Patch(r.Context(), candidatePath+"/ietf-system:system", body); err != nil { + log.Printf("configure system preferences: %v", err) + renderSaveError(w, err) + return + } + renderSaved(w, "Preferences saved") +} + +// ─── Response helpers ───────────────────────────────────────────────────────── + +// renderSaved writes a success indicator for HTMX to swap into the Save button. +func renderSaved(w http.ResponseWriter, msg string) { + w.Header().Set("Content-Type", "text/html") + w.Header().Set("HX-Trigger", `{"cfgSaved":"`+msg+`"}`) + w.WriteHeader(http.StatusOK) +} + +// renderSavedRedirect logs a cfgSaved activity entry and then navigates HTMX to +// the given page path (targeting #content). Use this instead of a bare HX-Location +// for Add/Delete operations that redirect back to the listing page after success. +func renderSavedRedirect(w http.ResponseWriter, msg, path string) { + b, _ := json.Marshal(msg) + w.Header().Set("HX-Trigger", `{"cfgSaved":`+string(b)+`}`) + w.Header().Set("HX-Location", `{"path":"`+path+`","target":"#content"}`) + w.WriteHeader(http.StatusNoContent) +} + +// renderSaveError writes an inline error for HTMX. HX-Trigger ensures forms with +// hx-swap="none" still receive the cfgError event (body swap alone would be silenced). 
+func renderSaveError(w http.ResponseWriter, err error) { + msg := "Save failed" + if re, ok := err.(*restconf.Error); ok && re.Message != "" { + msg = re.Message + } + w.Header().Set("Content-Type", "text/html") + b, _ := json.Marshal(msg) + w.Header().Set("HX-Trigger", `{"cfgError":`+string(b)+`}`) + w.WriteHeader(http.StatusUnprocessableEntity) + w.Write([]byte(`` + template.HTMLEscapeString(msg) + ``)) +} diff --git a/src/webui/internal/handlers/configure_users.go b/src/webui/internal/handlers/configure_users.go new file mode 100644 index 000000000..9d4ec119d --- /dev/null +++ b/src/webui/internal/handlers/configure_users.go @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "encoding/base64" + "errors" + "fmt" + "html/template" + "log" + "net/http" + "net/url" + "strings" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +// ─── RESTCONF JSON types ────────────────────────────────────────────────────── + +// cfgAuthWrapper reads the authentication container via the full system path, +// which avoids sub-path encoding ambiguities and matches what configure-system uses. 
+type cfgAuthWrapper struct { + System struct { + Auth cfgAuthJSON `json:"authentication"` + } `json:"ietf-system:system"` +} + +type cfgAuthJSON struct { + Users []cfgUserJSON `json:"user"` +} + +type cfgUserJSON struct { + Name string `json:"name"` + Password string `json:"password,omitempty"` + Shell string `json:"infix-system:shell,omitempty"` + AuthorizedKeys []cfgKeyJSON `json:"authorized-key,omitempty"` +} + +type cfgKeyJSON struct { + Name string `json:"name"` + Algorithm string `json:"algorithm"` + KeyData []byte `json:"key-data"` +} + +// ─── Display helper ─────────────────────────────────────────────────────────── + +type cfgUserDisplay struct { + cfgUserJSON + ShellLabel string + KeyCount int +} + +// ─── Template data ──────────────────────────────────────────────────────────── + +type cfgUsersPageData struct { + PageData + Loading bool + Users []cfgUserDisplay + Error string + ShellOptions []schema.IdentityOption +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// ConfigureUsersHandler serves the Configure > Users page. +type ConfigureUsersHandler struct { + Template *template.Template + RC restconf.Fetcher + Schema *schema.Cache +} + +const authPath = candidatePath + "/ietf-system:system/authentication" + +// Overview renders the Configure > Users page reading from the candidate. +// GET /configure/users +func (h *ConfigureUsersHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := cfgUsersPageData{ + PageData: newPageData(r, "configure-users", "Configure: Users"), + } + + // Read via the full system path (same as configure-system) to avoid + // sub-path encoding issues. Fall back to running if candidate is empty. 
+ sysPath := candidatePath + "/ietf-system:system" + var raw cfgAuthWrapper + if err := h.RC.Get(r.Context(), sysPath, &raw); err != nil { + var rcErr *restconf.Error + if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound { + // Candidate not initialised — read from running as fallback. + if fallErr := h.RC.Get(r.Context(), "/data/ietf-system:system", &raw); fallErr != nil { + var rcFall *restconf.Error + if !errors.As(fallErr, &rcFall) || rcFall.StatusCode != http.StatusNotFound { + log.Printf("configure users (running fallback): %v", fallErr) + data.Error = "Could not read user configuration" + } + } + } else { + log.Printf("configure users: %v", err) + data.Error = "Could not read user configuration" + } + } + const shellPath = "/ietf-system:system/authentication/user/infix-system:shell" + mgr := h.Schema.Manager() + data.Loading = mgr == nil + if mgr != nil { + data.ShellOptions = schema.OptionsFor(mgr, shellPath) + } + for _, u := range raw.System.Auth.Users { + data.Users = append(data.Users, cfgUserDisplay{ + cfgUserJSON: u, + ShellLabel: schema.StripModulePrefix(u.Shell), + KeyCount: len(u.AuthorizedKeys), + }) + } + + tmplName := "configure-users.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// AddUser creates a new user in the candidate datastore. 
+// POST /configure/users +func (h *ConfigureUsersHandler) AddUser(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + name := strings.TrimSpace(r.FormValue("username")) + password := r.FormValue("password") + shell := r.FormValue("shell") + if name == "" { + renderSaveError(w, fmt.Errorf("username is required")) + return + } + + hash, err := HashPassword(password) + if err != nil { + log.Printf("configure users add: hash: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + return + } + + user := map[string]any{ + "ietf-system:user": []map[string]any{{ + "name": name, + "password": hash, + "infix-system:shell": shell, + }}, + } + path := authPath + "/user=" + url.PathEscape(name) + if err := h.RC.Put(r.Context(), path, user); err != nil { + log.Printf("configure users add %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "User added", "/configure/users") +} + +// DeleteUser removes a user from the candidate datastore. +// DELETE /configure/users/{name} +func (h *ConfigureUsersHandler) DeleteUser(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + path := authPath + "/user=" + url.PathEscape(name) + if err := h.RC.Delete(r.Context(), path); err != nil { + log.Printf("configure users delete %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "User deleted", "/configure/users") +} + +// UpdateShell changes a user's login shell in the candidate datastore. 
+// POST /configure/users/{name}/shell +func (h *ConfigureUsersHandler) UpdateShell(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + shell := r.FormValue("shell") + body := map[string]any{ + "ietf-system:user": []map[string]any{{ + "name": name, + "infix-system:shell": shell, + }}, + } + path := authPath + "/user=" + url.PathEscape(name) + if err := h.RC.Patch(r.Context(), path, body); err != nil { + log.Printf("configure users shell %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSaved(w, "Shell updated") +} + +// ChangePassword sets a new hashed password for a user in the candidate. +// POST /configure/users/{name}/password +func (h *ConfigureUsersHandler) ChangePassword(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + password := r.FormValue("password") + if password == "" { + renderSaveError(w, fmt.Errorf("password cannot be empty")) + return + } + + hash, err := HashPassword(password) + if err != nil { + log.Printf("configure users password %q: hash: %v", name, err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + return + } + + body := map[string]any{ + "ietf-system:user": []map[string]any{{ + "name": name, + "password": hash, + }}, + } + path := authPath + "/user=" + url.PathEscape(name) + if err := h.RC.Patch(r.Context(), path, body); err != nil { + log.Printf("configure users password %q: %v", name, err) + renderSaveError(w, err) + return + } + renderSaved(w, "Password changed") +} + +// AddKey adds an SSH authorized key for a user in the candidate. 
+// POST /configure/users/{name}/keys +func (h *ConfigureUsersHandler) AddKey(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.PathValue("name") + keyName := strings.TrimSpace(r.FormValue("key_name")) + keyLine := strings.TrimSpace(r.FormValue("key_data")) + + if keyName == "" || keyLine == "" { + renderSaveError(w, fmt.Errorf("key name and public key are required")) + return + } + + // Parse "algorithm base64data [comment]" from an OpenSSH public key line. + parts := strings.Fields(keyLine) + if len(parts) < 2 { + renderSaveError(w, fmt.Errorf("invalid SSH public key format")) + return + } + algorithm := parts[0] + keyBytes, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + renderSaveError(w, fmt.Errorf("invalid SSH key data: %w", err)) + return + } + + // PATCH at the system root so libyang has full ancestor-key context. + // Patching at user=admin or authorized-key leaves libyang without the + // parent list-key context and produces "List requires N keys" errors. + body := map[string]any{ + "ietf-system:system": map[string]any{ + "authentication": map[string]any{ + "user": []map[string]any{{ + "name": name, + "authorized-key": []map[string]any{{ + "name": keyName, + "algorithm": algorithm, + "key-data": keyBytes, + }}, + }}, + }, + }, + } + path := candidatePath + "/ietf-system:system" + if err := h.RC.Patch(r.Context(), path, body); err != nil { + log.Printf("configure users key add %q/%q: %v", name, keyName, err) + renderSaveError(w, err) + return + } + renderSavedRedirect(w, "SSH key added", "/configure/users") +} + +// DeleteKey removes an SSH authorized key for a user in the candidate. +// DELETE /configure/users/{name}/keys/{keyname} +// +// Direct DELETE to the authorized-key path fails when the key name contains +// characters like '@' that libyang interprets as module@revision syntax in path +// predicates. 
Work around by GET + filter + PUT at the user level instead. +func (h *ConfigureUsersHandler) DeleteKey(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + keyName := r.PathValue("keyname") + + sysPath := candidatePath + "/ietf-system:system" + var raw cfgAuthWrapper + if err := h.RC.Get(r.Context(), sysPath, &raw); err != nil { + log.Printf("configure users key delete %q/%q: GET: %v", name, keyName, err) + renderSaveError(w, err) + return + } + + var userEntry map[string]any + for _, u := range raw.System.Auth.Users { + if u.Name != name { + continue + } + filteredKeys := make([]map[string]any, 0, len(u.AuthorizedKeys)) + found := false + for _, k := range u.AuthorizedKeys { + if k.Name == keyName { + found = true + continue + } + filteredKeys = append(filteredKeys, map[string]any{ + "name": k.Name, + "algorithm": k.Algorithm, + "key-data": k.KeyData, + }) + } + if !found { + w.WriteHeader(http.StatusOK) + return + } + userEntry = map[string]any{ + "name": u.Name, + "authorized-key": filteredKeys, + } + if u.Password != "" { + userEntry["password"] = u.Password + } + if u.Shell != "" { + userEntry["infix-system:shell"] = u.Shell + } + break + } + if userEntry == nil { + w.WriteHeader(http.StatusOK) + return + } + + // PUT at the user level replaces only this user's entry (including its key + // list), which avoids the path-predicate Syntax error while not touching + // other users or system config. 
+ putPath := authPath + "/user=" + url.PathEscape(name) + body := map[string]any{ + "ietf-system:user": []map[string]any{userEntry}, + } + if err := h.RC.Put(r.Context(), putPath, body); err != nil { + log.Printf("configure users key delete %q/%q: PUT: %v", name, keyName, err) + renderSaveError(w, err) + return + } + w.WriteHeader(http.StatusOK) +} + diff --git a/src/webui/internal/handlers/containers.go b/src/webui/internal/handlers/containers.go new file mode 100644 index 000000000..887bb3d7d --- /dev/null +++ b/src/webui/internal/handlers/containers.go @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "fmt" + "html/template" + "log" + "net/http" + "net/url" + "sync" + + "github.com/kernelkit/webui/internal/restconf" +) + +// containerJSON matches the RESTCONF JSON for a single container entry. +type containerJSON struct { + Name string `json:"name"` + Image string `json:"image"` + Running yangBool `json:"running"` + Status string `json:"status"` + Network struct { + Publish []string `json:"publish"` + } `json:"network"` + ResourceUsage containerResourceUsageJSON `json:"resource-usage"` + ResourceLimit containerResourceLimitJSON `json:"resource-limit"` +} + +// containerResourceUsageJSON matches the RESTCONF JSON for resource-usage. +type containerResourceUsageJSON struct { + Memory yangInt64 `json:"memory"` // KiB + CPU yangFloat64 `json:"cpu"` // percent +} + +// containerResourceLimitJSON matches the RESTCONF JSON for resource-limit. +type containerResourceLimitJSON struct { + Memory yangInt64 `json:"memory"` // KiB +} + +// containerListWrapper wraps the top-level RESTCONF containers response. +// The server returns the full "containers" object; the list lives inside it. +type containerListWrapper struct { + Containers struct { + Container []containerJSON `json:"container"` + } `json:"infix-containers:containers"` +} + +// containerResourceUsageWrapper wraps the RESTCONF resource-usage response. 
+type containerResourceUsageWrapper struct { + ResourceUsage containerResourceUsageJSON `json:"infix-containers:resource-usage"` +} + +// ContainerEntry holds display-ready data for a single container row. +type ContainerEntry struct { + Name string + Image string + Status string + Running bool + CPUPct int + MemUsed string + MemLimit string + MemPct int + Uptime string + Ports []string +} + +// containersData is the template data for the containers page. +type containersData struct { + PageData + Containers []ContainerEntry + Error string +} + +// ContainersHandler serves the containers status page. +type ContainersHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the containers list page. +func (h *ContainersHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := containersData{ + PageData: newPageData(r, "containers", "Containers"), + } + + // Detach from the request context so that RESTCONF calls survive + // browser connection resets. + ctx := context.WithoutCancel(r.Context()) + + var listResp containerListWrapper + if err := h.RC.Get(ctx, "/data/infix-containers:containers", &listResp); err != nil { + log.Printf("restconf containers list: %v", err) + data.Error = "Could not fetch container information" + } else { + containers := listResp.Containers.Container + + // Fetch resource-usage for each container concurrently. 
+ usages := make([]containerResourceUsageJSON, len(containers)) + var mu sync.Mutex + var wg sync.WaitGroup + + for i, c := range containers { + wg.Add(1) + go func(idx int, name string) { + defer wg.Done() + path := fmt.Sprintf("/data/infix-containers:containers/container=%s/resource-usage", + url.PathEscape(name)) + var w containerResourceUsageWrapper + if err := h.RC.Get(ctx, path, &w); err != nil { + log.Printf("restconf resource-usage %s: %v", name, err) + return + } + mu.Lock() + usages[idx] = w.ResourceUsage + mu.Unlock() + }(i, c.Name) + } + wg.Wait() + + for i, c := range containers { + entry := ContainerEntry{ + Name: c.Name, + Image: c.Image, + Status: c.Status, + Running: bool(c.Running), + Ports: c.Network.Publish, + } + + // CPU usage — round to int. + entry.CPUPct = int(float64(usages[i].CPU) + 0.5) + if entry.CPUPct > 100 { + entry.CPUPct = 100 + } + + // Memory usage — resource-usage.memory is in KiB. + memUsedKiB := int64(usages[i].Memory) + if memUsedKiB > 0 { + entry.MemUsed = humanBytes(memUsedKiB * 1024) + } + + // Memory limit — resource-limit.memory is in KiB. + memLimitKiB := int64(c.ResourceLimit.Memory) + if memLimitKiB > 0 { + entry.MemLimit = humanBytes(memLimitKiB * 1024) + if memUsedKiB > 0 { + entry.MemPct = int(float64(memUsedKiB) / float64(memLimitKiB) * 100) + if entry.MemPct > 100 { + entry.MemPct = 100 + } + } + } + + // Uptime: extract from status string (e.g., "Up About a minute", "Up 3 hours"). + entry.Uptime = extractUptime(c.Status) + + data.Containers = append(data.Containers, entry) + } + } + + tmplName := "containers.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// extractUptime returns the uptime portion of a container status string. 
+// E.g., "Up About a minute" → "About a minute", "Up 3 hours" → "3 hours", +// "Exited (0) 2 hours ago" → "". +func extractUptime(status string) string { + const prefix = "Up " + if len(status) > len(prefix) && status[:len(prefix)] == prefix { + return status[len(prefix):] + } + return "" +} diff --git a/src/webui/internal/handlers/containers_test.go b/src/webui/internal/handlers/containers_test.go new file mode 100644 index 000000000..1f3ec95e5 --- /dev/null +++ b/src/webui/internal/handlers/containers_test.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "html/template" + "net/http" + "net/http/httptest" + "testing" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +var minimalContainersTmpl = template.Must(template.New("containers.html").Parse( + `{{define "containers.html"}}count={{len .Containers}}{{end}}` + + `{{define "content"}}{{len .Containers}}{{end}}`, +)) + +func TestContainersOverview_ReturnsOK(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &ContainersHandler{Template: minimalContainersTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/containers", nil) + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body") + } +} + +func TestContainersOverview_HTMXPartial(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &ContainersHandler{Template: minimalContainersTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/containers", nil) + req.Header.Set("HX-Request", "true") + 
ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body for htmx partial") + } +} diff --git a/src/webui/internal/handlers/crypt.go b/src/webui/internal/handlers/crypt.go new file mode 100644 index 000000000..430ac96a8 --- /dev/null +++ b/src/webui/internal/handlers/crypt.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "fmt" + "os/exec" + "strings" +) + +// HashPassword returns a yescrypt crypt hash of password using mkpasswd(1). +// mkpasswd is available on Infix target systems and uses the system's libcrypt, +// so the output matches whatever the device expects (default: yescrypt $y$). 
+func HashPassword(password string) (string, error) { + path, err := exec.LookPath("mkpasswd") + if err != nil { + return "", fmt.Errorf("mkpasswd not found: %w", err) + } + cmd := exec.Command(path, "--method=yescrypt", "--password-fd=0") + cmd.Stdin = strings.NewReader(password) + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("mkpasswd: %w", err) + } + return strings.TrimSpace(string(out)), nil +} diff --git a/src/webui/internal/handlers/dashboard.go b/src/webui/internal/handlers/dashboard.go new file mode 100644 index 000000000..172e12626 --- /dev/null +++ b/src/webui/internal/handlers/dashboard.go @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "encoding/json" + "fmt" + "html/template" + "log" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/kernelkit/webui/internal/restconf" +) + +// yangInt64 unmarshals a YANG numeric value that RESTCONF encodes as a +// JSON string (e.g. "1024000") or, occasionally, as a bare number. +type yangInt64 int64 + +func (y *yangInt64) UnmarshalJSON(b []byte) error { + var s string + if json.Unmarshal(b, &s) == nil { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *y = yangInt64(v) + return nil + } + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + *y = yangInt64(v) + return nil +} + +// yangBool unmarshals a YANG boolean that RESTCONF may encode as a +// JSON string ("true"/"false") or as a bare boolean. +type yangBool bool + +func (y *yangBool) UnmarshalJSON(b []byte) error { + var s string + if json.Unmarshal(b, &s) == nil { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + *y = yangBool(v) + return nil + } + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + *y = yangBool(v) + return nil +} + +// yangFloat64 unmarshals a YANG decimal value that RESTCONF may encode +// as a JSON string (e.g. "0.12") or as a bare number. 
+type yangFloat64 float64 + +func (y *yangFloat64) UnmarshalJSON(b []byte) error { + var s string + if json.Unmarshal(b, &s) == nil { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *y = yangFloat64(v) + return nil + } + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + *y = yangFloat64(v) + return nil +} + +// RESTCONF JSON structures for ietf-system:system-state. + +type systemStateWrapper struct { + SystemState systemState `json:"ietf-system:system-state"` +} + +type systemState struct { + Platform platform `json:"platform"` + Clock clock `json:"clock"` + Software software `json:"infix-system:software"` + Resource resourceUsage `json:"infix-system:resource-usage"` +} + +type platform struct { + OSName string `json:"os-name"` + OSVersion string `json:"os-version"` + Machine string `json:"machine"` +} + +type clock struct { + BootDatetime string `json:"boot-datetime"` + CurrentDatetime string `json:"current-datetime"` +} + +type software struct { + Booted string `json:"booted"` + Slot []softwareSlot `json:"slot"` +} + +type softwareSlot struct { + Name string `json:"name"` + Version string `json:"version"` +} + +type resourceUsage struct { + Memory memoryInfo `json:"memory"` + LoadAverage loadAverage `json:"load-average"` + Filesystem []filesystemFS `json:"filesystem"` +} + +type memoryInfo struct { + Total yangInt64 `json:"total"` + Free yangInt64 `json:"free"` + Available yangInt64 `json:"available"` +} + +type loadAverage struct { + Load1min yangFloat64 `json:"load-1min"` + Load5min yangFloat64 `json:"load-5min"` + Load15min yangFloat64 `json:"load-15min"` +} + +type filesystemFS struct { + MountPoint string `json:"mount-point"` + Size yangInt64 `json:"size"` + Used yangInt64 `json:"used"` + Available yangInt64 `json:"available"` +} + +// RESTCONF JSON structures for ietf-hardware:hardware. 
+ +type hardwareWrapper struct { + Hardware struct { + Component []hwComponentJSON `json:"component"` + } `json:"ietf-hardware:hardware"` +} + +type hwComponentJSON struct { + Name string `json:"name"` + Class string `json:"class"` + Description string `json:"description"` + Parent string `json:"parent"` + MfgName string `json:"mfg-name"` + ModelName string `json:"model-name"` + SerialNum string `json:"serial-num"` + HardwareRev string `json:"hardware-rev"` + PhysAddress string `json:"infix-hardware:phys-address"` + WiFiRadio *wifiRadioHWJSON `json:"infix-hardware:wifi-radio"` + SensorData *struct { + ValueType string `json:"value-type"` + Value yangInt64 `json:"value"` + ValueScale string `json:"value-scale"` + OperStatus string `json:"oper-status"` + } `json:"sensor-data"` + State *struct { + AdminState string `json:"admin-state"` + OperState string `json:"oper-state"` + } `json:"state"` +} + +// Template data structures. + +type dashboardData struct { + PageData + Hostname string + Contact string + Location string + OSName string + OSVersion string + Machine string + CurrentTime string + Firmware string + Uptime string + MemTotal int64 + MemUsed int64 + MemPercent int + MemClass string + Load1 string + Load5 string + Load15 string + CPUClass string + Disks []diskEntry + Board boardInfo + WiFiRadios []wifiEntry + SensorGroups []sensorGroup + Error string +} + +type boardInfo struct { + Model string + Manufacturer string + SerialNum string + HardwareRev string + BaseMAC string +} + +type sensorEntry struct { + Name string + Value string + Type string // "temperature", "fan", "voltage", etc. +} + +type sensorGroup struct { + Parent string + Sensors []sensorEntry +} + +type wifiEntry struct { + Name string + Manufacturer string + Bands string // all supported bands, e.g. 
"2.4 GHz, 5 GHz" + Standards string + MaxAP int +} + +type diskEntry struct { + Mount string + Size string + Available string + Percent int + Class string // "" / "is-warn" / "is-crit" +} + +// DashboardHandler serves the main dashboard page. +type DashboardHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Index renders the dashboard (GET /). +func (h *DashboardHandler) Index(w http.ResponseWriter, r *http.Request) { + data := dashboardData{ + PageData: newPageData(r, "dashboard", "Overview"), + } + + // Detach from the request context so that RESTCONF calls survive + // browser connection resets (common during login redirects). + // The RESTCONF client's own 10 s timeout still bounds each call. + ctx := context.WithoutCancel(r.Context()) + var ( + state systemStateWrapper + hw hardwareWrapper + sysConf struct { + System struct { + Hostname string `json:"hostname"` + Contact string `json:"contact"` + Location string `json:"location"` + } `json:"ietf-system:system"` + } + stateErr, hwErr, confErr error + wg sync.WaitGroup + ) + + wg.Add(3) + go func() { + defer wg.Done() + stateErr = h.RC.Get(ctx, "/data/ietf-system:system-state", &state) + }() + go func() { + defer wg.Done() + hwErr = h.RC.Get(ctx, "/data/ietf-hardware:hardware", &hw) + }() + go func() { + defer wg.Done() + confErr = h.RC.Get(ctx, "/data/ietf-system:system", &sysConf) + }() + wg.Wait() + + if stateErr != nil { + log.Printf("restconf system-state: %v", stateErr) + data.Error = "Could not fetch system information" + } else { + ss := state.SystemState + data.OSName = ss.Platform.OSName + data.OSVersion = ss.Platform.OSVersion + data.Machine = ss.Platform.Machine + if data.Machine == "arm64" { + data.Machine = "aarch64" + } + data.Firmware = firmwareVersion(ss.Software) + data.Uptime = computeUptime(ss.Clock.BootDatetime, ss.Clock.CurrentDatetime) + data.CurrentTime = formatCurrentTime(ss.Clock.CurrentDatetime) + + total := int64(ss.Resource.Memory.Total) + avail := 
int64(ss.Resource.Memory.Available) + data.MemTotal = total / 1024 // KiB → MiB + data.MemUsed = (total - avail) / 1024 + if total > 0 { + data.MemPercent = int(float64(total-avail) / float64(total) * 100) + } + + switch { + case data.MemPercent >= 90: + data.MemClass = "is-crit" + case data.MemPercent >= 70: + data.MemClass = "is-warn" + default: + data.MemClass = "" + } + + la := ss.Resource.LoadAverage + if la1 := float64(la.Load1min); la1 >= 0.9 { + data.CPUClass = "is-crit" + } else if la1 >= 0.7 { + data.CPUClass = "is-warn" + } + + data.Load1 = strconv.FormatFloat(float64(la.Load1min), 'f', 2, 64) + data.Load5 = strconv.FormatFloat(float64(la.Load5min), 'f', 2, 64) + data.Load15 = strconv.FormatFloat(float64(la.Load15min), 'f', 2, 64) + + for _, fs := range ss.Resource.Filesystem { + size := int64(fs.Size) + used := int64(fs.Used) + pct := 0 + if size > 0 { + pct = int(float64(used) / float64(size) * 100) + } + diskClass := "" + switch { + case pct >= 90: + diskClass = "is-crit" + case pct >= 70: + diskClass = "is-warn" + } + data.Disks = append(data.Disks, diskEntry{ + Mount: fs.MountPoint, + Size: humanKiB(size), + Available: humanKiB(int64(fs.Available)), + Percent: pct, + Class: diskClass, + }) + } + } + + if hwErr != nil { + log.Printf("restconf hardware: %v", hwErr) + } else { + sensorMap := make(map[string][]sensorEntry) + var sensorParents []string + + for _, c := range hw.Hardware.Component { + class := shortClass(c.Class) + if class == "chassis" { + data.Board = boardInfo{ + Model: c.ModelName, + Manufacturer: c.MfgName, + SerialNum: c.SerialNum, + HardwareRev: c.HardwareRev, + BaseMAC: c.PhysAddress, + } + } + if c.SensorData != nil && c.SensorData.OperStatus == "ok" { + entry := sensorEntry{ + Name: c.Name, + Value: formatSensor(c.SensorData.ValueType, int64(c.SensorData.Value), c.SensorData.ValueScale), + Type: c.SensorData.ValueType, + } + p := c.Parent + if _, ok := sensorMap[p]; !ok { + sensorParents = append(sensorParents, p) + } + 
sensorMap[p] = append(sensorMap[p], entry) + } + if c.WiFiRadio != nil { + var stds []string + var ht, vht, he bool + var bandNames []string + for _, b := range c.WiFiRadio.Bands { + if b.HTCapable { + ht = true + } + if b.VHTCapable { + vht = true + } + if b.HECapable { + he = true + } + name := b.Name + if name == "" { + name = b.Band + } + if name != "" { + bandNames = append(bandNames, name) + } + } + if len(bandNames) == 0 && c.WiFiRadio.Band != "" { + bandNames = append(bandNames, c.WiFiRadio.Band) + } + if ht { + stds = append(stds, "11n") + } + if vht { + stds = append(stds, "11ac") + } + if he { + stds = append(stds, "11ax") + } + maxAP := 0 + if c.WiFiRadio.MaxInterfaces != nil { + maxAP = c.WiFiRadio.MaxInterfaces.AP + } + data.WiFiRadios = append(data.WiFiRadios, wifiEntry{ + Name: c.Name, + Manufacturer: c.MfgName, + Bands: strings.Join(bandNames, ", "), + Standards: strings.Join(stds, "/"), + MaxAP: maxAP, + }) + } + } + for _, p := range sensorParents { + data.SensorGroups = append(data.SensorGroups, sensorGroup{ + Parent: p, + Sensors: sensorMap[p], + }) + } + } + + if confErr != nil { + log.Printf("restconf system config: %v", confErr) + } else { + data.Hostname = sysConf.System.Hostname + data.Contact = sysConf.System.Contact + data.Location = sysConf.System.Location + } + + tmplName := "dashboard.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// firmwareVersion returns the version string for the booted software slot. +func firmwareVersion(sw software) string { + for _, slot := range sw.Slot { + if slot.Name == sw.Booted { + return slot.Version + } + } + return "" +} + +// computeUptime returns a human-readable uptime string from RFC3339 timestamps. 
// computeUptime returns a human-readable uptime string from RFC3339 timestamps.
//
// boot and now are the "boot-datetime" and "current-datetime" leaves from
// ietf-system's clock container. If boot cannot be parsed the empty string
// is returned; if now cannot be parsed the local wall clock is used instead.
// A negative difference (clock skew, or a system clock that was set after
// boot) is clamped to zero so we never display a negative uptime.
func computeUptime(boot, now string) string {
	bootT, err := time.Parse(time.RFC3339, boot)
	if err != nil {
		return ""
	}
	nowT, err := time.Parse(time.RFC3339, now)
	if err != nil {
		nowT = time.Now()
	}

	d := nowT.Sub(bootT)
	if d < 0 {
		// Clock skew: boot time reported after current time. The old code
		// fell through and could print e.g. "-30m"; clamp instead.
		d = 0
	}
	days := int(d.Hours()) / 24
	hours := int(d.Hours()) % 24
	mins := int(d.Minutes()) % 60

	switch {
	case days > 0:
		return fmt.Sprintf("%dd %dh %dm", days, hours, mins)
	case hours > 0:
		return fmt.Sprintf("%dh %dm", hours, mins)
	default:
		return fmt.Sprintf("%dm", mins)
	}
}

// formatCurrentTime formats an RFC3339 timestamp as "2006-01-02 15:04:05 +00:00".
// The timestamp is normalized to UTC; an unparsable input yields "".
func formatCurrentTime(s string) string {
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return ""
	}
	return t.UTC().Format("2006-01-02 15:04:05 +00:00")
}

// shortClass strips the YANG module prefix from a hardware class identity,
// e.g. "infix-hardware:chassis" -> "chassis". Input without a colon is
// returned unchanged.
func shortClass(full string) string {
	if i := strings.LastIndex(full, ":"); i >= 0 {
		return full[i+1:]
	}
	return full
}

// formatSensor converts a raw sensor value to a human-readable string,
// matching the formatting used by cli_pretty.
//
// value is scaled down according to the YANG value-scale ("milli" or
// "micro"; any other scale is taken as-is), then formatted per value-type.
// Unknown value types fall back to a bare one-decimal number.
func formatSensor(valueType string, value int64, scale string) string {
	v := float64(value)
	switch scale {
	case "milli":
		v /= 1000
	case "micro":
		v /= 1000000
	}
	switch valueType {
	case "celsius":
		return fmt.Sprintf("%.1f\u00b0C", v)
	case "rpm":
		return fmt.Sprintf("%.0f RPM", v)
	case "volts-DC":
		return fmt.Sprintf("%.2f VDC", v)
	case "amperes":
		return fmt.Sprintf("%.2f A", v)
	case "watts":
		return fmt.Sprintf("%.2f W", v)
	default:
		return fmt.Sprintf("%.1f", v)
	}
}

// humanBytes converts bytes to a human-readable string (B, KiB, MiB, GiB, TiB).
+func humanBytes(b int64) string { + v := float64(b) + for _, unit := range []string{"B", "KiB", "MiB", "GiB", "TiB"} { + if v < 1024 || unit == "TiB" { + if v == math.Trunc(v) { + return fmt.Sprintf("%.0f %s", v, unit) + } + return fmt.Sprintf("%.1f %s", v, unit) + } + v /= 1024 + } + return fmt.Sprintf("%.1f PiB", v) +} + +// humanKiB converts KiB to a human-readable string using binary (IEC) units. +func humanKiB(kib int64) string { + v := float64(kib) + for _, unit := range []string{"KiB", "MiB", "GiB", "TiB"} { + if v < 1024 || unit == "TiB" { + if v == math.Trunc(v) { + return fmt.Sprintf("%.0f %s", v, unit) + } + return fmt.Sprintf("%.1f %s", v, unit) + } + v /= 1024 + } + return fmt.Sprintf("%.1f PiB", v) +} diff --git a/src/webui/internal/handlers/dashboard_test.go b/src/webui/internal/handlers/dashboard_test.go new file mode 100644 index 000000000..d996ad847 --- /dev/null +++ b/src/webui/internal/handlers/dashboard_test.go @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "html/template" + "net/http" + "net/http/httptest" + "testing" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +var minimalDashTmpl = template.Must(template.New("dashboard.html").Parse( + `{{define "dashboard.html"}}hostname={{.Hostname}} error={{.Error}}{{end}}` + + `{{define "content"}}{{.Hostname}}{{end}}`, +)) + +func TestDashboardIndex_ReturnsOK(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &DashboardHandler{Template: minimalDashTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/", nil) + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "testuser", + Password: "testpass", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Index(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d", w.Code) + } +} + +func 
TestDashboardIndex_ShowsErrorOnRESTCONFFailure(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &DashboardHandler{Template: minimalDashTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/", nil) + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "tok") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Index(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body") + } +} + +func TestDashboardIndex_HTMXPartial(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &DashboardHandler{Template: minimalDashTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set("HX-Request", "true") + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "tok") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Index(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } +} diff --git a/src/webui/internal/handlers/dhcp.go b/src/webui/internal/handlers/dhcp.go new file mode 100644 index 000000000..966c1273c --- /dev/null +++ b/src/webui/internal/handlers/dhcp.go @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "fmt" + "html/template" + "log" + "net/http" + "time" + + "github.com/kernelkit/webui/internal/restconf" +) + +// ─── DHCP types ────────────────────────────────────────────────────────────── + +// DHCPLease is a single active DHCP lease. 
// DHCPLease is one row of the lease table shown on the DHCP page.
type DHCPLease struct {
	Address  string // leased IP address
	MAC      string // client hardware address
	Hostname string // client-reported hostname, may be empty
	Expires  string // relative or "never"
	ClientID string // DHCP client-identifier option, may be empty
}

// DHCPStats holds DHCP packet counters.
type DHCPStats struct {
	InDiscoveries int64
	InRequests    int64
	InReleases    int64
	OutOffers     int64
	OutAcks       int64
	OutNaks       int64
}

// DHCPData is the parsed DHCP server state.
type DHCPData struct {
	Enabled bool
	Leases  []DHCPLease
	Stats   DHCPStats
}

// ─── Page data ──────────────────────────────────────────────────────────────

// dhcpPageData is the template context for dhcp.html.
type dhcpPageData struct {
	PageData
	DHCP  *DHCPData // nil when the RESTCONF fetch failed
	Error string    // human-readable error shown in the page, "" on success
}

// ─── Handler ────────────────────────────────────────────────────────────────

// DHCPHandler serves the DHCP status page.
type DHCPHandler struct {
	Template *template.Template
	RC       *restconf.Client
}

// Overview renders the DHCP page (GET /dhcp).
//
// It fetches the infix-dhcp-server state over RESTCONF, converts leases and
// packet counters into display form, and renders either the full page or —
// for HTMX requests — only the "content" partial.
func (h *DHCPHandler) Overview(w http.ResponseWriter, r *http.Request) {
	data := dhcpPageData{
		PageData: newPageData(r, "dhcp", "DHCP Server"),
	}

	// Detach from the request context so the RESTCONF call is bounded by
	// the client's own timeout rather than the browser connection.
	ctx := context.WithoutCancel(r.Context())

	// Anonymous struct mirroring the RESTCONF JSON for
	// infix-dhcp-server:dhcp-server; only the leaves we display are mapped.
	var raw struct {
		DHCP struct {
			Enabled yangBool `json:"enabled"`
			Leases  struct {
				Lease []struct {
					Address  string `json:"address"`
					PhysAddr string `json:"phys-address"`
					Hostname string `json:"hostname"`
					Expires  string `json:"expires"`
					ClientID string `json:"client-id"`
				} `json:"lease"`
			} `json:"leases"`
			Statistics struct {
				OutOffers yangInt64 `json:"out-offers"`
				OutAcks   yangInt64 `json:"out-acks"`
				OutNaks   yangInt64 `json:"out-naks"`
				// NOTE(review): tag is "in-discovers", not "in-discoveries" —
				// presumably matches the infix-dhcp-server YANG leaf; confirm.
				InDiscoveries yangInt64 `json:"in-discovers"`
				InRequests    yangInt64 `json:"in-requests"`
				InReleases    yangInt64 `json:"in-releases"`
			} `json:"statistics"`
		} `json:"infix-dhcp-server:dhcp-server"`
	}
	if err := h.RC.Get(ctx, "/data/infix-dhcp-server:dhcp-server", &raw); err != nil {
		log.Printf("restconf dhcp-server: %v", err)
		data.Error = "Failed to fetch DHCP data"
	} else {
		d := raw.DHCP
		dhcp := &DHCPData{
			Enabled: bool(d.Enabled),
			Stats: DHCPStats{
				InDiscoveries: int64(d.Statistics.InDiscoveries),
				InRequests:    int64(d.Statistics.InRequests),
				InReleases:    int64(d.Statistics.InReleases),
				OutOffers:     int64(d.Statistics.OutOffers),
				OutAcks:       int64(d.Statistics.OutAcks),
				OutNaks:       int64(d.Statistics.OutNaks),
			},
		}
		for _, l := range d.Leases.Lease {
			dhcp.Leases = append(dhcp.Leases, DHCPLease{
				Address:  l.Address,
				MAC:      l.PhysAddr,
				Hostname: l.Hostname,
				Expires:  formatDHCPExpiry(l.Expires),
				ClientID: l.ClientID,
			})
		}
		data.DHCP = dhcp
	}

	// HTMX navigation swaps only the content area; full loads get the page.
	tmplName := "dhcp.html"
	if r.Header.Get("HX-Request") == "true" {
		tmplName = "content"
	}
	if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil {
		log.Printf("template error: %v", err)
		http.Error(w, "Internal server error", http.StatusInternalServerError)
	}
}

// ─── Helpers ────────────────────────────────────────────────────────────────

// formatDHCPExpiry converts a YANG date-and-time or "never" string to a
// human-readable relative expiry string.
//
// Unparsable inputs are returned verbatim so the raw value is still visible
// in the UI rather than silently dropped.
func formatDHCPExpiry(s string) string {
	if s == "" || s == "never" {
		return "never"
	}
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		// Try without timezone
		t, err = time.Parse("2006-01-02T15:04:05", s)
		if err != nil {
			return s
		}
	}
	d := time.Until(t)
	if d < 0 {
		d = -d
		return "expired " + formatRelDuration(d) + " ago"
	}
	return "in " + formatRelDuration(d)
}

// formatRelDuration formats a time.Duration in a compact human-readable form.
+func formatRelDuration(d time.Duration) string { + switch { + case d >= 24*time.Hour: + return fmt.Sprintf("%dd", int(d.Hours())/24) + case d >= time.Hour: + return fmt.Sprintf("%dh%dm", int(d.Hours()), int(d.Minutes())%60) + case d >= time.Minute: + return fmt.Sprintf("%dm", int(d.Minutes())) + default: + return fmt.Sprintf("%ds", int(d.Seconds())) + } +} diff --git a/src/webui/internal/handlers/firewall.go b/src/webui/internal/handlers/firewall.go new file mode 100644 index 000000000..e4332f18b --- /dev/null +++ b/src/webui/internal/handlers/firewall.go @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "errors" + "html/template" + "log" + "net/http" + "sort" + "strings" + + "github.com/kernelkit/webui/internal/restconf" +) + +// RESTCONF JSON structures for infix-firewall:firewall. + +type firewallWrapper struct { + Firewall firewallJSON `json:"infix-firewall:firewall"` +} + +type firewallJSON struct { + Enabled *yangBool `json:"enabled"` // YANG default: true; nil means enabled + Default string `json:"default"` + Logging string `json:"logging"` + Lockdown yangBool `json:"lockdown"` + Zone []zoneJSON `json:"zone"` + Policy []policyJSON `json:"policy"` +} + +type zoneJSON struct { + Name string `json:"name"` + Action string `json:"action"` + Description string `json:"description"` + Interface []string `json:"interface"` + Network []string `json:"network"` + Service []string `json:"service"` + PortForward []portForwardJSON `json:"port-forward"` + Immutable bool `json:"immutable"` +} + +type portForwardJSON struct { + Port string `json:"port"` + Protocol string `json:"protocol"` + ToAddr string `json:"to-addr"` + ToPort string `json:"to-port"` +} + +type policyJSON struct { + Name string `json:"name"` + Action string `json:"action"` + Description string `json:"description"` + Priority yangInt64 `json:"priority"` + Ingress []string `json:"ingress"` + Egress []string `json:"egress"` + Service []string `json:"service"` + Masquerade 
bool `json:"masquerade"` + Immutable bool `json:"immutable"` +} + +// Template data structures. + +type firewallData struct { + PageData + Enabled bool + EnabledText string + DefaultZone string + Lockdown bool + Logging string + ZoneNames []string + Matrix []matrixRow + Zones []zoneEntry + Policies []policyEntry + Error string +} + +type matrixRow struct { + Zone string + Cells []matrixCell +} + +type matrixCell struct { + Class string + Symbol string + Verdict string // "allow", "deny", "conditional", "self" + From string + To string + Detail string // human-readable reason for the drill-down panel +} + +type zoneEntry struct { + Name string + Action string + Interfaces string + Networks string + Services string // services allowed to HOST from this zone +} + +type policyEntry struct { + Name string + Action string + Priority int64 + Ingress string + Egress string + Services string + Masquerade bool + Immutable bool + Description string +} + +// FirewallHandler serves the firewall overview page. +type FirewallHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the firewall overview (GET /firewall). +func (h *FirewallHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := firewallData{ + PageData: newPageData(r, "firewall", "Firewall"), + } + + var fw firewallWrapper + err := h.RC.Get(r.Context(), "/data/infix-firewall:firewall", &fw) + if err != nil { + var rcErr *restconf.Error + if errors.As(err, &rcErr) && rcErr.StatusCode == http.StatusNotFound { + // Firewall module not active — show disabled state, not an error. 
+ data.EnabledText = "Inactive" + } else { + log.Printf("restconf firewall: %v", err) + data.Error = "Could not fetch firewall configuration" + } + } + if err == nil { + f := fw.Firewall + data.Enabled = f.Enabled == nil || bool(*f.Enabled) + if data.Enabled { + data.EnabledText = "Active" + } else { + data.EnabledText = "Inactive" + } + data.DefaultZone = f.Default + data.Lockdown = bool(f.Lockdown) + data.Logging = f.Logging + if data.Logging == "" { + data.Logging = "off" + } + + for _, z := range f.Zone { + data.Zones = append(data.Zones, zoneEntry{ + Name: z.Name, + Action: z.Action, + Interfaces: strings.Join(z.Interface, ", "), + Networks: strings.Join(z.Network, ", "), + Services: strings.Join(z.Service, ", "), + }) + } + + for _, p := range f.Policy { + data.Policies = append(data.Policies, policyEntry{ + Name: p.Name, + Action: p.Action, + Priority: int64(p.Priority), + Ingress: strings.Join(p.Ingress, ", "), + Egress: strings.Join(p.Egress, ", "), + Services: strings.Join(p.Service, ", "), + Masquerade: p.Masquerade, + Immutable: p.Immutable, + Description: p.Description, + }) + } + + sort.Slice(data.Policies, func(i, j int) bool { + return data.Policies[i].Priority < data.Policies[j].Priority + }) + + data.ZoneNames, data.Matrix = buildMatrix(f.Zone, f.Policy) + } + + tmplName := "firewall.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// buildMatrix creates the zone-to-zone traffic flow matrix. +// Zones are listed along both axes with HOST (the device itself) prepended. +// Each cell shows whether traffic from the row zone to the column zone +// is allowed, denied, or conditional. 
+func buildMatrix(zones []zoneJSON, policies []policyJSON) ([]string, []matrixRow) { + if len(zones) == 0 { + return nil, nil + } + + names := []string{"HOST"} + zoneByName := map[string]zoneJSON{} + for _, z := range zones { + names = append(names, z.Name) + zoneByName[z.Name] = z + } + + // Sort policies by priority for evaluation. + sorted := make([]policyJSON, len(policies)) + copy(sorted, policies) + sort.Slice(sorted, func(i, j int) bool { + return int64(sorted[i].Priority) < int64(sorted[j].Priority) + }) + + rows := make([]matrixRow, len(names)) + for i, src := range names { + rows[i] = matrixRow{Zone: src, Cells: make([]matrixCell, len(names))} + for j, dst := range names { + var cell matrixCell + switch { + case src == dst: + cell = matrixCell{Class: "matrix-self", Symbol: "—", Verdict: "self"} + case src == "HOST": + // Traffic from the device to any zone is always allowed. + cell = matrixCell{Class: "matrix-allow", Symbol: "✓", Verdict: "allow", + Detail: "HOST can reach all zones"} + case dst == "HOST": + // Input to device: governed by zone action + zone services. + cell = zoneToHost(zoneByName[src]) + default: + // Forwarding between zones: governed by policies. + cell = evalForward(src, dst, sorted) + } + cell.From = src + cell.To = dst + rows[i].Cells[j] = cell + } + } + + return names, rows +} + +// zoneToHost determines traffic flow from a zone to the device (HOST). +// This mirrors the CLI: it is based solely on the zone's action and services, +// not on policies (per cli_pretty.py traffic_flow logic). 
+func zoneToHost(zone zoneJSON) matrixCell { + if zone.Action == "accept" { + return matrixCell{Class: "matrix-allow", Symbol: "✓", Verdict: "allow", + Detail: "Zone default action: accept"} + } + if len(zone.Service) > 0 || len(zone.PortForward) > 0 { + if len(zone.Service) > 0 { + return matrixCell{Class: "matrix-cond", Symbol: "⚠", Verdict: "conditional", + Detail: "Services: " + strings.Join(zone.Service, ", ")} + } + return matrixCell{Class: "matrix-cond", Symbol: "⚠", Verdict: "conditional", + Detail: "Port-forwarding rules apply"} + } + return matrixCell{Class: "matrix-deny", Symbol: "✗", Verdict: "deny", + Detail: "Zone default action: " + zone.Action} +} + +// evalForward determines traffic flow between two different zones via policies. +func evalForward(src, dst string, policies []policyJSON) matrixCell { + if v, name := evalPolicies(src, dst, policies); v != "" { + return makeCell(v, name) + } + return matrixCell{Class: "matrix-deny", Symbol: "✗", Verdict: "deny", + Detail: "No policy — default deny"} +} + +// evalPolicies walks the sorted policy list and returns the first terminal +// verdict (accept/reject/drop) for traffic from src to dst, plus the policy name. +// "continue" policies are skipped (they don't produce a final verdict). +func evalPolicies(src, dst string, policies []policyJSON) (verdict, name string) { + for _, p := range policies { + if !matchesZone(src, p.Ingress) || !matchesZone(dst, p.Egress) { + continue + } + if p.Action == "continue" { + continue + } + return p.Action, p.Name + } + return "", "" +} + +// matchesZone checks whether zone appears in list, treating "ANY" as a wildcard. 
+func matchesZone(zone string, list []string) bool { + for _, z := range list { + if z == zone || z == "ANY" { + return true + } + } + return false +} + +func makeCell(verdict, policyName string) matrixCell { + if verdict == "accept" { + return matrixCell{Class: "matrix-allow", Symbol: "✓", Verdict: "allow", + Detail: "Policy: " + policyName + " (accept)"} + } + return matrixCell{Class: "matrix-deny", Symbol: "✗", Verdict: "deny", + Detail: "Policy: " + policyName + " (" + verdict + ")"} +} diff --git a/src/webui/internal/handlers/interfaces.go b/src/webui/internal/handlers/interfaces.go new file mode 100644 index 000000000..c2d1e3527 --- /dev/null +++ b/src/webui/internal/handlers/interfaces.go @@ -0,0 +1,1055 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "fmt" + "html/template" + "log" + "math" + "net/http" + "sort" + "strings" + "sync" + + "github.com/kernelkit/webui/internal/restconf" +) + +// RESTCONF JSON structures for ietf-interfaces:interfaces. + +type interfacesWrapper struct { + Interfaces struct { + Interface []ifaceJSON `json:"interface"` + } `json:"ietf-interfaces:interfaces"` +} + +type ifaceJSON struct { + Name string `json:"name"` + Description string `json:"description"` + Type string `json:"type"` + Enabled *bool `json:"enabled"` + OperStatus string `json:"oper-status"` + PhysAddress string `json:"phys-address"` + IfIndex int `json:"if-index"` + IPv4 *ipCfg `json:"ietf-ip:ipv4"` + IPv6 *ipCfg `json:"ietf-ip:ipv6"` + Statistics *ifaceStats `json:"statistics"` + Ethernet *ethernetJSON `json:"ieee802-ethernet-interface:ethernet"` + Bridge *bridgeCfgJSON `json:"infix-interfaces:bridge"` + BridgePort *bridgePortJSON `json:"infix-interfaces:bridge-port"` + Lag *lagCfgJSON `json:"infix-interfaces:lag"` + LagPort *lagPortCfgJSON `json:"infix-interfaces:lag-port"` + Vlan *vlanCfgJSON `json:"infix-interfaces:vlan"` + WiFi *wifiJSON `json:"infix-interfaces:wifi"` + WireGuard *wireGuardJSON `json:"infix-interfaces:wireguard"` +} + 
// vlanCfgJSON mirrors infix-interfaces:vlan — VLAN sub-interface settings.
type vlanCfgJSON struct {
	ID           int    `json:"id"`             // VLAN ID (VID)
	TagType      string `json:"tag-type"`       // e.g. 802.1Q vs 802.1ad — TODO confirm value set
	LowerLayerIf string `json:"lower-layer-if"` // parent interface name
}

// bridgePortJSON mirrors infix-interfaces:bridge-port — membership of an
// interface in a bridge, with optional per-port STP state.
type bridgePortJSON struct {
	Bridge string `json:"bridge"` // name of the bridge this port belongs to
	STP    *struct {
		CIST *struct {
			State string `json:"state"` // STP port state on the CIST
		} `json:"cist"`
	} `json:"stp"`
}

// wifiJSON mirrors infix-interfaces:wifi — a WiFi interface is either an
// access point or a station (client); the unused mode is nil.
type wifiJSON struct {
	Radio       string           `json:"radio"` // name of the backing radio component
	AccessPoint *wifiAPJSON      `json:"access-point"`
	Station     *wifiStationJSON `json:"station"`
}

// wifiAPJSON is the access-point branch: the served SSID and the list of
// currently associated stations.
type wifiAPJSON struct {
	SSID     string `json:"ssid"`
	Stations struct {
		Station []wifiStaJSON `json:"station"`
	} `json:"stations"`
}

// wifiStaJSON is one station associated with a local access point,
// with per-station traffic counters.
type wifiStaJSON struct {
	MACAddress string `json:"mac-address"`
	// SignalStrength is a pointer so "absent" is distinguishable from 0;
	// presumably dBm — TODO confirm against the infix-interfaces model.
	SignalStrength *int      `json:"signal-strength"`
	ConnectedTime  yangInt64 `json:"connected-time"`
	RxPackets      yangInt64 `json:"rx-packets"`
	TxPackets      yangInt64 `json:"tx-packets"`
	RxBytes        yangInt64 `json:"rx-bytes"`
	TxBytes        yangInt64 `json:"tx-bytes"`
	RxSpeed        yangInt64 `json:"rx-speed"`
	TxSpeed        yangInt64 `json:"tx-speed"`
}

// wifiStationJSON is the station (client) branch: the SSID we are joined to,
// link quality, and the latest scan results.
type wifiStationJSON struct {
	SSID           string               `json:"ssid"`
	SignalStrength *int                 `json:"signal-strength"` // nil when not reported
	RxSpeed        yangInt64            `json:"rx-speed"`
	TxSpeed        yangInt64            `json:"tx-speed"`
	ScanResults    []wifiScanResultJSON `json:"scan-results"`
}

// wifiScanResultJSON is one network found during a station-mode scan.
type wifiScanResultJSON struct {
	SSID           string   `json:"ssid"`
	BSSID          string   `json:"bssid"`
	SignalStrength *int     `json:"signal-strength"` // nil when not reported
	Channel        int      `json:"channel"`
	Encryption     []string `json:"encryption"`
}

// WiFi radio survey RESTCONF structures (from ietf-hardware:hardware).
+ +type wifiRadioJSON struct { + Survey *wifiSurveyJSON `json:"survey"` +} + +type wifiSurveyJSON struct { + Channel []surveyChanJSON `json:"channel"` +} + +type surveyChanJSON struct { + Frequency int `json:"frequency"` + InUse yangBool `json:"in-use"` + Noise int `json:"noise"` + ActiveTime int `json:"active-time"` + BusyTime int `json:"busy-time"` + ReceiveTime int `json:"receive-time"` + TransmitTime int `json:"transmit-time"` +} + +type wireGuardJSON struct { + PeerStatus *struct { + Peer []wgPeerJSON `json:"peer"` + } `json:"peer-status"` +} + +type wgPeerJSON struct { + PublicKey string `json:"public-key"` + ConnectionStatus string `json:"connection-status"` + EndpointAddress string `json:"endpoint-address"` + EndpointPort int `json:"endpoint-port"` + LatestHandshake string `json:"latest-handshake"` + Transfer *struct { + TxBytes yangInt64 `json:"tx-bytes"` + RxBytes yangInt64 `json:"rx-bytes"` + } `json:"transfer"` +} + +type ipCfg struct { + Address []ipAddr `json:"address"` + MTU int `json:"mtu"` + DHCP *struct{} `json:"infix-dhcp-client:dhcp"` // DHCPv4 presence + Autoconf *struct{} `json:"infix-ip:autoconf"` // IPv4 link-local presence + SLAACv6 *struct{} `json:"autoconf"` // IPv6 SLAAC presence + DHCPv6 *struct{} `json:"infix-dhcpv6-client:dhcp"` // DHCPv6 presence +} + +type ipAddr struct { + IP string `json:"ip"` + PrefixLength yangInt64 `json:"prefix-length"` + Origin string `json:"origin"` +} + +type ifaceStats struct { + InOctets yangInt64 `json:"in-octets"` + OutOctets yangInt64 `json:"out-octets"` + InUnicastPkts yangInt64 `json:"in-unicast-pkts"` + InBroadcastPkts yangInt64 `json:"in-broadcast-pkts"` + InMulticastPkts yangInt64 `json:"in-multicast-pkts"` + InDiscards yangInt64 `json:"in-discards"` + InErrors yangInt64 `json:"in-errors"` + OutUnicastPkts yangInt64 `json:"out-unicast-pkts"` + OutBroadcastPkts yangInt64 `json:"out-broadcast-pkts"` + OutMulticastPkts yangInt64 `json:"out-multicast-pkts"` + OutDiscards yangInt64 
`json:"out-discards"` + OutErrors yangInt64 `json:"out-errors"` +} + +type ethernetJSON struct { + Speed string `json:"speed"` + Duplex string `json:"duplex"` + AutoNegotiation *struct { + Enable bool `json:"enable"` + } `json:"auto-negotiation"` + Statistics *struct { + Frame *ethFrameStats `json:"frame"` + } `json:"statistics"` +} + +type ethFrameStats struct { + InTotalPkts yangInt64 `json:"in-total-pkts"` + InTotalOctets yangInt64 `json:"in-total-octets"` + InGoodPkts yangInt64 `json:"in-good-pkts"` + InGoodOctets yangInt64 `json:"in-good-octets"` + InBroadcast yangInt64 `json:"in-broadcast"` + InMulticast yangInt64 `json:"in-multicast"` + InErrorFCS yangInt64 `json:"in-error-fcs"` + InErrorUndersize yangInt64 `json:"in-error-undersize"` + InErrorOversize yangInt64 `json:"in-error-oversize"` + InErrorMACInternal yangInt64 `json:"in-error-mac-internal"` + OutTotalPkts yangInt64 `json:"out-total-pkts"` + OutTotalOctets yangInt64 `json:"out-total-octets"` + OutGoodPkts yangInt64 `json:"out-good-pkts"` + OutGoodOctets yangInt64 `json:"out-good-octets"` + OutBroadcast yangInt64 `json:"out-broadcast"` + OutMulticast yangInt64 `json:"out-multicast"` +} + +// Template data structures. + +type interfacesData struct { + PageData + Interfaces []ifaceEntry + Error string +} + +type ifaceEntry struct { + HasMembers bool // is a bridge/LAG master with child ports + IsMember bool // is a bridge port or LAG member + IsLastMember bool // is the last child in its group + Forwarding bool // IP forwarding enabled (⇅ flag) + GroupID string // bridge/LAG name — set on parent and all its members + Name string + Type string + Status string + StatusUp bool + PhysAddr string + Addresses []addrEntry + Detail string // extra info: wifi AP, wireguard peers, etc. + RxBytes string + TxBytes string +} + +type addrEntry struct { + Address string + Origin string +} + +// InterfacesHandler serves the interfaces pages. 
+type InterfacesHandler struct { + Template *template.Template + DetailTemplate *template.Template + CountersTemplate *template.Template + RC *restconf.Client +} + +// Overview renders the interfaces page (GET /interfaces). +func (h *InterfacesHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := interfacesData{ + PageData: newPageData(r, "interfaces", "Interfaces"), + } + + var ( + ifaces interfacesWrapper + ri struct { + Routing struct { + Interfaces struct { + Interface []string `json:"interface"` + } `json:"interfaces"` + } `json:"ietf-routing:routing"` + } + ifaceErr error + wg sync.WaitGroup + ) + wg.Add(2) + go func() { + defer wg.Done() + ifaceErr = h.RC.Get(r.Context(), "/data/ietf-interfaces:interfaces", &ifaces) + }() + go func() { + defer wg.Done() + // Best-effort: ignore errors (routing may not be configured). + // Fetch the full routing object — the /interfaces sub-path returns empty + // on Infix even when the data is present in the parent resource. + h.RC.Get(r.Context(), "/data/ietf-routing:routing", &ri) //nolint:errcheck + }() + wg.Wait() + + if ifaceErr != nil { + log.Printf("restconf interfaces: %v", ifaceErr) + data.Error = "Could not fetch interface information" + } else { + fwdSet := make(map[string]bool, len(ri.Routing.Interfaces.Interface)) + for _, name := range ri.Routing.Interfaces.Interface { + fwdSet[name] = true + } + data.Interfaces = buildIfaceList(ifaces.Interfaces.Interface, fwdSet) + } + + tmplName := "interfaces.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +const ( + ifTypeEthernet = "ethernet" + ifTypeLoopback = "loopback" +) + +// prettyIfType converts a YANG interface type identity to the display +// name used by the Infix CLI (cli_pretty). 
+func prettyIfType(full string) string { + pretty := map[string]string{ + "bridge": "bridge", + "dummy": "dummy", + "ethernet": "ethernet", + "gre": "gre", + "gretap": "gretap", + "vxlan": "vxlan", + "wireguard": "wireguard", + "lag": "lag", + "loopback": "loopback", + "veth": "veth", + "vlan": "vlan", + "wifi": "wifi", + "other": "other", + "ethernetCsmacd": "ethernet", + "softwareLoopback": "loopback", + "l2vlan": "vlan", + "ieee8023adLag": "lag", + "ieee80211": "wifi", + "ilan": "veth", + } + + if i := strings.LastIndex(full, ":"); i >= 0 { + full = full[i+1:] + } + if name, ok := pretty[full]; ok { + return name + } + return full +} + +// buildIfaceList converts raw RESTCONF interface data into a flat, +// hierarchically ordered display list. Bridge/LAG members are grouped +// under their parent. fwdSet contains the names of interfaces with IP +// forwarding enabled (the ⇅ flag). +func buildIfaceList(raw []ifaceJSON, fwdSet map[string]bool) []ifaceEntry { + byName := map[string]*ifaceJSON{} + children := map[string][]string{} + childSet := map[string]bool{} + + for i := range raw { + iface := &raw[i] + byName[iface.Name] = iface + if iface.BridgePort != nil && iface.BridgePort.Bridge != "" { + parent := iface.BridgePort.Bridge + children[parent] = append(children[parent], iface.Name) + childSet[iface.Name] = true + } + } + + // Collect top-level interfaces (not bridge/LAG members) and sort them: + // loopback first, then alphabetically. + var topLevel []ifaceJSON + for _, iface := range raw { + if !childSet[iface.Name] { + topLevel = append(topLevel, iface) + } + } + sort.Slice(topLevel, func(i, j int) bool { + li := prettyIfType(topLevel[i].Type) == ifTypeLoopback + lj := prettyIfType(topLevel[j].Type) == ifTypeLoopback + if li != lj { + return li + } + return topLevel[i].Name < topLevel[j].Name + }) + + // Sort children of each bridge/LAG alphabetically. 
+ for parent := range children { + sort.Strings(children[parent]) + } + + var result []ifaceEntry + + for _, iface := range topLevel { + e := makeIfaceEntry(iface, fwdSet) + members := children[iface.Name] + e.HasMembers = len(members) > 0 + if e.HasMembers { + e.GroupID = iface.Name + } + result = append(result, e) + + for i, childName := range members { + child, ok := byName[childName] + if !ok { + continue + } + me := makeIfaceEntry(*child, fwdSet) + me.IsMember = true + me.IsLastMember = i == len(members)-1 + me.GroupID = iface.Name + if child.BridgePort != nil && child.BridgePort.STP != nil && + child.BridgePort.STP.CIST != nil && child.BridgePort.STP.CIST.State != "" { + me.Status = child.BridgePort.STP.CIST.State + me.StatusUp = me.Status == "forwarding" + } + result = append(result, me) + } + } + + return result +} + +func makeIfaceEntry(iface ifaceJSON, fwdSet map[string]bool) ifaceEntry { + e := ifaceEntry{ + Forwarding: fwdSet[iface.Name], + Name: iface.Name, + Type: prettyIfType(iface.Type), + Status: iface.OperStatus, + StatusUp: iface.OperStatus == "up", + PhysAddr: iface.PhysAddress, + } + + if iface.Statistics != nil { + e.RxBytes = humanBytes(int64(iface.Statistics.InOctets)) + e.TxBytes = humanBytes(int64(iface.Statistics.OutOctets)) + } + + if iface.IPv4 != nil { + for _, a := range iface.IPv4.Address { + e.Addresses = append(e.Addresses, addrEntry{ + Address: fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength)), + Origin: a.Origin, + }) + } + } + if iface.IPv6 != nil { + for _, a := range iface.IPv6.Address { + e.Addresses = append(e.Addresses, addrEntry{ + Address: fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength)), + Origin: a.Origin, + }) + } + } + + if v := iface.Vlan; v != nil { + if v.LowerLayerIf != "" { + e.Detail = fmt.Sprintf("vid %d (%s)", v.ID, v.LowerLayerIf) + } else { + e.Detail = fmt.Sprintf("vid %d", v.ID) + } + } + + if iface.WiFi != nil { + if ap := iface.WiFi.AccessPoint; ap != nil { + n := len(ap.Stations.Station) + e.Detail = 
fmt.Sprintf("AP, ssid: %s, stations: %d", ap.SSID, n) + } else if st := iface.WiFi.Station; st != nil { + e.Detail = fmt.Sprintf("Station, ssid: %s", st.SSID) + } + } + + if wg := iface.WireGuard; wg != nil && wg.PeerStatus != nil { + total := len(wg.PeerStatus.Peer) + up := 0 + for _, p := range wg.PeerStatus.Peer { + if p.ConnectionStatus == "up" { + up++ + } + } + e.Detail = fmt.Sprintf("%d peers (%d up)", total, up) + } + + return e +} + +// Template data for the interface detail page. +type ifaceDetailData struct { + PageData + Name string + Type string + Status string + StatusUp bool + PhysAddr string + IfIndex int + MTU int + Speed string + Duplex string + AutoNeg string + Addresses []addrEntry + WiFiMode string // "Access Point" or "Station" + WiFiSSID string + WiFiSignal string + WiFiRxSpeed string + WiFiTxSpeed string + WiFiStationCount string // e.g. "3" for AP mode + WGPeerSummary string // e.g. "3 peers (2 up)" + Counters ifaceCounters + EthFrameStats []kvEntry + WGPeers []wgPeerEntry + WiFiStations []wifiStaEntry + ScanResults []wifiScanEntry +} + +type ifaceCounters struct { + RxBytes string + RxUnicast string + RxBroadcast string + RxMulticast string + RxDiscards string + RxErrors string + TxBytes string + TxUnicast string + TxBroadcast string + TxMulticast string + TxDiscards string + TxErrors string +} + +type kvEntry struct { + Key string + Value string +} + +type wgPeerEntry struct { + PublicKey string + Status string + StatusUp bool + Endpoint string + Handshake string + TxBytes string + RxBytes string +} + +type wifiStaEntry struct { + MAC string + Signal string + SignalCSS string // "excellent", "good", "poor", "bad" + Time string + RxPkts string + TxPkts string + RxBytes string + TxBytes string + RxSpeed string + TxSpeed string +} + +type wifiScanEntry struct { + SSID string + BSSID string + Signal string + SignalCSS string + Channel string + Encryption string +} + +// fetchInterface retrieves a single interface by name from RESTCONF. 
+func (h *InterfacesHandler) fetchInterface(r *http.Request, name string) (*ifaceJSON, error) { + var all interfacesWrapper + if err := h.RC.Get(r.Context(), "/data/ietf-interfaces:interfaces", &all); err != nil { + return nil, err + } + for i := range all.Interfaces.Interface { + if all.Interfaces.Interface[i].Name == name { + return &all.Interfaces.Interface[i], nil + } + } + return nil, fmt.Errorf("interface %q not found", name) +} + +// buildDetailData converts raw RESTCONF interface data to template data. +func buildDetailData(r *http.Request, iface *ifaceJSON) ifaceDetailData { + d := ifaceDetailData{ + Name: iface.Name, + Type: prettyIfType(iface.Type), + Status: iface.OperStatus, + StatusUp: iface.OperStatus == "up", + PhysAddr: iface.PhysAddress, + IfIndex: iface.IfIndex, + } + + if iface.IPv4 != nil { + if iface.IPv4.MTU > 0 { + d.MTU = iface.IPv4.MTU + } + for _, a := range iface.IPv4.Address { + d.Addresses = append(d.Addresses, addrEntry{ + Address: fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength)), + Origin: a.Origin, + }) + } + } + if iface.IPv6 != nil { + for _, a := range iface.IPv6.Address { + d.Addresses = append(d.Addresses, addrEntry{ + Address: fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength)), + Origin: a.Origin, + }) + } + } + + if iface.Ethernet != nil && prettyIfType(iface.Type) == ifTypeEthernet { + d.Speed = prettySpeed(iface.Ethernet.Speed) + d.Duplex = iface.Ethernet.Duplex + if iface.Ethernet.AutoNegotiation != nil { + if iface.Ethernet.AutoNegotiation.Enable { + d.AutoNeg = "on" + } else { + d.AutoNeg = "off" + } + } + if iface.Ethernet.Statistics != nil && iface.Ethernet.Statistics.Frame != nil { + d.EthFrameStats = buildEthFrameStats(iface.Ethernet.Statistics.Frame) + } + } + + if iface.Statistics != nil { + d.Counters = buildCounters(iface.Statistics) + } + + if iface.WiFi != nil { + if ap := iface.WiFi.AccessPoint; ap != nil { + d.WiFiMode = "Access Point" + d.WiFiSSID = ap.SSID + d.WiFiStationCount = fmt.Sprintf("%d", 
len(ap.Stations.Station)) + for _, s := range ap.Stations.Station { + d.WiFiStations = append(d.WiFiStations, buildWifiStaEntry(s)) + } + } else if st := iface.WiFi.Station; st != nil { + d.WiFiMode = "Station" + d.WiFiSSID = st.SSID + if st.SignalStrength != nil { + d.WiFiSignal = fmt.Sprintf("%d dBm", *st.SignalStrength) + } + if st.RxSpeed > 0 { + d.WiFiRxSpeed = fmt.Sprintf("%.1f Mbps", float64(st.RxSpeed)/10) + } + if st.TxSpeed > 0 { + d.WiFiTxSpeed = fmt.Sprintf("%.1f Mbps", float64(st.TxSpeed)/10) + } + for _, sr := range st.ScanResults { + d.ScanResults = append(d.ScanResults, buildWifiScanEntry(sr)) + } + } + } + + if wg := iface.WireGuard; wg != nil && wg.PeerStatus != nil { + total := len(wg.PeerStatus.Peer) + up := 0 + for _, p := range wg.PeerStatus.Peer { + pe := wgPeerEntry{ + PublicKey: p.PublicKey, + Status: p.ConnectionStatus, + StatusUp: p.ConnectionStatus == "up", + } + if p.EndpointAddress != "" { + pe.Endpoint = fmt.Sprintf("%s:%d", p.EndpointAddress, p.EndpointPort) + } + if p.LatestHandshake != "" { + pe.Handshake = p.LatestHandshake + } + if p.Transfer != nil { + pe.TxBytes = humanBytes(int64(p.Transfer.TxBytes)) + pe.RxBytes = humanBytes(int64(p.Transfer.RxBytes)) + } + if p.ConnectionStatus == "up" { + up++ + } + d.WGPeers = append(d.WGPeers, pe) + } + d.WGPeerSummary = fmt.Sprintf("%d peers (%d up)", total, up) + } + + return d +} + +func buildCounters(s *ifaceStats) ifaceCounters { + return ifaceCounters{ + RxBytes: humanBytes(int64(s.InOctets)), + RxUnicast: formatCount(int64(s.InUnicastPkts)), + RxBroadcast: formatCount(int64(s.InBroadcastPkts)), + RxMulticast: formatCount(int64(s.InMulticastPkts)), + RxDiscards: formatCount(int64(s.InDiscards)), + RxErrors: formatCount(int64(s.InErrors)), + TxBytes: humanBytes(int64(s.OutOctets)), + TxUnicast: formatCount(int64(s.OutUnicastPkts)), + TxBroadcast: formatCount(int64(s.OutBroadcastPkts)), + TxMulticast: formatCount(int64(s.OutMulticastPkts)), + TxDiscards: 
formatCount(int64(s.OutDiscards)), + TxErrors: formatCount(int64(s.OutErrors)), + } +} + +func buildEthFrameStats(f *ethFrameStats) []kvEntry { + return []kvEntry{ + {"eth-in-frames", formatCount(int64(f.InTotalPkts))}, + {"eth-in-octets", humanBytes(int64(f.InTotalOctets))}, + {"eth-in-good-frames", formatCount(int64(f.InGoodPkts))}, + {"eth-in-good-octets", humanBytes(int64(f.InGoodOctets))}, + {"eth-in-broadcast", formatCount(int64(f.InBroadcast))}, + {"eth-in-multicast", formatCount(int64(f.InMulticast))}, + {"eth-in-fcs-error", formatCount(int64(f.InErrorFCS))}, + {"eth-in-undersize", formatCount(int64(f.InErrorUndersize))}, + {"eth-in-oversize", formatCount(int64(f.InErrorOversize))}, + {"eth-in-mac-error", formatCount(int64(f.InErrorMACInternal))}, + {"eth-out-frames", formatCount(int64(f.OutTotalPkts))}, + {"eth-out-octets", humanBytes(int64(f.OutTotalOctets))}, + {"eth-out-good-frames", formatCount(int64(f.OutGoodPkts))}, + {"eth-out-good-octets", humanBytes(int64(f.OutGoodOctets))}, + {"eth-out-broadcast", formatCount(int64(f.OutBroadcast))}, + {"eth-out-multicast", formatCount(int64(f.OutMulticast))}, + } +} + +// prettySpeed converts YANG ethernet speed identities to display strings. 
+func prettySpeed(s string) string { + if i := strings.LastIndex(s, ":"); i >= 0 { + s = s[i+1:] + } + return s +} + +func buildWifiStaEntry(s wifiStaJSON) wifiStaEntry { + e := wifiStaEntry{ + MAC: s.MACAddress, + Time: formatDuration(int64(s.ConnectedTime)), + RxPkts: formatCount(int64(s.RxPackets)), + TxPkts: formatCount(int64(s.TxPackets)), + RxBytes: humanBytes(int64(s.RxBytes)), + TxBytes: humanBytes(int64(s.TxBytes)), + RxSpeed: fmt.Sprintf("%.1f Mbps", float64(s.RxSpeed)/10), + TxSpeed: fmt.Sprintf("%.1f Mbps", float64(s.TxSpeed)/10), + } + if s.SignalStrength != nil { + sig := *s.SignalStrength + e.Signal = fmt.Sprintf("%d dBm", sig) + switch { + case sig >= -50: + e.SignalCSS = "excellent" + case sig >= -60: + e.SignalCSS = "good" + case sig >= -70: + e.SignalCSS = "poor" + default: + e.SignalCSS = "bad" + } + } + return e +} + +func buildWifiScanEntry(sr wifiScanResultJSON) wifiScanEntry { + e := wifiScanEntry{ + SSID: sr.SSID, + BSSID: sr.BSSID, + Channel: fmt.Sprintf("%d", sr.Channel), + } + if len(sr.Encryption) > 0 { + e.Encryption = strings.Join(sr.Encryption, ", ") + } else { + e.Encryption = "Open" + } + if sr.SignalStrength != nil { + sig := *sr.SignalStrength + e.Signal = fmt.Sprintf("%d dBm", sig) + switch { + case sig >= -50: + e.SignalCSS = "excellent" + case sig >= -60: + e.SignalCSS = "good" + case sig >= -70: + e.SignalCSS = "poor" + default: + e.SignalCSS = "bad" + } + } + return e +} + +func formatDuration(secs int64) string { + if secs < 60 { + return fmt.Sprintf("%ds", secs) + } + if secs < 3600 { + return fmt.Sprintf("%dm %ds", secs/60, secs%60) + } + h := secs / 3600 + m := (secs % 3600) / 60 + return fmt.Sprintf("%dh %dm", h, m) +} + +// formatCount formats a packet/frame count with thousand separators. +func formatCount(n int64) string { + if n == 0 { + return "0" + } + s := fmt.Sprintf("%d", n) + // Insert thousand separators from the right. 
+ var result []byte + for i, c := range s { + if i > 0 && (len(s)-i)%3 == 0 { + result = append(result, ',') + } + result = append(result, byte(c)) + } + return string(result) +} + +// Detail renders the interface detail page (GET /interfaces/{name}). +func (h *InterfacesHandler) Detail(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + + iface, err := h.fetchInterface(r, name) + if err != nil { + log.Printf("restconf interface %s: %v", name, err) + http.Error(w, "Interface not found", http.StatusNotFound) + return + } + + data := buildDetailData(r, iface) + data.PageData = newPageData(r, "interfaces", "Interface "+name) + + tmplName := "iface-detail.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.DetailTemplate.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// Counters renders the counters fragment for htmx polling (GET /interfaces/{name}/counters). +func (h *InterfacesHandler) Counters(w http.ResponseWriter, r *http.Request) { + name := r.PathValue("name") + + iface, err := h.fetchInterface(r, name) + if err != nil { + log.Printf("restconf interface %s counters: %v", name, err) + http.Error(w, "Interface not found", http.StatusNotFound) + return + } + + data := buildDetailData(r, iface) + + if err := h.CountersTemplate.ExecuteTemplate(w, "iface-counters", data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// freqToChannel converts a WiFi center frequency (MHz) to a channel number. 
+func freqToChannel(freq int) int { + switch { + case freq == 2484: + return 14 + case freq >= 2412 && freq <= 2472: + return (freq - 2407) / 5 + case freq >= 5180 && freq <= 5885: + return (freq - 5000) / 5 + case freq >= 5955 && freq <= 7115: + return (freq - 5950) / 5 + default: + return 0 + } +} + +// renderSurveySVG generates an inline SVG bar chart visualizing WiFi channel +// survey data. Each channel gets a stacked bar showing receive, transmit, +// and other busy time as a percentage of active time. A dashed noise-floor +// line is overlaid with a right-side dBm axis. The in-use channel is marked +// with a triangle. +func renderSurveySVG(channels []surveyChanJSON) template.HTML { + n := len(channels) + if n == 0 { + return "" + } + + sorted := make([]surveyChanJSON, n) + copy(sorted, channels) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].Frequency < sorted[j].Frequency + }) + + // Layout constants. + const chartH = 200 + padL, padR, padT, padB := 44, 48, 28, 58 + + slotW := 600.0 / float64(n) + if slotW > 44 { + slotW = 44 + } + if slotW < 16 { + slotW = 16 + } + barW := slotW * 0.65 + chartW := slotW * float64(n) + svgW := int(chartW) + padL + padR + svgH := chartH + padT + padB + + // Noise range for right axis. + hasNoise := false + noiseMin, noiseMax := 0, 0 + for i, ch := range sorted { + if ch.Noise != 0 { + if !hasNoise { + noiseMin, noiseMax = ch.Noise, ch.Noise + hasNoise = true + } + if ch.Noise < noiseMin { + noiseMin = ch.Noise + } + if ch.Noise > noiseMax { + noiseMax = ch.Noise + } + } + _ = i + } + nFloor := int(math.Floor(float64(noiseMin)/5))*5 - 5 + nCeil := int(math.Ceil(float64(noiseMax)/5))*5 + 5 + nRange := float64(nCeil - nFloor) + if nRange == 0 { + nRange = 10 + } + + var b strings.Builder + + fmt.Fprintf(&b, ``, + svgW, svgH) + + // Y-axis grid lines and labels (utilization %). 
+ for _, pct := range []int{0, 25, 50, 75, 100} { + y := padT + chartH - pct*chartH/100 + fmt.Fprintf(&b, ``, + padL, y, float64(padL)+chartW, y) + fmt.Fprintf(&b, `%d%%`, + padL-4, y+4, pct) + } + + // Right Y-axis labels (noise dBm). + if hasNoise { + nMid := (nFloor + nCeil) / 2 + for _, db := range []int{nFloor + 5, nMid, nCeil - 5} { + ny := float64(padT+chartH) - float64(db-nFloor)/nRange*float64(chartH) + fmt.Fprintf(&b, `%d`, + float64(padL)+chartW+4, ny+3, db) + } + } + + // Draw bars and collect noise line points. + var noisePts []string + + for i, ch := range sorted { + cx := float64(padL) + float64(i)*slotW + slotW/2 + bx := cx - barW/2 + + var rxPct, txPct, otherPct float64 + if ch.ActiveTime > 0 { + act := float64(ch.ActiveTime) + rxPct = float64(ch.ReceiveTime) / act * 100 + txPct = float64(ch.TransmitTime) / act * 100 + otherPct = float64(ch.BusyTime)/act*100 - rxPct - txPct + if otherPct < 0 { + otherPct = 0 + } + if total := rxPct + txPct + otherPct; total > 100 { + s := 100 / total + rxPct *= s + txPct *= s + otherPct *= s + } + } + + baseY := float64(padT + chartH) + + // Stacked: other busy (bottom), transmit (middle), receive (top). + if otherPct > 0.5 { + h := otherPct / 100 * float64(chartH) + fmt.Fprintf(&b, ``, + bx, baseY-h, barW, h) + baseY -= h + } + if txPct > 0.5 { + h := txPct / 100 * float64(chartH) + fmt.Fprintf(&b, ``, + bx, baseY-h, barW, h) + baseY -= h + } + if rxPct > 0.5 { + h := rxPct / 100 * float64(chartH) + fmt.Fprintf(&b, ``, + bx, baseY-h, barW, h) + } + + // In-use marker (triangle above bar). + if ch.InUse { + totalPct := rxPct + txPct + otherPct + topY := float64(padT+chartH) - totalPct/100*float64(chartH) + fmt.Fprintf(&b, ``, + cx, topY-3, cx-4, topY-11, cx+4, topY-11) + } + + // Channel label on X-axis. 
+ chNum := freqToChannel(ch.Frequency) + label := fmt.Sprintf("%d", chNum) + if chNum == 0 { + label = fmt.Sprintf("%d", ch.Frequency) + } + fmt.Fprintf(&b, `%s`, + cx, padT+chartH+14, label) + + // Noise line point. + if hasNoise && ch.Noise != 0 { + ny := float64(padT+chartH) - float64(ch.Noise-nFloor)/nRange*float64(chartH) + noisePts = append(noisePts, fmt.Sprintf("%.1f,%.1f", cx, ny)) + } + } + + // Draw noise floor line. + if len(noisePts) > 1 { + fmt.Fprintf(&b, ``, + strings.Join(noisePts, " ")) + for _, pt := range noisePts { + parts := strings.SplitN(pt, ",", 2) + fmt.Fprintf(&b, ``, + parts[0], parts[1]) + } + } + + // Legend row. + ly := svgH - 8 + lx := float64(padL) + + for _, item := range []struct{ color, label string }{ + {"#3b82f6", "Rx"}, + {"#22c55e", "Tx"}, + {"#f59e0b", "Other"}, + } { + fmt.Fprintf(&b, ``, + lx, ly-9, item.color) + fmt.Fprintf(&b, `%s`, + lx+13, ly, item.label) + lx += 13 + float64(len(item.label))*7 + 10 + } + + if hasNoise { + fmt.Fprintf(&b, ``, + lx, ly-4, lx+14, ly-4) + lx += 18 + fmt.Fprintf(&b, `Noise (dBm)`, + lx, ly) + lx += 80 + } + + // In-use legend marker. + fmt.Fprintf(&b, ``, + lx+5, ly-9, lx+1, ly-1, lx+9, ly-1) + fmt.Fprintf(&b, `In use`, + lx+14, ly) + + b.WriteString(``) + return template.HTML(b.String()) +} diff --git a/src/webui/internal/handlers/keystore.go b/src/webui/internal/handlers/keystore.go new file mode 100644 index 000000000..0b86b3c19 --- /dev/null +++ b/src/webui/internal/handlers/keystore.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "encoding/base64" + "strings" +) + +// RESTCONF JSON structures for ietf-keystore:keystore. 
// keystoreWrapper is the top-level RESTCONF envelope for the keystore.
type keystoreWrapper struct {
	Keystore keystoreJSON `json:"ietf-keystore:keystore"`
}

// keystoreJSON mirrors the ietf-keystore keystore container.
type keystoreJSON struct {
	SymmetricKeys  symmetricKeysJSON  `json:"symmetric-keys"`
	AsymmetricKeys asymmetricKeysJSON `json:"asymmetric-keys"`
}

type symmetricKeysJSON struct {
	SymmetricKey []symmetricKeyJSON `json:"symmetric-key"`
}

// symmetricKeyJSON is one named symmetric key; the cleartext value is only
// present when the server exposes it.
type symmetricKeyJSON struct {
	Name                  string `json:"name"`
	KeyFormat             string `json:"key-format"`
	CleartextSymmetricKey string `json:"cleartext-symmetric-key"`
}

type asymmetricKeysJSON struct {
	AsymmetricKey []asymmetricKeyJSON `json:"asymmetric-key"`
}

// asymmetricKeyJSON is one named asymmetric key with optional certificates.
type asymmetricKeyJSON struct {
	Name                string           `json:"name"`
	PrivateKeyFormat    string           `json:"private-key-format"`
	PublicKeyFormat     string           `json:"public-key-format"`
	PublicKey           string           `json:"public-key"`
	CleartextPrivateKey string           `json:"cleartext-private-key"`
	Certificates        certificatesJSON `json:"certificates"`
}

type certificatesJSON struct {
	Certificate []certificateJSON `json:"certificate"`
}

type certificateJSON struct {
	Name     string `json:"name"`
	CertData string `json:"cert-data"`
}

// shortFormat strips the YANG module prefix and the "*-format" suffix.
// e.g. "ietf-crypto-types:octet-string-key-format" → "octet-string",
// "ietf-crypto-types:rsa-private-key-format" → "rsa".
//
// The longer suffixes must be checked before the plain "-key-format":
// "-key-format" is a suffix of both of them, so trimming it first would
// leave "rsa-private" instead of "rsa" (the original trim order made the
// private/public branches dead code).
func shortFormat(full string) string {
	if i := strings.LastIndex(full, ":"); i >= 0 {
		full = full[i+1:]
	}
	for _, suffix := range []string{
		"-private-key-format",
		"-public-key-format",
		"-key-format",
	} {
		if strings.HasSuffix(full, suffix) {
			return strings.TrimSuffix(full, suffix)
		}
	}
	return full
}

// asymAlgorithm derives the key algorithm from the format fields,
// preferring the private-key format over the public-key format.
func asymAlgorithm(k asymmetricKeyJSON) string {
	// "format" rather than "fmt" — the original name shadowed the fmt package.
	for _, format := range []string{k.PrivateKeyFormat, k.PublicKeyFormat} {
		if name := shortFormat(format); name != "" {
			return name
		}
	}
	return ""
}

// decodeSymmetricValue returns the displayable value for a symmetric key.
// Passphrases are base64-decoded to plaintext; others shown as-is.
+func decodeSymmetricValue(k symmetricKeyJSON) string { + val := k.CleartextSymmetricKey + if val == "" { + return "-" + } + if shortFormat(k.KeyFormat) == "passphrase" { + if decoded, err := base64.StdEncoding.DecodeString(val); err == nil { + return string(decoded) + } + } + return val +} diff --git a/src/webui/internal/handlers/lldp.go b/src/webui/internal/handlers/lldp.go new file mode 100644 index 000000000..33b19c052 --- /dev/null +++ b/src/webui/internal/handlers/lldp.go @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "encoding/json" + "html/template" + "log" + "net/http" + "strings" + + "github.com/kernelkit/webui/internal/restconf" +) + +// ─── LLDP types ────────────────────────────────────────────────────────────── + +// LLDPNeighbor is a remote system seen via LLDP. +type LLDPNeighbor struct { + LocalPort string + ChassisID string + SystemName string + PortID string + PortDesc string + SystemDesc string + Capabilities string // comma-separated + MgmtAddress string +} + +// ─── Page data ─────────────────────────────────────────────────────────────── + +type lldpPageData struct { + PageData + Neighbors []LLDPNeighbor + Error string +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// LLDPHandler serves the LLDP neighbors page. +type LLDPHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the LLDP page (GET /lldp). 
+func (h *LLDPHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := lldpPageData{ + PageData: newPageData(r, "lldp", "LLDP Neighbors"), + } + + ctx := context.WithoutCancel(r.Context()) + + var raw struct { + LLDP struct { + Port []struct { + Name string `json:"name"` + DestMACAddress string `json:"dest-mac-address"` + RemoteSystemsData []struct { + ChassisID string `json:"chassis-id"` + PortID string `json:"port-id"` + PortDesc string `json:"port-desc"` + SystemName string `json:"system-name"` + SystemDescription string `json:"system-description"` + SystemCapabilitiesEnabled json.RawMessage `json:"system-capabilities-enabled"` + ManagementAddress []struct { + Address string `json:"address"` + } `json:"management-address"` + } `json:"remote-systems-data"` + } `json:"port"` + } `json:"ieee802-dot1ab-lldp:lldp"` + } + if err := h.RC.Get(ctx, "/data/ieee802-dot1ab-lldp:lldp", &raw); err != nil { + log.Printf("restconf lldp: %v", err) + data.Error = "Failed to fetch LLDP data" + } else { + for _, port := range raw.LLDP.Port { + for _, rs := range port.RemoteSystemsData { + mgmt := "" + if len(rs.ManagementAddress) > 0 { + mgmt = rs.ManagementAddress[0].Address + } + data.Neighbors = append(data.Neighbors, LLDPNeighbor{ + LocalPort: port.Name, + ChassisID: rs.ChassisID, + SystemName: rs.SystemName, + PortID: rs.PortID, + PortDesc: rs.PortDesc, + SystemDesc: rs.SystemDescription, + Capabilities: parseLLDPCapabilities(rs.SystemCapabilitiesEnabled), + MgmtAddress: mgmt, + }) + } + } + } + + tmplName := "lldp.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +// parseLLDPCapabilities turns the YANG system-capabilities-enabled bits +// value into a readable 
comma-separated string. +func parseLLDPCapabilities(raw json.RawMessage) string { + if len(raw) == 0 { + return "" + } + // Try plain string first (some implementations encode as "bridge router") + var s string + if json.Unmarshal(raw, &s) == nil { + parts := strings.Fields(s) + return strings.Join(parts, ", ") + } + // Try array of strings + var arr []string + if json.Unmarshal(raw, &arr) == nil { + return strings.Join(arr, ", ") + } + // Fallback: return raw minus braces + trimmed := strings.TrimSpace(string(raw)) + if trimmed == "{}" || trimmed == "null" || trimmed == "[]" { + return "" + } + return trimmed +} diff --git a/src/webui/internal/handlers/mdns.go b/src/webui/internal/handlers/mdns.go new file mode 100644 index 000000000..16ace4c66 --- /dev/null +++ b/src/webui/internal/handlers/mdns.go @@ -0,0 +1,263 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "fmt" + "html/template" + "log" + "net/http" + "sort" + "strings" + + "github.com/kernelkit/webui/internal/restconf" +) + +// ─── RESTCONF JSON types ────────────────────────────────────────────────────── + +type mdnsWrapper struct { + MDNS mdnsJSON `json:"infix-services:mdns"` +} + +type mdnsJSON struct { + Enabled *yangBool `json:"enabled"` + Domain string `json:"domain"` + Hostname string `json:"hostname"` + Interfaces mdnsIfacesJSON `json:"interfaces"` + Reflector mdnsReflJSON `json:"reflector"` + Neighbors mdnsNeighborsJSON `json:"neighbors"` +} + +type mdnsIfacesJSON struct { + Allow []string `json:"allow"` + Deny []string `json:"deny"` +} + +type mdnsReflJSON struct { + Enabled *yangBool `json:"enabled"` + ServiceFilter []string `json:"service-filter"` +} + +type mdnsNeighborsJSON struct { + Neighbor []mdnsNeighborJSON `json:"neighbor"` +} + +type mdnsNeighborJSON struct { + Hostname string `json:"hostname"` + Address []string `json:"address"` + LastSeen string `json:"last-seen"` + Service []mdnsServiceJSON `json:"service"` +} + +type mdnsServiceJSON struct { + Name string 
`json:"name"` + Type string `json:"type"` + Port uint16 `json:"port"` + Txt []string `json:"txt"` +} + +// ─── Template data ──────────────────────────────────────────────────────────── + +type mdnsPageData struct { + PageData + Enabled bool + EnabledText string + Domain string + Allow string + Deny string + Reflector bool + SvcFilter string + Neighbors []mdnsNeighborEntry + Error string +} + +type mdnsNeighborEntry struct { + Hostname string + PrimaryAddr string + ExtraAddrs []string // additional addresses shown in fold-out + LastSeen string // HH:MM:SS + Services []mdnsSvcEntry +} + +type mdnsSvcEntry struct { + Label string // "https", "ssh", etc. + Port uint16 + URL template.URL // non-empty for http/https — safe to use in href +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// MDNSHandler serves the mDNS overview page. +type MDNSHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the mDNS page (GET /mdns). 
+func (h *MDNSHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := mdnsPageData{ + PageData: newPageData(r, "mdns", "mDNS"), + } + + var raw mdnsWrapper + if err := h.RC.Get(r.Context(), "/data/infix-services:mdns", &raw); err != nil { + log.Printf("restconf mdns: %v", err) + data.Error = "Could not fetch mDNS data" + } else { + m := raw.MDNS + data.Enabled = m.Enabled == nil || bool(*m.Enabled) + if data.Enabled { + data.EnabledText = "Active" + } else { + data.EnabledText = "Inactive" + } + data.Domain = m.Domain + if data.Domain == "" { + data.Domain = "local" + } + data.Allow = strings.Join(m.Interfaces.Allow, ", ") + data.Deny = strings.Join(m.Interfaces.Deny, ", ") + data.Reflector = m.Reflector.Enabled != nil && bool(*m.Reflector.Enabled) + data.SvcFilter = strings.Join(m.Reflector.ServiceFilter, ", ") + + for _, n := range m.Neighbors.Neighbor { + data.Neighbors = append(data.Neighbors, buildMDNSNeighbor(n)) + } + sort.Slice(data.Neighbors, func(i, j int) bool { + return data.Neighbors[i].Hostname < data.Neighbors[j].Hostname + }) + } + + tmplName := "mdns.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +func buildMDNSNeighbor(n mdnsNeighborJSON) mdnsNeighborEntry { + // mdnsSortAddrs puts IPv4 first, then global IPv6, then link-local — so + // addrs[0] is already the best address for URL construction. 
+ addrs := mdnsSortAddrs(n.Address) + + primary := "" + if len(addrs) > 0 { + primary = addrs[0] + } + + entry := mdnsNeighborEntry{ + Hostname: n.Hostname, + PrimaryAddr: primary, + LastSeen: mdnsLastSeen(n.LastSeen), + } + if len(addrs) > 1 { + entry.ExtraAddrs = addrs[1:] + } + + for _, svc := range n.Service { + entry.Services = append(entry.Services, buildMDNSSvc(svc, primary)) + } + return entry +} + +func buildMDNSSvc(svc mdnsServiceJSON, addr string) mdnsSvcEntry { + meta := mdnsParseTxt(svc.Txt) + + var rawURL string + switch svc.Type { + case "_http._tcp": + if meta.adminurl != "" { + rawURL = meta.adminurl + } else { + rawURL = fmt.Sprintf("http://%s:%d%s", addr, svc.Port, meta.path) + } + case "_https._tcp": + if meta.adminurl != "" { + rawURL = meta.adminurl + } else { + rawURL = fmt.Sprintf("https://%s:%d%s", addr, svc.Port, meta.path) + } + } + + // Use the DNS-SD instance name for clickable services so that two https + // entries on different ports ("Infix NOS" vs "ttyd") are distinguishable. 
+ label := mdnsSvcLabel(svc.Type) + if rawURL != "" && svc.Name != "" { + label = svc.Name + } + + return mdnsSvcEntry{ + Label: label, + Port: svc.Port, + URL: template.URL(rawURL), // #nosec G203 — URL built from trusted operational data + } +} + +// "_https._tcp" → "https", "_netconf-ssh._tcp" → "netconf-ssh" +func mdnsSvcLabel(stype string) string { + s := strings.TrimPrefix(stype, "_") + if idx := strings.Index(s, "._"); idx >= 0 { + s = s[:idx] + } + return s +} + +func mdnsSortAddrs(addrs []string) []string { + out := make([]string, len(addrs)) + copy(out, addrs) + sort.SliceStable(out, func(i, j int) bool { + return mdnsAddrPrio(out[i]) < mdnsAddrPrio(out[j]) + }) + return out +} + +func mdnsAddrPrio(a string) int { + if !strings.Contains(a, ":") { + return 0 // IPv4 + } + if strings.HasPrefix(strings.ToLower(a), "fe80:") { + return 2 // link-local IPv6 + } + return 1 // global IPv6 +} + +func mdnsLastSeen(ts string) string { + if ts == "" { + return "" + } + parts := strings.SplitN(ts, "T", 2) + if len(parts) < 2 { + return ts + } + t := parts[1] + if len(t) >= 8 { + return t[:8] + } + return t +} + +type mdnsTxtMeta struct { + path string + adminurl string +} + +func mdnsParseTxt(records []string) mdnsTxtMeta { + var m mdnsTxtMeta + for _, r := range records { + switch { + case m.path == "" && strings.HasPrefix(r, "path="): + m.path = r[5:] + case m.adminurl == "" && strings.HasPrefix(r, "adminurl="): + m.adminurl = r[9:] + } + if m.path != "" && m.adminurl != "" { + break + } + } + return m +} diff --git a/src/webui/internal/handlers/nacm.go b/src/webui/internal/handlers/nacm.go new file mode 100644 index 000000000..e1c647dfa --- /dev/null +++ b/src/webui/internal/handlers/nacm.go @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "html/template" + "log" + "net/http" + "strings" + + "github.com/kernelkit/webui/internal/restconf" +) + +// ─── RESTCONF JSON types ────────────────────────────────────────────────────── + 
+type nacmWrapper struct { + NACM nacmJSON `json:"ietf-netconf-acm:nacm"` +} + +type nacmJSON struct { + EnableNACM bool `json:"enable-nacm"` + ReadDefault string `json:"read-default"` + WriteDefault string `json:"write-default"` + ExecDefault string `json:"exec-default"` + DeniedOperations uint32 `json:"denied-operations"` + DeniedDataWrites uint32 `json:"denied-data-writes"` + DeniedNotifications uint32 `json:"denied-notifications"` + Groups nacmGroupsJSON `json:"groups"` + RuleList []nacmRuleListJSON `json:"rule-list"` +} + +type nacmGroupsJSON struct { + Group []nacmGroupJSON `json:"group"` +} + +type nacmGroupJSON struct { + Name string `json:"name"` + UserName []string `json:"user-name"` +} + +type nacmRuleListJSON struct { + Name string `json:"name"` + Group []string `json:"group"` + Rule []nacmRuleJSON `json:"rule"` +} + +type nacmRuleJSON struct { + Name string `json:"name"` + ModuleName string `json:"module-name"` + Path string `json:"path"` + AccessOperations string `json:"access-operations"` + Action string `json:"action"` +} + +type nacmAuthWrapper struct { + System struct { + Authentication struct { + User []nacmUserJSON `json:"user"` + } `json:"authentication"` + } `json:"ietf-system:system"` +} + +type nacmUserJSON struct { + Name string `json:"name"` + Password string `json:"password"` + Shell string `json:"infix-system:shell"` + AuthorizedKey []interface{} `json:"authorized-key"` +} + +// ─── Template data ──────────────────────────────────────────────────────────── + +type nacmPageData struct { + PageData + Error string + + // Summary card + Enabled string + ReadDefault string + WriteDefault string + ExecDefault string + DeniedOperations uint32 + DeniedDataWrites uint32 + DeniedNotifications uint32 + + // Permission matrix + Matrix []nacmGroupPerm + + // Users and Groups tables + Users []nacmUserEntry + Groups []nacmGroupEntry +} + +type nacmGroupPerm struct { + Name string + Read nacmCell + Write nacmCell + Exec nacmCell + Restrictions []string 
+} + +type nacmCell struct { + Class string // "nacm-full" | "nacm-restricted" | "nacm-denied" + Symbol string // "✓" | "⚠" | "✗" +} + +type nacmUserEntry struct { + Name string + Shell string + Login string +} + +type nacmGroupEntry struct { + Name string + Members string +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// NACMHandler serves the NACM page. +type NACMHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the NACM page (GET /nacm). +func (h *NACMHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := nacmPageData{ + PageData: newPageData(r, "nacm", "NACM"), + } + + var nacmRaw nacmWrapper + nacmErr := h.RC.Get(r.Context(), "/data/ietf-netconf-acm:nacm", &nacmRaw) + + var authRaw nacmAuthWrapper + authErr := h.RC.Get(r.Context(), "/data/ietf-system:system/authentication", &authRaw) + + if nacmErr != nil { + log.Printf("restconf nacm: %v", nacmErr) + data.Error = "Could not fetch NACM data" + } else { + n := nacmRaw.NACM + if n.ReadDefault == "" { + n.ReadDefault = "permit" + } + if n.WriteDefault == "" { + n.WriteDefault = "deny" + } + if n.ExecDefault == "" { + n.ExecDefault = "permit" + } + + if n.EnableNACM { + data.Enabled = "yes" + } else { + data.Enabled = "no" + } + data.ReadDefault = n.ReadDefault + data.WriteDefault = n.WriteDefault + data.ExecDefault = n.ExecDefault + data.DeniedOperations = n.DeniedOperations + data.DeniedDataWrites = n.DeniedDataWrites + data.DeniedNotifications = n.DeniedNotifications + + data.Matrix = analyzeNACMPermissions(n) + + for _, g := range n.Groups.Group { + data.Groups = append(data.Groups, nacmGroupEntry{ + Name: g.Name, + Members: strings.Join(g.UserName, " "), + }) + } + } + + if authErr != nil { + log.Printf("restconf nacm auth: %v", authErr) + } else { + for _, u := range authRaw.System.Authentication.User { + data.Users = append(data.Users, buildNACMUserEntry(u)) + } + } + + tmplName := "nacm.html" + if 
r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// ─── Permission matrix logic ────────────────────────────────────────────────── +// Mirrors cli_pretty._analyze_group_permissions exactly. + +func analyzeNACMPermissions(n nacmJSON) []nacmGroupPerm { + readDefault := n.ReadDefault == "permit" + writeDefault := n.WriteDefault == "permit" + execDefault := n.ExecDefault == "permit" + + // Collect deny rules that apply to "*" (all groups) — these become restrictions. + var globalDenials []nacmRuleJSON + for _, rl := range n.RuleList { + for _, g := range rl.Group { + if g == "*" { + for _, rule := range rl.Rule { + if rule.Action == "deny" { + globalDenials = append(globalDenials, rule) + } + } + break + } + } + } + + var results []nacmGroupPerm + + for _, group := range n.Groups.Group { + canRead := readDefault + canWrite := writeDefault + canExec := execDefault + hasPermitAll := false + var restrictions []string + + // Process rule-lists in order; only those that name this group specifically. 
+ for _, rl := range n.RuleList { + applies := false + for _, g := range rl.Group { + if g == group.Name { + applies = true + break + } + } + if !applies { + continue + } + + for _, rule := range rl.Rule { + action := rule.Action + accessOps := strings.ToLower(rule.AccessOperations) + moduleName := rule.ModuleName + + // permit-all: module-name="*" AND access-operations="*" + if action == "permit" && moduleName == "*" && accessOps == "*" { + hasPermitAll = true + break + } + + // Explicit deny for write/exec operations + if action == "deny" && moduleName == "*" { + if strings.Contains(accessOps, "create") && + strings.Contains(accessOps, "update") && + strings.Contains(accessOps, "delete") { + canWrite = false + } + if strings.Contains(accessOps, "exec") { + canExec = false + } + } + } + + if hasPermitAll { + break + } + } + + // permit-all overrides everything, including unfavourable global defaults. + if hasPermitAll { + canRead = true + canWrite = true + canExec = true + } + + // Global denials create restrictions for groups that don't have permit-all. 
+ if !hasPermitAll { + seen := map[string]bool{} + for _, rule := range globalDenials { + var restriction string + if rule.Path != "" { + parts := strings.Split(strings.TrimRight(rule.Path, "/"), "/") + restriction = parts[len(parts)-1] + } else if rule.ModuleName != "" { + restriction = strings.TrimPrefix(rule.ModuleName, "ietf-") + } + if restriction != "" && !seen[restriction] { + seen[restriction] = true + restrictions = append(restrictions, restriction) + } + } + } + + results = append(results, nacmGroupPerm{ + Name: group.Name, + Read: makeNACMCell(canRead, len(restrictions) > 0), + Write: makeNACMCell(canWrite, len(restrictions) > 0), + Exec: makeNACMCell(canExec, len(restrictions) > 0), + Restrictions: restrictions, + }) + } + + return results +} + +func makeNACMCell(hasAccess, hasRestrictions bool) nacmCell { + if !hasAccess { + return nacmCell{Class: "nacm-denied", Symbol: "✗"} + } + if hasRestrictions { + return nacmCell{Class: "nacm-restricted", Symbol: "⚠"} + } + return nacmCell{Class: "nacm-full", Symbol: "✓"} +} + +// ─── User entry helper ──────────────────────────────────────────────────────── + +func buildNACMUserEntry(u nacmUserJSON) nacmUserEntry { + shell := u.Shell + if idx := strings.LastIndex(shell, ":"); idx >= 0 { + shell = shell[idx+1:] + } + if shell == "" || shell == "false" { + shell = "-" + } + + var login string + hasPassword := u.Password != "" + hasKeys := len(u.AuthorizedKey) > 0 + switch { + case hasPassword && hasKeys: + login = "password+key" + case hasPassword: + login = "password" + case hasKeys: + login = "key" + default: + login = "-" + } + + return nacmUserEntry{ + Name: u.Name, + Shell: shell, + Login: login, + } +} diff --git a/src/webui/internal/handlers/ntp.go b/src/webui/internal/handlers/ntp.go new file mode 100644 index 000000000..0b456ffa0 --- /dev/null +++ b/src/webui/internal/handlers/ntp.go @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "encoding/json" + "fmt" + 
"html/template" + "log" + "net/http" + "strings" + + "github.com/kernelkit/webui/internal/restconf" +) + +// ─── NTP types ─────────────────────────────────────────────────────────────── + +// NTPAssoc is a single NTP association/peer. +type NTPAssoc struct { + Address string + Stratum int + RefID string + Reach string // octal string + Poll int + Offset string // ms + Delay string // ms +} + +// NTPData is the parsed NTP state. +type NTPData struct { + Synchronized bool + Stratum int + RefID string + Offset string // ms + RootDelay string // ms + Associations []NTPAssoc +} + +// ─── Page data ─────────────────────────────────────────────────────────────── + +type ntpPageData struct { + PageData + NTP *NTPData + Error string +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// NTPHandler serves the NTP status page. +type NTPHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the NTP page (GET /ntp). +func (h *NTPHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := ntpPageData{ + PageData: newPageData(r, "ntp", "NTP"), + } + + ctx := context.WithoutCancel(r.Context()) + + var raw struct { + NTP struct { + ClockState struct { + SystemStatus struct { + ClockState string `json:"clock-state"` + ClockStratum int `json:"clock-stratum"` + ClockRefID json.RawMessage `json:"clock-refid"` + ClockOffset yangFloat64 `json:"clock-offset"` + RootDelay yangFloat64 `json:"root-delay"` + } `json:"system-status"` + } `json:"clock-state"` + Associations struct { + Association []struct { + Address string `json:"address"` + Stratum int `json:"stratum"` + RefID json.RawMessage `json:"refid"` + Reach uint8 `json:"reach"` + Poll int `json:"poll"` + Offset yangFloat64 `json:"offset"` + Delay yangFloat64 `json:"delay"` + } `json:"association"` + } `json:"associations"` + } `json:"ietf-ntp:ntp"` + } + if err := h.RC.Get(ctx, "/data/ietf-ntp:ntp", &raw); err != nil { + log.Printf("restconf ntp: 
%v", err) + data.Error = "Failed to fetch NTP data" + } else { + ss := raw.NTP.ClockState.SystemStatus + synced := strings.Contains(ss.ClockState, "synchronized") && + !strings.Contains(ss.ClockState, "unsynchronized") + ntp := &NTPData{ + Synchronized: synced, + Stratum: ss.ClockStratum, + RefID: rawJSONString(ss.ClockRefID), + Offset: fmt.Sprintf("%.3f ms", float64(ss.ClockOffset)), + RootDelay: fmt.Sprintf("%.3f ms", float64(ss.RootDelay)), + } + for _, a := range raw.NTP.Associations.Association { + ntp.Associations = append(ntp.Associations, NTPAssoc{ + Address: a.Address, + Stratum: a.Stratum, + RefID: rawJSONString(a.RefID), + Reach: fmt.Sprintf("%o", a.Reach), + Poll: a.Poll, + Offset: fmt.Sprintf("%.3f ms", float64(a.Offset)), + Delay: fmt.Sprintf("%.3f ms", float64(a.Delay)), + }) + } + data.NTP = ntp + } + + tmplName := "ntp.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +// rawJSONString extracts the unquoted string value from a JSON raw message +// that may be a string, number, or other scalar. +func rawJSONString(b json.RawMessage) string { + if len(b) == 0 { + return "" + } + // Try string + var s string + if json.Unmarshal(b, &s) == nil { + return s + } + // Fall back to raw bytes (number, etc.) 
+ return strings.Trim(string(b), `"`) +} diff --git a/src/webui/internal/handlers/routing.go b/src/webui/internal/handlers/routing.go new file mode 100644 index 000000000..fb3dbe515 --- /dev/null +++ b/src/webui/internal/handlers/routing.go @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "encoding/json" + "fmt" + "html/template" + "log" + "net/http" + "strings" + "sync" + "time" + + "github.com/kernelkit/webui/internal/restconf" +) + +type RouteEntry struct { + DestPrefix string + NextHopIface string + NextHopAddr string + Protocol string + Preference string // formatted as "distance/metric" + Uptime string + Active bool +} + +type OSPFNeighbor struct { + RouterID string + Priority int + State string // includes role suffix, e.g. "Full/DR" + Uptime string + Address string + Interface string + Area string +} + +type OSPFIface struct { + Name string + Area string + Type string + State string + Cost int + Priority int + DR string + BDR string + NbrCount int +} + +type routingData struct { + PageData + Routes []RouteEntry + OSPFNeighbors []OSPFNeighbor + OSPFIfaces []OSPFIface + HasOSPF bool + Error string +} + +type RoutingHandler struct { + Template *template.Template + RC *restconf.Client +} + +type ribWrapper struct { + Routing *struct { + Ribs struct { + Rib []ribJSON `json:"rib"` + } `json:"ribs"` + } `json:"ietf-routing:routing"` + Ribs *struct { + Rib []ribJSON `json:"rib"` + } `json:"ietf-routing:ribs"` +} + +type ribJSON struct { + Name string `json:"name"` + Routes ribRoutesJSON `json:"routes"` +} + +type ribRoutesJSON struct { + Route []ribRouteJSON `json:"route"` +} + +type ribRouteJSON struct { + DestPrefix4 string `json:"ietf-ipv4-unicast-routing:destination-prefix"` + DestPrefix6 string `json:"ietf-ipv6-unicast-routing:destination-prefix"` + DestPrefix string `json:"destination-prefix"` + NextHop ribNextHopJSON `json:"next-hop"` + SourceProtocol string `json:"source-protocol"` + Active *json.RawMessage 
`json:"active"` + RoutePreference int `json:"route-preference"` + OspfMetric int `json:"ietf-ospf:metric"` + LastUpdated string `json:"last-updated"` +} + +func (r ribRouteJSON) destinationPrefix() string { + if r.DestPrefix4 != "" { + return r.DestPrefix4 + } + if r.DestPrefix6 != "" { + return r.DestPrefix6 + } + return r.DestPrefix +} + +type ribNextHopJSON struct { + OutgoingInterface string `json:"outgoing-interface"` + NextHopAddress string `json:"next-hop-address"` + NextHopList *ribNextHopList `json:"next-hop-list"` +} + +type ribNextHopList struct { + NextHop []ribNextHopEntry `json:"next-hop"` +} + +type ribNextHopEntry struct { + OutgoingInterface string `json:"outgoing-interface"` + Address4 string `json:"ietf-ipv4-unicast-routing:address"` + Address6 string `json:"ietf-ipv6-unicast-routing:address"` + NextHopAddress string `json:"next-hop-address"` +} + +func (nh ribNextHopJSON) resolve() (iface, addr string) { + if nh.OutgoingInterface != "" || nh.NextHopAddress != "" { + return nh.OutgoingInterface, nh.NextHopAddress + } + if nh.NextHopList != nil && len(nh.NextHopList.NextHop) > 0 { + e := nh.NextHopList.NextHop[0] + addr = e.NextHopAddress + if addr == "" { + addr = e.Address4 + } + if addr == "" { + addr = e.Address6 + } + return e.OutgoingInterface, addr + } + return "", "" +} + +type ospfCPPWrapper struct { + Routing struct { + CPP struct { + Protocol []ospfProtocolJSON `json:"control-plane-protocol"` + } `json:"control-plane-protocols"` + } `json:"ietf-routing:routing"` +} + +type ospfProtocolJSON struct { + Type string `json:"type"` + Name string `json:"name"` + OSPF *ospfJSON `json:"ietf-ospf:ospf"` +} + +type ospfJSON struct { + Areas struct { + Area []ospfAreaJSON `json:"area"` + } `json:"areas"` +} + +type ospfAreaJSON struct { + AreaID string `json:"area-id"` + Interfaces struct { + Interface []ospfIfaceJSON `json:"interface"` + } `json:"interfaces"` +} + +type ospfIfaceJSON struct { + Name string `json:"name"` + State string 
`json:"state"` + Cost int `json:"cost"` + Priority int `json:"priority"` + InterfaceType string `json:"interface-type"` + DRRouterID string `json:"dr-router-id"` + BDRRouterID string `json:"bdr-router-id"` + Neighbors struct { + Neighbor []ospfNeighborJSON `json:"neighbor"` + } `json:"neighbors"` +} + +type ospfNeighborJSON struct { + NeighborRouterID string `json:"neighbor-router-id"` + Address string `json:"address"` + Priority int `json:"priority"` + State string `json:"state"` + Role string `json:"infix-routing:role"` + InterfaceName string `json:"infix-routing:interface-name"` + Uptime uint32 `json:"infix-routing:uptime"` +} + +func (h *RoutingHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := routingData{ + PageData: newPageData(r, "routing", "Routing"), + } + + ctx := context.WithoutCancel(r.Context()) + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + raw, err := h.RC.GetRaw(ctx, "/data/ietf-routing:routing/ribs") + if err != nil { + log.Printf("restconf rib: %v", err) + return + } + var rib ribWrapper + if err := json.Unmarshal(raw, &rib); err != nil { + log.Printf("restconf rib unmarshal: %v", err) + return + } + var ribs []ribJSON + if rib.Routing != nil { + ribs = rib.Routing.Ribs.Rib + } else if rib.Ribs != nil { + ribs = rib.Ribs.Rib + } + for _, rb := range ribs { + for _, route := range rb.Routes.Route { + iface, addr := route.NextHop.resolve() + data.Routes = append(data.Routes, RouteEntry{ + DestPrefix: route.destinationPrefix(), + NextHopIface: iface, + NextHopAddr: addr, + Protocol: shortProto(route.SourceProtocol), + Preference: fmt.Sprintf("%d/%d", route.RoutePreference, route.OspfMetric), + Uptime: uptimeFromTimestamp(route.LastUpdated), + Active: route.Active != nil, + }) + } + } + }() + + go func() { + defer wg.Done() + var cpp ospfCPPWrapper + if err := h.RC.Get(ctx, + "/data/ietf-routing:routing/control-plane-protocols", + &cpp); err != nil { + log.Printf("restconf ospf (ignored): %v", err) + 
return + } + for _, proto := range cpp.Routing.CPP.Protocol { + if proto.OSPF == nil { + continue + } + data.HasOSPF = true + for _, area := range proto.OSPF.Areas.Area { + for _, iface := range area.Interfaces.Interface { + data.OSPFIfaces = append(data.OSPFIfaces, OSPFIface{ + Name: iface.Name, + Area: area.AreaID, + Type: ospfIfaceType(iface.InterfaceType), + State: ospfIfaceState(iface.State), + Cost: iface.Cost, + Priority: iface.Priority, + DR: iface.DRRouterID, + BDR: iface.BDRRouterID, + NbrCount: len(iface.Neighbors.Neighbor), + }) + for _, nbr := range iface.Neighbors.Neighbor { + stateStr := capitalize(nbr.State) + if nbr.Role != "" && nbr.State == "full" { + stateStr = "Full/" + nbr.Role + } + ifaceName := nbr.InterfaceName + if ifaceName == "" { + ifaceName = iface.Name + } + data.OSPFNeighbors = append(data.OSPFNeighbors, OSPFNeighbor{ + RouterID: nbr.NeighborRouterID, + Priority: nbr.Priority, + State: stateStr, + Uptime: formatUptime(nbr.Uptime), + Address: nbr.Address, + Interface: ifaceName, + Area: area.AreaID, + }) + } + } + } + } + }() + + wg.Wait() + + tmplName := "routing.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +func uptimeFromTimestamp(s string) string { + if s == "" { + return "" + } + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return "" + } + return formatUptime(uint32(time.Since(t).Seconds())) +} + +func ospfIfaceType(t string) string { + switch t { + case "broadcast": + return "Broadcast" + case "point-to-point": + return "P2P" + case "point-to-multipoint": + return "P2MP" + case "non-broadcast": + return "NBMA" + case "virtual-link": + return "VLink" + default: + if t != "" { + return capitalize(t) + } + return "" + } +} + +func ospfIfaceState(s string) string { + switch s { + case "dr": + 
return "DR" + case "bdr": + return "BDR" + case "dr-other": + return "DROther" + default: + return capitalize(s) + } +} + +func capitalize(s string) string { + if len(s) == 0 { + return s + } + return strings.ToUpper(s[:1]) + s[1:] +} + +func shortProto(proto string) string { + if i := strings.LastIndex(proto, ":"); i >= 0 { + return proto[i+1:] + } + return proto +} + +func formatUptime(sec uint32) string { + if sec == 0 { + return "" + } + days := sec / 86400 + hours := (sec % 86400) / 3600 + mins := (sec % 3600) / 60 + secs := sec % 60 + switch { + case days > 0: + return fmt.Sprintf("%dd %dh %dm", days, hours, mins) + case hours > 0: + return fmt.Sprintf("%dh %dm", hours, mins) + case mins > 0: + return fmt.Sprintf("%dm %ds", mins, secs) + default: + return fmt.Sprintf("%ds", secs) + } +} diff --git a/src/webui/internal/handlers/routing_test.go b/src/webui/internal/handlers/routing_test.go new file mode 100644 index 000000000..12da62a6a --- /dev/null +++ b/src/webui/internal/handlers/routing_test.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "html/template" + "net/http" + "net/http/httptest" + "testing" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +var minimalRoutingTmpl = template.Must(template.New("routing.html").Parse( + `{{define "routing.html"}}routes={{len .Routes}}{{end}}` + + `{{define "content"}}{{len .Routes}}{{end}}`, +)) + +func TestRoutingOverview_ReturnsOK(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &RoutingHandler{Template: minimalRoutingTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/routing", nil) + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { 
+ t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body") + } +} + +func TestRoutingOverview_HTMXPartial(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &RoutingHandler{Template: minimalRoutingTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/routing", nil) + req.Header.Set("HX-Request", "true") + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body for htmx partial") + } +} diff --git a/src/webui/internal/handlers/services.go b/src/webui/internal/handlers/services.go new file mode 100644 index 000000000..bdeb398ed --- /dev/null +++ b/src/webui/internal/handlers/services.go @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "encoding/json" + "fmt" + "html/template" + "log" + "net/http" + "sort" + "strconv" + + "github.com/kernelkit/webui/internal/restconf" +) + +// ─── RESTCONF JSON types ────────────────────────────────────────────────────── + +type servicesWrapper struct { + SystemState struct { + Services struct { + Service []serviceJSON `json:"service"` + } `json:"infix-system:services"` + } `json:"ietf-system:system-state"` +} + +type serviceJSON struct { + Name string `json:"name"` + PID uint32 `json:"pid"` + Description string `json:"description"` + Status string `json:"status"` + Statistics serviceStatsJSON `json:"statistics"` +} + +// memory-usage and uptime are marshalled as strings by the statd Python layer. 
+type serviceStatsJSON struct { + MemoryUsage json.Number `json:"memory-usage"` + Uptime json.Number `json:"uptime"` + RestartCount uint32 `json:"restart-count"` +} + +func (s serviceStatsJSON) memoryBytes() uint64 { + v, _ := strconv.ParseUint(s.MemoryUsage.String(), 10, 64) + return v +} + +func (s serviceStatsJSON) uptimeSeconds() uint64 { + v, _ := strconv.ParseUint(s.Uptime.String(), 10, 64) + return v +} + +// ─── Template data ──────────────────────────────────────────────────────────── + +type servicesPageData struct { + PageData + Services []serviceEntry + Error string +} + +type serviceEntry struct { + Name string + Status string + StatusClass string // "svc-running", "svc-stopped", "svc-error", "svc-done" + PID string + Memory string + Uptime string + RestartCount uint32 + Description string +} + +// ─── Handler ───────────────────────────────────────────────────────────────── + +// ServicesHandler serves the system services page. +type ServicesHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the services page (GET /services). 
+func (h *ServicesHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := servicesPageData{ + PageData: newPageData(r, "services", "Services"), + } + + var raw servicesWrapper + if err := h.RC.Get(r.Context(), "/data/ietf-system:system-state/infix-system:services", &raw); err != nil { + log.Printf("restconf services: %v", err) + data.Error = "Could not fetch services data" + } else { + for _, svc := range raw.SystemState.Services.Service { + data.Services = append(data.Services, buildServiceEntry(svc)) + } + sort.Slice(data.Services, func(i, j int) bool { + return data.Services[i].Name < data.Services[j].Name + }) + } + + tmplName := "services.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// ─── Helpers ───────────────────────────────────────────────────────────────── + +func buildServiceEntry(svc serviceJSON) serviceEntry { + pid := "" + if svc.PID > 0 { + pid = fmt.Sprintf("%d", svc.PID) + } + + return serviceEntry{ + Name: svc.Name, + Status: svc.Status, + StatusClass: serviceStatusClass(svc.Status), + PID: pid, + Memory: formatServiceMemory(svc.Statistics.memoryBytes()), + Uptime: formatServiceUptime(svc.Statistics.uptimeSeconds()), + RestartCount: svc.Statistics.RestartCount, + Description: svc.Description, + } +} + +func serviceStatusClass(status string) string { + switch status { + case "running", "active", "done": + return "svc-running" // green + case "crashed", "failed", "halted", "missing", "dead", "conflict": + return "svc-error" // red + default: + // stopped, paused, and anything else → yellow + return "svc-stopped" + } +} + +// formatServiceMemory mirrors the CLI's format_memory_bytes. 
+func formatServiceMemory(b uint64) string { + switch { + case b == 0: + return "" + case b < 1024: + return fmt.Sprintf("%dB", b) + case b < 1024*1024: + return fmt.Sprintf("%dK", b/1024) + case b < 1024*1024*1024: + return fmt.Sprintf("%.1fM", float64(b)/float64(1024*1024)) + default: + return fmt.Sprintf("%.1fG", float64(b)/float64(1024*1024*1024)) + } +} + +// formatServiceUptime mirrors the CLI's format_uptime_seconds. +func formatServiceUptime(s uint64) string { + switch { + case s == 0: + return "" + case s < 60: + return fmt.Sprintf("%ds", s) + case s < 3600: + return fmt.Sprintf("%dm", s/60) + case s < 86400: + return fmt.Sprintf("%dh", s/3600) + default: + return fmt.Sprintf("%dd", s/86400) + } +} diff --git a/src/webui/internal/handlers/services_test.go b/src/webui/internal/handlers/services_test.go new file mode 100644 index 000000000..0c6abc6d1 --- /dev/null +++ b/src/webui/internal/handlers/services_test.go @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "html/template" + "net/http" + "net/http/httptest" + "testing" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +var minimalDHCPTmpl = template.Must(template.New("dhcp.html").Parse( + `{{define "dhcp.html"}}dhcp={{.DHCP}}{{end}}` + + `{{define "content"}}{{.DHCP}}{{end}}`, +)) + +var minimalNTPTmpl = template.Must(template.New("ntp.html").Parse( + `{{define "ntp.html"}}ntp={{.NTP}}{{end}}` + + `{{define "content"}}{{.NTP}}{{end}}`, +)) + +var minimalLLDPTmpl = template.Must(template.New("lldp.html").Parse( + `{{define "lldp.html"}}lldp={{.Neighbors}}{{end}}` + + `{{define "content"}}{{.Neighbors}}{{end}}`, +)) + +func testCtx(req *http.Request) *http.Request { + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + return req.WithContext(ctx) +} + +func TestDHCPOverview_ReturnsOK(t 
*testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &DHCPHandler{Template: minimalDHCPTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/dhcp", nil) + req = testCtx(req) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + if w.Body.String() == "" { + t.Error("expected non-empty response body") + } +} + +func TestDHCPOverview_HTMXPartial(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &DHCPHandler{Template: minimalDHCPTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/dhcp", nil) + req.Header.Set("HX-Request", "true") + req = testCtx(req) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + if w.Body.String() == "" { + t.Error("expected non-empty response body for htmx partial") + } +} + +func TestNTPOverview_ReturnsOK(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &NTPHandler{Template: minimalNTPTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/ntp", nil) + req = testCtx(req) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + if w.Body.String() == "" { + t.Error("expected non-empty response body") + } +} + +func TestLLDPOverview_ReturnsOK(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &LLDPHandler{Template: minimalLLDPTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/lldp", nil) + req = testCtx(req) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + if w.Body.String() == "" { + t.Error("expected non-empty response body") + } +} diff 
--git a/src/webui/internal/handlers/system.go b/src/webui/internal/handlers/system.go new file mode 100644 index 000000000..f409b13dd --- /dev/null +++ b/src/webui/internal/handlers/system.go @@ -0,0 +1,672 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "html/template" + "io" + "log" + "net/http" + "os" + "strings" + "time" + + "github.com/kernelkit/webui/internal/restconf" +) + +// SystemHandler provides reboot, config download, and firmware update actions. +type SystemHandler struct { + RC *restconf.Client + Template *template.Template // firmware page template + SysCtrlTmpl *template.Template // system control page template + BackupTmpl *template.Template // backup & restore page template +} + +// DeviceStatus returns 200 if the RESTCONF device is reachable, 502 otherwise. +// Used by the reboot spinner to detect when the device goes down and comes back. +// A short timeout keeps the poll snappy during the reboot window. +func (h *SystemHandler) DeviceStatus(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + var target struct{} + err := h.RC.Get(ctx, "/data/ietf-system:system-state/platform", &target) + if err != nil { + w.WriteHeader(http.StatusBadGateway) + return + } + w.WriteHeader(http.StatusOK) +} + +// Reboot triggers a device restart via the ietf-system:system-restart RPC +// and returns a spinner fragment that polls until the device is back. +func (h *SystemHandler) Reboot(w http.ResponseWriter, r *http.Request) { + err := h.RC.Post(r.Context(), "/operations/ietf-system:system-restart") + if err != nil { + log.Printf("reboot: %v", err) + http.Error(w, "reboot failed", http.StatusBadGateway) + return + } + + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, rebootSpinnerHTML) +} + +const rebootSpinnerHTML = `
+
+

Rebooting…

+

Waiting for device to shut down…

+
` + +type systemControlData struct { + PageData + CurrentDatetime string // device clock, empty when unavailable +} + +// SystemControl renders the System Control maintenance page. +func (h *SystemHandler) SystemControl(w http.ResponseWriter, r *http.Request) { + data := systemControlData{ + PageData: newPageData(r, "system-control", "System Control"), + } + + var clockResp struct { + SystemState struct { + Clock struct { + CurrentDatetime string `json:"current-datetime"` + } `json:"clock"` + } `json:"ietf-system:system-state"` + } + if err := h.RC.Get(r.Context(), "/data/ietf-system:system-state/clock", &clockResp); err == nil { + dt := clockResp.SystemState.Clock.CurrentDatetime + if len(dt) > 19 { + dt = dt[:19] + } + data.CurrentDatetime = strings.Replace(dt, "T", " ", 1) + " UTC" + } + + tmplName := "system-control.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.SysCtrlTmpl.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("system-control template: %v", err) + } +} + +// SetDatetime sets the device clock via the ietf-system:set-current-datetime RPC. +// The form value is a datetime-local string (YYYY-MM-DDTHH:MM) treated as UTC. 
+func (h *SystemHandler) SetDatetime(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + raw := r.FormValue("datetime") // YYYY-MM-DDTHH:MM from datetime-local input + if raw == "" { + http.Error(w, "datetime required", http.StatusBadRequest) + return + } + + body := map[string]map[string]string{ + "ietf-system:input": {"current-datetime": raw + ":00+00:00"}, + } + err := h.RC.PostJSON(r.Context(), "/operations/ietf-system:set-current-datetime", body) + + w.Header().Set("Content-Type", "text/html") + if err != nil { + msg := err.Error() + if strings.Contains(msg, "ntp-active") { + fmt.Fprint(w, `NTP is active — disable NTP first under Configure > System.`) + } else { + fmt.Fprintf(w, `Failed: %s`, template.HTMLEscapeString(msg)) + } + return + } + fmt.Fprint(w, `✓ System time updated`) +} + +// Shutdown triggers a device power-off via the ietf-system:system-shutdown RPC. +func (h *SystemHandler) Shutdown(w http.ResponseWriter, r *http.Request) { + if err := h.RC.Post(r.Context(), "/operations/ietf-system:system-shutdown"); err != nil { + log.Printf("shutdown: %v", err) + http.Error(w, "shutdown failed", http.StatusBadGateway) + return + } + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, shutdownHTML) +} + +// shutdownHTML is the overlay shown after a shutdown RPC succeeds. +// Uses .reboot-overlay so the 60 s hard-cap redirect fires (session is dead anyway), +// but omits #reboot-status so the JS does not update the message to "coming back…". +const shutdownHTML = `
+
+

Shutting down…

+

The device is powering off.

+
` + +// FactoryDefault resets the running datastore to factory defaults without rebooting. +// Uses the infix-factory-default:factory-default RPC. +func (h *SystemHandler) FactoryDefault(w http.ResponseWriter, r *http.Request) { + if err := h.RC.Post(r.Context(), "/operations/infix-factory-default:factory-default"); err != nil { + log.Printf("factory-default: %v", err) + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, `Failed: %s`, + template.HTMLEscapeString(err.Error())) + return + } + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, `✓ Running config reset to factory defaults`) +} + +// FactoryReset wipes all datastores and non-volatile storage, then reboots. +// Uses the ietf-factory-default:factory-reset RPC. +func (h *SystemHandler) FactoryReset(w http.ResponseWriter, r *http.Request) { + if err := h.RC.Post(r.Context(), "/operations/ietf-factory-default:factory-reset"); err != nil { + log.Printf("factory-reset: %v", err) + http.Error(w, "factory reset failed", http.StatusBadGateway) + return + } + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, factoryResetSpinnerHTML) +} + +const factoryResetSpinnerHTML = `
+
+

Factory reset in progress…

+

Wiping configuration and rebooting…

+
` + +// Backup renders the Backup & Restore maintenance page. +func (h *SystemHandler) Backup(w http.ResponseWriter, r *http.Request) { + data := newPageData(r, "backup", "Backup & Restore") + tmplName := "backup.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.BackupTmpl.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("backup template: %v", err) + } +} + +// RestoreConfig accepts a multipart-uploaded JSON config file and applies it. +// target="running" (default): PUT to running so changes take effect immediately; +// sets the cfg-unsaved cookie so the persistent notification prompts a save. +// target="startup": PUT to startup only; reboot required to apply. +func (h *SystemHandler) RestoreConfig(w http.ResponseWriter, r *http.Request) { + if err := r.ParseMultipartForm(10 << 20); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + file, _, err := r.FormFile("config") + if err != nil { + http.Error(w, "config file required", http.StatusBadRequest) + return + } + defer file.Close() + + raw, err := io.ReadAll(file) + if err != nil { + http.Error(w, "read error", http.StatusInternalServerError) + return + } + + var check json.RawMessage + if err := json.Unmarshal(raw, &check); err != nil { + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, `Invalid JSON file.`) + return + } + + target := "running" + if r.FormValue("save-to-startup") == "on" { + target = "startup" + } + + if err := h.RC.PutDatastore(r.Context(), target, check); err != nil { + log.Printf("restore(%s): %v", target, err) + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, `Restore failed: %s`, + template.HTMLEscapeString(err.Error())) + return + } + + if target == "running" { + setCfgUnsaved(w) + w.Header().Set("HX-Refresh", "true") + w.WriteHeader(http.StatusNoContent) + return + } + + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, `✓ Startup configuration restored. 
Reboot to apply.`) +} + +// DownloadConfig serves the startup datastore as a JSON file download. +// Filename includes the device hostname and current UTC date+time. +func (h *SystemHandler) DownloadConfig(w http.ResponseWriter, r *http.Request) { + data, err := h.RC.GetRaw(r.Context(), "/ds/ietf-datastores:startup") + if err != nil { + log.Printf("config download: %v", err) + http.Error(w, "failed to fetch config", http.StatusBadGateway) + return + } + + hostname := "device" + var sysResp struct { + System struct { + Hostname string `json:"hostname"` + } `json:"ietf-system:system"` + } + if err := h.RC.Get(r.Context(), "/data/ietf-system:system", &sysResp); err == nil { + if hn := sysResp.System.Hostname; hn != "" { + hostname = hn + } + } + ts := time.Now().UTC().Format("20060102-1504") + filename := fmt.Sprintf("startup-config-%s-%s.cfg", hostname, ts) + + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename=%q`, filename)) + w.Write(data) +} + +// RESTCONF JSON structures for infix-system:software state. 
+ +type fwSoftwareWrapper struct { + SystemState struct { + Platform struct { + Machine string `json:"machine"` + } `json:"platform"` + Software fwSoftwareState `json:"infix-system:software"` + } `json:"ietf-system:system-state"` +} + +type fwSoftwareState struct { + Compatible string `json:"compatible"` + Variant string `json:"variant"` + Booted string `json:"booted"` + BootOrder []string `json:"boot-order"` + Installer fwInstallerState `json:"installer"` + Slots []fwSlot `json:"slot"` +} + +type fwInstallerState struct { + Operation string `json:"operation"` + Progress fwInstallerProgress `json:"progress"` + LastError string `json:"last-error"` +} + +type fwInstallerProgress struct { + Percentage int `json:"percentage"` + Message string `json:"message"` +} + +type fwSlot struct { + Name string `json:"name"` + BootName string `json:"bootname"` + Class string `json:"class"` + State string `json:"state"` + Bundle fwSlotBundle `json:"bundle"` + Installed struct { + Datetime string `json:"datetime"` + } `json:"installed"` +} + +type fwSlotBundle struct { + Compatible string `json:"compatible"` + Version string `json:"version"` +} + +// Template data for the firmware page. + +type firmwareData struct { + PageData + Machine string + BootOrder []string + Slots []slotEntry + Installer *installerEntry + Installing bool // install was triggered this session; keep card visible during RAUC phase gaps + AutoReboot bool + Error string + Message string +} + +type slotEntry struct { + Name string // bootname: primary, secondary, etc. + State string + Version string + InstallDate string + Booted bool +} + +type installerEntry struct { + Operation string + Percentage int + Message string + LastError string + Active bool + Done bool // idle after an install ran (percentage>0 or error set) + Success bool // Done with no error +} + +// Firmware renders the firmware overview page (GET /firmware). 
+func (h *SystemHandler) Firmware(w http.ResponseWriter, r *http.Request) { + data := firmwareData{ + PageData: newPageData(r, "firmware", "Firmware"), + Message: r.URL.Query().Get("msg"), + Installing: r.URL.Query().Get("installing") == "1", + AutoReboot: r.URL.Query().Get("auto-reboot") == "1", + } + + var sw fwSoftwareWrapper + err := h.RC.Get(r.Context(), "/data/ietf-system:system-state", &sw) + if err != nil { + log.Printf("restconf firmware: %v", err) + data.Error = "Could not fetch firmware status" + } else { + data.Machine = sw.SystemState.Platform.Machine + if data.Machine == "arm64" { + data.Machine = "aarch64" + } + data.BootOrder = sw.SystemState.Software.BootOrder + for _, s := range sw.SystemState.Software.Slots { + if s.Class != "rootfs" { + continue + } + name := s.BootName + if name == "" { + name = s.Name + } + date := s.Installed.Datetime + if len(date) > 19 { + date = date[:19] + } + data.Slots = append(data.Slots, slotEntry{ + Name: name, + State: s.State, + Version: s.Bundle.Version, + InstallDate: date, + Booted: s.BootName == sw.SystemState.Software.Booted, + }) + } + + data.Installer = newInstallerEntry(sw.SystemState.Software.Installer) + // Don't re-open the SSE progress card for an already-finished install + // (e.g. user navigating back to /firmware?installing=1 after reboot). + if data.Installer.Done { + data.Installing = false + } + } + + tmplName := "firmware.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// SetBootOrder calls the infix-system:set-boot-order RPC with the ordered +// boot-order form values submitted by the boot order card. +// On success it refreshes the page so the Software card badges update. 
+func (h *SystemHandler) SetBootOrder(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + order := r.Form["boot-order"] + if len(order) == 0 || len(order) > 3 { + w.Header().Set("Content-Type", "text/html") + fmt.Fprint(w, `Invalid boot order.`) + return + } + body := map[string]any{ + "infix-system:input": map[string]any{ + "boot-order": order, + }, + } + if err := h.RC.PostJSON(r.Context(), "/operations/infix-system:set-boot-order", body); err != nil { + log.Printf("set-boot-order: %v", err) + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, `Failed: %s`, + template.HTMLEscapeString(err.Error())) + return + } + w.Header().Set("HX-Refresh", "true") + w.WriteHeader(http.StatusNoContent) +} + +// FirmwareUpload accepts a .pkg file upload, saves it to a temp file, and +// triggers RAUC installation via the install-bundle RPC with a file:// URL. +// The response is a plain-text redirect target which the JS XHR handler follows. 
+func (h *SystemHandler) FirmwareUpload(w http.ResponseWriter, r *http.Request) { + rc := http.NewResponseController(w) + _ = rc.SetReadDeadline(time.Now().Add(10 * time.Minute)) + + if err := r.ParseMultipartForm(1 << 30); err != nil { + http.Error(w, "bad request: "+err.Error(), http.StatusBadRequest) + return + } + + file, _, err := r.FormFile("pkg") + if err != nil { + http.Error(w, "pkg file required", http.StatusBadRequest) + return + } + defer file.Close() + + tmp, err := os.CreateTemp("", "webui-fw-*.pkg") + if err != nil { + log.Printf("firmware upload: create temp: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + tmpPath := tmp.Name() + + if _, err := io.Copy(tmp, file); err != nil { + tmp.Close() + os.Remove(tmpPath) + log.Printf("firmware upload: write: %v", err) + http.Error(w, "failed to save firmware", http.StatusInternalServerError) + return + } + tmp.Close() + + body := map[string]map[string]string{ + "infix-system:input": {"url": "file://" + tmpPath}, + } + if err := h.RC.PostJSON(r.Context(), "/operations/infix-system:install-bundle", body); err != nil { + os.Remove(tmpPath) + log.Printf("firmware upload: install-bundle: %v", err) + http.Error(w, "install failed: "+err.Error(), http.StatusBadGateway) + return + } + + creds := restconf.CredentialsFromContext(r.Context()) + go h.cleanupFirmwareTemp(creds, tmpPath) + + target := "/firmware?installing=1" + if r.FormValue("auto-reboot") == "1" { + target += "&auto-reboot=1" + } + w.Header().Set("Content-Type", "text/plain") + fmt.Fprint(w, target) +} + +// cleanupFirmwareTemp polls the installer state and removes the temp file once +// RAUC goes idle. Falls back to deletion after 30 minutes in any case. 
+func (h *SystemHandler) cleanupFirmwareTemp(creds restconf.Credentials, path string) { + ctx, cancel := context.WithTimeout( + restconf.ContextWithCredentials(context.Background(), creds), + 30*time.Minute, + ) + defer cancel() + defer os.Remove(path) + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + var sw fwSoftwareWrapper + if err := h.RC.Get(ctx, "/data/ietf-system:system-state", &sw); err != nil { + continue + } + op := sw.SystemState.Software.Installer.Operation + if op == "" || op == "idle" { + return + } + } + } +} + +// FirmwareInstall triggers a firmware install via the install-bundle RPC (POST /firmware/install). +func (h *SystemHandler) FirmwareInstall(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + + url := r.FormValue("url") + if url == "" { + http.Error(w, "url is required", http.StatusBadRequest) + return + } + + body := map[string]map[string]string{ + "infix-system:input": { + "url": url, + }, + } + + err := h.RC.PostJSON(r.Context(), "/operations/infix-system:install-bundle", body) + if err != nil { + log.Printf("firmware install: %v", err) + w.Header().Set("HX-Redirect", "/firmware?msg=Install+failed:+"+err.Error()) + w.WriteHeader(http.StatusNoContent) + return + } + + target := "/firmware?installing=1" + if r.FormValue("auto-reboot") == "1" { + target += "&auto-reboot=1" + } + w.Header().Set("HX-Redirect", target) + w.WriteHeader(http.StatusNoContent) +} + +// FirmwareProgress streams installer status as SSE so the Go server does the +// polling and the browser just receives rendered HTML fragments. 
+// GET /firmware/progress +func (h *SystemHandler) FirmwareProgress(w http.ResponseWriter, r *http.Request) { + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "streaming not supported", http.StatusInternalServerError) + return + } + + autoReboot := r.URL.Query().Get("auto-reboot") == "1" + + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("X-Accel-Buffering", "no") + w.WriteHeader(http.StatusOK) + flusher.Flush() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + var lastKey string // change-detection: suppress redundant SSE frames + + for { + select { + case <-r.Context().Done(): + return + case <-ticker.C: + data := h.installerSnapshot(r, autoReboot) + + // Build a cheap key for change detection; skip frames with identical state. + var key string + if data.Installer != nil { + key = fmt.Sprintf("%s|%d|%s|%s", data.Installer.Operation, data.Installer.Percentage, data.Installer.Message, data.Installer.LastError) + } + if key == lastKey && key != "" { + continue + } + lastKey = key + + var buf bytes.Buffer + if err := h.Template.ExecuteTemplate(&buf, "fw-progress-body", data); err != nil { + log.Printf("firmware progress template: %v", err) + continue + } + + // SSE data must not contain raw newlines; collapse to spaces. + line := strings.ReplaceAll(buf.String(), "\n", " ") + + eventName := "progress" + if data.Installer != nil && data.Installer.Done { + if autoReboot && data.Installer.Success { + eventName = "reboot" + } else { + eventName = "done" + } + } + + fmt.Fprintf(w, "event: %s\ndata: %s\n\n", eventName, line) + flusher.Flush() + + if data.Installer != nil && data.Installer.Done { + return + } + } + } +} + +// installerSnapshot fetches the current installer state from RESTCONF and +// builds the template data for the fw-progress-body fragment. 
+func (h *SystemHandler) installerSnapshot(r *http.Request, autoReboot bool) firmwareProgressData { + data := firmwareProgressData{ + AutoReboot: autoReboot, + } + + var sw fwSoftwareWrapper + if err := h.RC.Get(r.Context(), "/data/ietf-system:system-state", &sw); err != nil { + // RESTCONF temporarily unavailable during upgrade — leave Installer nil + // so the template renders an indeterminate "Installing…" state. + log.Printf("firmware progress poll: %v", err) + return data + } + + data.Installer = newInstallerEntry(sw.SystemState.Software.Installer) + return data +} + +// newInstallerEntry converts a raw YANG installer state to the template-facing struct. +func newInstallerEntry(inst fwInstallerState) *installerEntry { + idle := inst.Operation == "" || inst.Operation == "idle" + done := idle && (inst.Progress.Percentage > 0 || inst.LastError != "") + return &installerEntry{ + Operation: inst.Operation, + Percentage: inst.Progress.Percentage, + Message: inst.Progress.Message, + LastError: inst.LastError, + Active: !idle, + Done: done, + Success: done && inst.LastError == "", + } +} + +// firmwareProgressData is the template data for the fw-progress-body fragment. +type firmwareProgressData struct { + AutoReboot bool + Installer *installerEntry +} diff --git a/src/webui/internal/handlers/vpn.go b/src/webui/internal/handlers/vpn.go new file mode 100644 index 000000000..9edbc9e7c --- /dev/null +++ b/src/webui/internal/handlers/vpn.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "fmt" + "html/template" + "log" + "net/http" + "sync" + "time" + + "github.com/kernelkit/webui/internal/restconf" +) + +// wgConfigJSON is a local extension for fetching WireGuard configuration +// fields (like listen-port) that are not in the shared wireGuardJSON type. +type wgConfigJSON struct { + ListenPort int `json:"listen-port"` +} + +// wgIfaceConfigWrapper is used to fetch per-interface WireGuard config. 
+type wgIfaceConfigWrapper struct { + WireGuard *wgConfigJSON `json:"infix-interfaces:wireguard"` +} + +// WGPeer holds display-ready data for a single WireGuard peer. +type WGPeer struct { + PublicKey string + PublicKeyShort string // first 8 chars + "..." + Endpoint string // "IP:port" or empty + Status string // "up" or "down" + LastHandshake string // relative time, e.g. "2 min ago" or "never" + RxBytes string // human-readable + TxBytes string // human-readable +} + +// WGTunnel holds display-ready data for a single WireGuard interface. +type WGTunnel struct { + Name string + ListenPort int + Addresses []string // IP addresses from ietf-ip + OperStatus string + Peers []WGPeer +} + +// vpnData is the template data struct for the VPN page. +type vpnData struct { + PageData + Tunnels []WGTunnel + Error string +} + +// VPNHandler serves the VPN/WireGuard status page. +type VPNHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the VPN page (GET /vpn). +func (h *VPNHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := vpnData{ + PageData: newPageData(r, "vpn", "VPN"), + } + + // Detach from the request context so that RESTCONF calls survive + // browser connection resets (common during login redirects). + ctx := context.WithoutCancel(r.Context()) + + var ifaces interfacesWrapper + if err := h.RC.Get(ctx, "/data/ietf-interfaces:interfaces", &ifaces); err != nil { + log.Printf("restconf interfaces (vpn): %v", err) + data.Error = "Could not fetch interface information" + h.render(w, r, data) + return + } + + // Filter for WireGuard interfaces only. + var wgIfaces []ifaceJSON + for _, iface := range ifaces.Interfaces.Interface { + if iface.Type == "infix-if-type:wireguard" { + wgIfaces = append(wgIfaces, iface) + } + } + + if len(wgIfaces) == 0 { + h.render(w, r, data) + return + } + + // Fetch per-interface WireGuard config concurrently. 
+ tunnels := make([]WGTunnel, len(wgIfaces)) + var wg sync.WaitGroup + + for i, iface := range wgIfaces { + wg.Add(1) + go func(idx int, iface ifaceJSON) { + defer wg.Done() + tunnels[idx] = buildWGTunnel(ctx, h.RC, iface) + }(i, iface) + } + + wg.Wait() + data.Tunnels = tunnels + h.render(w, r, data) +} + +// buildWGTunnel constructs a WGTunnel from an interface and optional config fetch. +func buildWGTunnel(ctx context.Context, rc *restconf.Client, iface ifaceJSON) WGTunnel { + tunnel := WGTunnel{ + Name: iface.Name, + OperStatus: iface.OperStatus, + } + + // Collect IP addresses. + if iface.IPv4 != nil { + for _, a := range iface.IPv4.Address { + tunnel.Addresses = append(tunnel.Addresses, fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength))) + } + } + if iface.IPv6 != nil { + for _, a := range iface.IPv6.Address { + tunnel.Addresses = append(tunnel.Addresses, fmt.Sprintf("%s/%d", a.IP, int(a.PrefixLength))) + } + } + + // Fetch ListenPort from config endpoint (separate from oper-state). + var cfgWrap wgIfaceConfigWrapper + path := fmt.Sprintf("/data/ietf-interfaces:interfaces/interface=%s/infix-interfaces:wireguard", iface.Name) + if err := rc.Get(ctx, path, &cfgWrap); err == nil && cfgWrap.WireGuard != nil { + tunnel.ListenPort = cfgWrap.WireGuard.ListenPort + } + + // Build peers from embedded peer-status. 
+ if iface.WireGuard != nil && iface.WireGuard.PeerStatus != nil { + for _, p := range iface.WireGuard.PeerStatus.Peer { + peer := WGPeer{ + PublicKey: p.PublicKey, + PublicKeyShort: shortKey(p.PublicKey), + Status: p.ConnectionStatus, + LastHandshake: relativeTime(p.LatestHandshake), + } + if p.EndpointAddress != "" { + peer.Endpoint = fmt.Sprintf("%s:%d", p.EndpointAddress, p.EndpointPort) + } + if p.Transfer != nil { + peer.RxBytes = humanBytes(int64(p.Transfer.RxBytes)) + peer.TxBytes = humanBytes(int64(p.Transfer.TxBytes)) + } else { + peer.RxBytes = "0 B" + peer.TxBytes = "0 B" + } + tunnel.Peers = append(tunnel.Peers, peer) + } + } + + return tunnel +} + +// shortKey returns the first 8 characters of a WireGuard public key followed by "...". +func shortKey(key string) string { + if len(key) <= 8 { + return key + } + return key[:8] + "..." +} + +// relativeTime converts an RFC3339 timestamp to a human-readable relative time string. +// Returns "never" if the timestamp is empty or cannot be parsed. +func relativeTime(ts string) string { + if ts == "" { + return "never" + } + t, err := time.Parse(time.RFC3339, ts) + if err != nil { + // Try RFC3339Nano as fallback. + t, err = time.Parse(time.RFC3339Nano, ts) + if err != nil { + return "never" + } + } + if t.IsZero() { + return "never" + } + + d := time.Since(t) + switch { + case d < 0: + return "just now" + case d < time.Minute: + return fmt.Sprintf("%d sec ago", int(d.Seconds())) + case d < time.Hour: + mins := int(d.Minutes()) + if mins == 1 { + return "1 min ago" + } + return fmt.Sprintf("%d min ago", mins) + case d < 24*time.Hour: + hours := int(d.Hours()) + if hours == 1 { + return "1 hour ago" + } + return fmt.Sprintf("%d hours ago", hours) + default: + days := int(d.Hours()) / 24 + if days == 1 { + return "1 day ago" + } + return fmt.Sprintf("%d days ago", days) + } +} + +// render executes the correct template based on whether it's an htmx request. 
+func (h *VPNHandler) render(w http.ResponseWriter, r *http.Request, data vpnData) { + tmplName := "vpn.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("template error (vpn): %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} diff --git a/src/webui/internal/handlers/vpn_test.go b/src/webui/internal/handlers/vpn_test.go new file mode 100644 index 000000000..62ad57af1 --- /dev/null +++ b/src/webui/internal/handlers/vpn_test.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "html/template" + "net/http" + "net/http/httptest" + "testing" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +var minimalVPNTmpl = template.Must(template.New("vpn.html").Parse( + `{{define "vpn.html"}}tunnels={{len .Tunnels}}{{end}}` + + `{{define "content"}}{{len .Tunnels}}{{end}}`, +)) + +func TestVPNOverview_ReturnsOK(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &VPNHandler{Template: minimalVPNTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/vpn", nil) + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body") + } +} + +func TestVPNOverview_HTMXPartial(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &VPNHandler{Template: minimalVPNTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/vpn", nil) + req.Header.Set("HX-Request", "true") + ctx := 
restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body for htmx partial") + } +} diff --git a/src/webui/internal/handlers/wifi.go b/src/webui/internal/handlers/wifi.go new file mode 100644 index 000000000..4a59c0667 --- /dev/null +++ b/src/webui/internal/handlers/wifi.go @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "context" + "fmt" + "html/template" + "log" + "net/http" + "sync" + + "github.com/kernelkit/webui/internal/restconf" +) + +// wifiRadioHWJSON extends the hardware component wifi-radio container with +// operational fields from infix-hardware YANG that are not in wifiRadioJSON. +// wifiRadioJSON (defined in interfaces.go) only covers survey data; this +// struct captures the full operational state returned by RESTCONF. +// wifiMaxIfJSON maps the max-interfaces container from infix-hardware YANG. 
+type wifiMaxIfJSON struct { + AP int `json:"ap"` + Station int `json:"station"` + Monitor int `json:"monitor"` +} + +type wifiRadioHWJSON struct { + Channel interface{} `json:"channel"` // uint16 or "auto" + Band string `json:"band"` + Frequency int `json:"frequency"` // MHz, operational + Noise int `json:"noise"` // dBm, operational + Driver string `json:"driver"` + Bands []wifiBandJSON `json:"bands"` + MaxInterfaces *wifiMaxIfJSON `json:"max-interfaces"` + Survey *wifiSurveyJSON `json:"survey"` +} + +type wifiBandJSON struct { + Band string `json:"band"` + Name string `json:"name"` + HTCapable bool `json:"ht-capable"` + VHTCapable bool `json:"vht-capable"` + HECapable bool `json:"he-capable"` +} + +// hwComponentWiFiJSON is a minimal hardware component used for the wifi page. +// We reuse hardwareWrapper but need to decode wifi-radio with the richer struct. +type hwComponentWiFiJSON struct { + Name string `json:"name"` + Class string `json:"class"` + MfgName string `json:"mfg-name"` + WiFiRadio *wifiRadioHWJSON `json:"infix-hardware:wifi-radio"` +} + +type hardwareWiFiWrapper struct { + Hardware struct { + Component []hwComponentWiFiJSON `json:"component"` + } `json:"ietf-hardware:hardware"` +} + +// WiFiRadio is the template data for a single physical radio. +type WiFiRadio struct { + Name string + Channel string + Band string + Frequency int + Noise int + Driver string + Manufacturer string + Standards string + MaxAP string + HTCapable bool + VHTCapable bool + HECapable bool + Bands []WiFiBand + SurveySVG template.HTML + Interfaces []WiFiInterface +} + +type WiFiBand struct { + Band string + Name string + HTCapable bool + VHTCapable bool + HECapable bool +} + +// ChannelSurvey holds processed survey data for one channel. +type ChannelSurvey struct { + Frequency int + Channel int + InUse bool + Noise int + ActiveTime int64 + BusyTime int64 + UtilPct int // BusyTime/ActiveTime * 100 +} + +// WiFiInterface is the template data for a virtual WiFi interface. 
+type WiFiInterface struct { + Name string + Mode string // "ap" or "station" + SSID string + OperStatus string + StatusUp bool + // AP mode + APClients []WiFiClient + // Station mode + Signal string + SignalCSS string + RxSpeed string + TxSpeed string + ScanResults []WiFiScan +} + +// WiFiClient is the template data for a connected station client. +type WiFiClient struct { + MAC string + Signal string + SignalCSS string + ConnTime string + RxBytes string + TxBytes string + RxSpeed string + TxSpeed string +} + +// WiFiScan is the template data for a scan result entry. +type WiFiScan struct { + SSID string + BSSID string + Signal string + SignalCSS string + Channel string + Encryption string +} + +// wifiData is the top-level template data for the WiFi page. +type wifiData struct { + PageData + Radios []WiFiRadio + Error string +} + +// WiFiHandler serves the WiFi status page. +type WiFiHandler struct { + Template *template.Template + RC *restconf.Client +} + +// Overview renders the WiFi page (GET /wifi). +func (h *WiFiHandler) Overview(w http.ResponseWriter, r *http.Request) { + data := wifiData{ + PageData: newPageData(r, "wifi", "WiFi"), + } + + // Detach from the request context so that RESTCONF calls survive + // browser connection resets. 
+ ctx := context.WithoutCancel(r.Context()) + + var ( + hw hardwareWiFiWrapper + ifaces interfacesWrapper + hwErr, ifErr error + wg sync.WaitGroup + ) + + wg.Add(2) + go func() { + defer wg.Done() + hwErr = h.RC.Get(ctx, "/data/ietf-hardware:hardware", &hw) + }() + go func() { + defer wg.Done() + ifErr = h.RC.Get(ctx, "/data/ietf-interfaces:interfaces", &ifaces) + }() + wg.Wait() + + if hwErr != nil { + log.Printf("wifi: restconf hardware: %v", hwErr) + data.Error = "Could not fetch hardware information" + } + if ifErr != nil { + log.Printf("wifi: restconf interfaces: %v", ifErr) + if data.Error == "" { + data.Error = "Could not fetch interface information" + } + } + + if hwErr == nil && ifErr == nil { + data.Radios = buildWiFiRadios(hw.Hardware.Component, ifaces.Interfaces.Interface) + } + + tmplName := "wifi.html" + if r.Header.Get("HX-Request") == "true" { + tmplName = "content" + } + if err := h.Template.ExecuteTemplate(w, tmplName, data); err != nil { + log.Printf("wifi: template error: %v", err) + http.Error(w, "Internal server error", http.StatusInternalServerError) + } +} + +// buildWiFiRadios assembles the WiFiRadio slice from hardware components +// and interface data, matching interfaces to their radio by name. +func buildWiFiRadios(components []hwComponentWiFiJSON, ifaces []ifaceJSON) []WiFiRadio { + var radios []WiFiRadio + + for _, c := range components { + if c.WiFiRadio == nil { + continue + } + r := c.WiFiRadio + + radio := WiFiRadio{ + Name: c.Name, + Band: r.Band, + Frequency: r.Frequency, + Noise: r.Noise, + Driver: r.Driver, + Channel: wifiChannelString(r.Channel), + Manufacturer: c.MfgName, + } + + // Capability flags: check per-band capabilities; if any band supports + // HT/VHT/HE, mark the radio as capable. 
+ var bandNames []string + for _, b := range r.Bands { + name := b.Name + if name == "" { + name = b.Band + } + radio.Bands = append(radio.Bands, WiFiBand{ + Band: b.Band, + Name: name, + HTCapable: b.HTCapable, + VHTCapable: b.VHTCapable, + HECapable: b.HECapable, + }) + if b.Name != "" { + bandNames = append(bandNames, b.Name) + } + if b.HTCapable { + radio.HTCapable = true + } + if b.VHTCapable { + radio.VHTCapable = true + } + if b.HECapable { + radio.HECapable = true + } + } + + // Derive standards string from aggregated capabilities (matches CLI). + var standards []string + if radio.HTCapable { + standards = append(standards, "11n") + } + if radio.VHTCapable { + standards = append(standards, "11ac") + } + if radio.HECapable { + standards = append(standards, "11ax") + } + if len(standards) > 0 { + radio.Standards = joinStrings(standards, "/") + } + + // Max AP count from max-interfaces container. + if r.MaxInterfaces != nil && r.MaxInterfaces.AP > 0 { + radio.MaxAP = fmt.Sprintf("%d", r.MaxInterfaces.AP) + } + + // Generate channel survey SVG if survey data exists. + if r.Survey != nil && len(r.Survey.Channel) > 0 { + radio.SurveySVG = renderSurveySVG(r.Survey.Channel) + } + + // Attach wifi interfaces that reference this radio. + radio.Interfaces = buildWiFiInterfaces(c.Name, ifaces) + + radios = append(radios, radio) + } + + return radios +} + +// buildWiFiInterfaces returns the WiFiInterface entries for all virtual +// interfaces that reference the given radio name. 
+func buildWiFiInterfaces(radioName string, ifaces []ifaceJSON) []WiFiInterface {
+	var result []WiFiInterface
+
+	for _, iface := range ifaces {
+		// Only virtual interfaces bound to this physical radio are included.
+		if iface.WiFi == nil || iface.WiFi.Radio != radioName {
+			continue
+		}
+
+		wi := WiFiInterface{
+			Name:       iface.Name,
+			OperStatus: iface.OperStatus,
+			StatusUp:   iface.OperStatus == "up",
+		}
+
+		// AccessPoint takes precedence when both containers are present
+		// (checked first); an interface with neither gets an empty Mode.
+		if ap := iface.WiFi.AccessPoint; ap != nil {
+			wi.Mode = "ap"
+			wi.SSID = ap.SSID
+			for _, s := range ap.Stations.Station {
+				wi.APClients = append(wi.APClients, buildWiFiClient(s))
+			}
+		} else if st := iface.WiFi.Station; st != nil {
+			wi.Mode = "station"
+			wi.SSID = st.SSID
+			// Signal strength is optional in the model; display fields are
+			// only populated when a measurement is present.
+			if st.SignalStrength != nil {
+				sig := *st.SignalStrength
+				wi.Signal = fmt.Sprintf("%d dBm", sig)
+				wi.SignalCSS = wifiSignalCSS(sig)
+			}
+			// Speeds appear to be reported in units of 100 kbit/s
+			// (value/10 → Mbps) — TODO confirm against the YANG model.
+			if st.RxSpeed > 0 {
+				wi.RxSpeed = fmt.Sprintf("%.1f Mbps", float64(st.RxSpeed)/10)
+			}
+			if st.TxSpeed > 0 {
+				wi.TxSpeed = fmt.Sprintf("%.1f Mbps", float64(st.TxSpeed)/10)
+			}
+			for _, sr := range st.ScanResults {
+				wi.ScanResults = append(wi.ScanResults, buildWiFiScan(sr))
+			}
+		}
+
+		result = append(result, wi)
+	}
+
+	return result
+}
+
+// buildWiFiClient converts a wifiStaJSON to a WiFiClient template entry.
+func buildWiFiClient(s wifiStaJSON) WiFiClient {
+	c := WiFiClient{
+		MAC:      s.MACAddress,
+		ConnTime: formatDuration(int64(s.ConnectedTime)),
+		RxBytes:  humanBytes(int64(s.RxBytes)),
+		TxBytes:  humanBytes(int64(s.TxBytes)),
+		// NOTE(review): unlike the station path above, Rx/Tx speeds are
+		// formatted even when zero ("0.0 Mbps") — confirm intentional.
+		RxSpeed:  fmt.Sprintf("%.1f Mbps", float64(s.RxSpeed)/10),
+		TxSpeed:  fmt.Sprintf("%.1f Mbps", float64(s.TxSpeed)/10),
+	}
+	if s.SignalStrength != nil {
+		sig := *s.SignalStrength
+		c.Signal = fmt.Sprintf("%d dBm", sig)
+		c.SignalCSS = wifiSignalCSS(sig)
+	}
+	return c
+}
+
+// buildWiFiScan converts a wifiScanResultJSON to a WiFiScan template entry.
+func buildWiFiScan(sr wifiScanResultJSON) WiFiScan { + enc := "Open" + if len(sr.Encryption) > 0 { + parts := make([]string, 0, len(sr.Encryption)) + for _, e := range sr.Encryption { + parts = append(parts, e) + } + enc = joinStrings(parts, ", ") + } + s := WiFiScan{ + SSID: sr.SSID, + BSSID: sr.BSSID, + Channel: fmt.Sprintf("%d", sr.Channel), + Encryption: enc, + } + if sr.SignalStrength != nil { + sig := *sr.SignalStrength + s.Signal = fmt.Sprintf("%d dBm", sig) + s.SignalCSS = wifiSignalCSS(sig) + } + return s +} + +// wifiSignalCSS returns a CSS class based on signal strength in dBm. +func wifiSignalCSS(sig int) string { + switch { + case sig >= -50: + return "signal-excellent" + case sig >= -60: + return "signal-good" + case sig >= -70: + return "signal-ok" + default: + return "signal-poor" + } +} + +// wifiChannelString converts the YANG channel union value to a display string. +// The JSON may arrive as a float64 (number) or string ("auto"). +func wifiChannelString(v interface{}) string { + if v == nil { + return "" + } + switch val := v.(type) { + case string: + return val + case float64: + return fmt.Sprintf("%d", int(val)) + case int: + return fmt.Sprintf("%d", val) + default: + return fmt.Sprintf("%v", val) + } +} + +// joinStrings joins a slice of strings with a separator. 
+func joinStrings(parts []string, sep string) string { + result := "" + for i, p := range parts { + if i > 0 { + result += sep + } + result += p + } + return result +} diff --git a/src/webui/internal/handlers/wifi_test.go b/src/webui/internal/handlers/wifi_test.go new file mode 100644 index 000000000..02947a4a4 --- /dev/null +++ b/src/webui/internal/handlers/wifi_test.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "html/template" + "net/http" + "net/http/httptest" + "testing" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/security" +) + +var minimalWiFiTmpl = template.Must(template.New("wifi.html").Parse( + `{{define "wifi.html"}}radios={{len .Radios}}{{end}}` + + `{{define "content"}}{{len .Radios}}{{end}}`, +)) + +func TestWiFiOverview_ReturnsOK(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &WiFiHandler{Template: minimalWiFiTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/wifi", nil) + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body") + } +} + +func TestWiFiOverview_HTMXPartial(t *testing.T) { + rc := restconf.NewClient("http://127.0.0.1:19999/restconf", false) + h := &WiFiHandler{Template: minimalWiFiTmpl, RC: rc} + + req := httptest.NewRequest(http.MethodGet, "/wifi", nil) + req.Header.Set("HX-Request", "true") + ctx := restconf.ContextWithCredentials(req.Context(), restconf.Credentials{ + Username: "admin", + Password: "admin", + }) + ctx = security.WithToken(ctx, "test-csrf-token") + req = req.WithContext(ctx) + + w := 
httptest.NewRecorder() + h.Overview(w, req) + + if w.Code != http.StatusOK { + t.Errorf("want 200 got %d; body: %s", w.Code, w.Body.String()) + } + + body := w.Body.String() + if body == "" { + t.Error("expected non-empty response body for htmx partial") + } +} diff --git a/src/webui/internal/handlers/yang_data.go b/src/webui/internal/handlers/yang_data.go new file mode 100644 index 000000000..f7b9392b1 --- /dev/null +++ b/src/webui/internal/handlers/yang_data.go @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +const candidateDS = "/ds/ietf-datastores:candidate" + +// DataHandler serves GET /api/data — raw RESTCONF JSON for a path. +// PUT and DELETE are handled by TreeHandler to share template rendering. +type DataHandler struct { + RC restconf.Fetcher + Schema *schema.Cache +} + +// Get serves GET /api/data?path=... +// Returns the raw RESTCONF JSON subtree from candidate (falls back to running). +func (h *DataHandler) Get(w http.ResponseWriter, r *http.Request) { + path := r.URL.Query().Get("path") + if path == "" { + http.Error(w, "path required", http.StatusBadRequest) + return + } + + data, err := h.RC.GetRaw(r.Context(), candidateDS+path) + if err != nil { + data, err = h.RC.GetRaw(r.Context(), "/data"+path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + } + + w.Header().Set("Content-Type", "application/json") + w.Write(data) +} + +// navigateToNode traverses a RESTCONF JSON response using path segments to +// reach the target node. The server always wraps responses in the full +// module-root hierarchy ({"module:root": {"list": [{...}]}}), so we walk each +// segment; for list segments with a key predicate (e.g. "interface=eth0") we +// enter the array and take the first (only) element. 
+// Returns nil when any segment cannot be found.
+func navigateToNode(data []byte, path string) json.RawMessage {
+	segs := strings.Split(strings.TrimPrefix(path, "/"), "/")
+	current := json.RawMessage(data)
+	for _, seg := range segs {
+		// A segment with '[' or '=' carries a list-key predicate; the data
+		// under it is a JSON array rather than an object.
+		hasPred := strings.ContainsAny(seg, "[=")
+		_, localName := splitModPrefix(stripModPredicate(seg))
+		var obj map[string]json.RawMessage
+		if err := json.Unmarshal(current, &obj); err != nil {
+			return nil
+		}
+		// Match by local (unprefixed) name so both "interface" and
+		// "ietf-interfaces:interface" keys resolve the same segment.
+		var found json.RawMessage
+		for k, v := range obj {
+			_, local := splitModPrefix(k)
+			if local == localName {
+				found = v
+				break
+			}
+		}
+		if found == nil {
+			return nil
+		}
+		if hasPred {
+			var arr []json.RawMessage
+			if err := json.Unmarshal(found, &arr); err != nil || len(arr) == 0 {
+				return nil
+			}
+			// Extract key value from predicate: "name=eth0" → "eth0".
+			// NOTE(review): the value is used as-is — percent-escaped
+			// predicates (url.PathEscape) are not unescaped here; confirm
+			// callers pass raw key values.
+			keyVal := ""
+			if i := strings.IndexByte(seg, '='); i >= 0 {
+				keyVal = seg[i+1:]
+			}
+			matched := arr[0] // fallback: first element
+			if keyVal != "" {
+				// NOTE(review): keyVal is compared against *any* string
+				// field of each row, not specifically the declared key
+				// leaf — a non-key leaf equal to keyVal could match the
+				// wrong instance; confirm acceptable for this data.
+				for _, elem := range arr {
+					var row map[string]json.RawMessage
+					if json.Unmarshal(elem, &row) != nil {
+						continue
+					}
+					for _, v := range row {
+						var s string
+						if json.Unmarshal(v, &s) == nil && s == keyVal {
+							matched = elem
+							goto nextSeg
+						}
+					}
+				}
+			}
+			// Jump target for the double-loop match above; falls through to
+			// use whichever element was matched (or the first as fallback).
+		nextSeg:
+			found = matched
+		}
+		current = found
+	}
+	return current
+}
+
+// flattenNodeValues extracts direct scalar leaf values from a JSON object,
+// returning a map of bare-name → string. Nested objects and arrays (which
+// represent sub-containers and sub-lists) are silently skipped.
+func flattenNodeValues(raw json.RawMessage) map[string]string {
+	var obj map[string]json.RawMessage
+	if err := json.Unmarshal(raw, &obj); err != nil {
+		return nil
+	}
+	result := make(map[string]string, len(obj))
+	for k, v := range obj {
+		if len(v) > 0 && v[0] == '[' {
+			// YANG empty type is encoded as [null] in JSON (RFC 7951 §6.9).
+			// Represent presence as "true"; skip real arrays (sub-lists).
+			trimmed := bytes.TrimSpace(v)
+			if bytes.Equal(trimmed, []byte("[null]")) {
+				_, local := splitModPrefix(k)
+				result[local] = "true"
+			}
+			continue
+		}
+		if len(v) > 0 && v[0] == '{' {
+			continue // sub-container — not a direct leaf
+		}
+		// Strip any module prefix so templates can look leaves up by bare name.
+		_, local := splitModPrefix(k)
+		result[local] = extractScalar(v)
+	}
+	return result
+}
+
+// stripModPredicate removes both a module prefix and a RESTCONF key predicate
+// from a path segment, e.g. "ietf-interfaces:interface=eth0" → "interface".
+func stripModPredicate(seg string) string {
+	// Predicate markers first ('[' or '='), then delegate prefix stripping.
+	if i := strings.IndexByte(seg, '['); i >= 0 {
+		seg = seg[:i]
+	}
+	if i := strings.IndexByte(seg, '='); i >= 0 {
+		seg = seg[:i]
+	}
+	return seg
+}
+
+// extractLeafValue unwraps the single-key RESTCONF JSON envelope that wraps a
+// leaf value: {"module:name": } → string representation of .
+// Recursively unwraps single-key nested objects so that a response like
+// {"module:parent": {"certificate": "gencert"}} → "gencert".
+// NOTE(review): if the envelope unexpectedly carries more than one key, Go map
+// iteration returns an arbitrary entry — relies on the single-key contract.
+func extractLeafValue(data []byte) string {
+	var m map[string]json.RawMessage
+	if err := json.Unmarshal(data, &m); err != nil {
+		return ""
+	}
+	for _, raw := range m {
+		return extractScalar(raw)
+	}
+	return ""
+}
+
+// extractScalar converts a JSON value to a display string.
+// Single-key objects are recursively unwrapped (RESTCONF sometimes wraps leaf
+// values in a choice/case or container envelope).
+func extractScalar(raw json.RawMessage) string {
+	// Plain string.
+	var s string
+	if json.Unmarshal(raw, &s) == nil {
+		return s
+	}
+	// Bool or number (unquoted JSON token).
+	v := string(raw)
+	if v == "true" || v == "false" || (len(v) > 0 && (v[0] == '-' || (v[0] >= '0' && v[0] <= '9'))) {
+		return v
+	}
+	// Single-key object: unwrap one level and recurse.
+ var nested map[string]json.RawMessage + if err := json.Unmarshal(raw, &nested); err == nil && len(nested) == 1 { + for _, inner := range nested { + return extractScalar(inner) + } + } + // Fallback: return the raw token (strips outer quotes if present). + if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' { + return v[1 : len(v)-1] + } + return v +} + +// coerceLeafValue converts the raw form string to the JSON type that RESTCONF +// expects for the leaf based on the schema Node type. +func coerceLeafValue(raw string, node *schema.Node) any { + if node == nil || node.Type == nil { + return raw + } + switch node.Type.Kind { + case "boolean": + return raw == "on" || raw == "true" + case "int8", "int16", "int32", "int64", + "uint8", "uint16", "uint32", "uint64": + var n int64 + if _, err := fmt.Sscanf(raw, "%d", &n); err == nil { + return n + } + case "binary": + // Strip whitespace that textarea input may add (trailing newlines, spaces). + // If the cleaned value is valid base64, send it as-is; otherwise encode. + cleaned := strings.Map(func(r rune) rune { + if r == ' ' || r == '\t' || r == '\n' || r == '\r' { + return -1 + } + return r + }, raw) + if _, err := base64.StdEncoding.DecodeString(cleaned); err == nil { + return cleaned + } + return base64.StdEncoding.EncodeToString([]byte(raw)) + } + return raw +} diff --git a/src/webui/internal/handlers/yang_tree.go b/src/webui/internal/handlers/yang_tree.go new file mode 100644 index 000000000..654335c62 --- /dev/null +++ b/src/webui/internal/handlers/yang_tree.go @@ -0,0 +1,1435 @@ +// SPDX-License-Identifier: MIT + +package handlers + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "html/template" + "log" + "net/http" + "net/url" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +// SchemaHandler serves YANG schema queries as JSON (used by the tree UI and +// for direct API access / testing). 
+type SchemaHandler struct { + Cache *schema.Cache +} + +// Schema serves GET /api/schema?path= +// Returns a single Node (without children) as JSON. +func (h *SchemaHandler) Schema(w http.ResponseWriter, r *http.Request) { + mgr := h.Cache.Manager() + if mgr == nil { + http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable) + return + } + + path := r.URL.Query().Get("path") + if path == "" { + path = "/" + } + + node, err := mgr.NodeAt(path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(node) +} + +// Children serves GET /api/schema/children?path= +// Returns a JSON array of direct child Nodes. +func (h *SchemaHandler) Children(w http.ResponseWriter, r *http.Request) { + mgr := h.Cache.Manager() + if mgr == nil { + http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable) + return + } + + path := r.URL.Query().Get("path") + if path == "" { + path = "/" + } + + nodes, err := mgr.Children(path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(nodes) +} + +// ─── Tree UI handler ───────────────────────────────────────────────────────── + +// TreeHandler serves the YANG tree navigation UI pages and HTMX fragments. +type TreeHandler struct { + Cache *schema.Cache + RC restconf.Fetcher + PageTmpl *template.Template + FragTmpl *template.Template + ReadOnly bool // true for the status/operational tree; suppresses all writes +} + +// treeNodeData wraps a schema.Node with live presence-state from the datastore. +// Exists is only meaningful when Node.Presence != "". +// TreeBase is the URL prefix for tree HTMX requests ("/configure/tree" or "/status/tree"). 
+type treeNodeData struct { + *schema.Node + Exists bool + TreeBase string +} + +type yangTreePageData struct { + PageData + Nodes []*treeNodeData + Loading bool + InitialPath string // non-empty: auto-load this node in the right pane on page load + ReadOnly bool // true for the operational status tree + TreeBase string // URL prefix: "/configure/tree" or "/status/tree" +} + +// nodeDetailData is the template data for the yang-node-detail fragment. +type nodeDetailData struct { + *schema.Node + CurrentValue string + UsingDefault bool // true when CurrentValue is the YANG default, not an explicit candidate value + IsBinary bool // true when the leaf is binary type and CurrentValue holds decoded text + LeafrefValues []string + SavedOK bool + Error string + ReadOnly bool +} + +// leafGroupData is the template data for yang-leaf-group: a container or +// list-instance rendered as an auto-generated "level page". It shows direct +// leaf children as an inline editable form, and structural children (sub-containers +// and lists) as clickable navigation items below. +type leafGroupData struct { + Path string + ParentPath string // set when rendered as an inline sub-container + Name string // display name of the parent node + Kind string // "container" or "list-instance" + Presence string // non-empty for presence containers (YANG presence stmt) + Exists bool // only meaningful when Presence != ""; true = present in datastore + Leaves []*leafGroupItem + InlineLists []*listTableData // simple sub-lists shown inline as tables + InlineContainers []*leafGroupData // flat sub-containers (all-leaf) shown inline + SubNodes []*schema.Node // complex containers/lists → navigation buttons + SavedOK bool + Error string + ReadOnly bool +} + +// listTableColumn is a schema Node with an optional display-name override, +// used so column headers can differ from the YANG leaf name (e.g. "hidden-*" +// presence leaves are shown without the "hidden-" prefix in the heading). 
+type listTableColumn struct { + *schema.Node + DisplayName string +} + +// listTableData is the template data for yang-list-table. +type listTableData struct { + Path string + ParentPath string // set when rendered inline inside a container leaf-group + Name string + Keys []string + Columns []*listTableColumn // display columns (binary excluded to keep table readable) + FormColumns []*listTableColumn // all leaf columns including binary, used by the add-row form + Rows []listTableRow + Complex bool // has nested containers/lists; rows navigate to a full detail page + SavedOK bool + Error string + ReadOnly bool +} + +// listTableRow holds one instance's display path and column values. +type listTableRow struct { + InstancePath string + InstanceName string // key value(s) for display + Values map[string]string +} + +// listAddData is the template data for yang-list-add: the add-row form. +type listAddData struct { + Path string + ParentPath string // set when opened from an inline list + Name string + Keys []string + Columns []*schema.Node + Error string +} + +// leafGroupItem holds a single leaf's schema node and its current candidate value. +type leafGroupItem struct { + *schema.Node + CurrentValue string + UsingDefault bool + IsBinary bool // leaf has binary type + HasBinary bool // a value is present (even if non-decodable to text, e.g. DER keys) + RawBase64 string // raw RESTCONF base64 value, populated when HasBinary && !CurrentValue + LeafrefValues []string +} + +// Overview serves GET /configure/tree (or /status/tree for ReadOnly mode). +// When path is set the right pane auto-loads the node on page load. 
+func (h *TreeHandler) Overview(w http.ResponseWriter, r *http.Request) { + activePage := "configure-tree" + title := "Advanced Configuration" + if h.ReadOnly { + activePage = "status-tree" + title = "Advanced Status" + } + base := h.treeBase() + data := yangTreePageData{ + PageData: newPageData(r, activePage, title), + InitialPath: r.URL.Query().Get("path"), + ReadOnly: h.ReadOnly, + TreeBase: base, + } + + mgr := h.Cache.Manager() + if mgr == nil { + data.Loading = true + } else { + nodes, err := h.childrenFunc(mgr)("/") + if err == nil { + treeNodes := make([]*treeNodeData, len(nodes)) + for i, n := range nodes { + treeNodes[i] = &treeNodeData{Node: n, Exists: true, TreeBase: base} + } + data.Nodes = treeNodes + } + } + + if r.Header.Get("HX-Request") == "true" { + h.PageTmpl.ExecuteTemplate(w, "content", data) + } else { + h.PageTmpl.ExecuteTemplate(w, "yang-tree.html", data) + } +} + +// TreeChildren serves GET /configure/tree/children?path=... +// For plain list paths (no key predicate) it returns the actual instances from +// the candidate datastore. For everything else it returns schema children. +func (h *TreeHandler) TreeChildren(w http.ResponseWriter, r *http.Request) { + mgr := h.Cache.Manager() + if mgr == nil { + http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable) + return + } + + path := r.URL.Query().Get("path") + if path == "" { + path = "/" + } + + // For a plain list path (no key predicate in the *last* segment), show data + // instances rather than the schema template. Check only the last segment so + // that paths like /…/interface=br0/ietf-ip:ipv4/address are not excluded. 
+ lastSeg := path + if i := strings.LastIndexByte(path, '/'); i >= 0 { + lastSeg = path[i+1:] + } + base := h.treeBase() + if !strings.ContainsAny(lastSeg, "[=") { + if node, err := mgr.NodeAt(path); err == nil && node.Kind == "list" { + instances := h.fetchListInstances(r, path, node) + treeNodes := make([]*treeNodeData, len(instances)) + for i, n := range instances { + treeNodes[i] = &treeNodeData{Node: n, Exists: true, TreeBase: base} + } + h.FragTmpl.ExecuteTemplate(w, "yang-tree-nodes", treeNodes) + return + } + } + + childFn := h.childrenFunc(mgr) + nodes, err := childFn(path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + + // For list instances, fetch live data so when conditions can be evaluated. + // For bare schema paths (no instance key) values stays nil → conservative. + var values map[string]string + if strings.ContainsAny(lastSeg, "[=") { + values = h.fetchNodeValues(r, path) + } + + // Filter out nodes that are inlined in the detail pane (simple lists and + // flat containers) so they don't clutter the tree. Also hide nodes whose + // when condition is not satisfied by the current instance data. + var visible []*schema.Node + for _, n := range nodes { + if n.When != "" && !schema.EvaluateWhen(mgr, n.When, values) { + continue + } + if n.Kind == "list" || n.Kind == "container" { + kids, _ := childFn(n.Path) + if isSimpleList(kids) { + continue + } + } + visible = append(visible, n) + } + h.FragTmpl.ExecuteTemplate(w, "yang-tree-nodes", h.resolvePresenceState(r, path, visible)) +} + +// resolvePresenceState wraps schema nodes in treeNodeData. +// Existence is not checked here — presence toggle lives in the right-pane card header +// and is only resolved lazily when a node is selected (see TreeNode/checkPresenceExists). 
+func (h *TreeHandler) resolvePresenceState(r *http.Request, parentPath string, nodes []*schema.Node) []*treeNodeData {
+	base := h.treeBase()
+	wrapped := make([]*treeNodeData, 0, len(nodes))
+	for _, n := range nodes {
+		wrapped = append(wrapped, &treeNodeData{Node: n, TreeBase: base})
+	}
+	return wrapped
+}
+
+// treeBase returns the URL prefix for this handler's tree routes: the
+// editable tree lives under /configure, the read-only one under /status.
+func (h *TreeHandler) treeBase() string {
+	if !h.ReadOnly {
+		return "/configure/tree"
+	}
+	return "/status/tree"
+}
+
+// childrenFunc picks the schema-children accessor for this handler's mode.
+func (h *TreeHandler) childrenFunc(mgr *schema.Manager) func(string) ([]*schema.Node, error) {
+	if !h.ReadOnly {
+		return mgr.Children
+	}
+	return mgr.ChildrenAll
+}
+
+// fetchData reads from /data (operational) in ReadOnly mode. In config mode
+// it prefers the candidate datastore and falls back to /data on error.
+func (h *TreeHandler) fetchData(r *http.Request, path string) ([]byte, error) {
+	ctx := r.Context()
+	if h.ReadOnly {
+		return h.RC.GetRaw(ctx, "/data"+path)
+	}
+	if data, err := h.RC.GetRaw(ctx, candidateDS+path); err == nil {
+		return data, nil
+	}
+	return h.RC.GetRaw(ctx, "/data"+path)
+}
+
+// checkPresenceExists reports whether the YANG presence container at path
+// currently exists in the candidate (or running) datastore.
+func (h *TreeHandler) checkPresenceExists(r *http.Request, path string) bool {
+	if _, err := h.fetchData(r, path); err != nil {
+		return false
+	}
+	return true
+}
+
+// fetchListInstances queries the candidate (fallback: running) for a list
+// node and returns one schema.Node per instance, using the key values to
+// build RESTCONF key predicates for the path.
+//
+// rousette always wraps GET responses in the full module-root hierarchy, so
+// we can GET the parent path and then use navigateToNode with the full list
+// path to reach the list array directly.
+func (h *TreeHandler) fetchListInstances(r *http.Request, path string, listNode *schema.Node) []*schema.Node { + segs := strings.Split(strings.TrimPrefix(path, "/"), "/") + parentPath := "/" + strings.Join(segs[:len(segs)-1], "/") + if len(segs) < 2 { + parentPath = path + } + + data, err := h.fetchData(r, parentPath) + if err != nil { + log.Printf("yang-tree: list GET %s: %v", parentPath, err) + return nil + } + + // Navigate directly to the list array using the full path. + rawItems := navigateToNode(data, path) + if rawItems == nil { + log.Printf("yang-tree: list %q not found in response for %s", listNode.Name, path) + return nil + } + + var items []map[string]json.RawMessage + if err := json.Unmarshal(rawItems, &items); err != nil { + log.Printf("yang-tree: list items unmarshal %s: %v", path, err) + return nil + } + + var nodes []*schema.Node + for _, item := range items { + // Build RESTCONF key predicate: "=val" or "=val1,val2". + pred := buildKeyPredicate(item, listNode.Keys) + instancePath := path + pred + // Display name is the key value(s) without the leading "=". + displayName := pred[1:] + + nodes = append(nodes, &schema.Node{ + Path: instancePath, + Name: displayName, + Kind: "list-instance", + Config: listNode.Config, + Keys: listNode.Keys, + }) + } + // Natural sort: eth2 before eth10. + sort.Slice(nodes, func(i, j int) bool { + return naturalLess(nodes[i].Name, nodes[j].Name) + }) + return nodes +} + +// buildKeyPredicate constructs a RESTCONF list-key predicate from a JSON +// object and a list of key names (RFC 8040 §3.5.3). +// Single key: "=eth0" +// Composite key: "=default,ipv4" +func buildKeyPredicate(item map[string]json.RawMessage, keys []string) string { + var vals []string + for _, key := range keys { + raw, ok := item[key] + if !ok { + // Try with module prefix (RESTCONF may qualify key names). + for k, v := range item { + if k == key || strings.HasSuffix(k, ":"+key) { + raw = v + ok = true + break + } + } + } + s := "?" 
+ if ok { + if json.Unmarshal(raw, &s) != nil { + s = strings.Trim(string(raw), `"`) + } + s = url.PathEscape(s) + } + vals = append(vals, s) + } + if len(vals) == 0 { + return "=?" + } + return "=" + strings.Join(vals, ",") +} + +// TreeNode serves GET /configure/tree/node?path=... +// For containers/list-instances whose children are all leaves it renders an +// inline leaf-group form. For individual leaves it renders the leaf detail form. +func (h *TreeHandler) TreeNode(w http.ResponseWriter, r *http.Request) { + mgr := h.Cache.Manager() + if mgr == nil { + http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable) + return + } + + path := r.URL.Query().Get("path") + if path == "" { + http.Error(w, "path required", http.StatusBadRequest) + return + } + // parent= is set when navigating from an inline list row so that saving + // re-renders the parent container page instead of the current one. + parentPath := r.URL.Query().Get("parent") + + node, err := mgr.NodeAt(path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + + // Containers and list-instances always render as a level page — direct leaves + // as an editable form, structural children as navigation items below. + // NodeAt strips key predicates so a list instance always comes back as "list"; + // detect an instance by checking ONLY the last path segment for a key predicate, + // so that nested paths like /…/interface=br0/ietf-ip:ipv4/address are not + // mistakenly treated as instances due to a predicate in a parent segment. + lastSeg := path + if i := strings.LastIndexByte(path, '/'); i >= 0 { + lastSeg = path[i+1:] + } + isInstance := node.Kind == "list" && strings.ContainsAny(lastSeg, "[=") + + // Bare list node (no key predicate): show a table of all instances when the + // list is "simple" (only leaf children — no nested containers or lists). 
+ if node.Kind == "list" && !isInstance { + if td := h.buildListTable(r, mgr, path, node); td != nil { + if parentPath != "" { + td.ParentPath = parentPath + } + h.FragTmpl.ExecuteTemplate(w, "yang-list-table", td) + return + } + } + + if node.Kind == "container" || node.Kind == "list-instance" || isInstance { + dispKind := node.Kind + if isInstance { + dispKind = "list-instance" + } + gd := h.buildLeafGroup(r, mgr, path, node.Name, dispKind) + if gd == nil && node.Presence == "" { + // Non-presence empty container — fall through to node detail view. + } else { + if gd == nil { + gd = &leafGroupData{Path: path, Name: node.Name, Kind: dispKind} + } + if parentPath != "" { + gd.ParentPath = parentPath + } + if node.Presence != "" { + gd.Presence = node.Presence + gd.Exists = h.checkPresenceExists(r, path) + } + gd.ReadOnly = h.ReadOnly + h.FragTmpl.ExecuteTemplate(w, "yang-leaf-group", gd) + return + } + } + + data := &nodeDetailData{Node: node, ReadOnly: h.ReadOnly} + if node.Kind == "leaf" || node.Kind == "leaf-list" { + item := &leafGroupItem{Node: node} + resolveLeafItem(item, h.fetchLeafValue(r, path)) + data.CurrentValue = item.CurrentValue + data.UsingDefault = item.UsingDefault + data.IsBinary = item.IsBinary + data.LeafrefValues = h.fetchLeafrefValues(r, mgr, node) + } + + h.FragTmpl.ExecuteTemplate(w, "yang-node-detail", data) +} + +// buildLeafGroup builds a level-page for any container or list-instance. +// It fetches the whole node once (one HTTP round-trip) and extracts leaf +// values from the response, rather than fetching each leaf individually. +// Structural children (sub-containers, lists) are listed as navigation items. +// Returns nil only when the schema has no children (empty container). 
func (h *TreeHandler) buildLeafGroup(r *http.Request, mgr *schema.Manager, path, name, kind string) *leafGroupData {
	childFn := h.childrenFunc(mgr)
	children, err := childFn(path)
	if err != nil || len(children) == 0 {
		return nil
	}
	gd := &leafGroupData{Path: path, Name: name, Kind: kind, ReadOnly: h.ReadOnly}
	values := h.fetchNodeValues(r, path)

	// For list instances: enrich the card heading ("interface wan") and build a
	// key-name set so key leaves can be sorted first in the form.
	var keySet map[string]bool
	if kind == "list-instance" {
		schemaPath := stripKeyPredicate(path)
		if listNode, lerr := mgr.NodeAt(schemaPath); lerr == nil && len(listNode.Keys) > 0 {
			keySet = make(map[string]bool, len(listNode.Keys))
			for _, k := range listNode.Keys {
				keySet[k] = true
			}
			_, lastSeg := splitLastSegment(path)
			if i := strings.IndexByte(lastSeg, '='); i >= 0 {
				if keyVal, uerr := url.PathUnescape(lastSeg[i+1:]); uerr == nil && keyVal != "" {
					gd.Name = name + " " + keyVal
				}
			}
		}
	}

	// mgr.Children always returns schema paths (no key predicates). When path
	// is a list instance (e.g. /…/user=admin), we must rebuild each child's
	// path using the instance path as prefix so that RESTCONF requests and form
	// actions address the correct instance, not a bare list path.
	schemaBase := stripKeyPredicate(path) // path with last key predicate removed

	for _, c := range children {
		// Skip nodes whose when condition is false for the current data values.
		if c.When != "" && !schema.EvaluateWhen(mgr, c.When, values) {
			continue
		}

		// Rebase the schema path onto the instance path (no-op for non-instances).
		childPath := c.Path
		if schemaBase != path && strings.HasPrefix(c.Path, schemaBase+"/") {
			childPath = path + c.Path[len(schemaBase):]
		}

		switch c.Kind {
		case "leaf", "leaf-list":
			item := &leafGroupItem{Node: c}
			val := ""
			if values != nil {
				val = values[c.Name]
			}
			resolveLeafItem(item, val)
			item.LeafrefValues = h.fetchLeafrefValues(r, mgr, c)
			gd.Leaves = append(gd.Leaves, item)
		case "list":
			if td := h.buildListTable(r, mgr, childPath, c); td != nil {
				td.ParentPath = path
				gd.InlineLists = append(gd.InlineLists, td)
			}
		case "container":
			// All-leaf sub-containers are inlined as a nested leaf group;
			// anything richer becomes a navigation item instead.
			kids2, err2 := childFn(c.Path)
			if err2 == nil && isSimpleList(kids2) {
				sub := h.buildLeafGroup(r, mgr, childPath, c.Name, "container")
				if sub != nil {
					sub.ParentPath = path
					gd.InlineContainers = append(gd.InlineContainers, sub)
					break
				}
			}
			subNode := c
			if childPath != c.Path {
				cn := *c
				cn.Path = childPath
				subNode = &cn
			}
			gd.SubNodes = append(gd.SubNodes, subNode)
		default:
			subNode := c
			if childPath != c.Path {
				cn := *c
				cn.Path = childPath
				subNode = &cn
			}
			gd.SubNodes = append(gd.SubNodes, subNode)
		}
	}

	// Sort key leaves to the top of the form (schema order preserved within groups).
	if len(keySet) > 0 {
		sort.SliceStable(gd.Leaves, func(i, j int) bool {
			return keySet[gd.Leaves[i].Name] && !keySet[gd.Leaves[j].Name]
		})
	}

	return gd
}

// isSimpleList returns true when every direct child of the list is a leaf or
// leaf-list — no nested containers or sub-lists. Used to decide whether to
// render a bare list node as a data table rather than a tree expansion.
func isSimpleList(children []*schema.Node) bool {
	if len(children) == 0 {
		return false
	}
	for _, c := range children {
		if c.Kind != "leaf" && c.Kind != "leaf-list" {
			return false
		}
	}
	return true
}

// buildListTable builds a listTableData for a bare (no-predicate) list node.
// Simple lists (all-leaf children) show all leaf columns.
// Complex lists (with nested containers/lists) show only leaf columns and set
// Complex=true so the template renders rows as click-through navigation items
// instead of an inline add form.
// Returns nil only if children cannot be resolved.
func (h *TreeHandler) buildListTable(r *http.Request, mgr *schema.Manager, path string, listNode *schema.Node) *listTableData {
	children, err := h.childrenFunc(mgr)(path)
	if err != nil {
		return nil
	}

	simple := isSimpleList(children)

	// Keys first, then up to 4 non-key LEAF columns so the table stays readable.
	// For complex lists we skip containers and sub-lists from the column set.
	// FormColumns tracks all leaf columns (including binary) for use by the add-row
	// form — binary is excluded from Columns only to keep the table display readable.
	keySet := make(map[string]bool, len(listNode.Keys))
	for _, k := range listNode.Keys {
		keySet[k] = true
	}
	var keyNodes, otherNodes, formNodes []*listTableColumn
	for _, c := range children {
		if c.Kind != "leaf" && c.Kind != "leaf-list" {
			continue // skip containers/sub-lists from column display
		}
		col := &listTableColumn{Node: c, DisplayName: c.Name}
		// Strip "hidden-" prefix from the display name (e.g. "hidden-private-key"
		// → "private-key") so the heading names the data concept, not the YANG case.
		if c.Type != nil && c.Type.Kind == "empty" && strings.HasPrefix(c.Name, "hidden-") {
			col.DisplayName = strings.TrimPrefix(c.Name, "hidden-")
		}
		formNodes = append(formNodes, col)
		if keySet[c.Name] {
			keyNodes = append(keyNodes, col)
		} else if c.Type == nil || (c.Type.Kind != "binary") {
			// Skip binary columns — unreadable in a table and blow out column widths.
			otherNodes = append(otherNodes, col)
		}
	}
	const maxOther = 4
	if len(otherNodes) > maxOther {
		otherNodes = otherNodes[:maxOther]
	}
	columns := append(keyNodes, otherNodes...)
	if len(columns) == 0 {
		return nil
	}

	instances := h.fetchListInstances(r, path, listNode)
	td := &listTableData{
		Path:        path,
		Name:        listNode.Name,
		Keys:        listNode.Keys,
		Columns:     columns,
		FormColumns: formNodes,
		Complex:     !simple,
		ReadOnly:    h.ReadOnly,
	}
	// One fetch per instance; values are post-processed for display below.
	for _, inst := range instances {
		rawVals := h.fetchNodeValues(r, inst.Path)
		display := make(map[string]string, len(rawVals))
		for _, col := range columns {
			v := rawVals[col.Name]
			switch {
			case col.Type != nil && col.Type.Kind == "identityref":
				// Strip module prefix for display.
				if i := strings.LastIndexByte(v, ':'); i >= 0 {
					v = v[i+1:]
				}
			case col.Type != nil && col.Type.Kind == "empty":
				// YANG empty type: "true" means present (from [null] in JSON).
				// Hidden-* leaves indicate the value is stored but not exported.
				if v == "true" {
					if strings.HasPrefix(col.Name, "hidden-") {
						v = "Hidden"
					} else {
						v = "✓"
					}
				} else {
					v = ""
				}
			}
			display[col.Name] = v
		}
		td.Rows = append(td.Rows, listTableRow{
			InstancePath: inst.Path,
			InstanceName: inst.Name,
			Values:       display,
		})
	}
	return td
}

// NOTE(review): the first sentence below describes stripKeyPredicate, which is
// defined further down this file — the comment block got attached to
// splitLastSegment when the two helpers were reordered.
// stripKeyPredicate removes the key predicate from the last segment of a path,
// e.g. "/interfaces/interface=eth0" → "/interfaces/interface".
// splitLastSegment splits "/a/b/c=d" into ("/a/b", "c=d").
func splitLastSegment(path string) (parent, last string) {
	i := strings.LastIndexByte(path, '/')
	if i < 0 {
		return "", path
	}
	return path[:i], path[i+1:]
}

// buildRootedPatch returns the top-level module container path and a body
// suitable for PATCH /candidateDS+topPath that nests entry inside the full
// schema path. Patching at the module root (like configure-system.go does
// for hostname/NTP) gives libyang the complete ancestor-key context it needs
// to validate nested list entries — patching at a sub-path like user=admin
// leaves libyang without parent-list key context.
func buildRootedPatch(mgr *schema.Manager, listPath string, listNode *schema.Node, entry map[string]any) (topPath string, body map[string]any) {
	segs := strings.Split(strings.TrimPrefix(listPath, "/"), "/")
	if len(segs) == 0 {
		// Degenerate path: patch directly at listPath with a qualified envelope.
		qn, _ := mgr.ModuleQualifiedName(listPath)
		return listPath, map[string]any{qn: []map[string]any{entry}}
	}

	topSeg := segs[0] // e.g. "ietf-system:system"
	topModName, _ := splitModPrefix(topSeg)

	// qualName returns the bare name if the node is in the same module as
	// topMod, or "module:name" if it's in a different module.
	qualName := func(nodePath, name string) string {
		mod, err := mgr.ModuleName(nodePath)
		if err == nil && mod != "" && mod != topModName {
			return mod + ":" + name
		}
		return name
	}

	// Start with the innermost: the target list → [entry].
	innerVal := map[string]any{qualName(listPath, listNode.Name): []map[string]any{entry}}

	// Walk upward through intermediate segments, wrapping at each level.
	for i := len(segs) - 2; i >= 1; i-- {
		seg := segs[i]
		segPath := "/" + strings.Join(segs[:i+1], "/")
		eqIdx := strings.IndexByte(seg, '=')
		if eqIdx >= 0 {
			// List instance (e.g. "user=admin"): build a list entry that
			// includes the key value(s) extracted from the path predicate.
			listName := seg[:eqIdx]
			predVals := strings.SplitN(seg[eqIdx+1:], ",", 16)
			schemaPath := stripKeyPredicate(segPath)
			schemaNode, err := mgr.NodeAt(schemaPath)
			listEntry := make(map[string]any)
			if err == nil {
				for j, k := range schemaNode.Keys {
					if j < len(predVals) {
						v, _ := url.PathUnescape(predVals[j])
						listEntry[k] = v
					}
				}
			}
			for k, v := range innerVal {
				listEntry[k] = v
			}
			innerVal = map[string]any{qualName(schemaPath, listName): []map[string]any{listEntry}}
		} else {
			// Container segment: wrap innerVal in the container object.
			innerVal = map[string]any{qualName(segPath, seg): innerVal}
		}
	}

	return "/" + topSeg, map[string]any{topSeg: innerVal}
}

// stripKeyPredicate removes the key predicate from the LAST path segment only,
// e.g. "/interfaces/interface=eth0" → "/interfaces/interface".
// Predicates in parent segments are left intact.
func stripKeyPredicate(path string) string {
	segs := strings.Split(strings.TrimPrefix(path, "/"), "/")
	if len(segs) == 0 {
		return path
	}
	last := segs[len(segs)-1]
	if i := strings.IndexByte(last, '='); i >= 0 {
		segs[len(segs)-1] = last[:i]
	}
	return "/" + strings.Join(segs, "/")
}

// AddListRowForm serves GET /configure/tree/list-add?path=...&parent=...
// Renders a blank add-row form for the list at path.
// parent is set when the list is embedded inside a container leaf-group.
// NOTE(review): unlike TogglePresence this handler does not check h.ReadOnly —
// presumably the read-only tree never routes here; TODO confirm.
func (h *TreeHandler) AddListRowForm(w http.ResponseWriter, r *http.Request) {
	mgr := h.Cache.Manager()
	if mgr == nil {
		http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable)
		return
	}
	path := r.URL.Query().Get("path")
	parent := r.URL.Query().Get("parent")
	node, err := mgr.NodeAt(path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	children, _ := mgr.Children(path)
	var leafCols []*schema.Node
	for _, c := range children {
		if c.Kind == "leaf" || c.Kind == "leaf-list" {
			leafCols = append(leafCols, c)
		}
	}
	h.FragTmpl.ExecuteTemplate(w, "yang-list-add", &listAddData{
		Path:       path,
		ParentPath: parent,
		Name:       node.Name,
		Keys:       node.Keys,
		Columns:    leafCols,
	})
}

// SaveListRow serves POST /configure/tree/list-row?path=...&parent=...
// Creates a new list instance from posted form values, then re-renders.
// If parent is set, re-renders the parent container leaf-group (inline mode).
func (h *TreeHandler) SaveListRow(w http.ResponseWriter, r *http.Request) {
	mgr := h.Cache.Manager()
	if mgr == nil {
		http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable)
		return
	}
	path := r.URL.Query().Get("path")
	parent := r.URL.Query().Get("parent")
	listNode, err := mgr.NodeAt(path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	// NOTE(review): ParseForm error is ignored; FormValue then just returns "".
	r.ParseForm()
	children, _ := mgr.Children(path)

	// Build RESTCONF body from all posted leaf values.
	// Use bare field names (no module prefix) inside the entry; cross-module
	// augmented fields still need their module prefix.
	listModName, _ := mgr.ModuleName(path)
	entry := make(map[string]any)
	for _, child := range children {
		if child.Kind != "leaf" && child.Kind != "leaf-list" {
			continue
		}
		raw := r.FormValue(child.Name)
		if raw == "" {
			continue // empty fields are simply omitted from the new entry
		}
		fieldModName, _ := mgr.ModuleName(child.Path)
		fieldKey := child.Name
		if fieldModName != listModName {
			fieldKey = fieldModName + ":" + child.Name
		}
		entry[fieldKey] = coerceLeafValue(raw, child)
	}

	// PATCH at the top-level module container (e.g. ietf-system:system) with
	// the full nested structure so libyang has complete ancestor-key context.
	// Patching at a sub-path leaves libyang without parent list-key context
	// and produces "List requires N keys" errors.
	topPath, rootBody := buildRootedPatch(mgr, path, listNode, entry)
	putErr := h.RC.Patch(r.Context(), candidateDS+topPath, rootBody)

	// Inline mode: re-render the parent container so the new row appears in place.
	if parent != "" {
		parentNode, pErr := mgr.NodeAt(parent)
		if pErr == nil {
			gd := h.buildLeafGroup(r, mgr, parent, parentNode.Name, parentNode.Kind)
			if gd != nil {
				if putErr != nil {
					gd.Error = putErr.Error()
				} else {
					gd.SavedOK = true
				}
				h.FragTmpl.ExecuteTemplate(w, "yang-leaf-group", gd)
				return
			}
		}
	}

	td := h.buildListTable(r, mgr, path, listNode)
	if td == nil {
		td = &listTableData{Path: path, Name: listNode.Name}
	}
	if putErr != nil {
		td.Error = putErr.Error()
	} else {
		td.SavedOK = true
	}
	h.FragTmpl.ExecuteTemplate(w, "yang-list-table", td)
}

// DeleteListRow serves DELETE /configure/tree/list-row?path=...&parent=...
// Deletes a list instance and re-renders. If parent is set, re-renders the
// parent container leaf-group (inline mode); otherwise re-renders the list table.
func (h *TreeHandler) DeleteListRow(w http.ResponseWriter, r *http.Request) {
	mgr := h.Cache.Manager()
	if mgr == nil {
		http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable)
		return
	}
	path := r.URL.Query().Get("path")
	parent := r.URL.Query().Get("parent")
	listPath := stripKeyPredicate(path)
	listNode, err := mgr.NodeAt(listPath)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	errMsg := ""
	// Skip direct DELETE when the path contains '@': libyang treats it as a
	// module@revision separator in path predicates and always returns 400.
	useParentPut := strings.ContainsRune(path, '@')
	if !useParentPut {
		// Try the cheap direct DELETE first; fall back on any failure.
		if delErr := h.RC.Delete(r.Context(), candidateDS+path); delErr != nil {
			useParentPut = true
		}
	}
	if useParentPut {
		if fbErr := h.deleteViaParentPut(r, mgr, path, listPath, listNode); fbErr != nil {
			errMsg = fbErr.Error()
		}
	}

	// Inline list delete (parent set): the button uses hx-swap="delete" to remove
	// the row from the DOM — no full re-render needed. Return a minimal signal.
	if parent != "" {
		if errMsg != "" {
			renderSaveError(w, errors.New(errMsg))
		} else {
			renderSaved(w, "Deleted")
		}
		return
	}

	td := h.buildListTable(r, mgr, listPath, listNode)
	if td == nil {
		td = &listTableData{Path: listPath, Name: listNode.Name}
	}
	td.Error = errMsg
	h.FragTmpl.ExecuteTemplate(w, "yang-list-table", td)
}

// deleteViaParentPut removes a list instance without using a DELETE to the
// instance path. It GETs the parent, filters the list, and PUTs the parent
// back — identical in spirit to the curated DeleteKey workaround.
// Used when a direct DELETE fails (e.g. '@' in key value causes libyang to
// treat it as a module@revision separator and return "Syntax error").
func (h *TreeHandler) deleteViaParentPut(r *http.Request, mgr *schema.Manager, instancePath, listPath string, listNode *schema.Node) error {
	// Parent of the list (e.g. user=admin for authorized-key).
	listParentPath, _ := splitLastSegment(listPath)
	if listParentPath == "" {
		return fmt.Errorf("no parent for %s", listPath)
	}

	// GET parent — rousette returns full module-root hierarchy.
	data, err := h.fetchData(r, listParentPath)
	if err != nil {
		return err
	}

	// Navigate to the parent instance.
	parentRaw := navigateToNode(data, listParentPath)
	if parentRaw == nil {
		return nil // already absent
	}
	var parentObj map[string]json.RawMessage
	if err := json.Unmarshal(parentRaw, &parentObj); err != nil {
		return err
	}

	// Find the list array by bare name inside the parent object.
	_, listBareName := splitModPrefix(listPath[strings.LastIndexByte(listPath, '/')+1:])
	var listKey string
	var rawItems json.RawMessage
	for k, v := range parentObj {
		_, local := splitModPrefix(k)
		if local == listBareName {
			listKey, rawItems = k, v
			break
		}
	}
	if rawItems == nil {
		return nil // list absent, nothing to do
	}

	var items []map[string]json.RawMessage
	if err := json.Unmarshal(rawItems, &items); err != nil {
		return err
	}

	// Key values from instance path predicate (after the '=', comma-separated).
	_, instanceSeg := splitLastSegment(instancePath)
	eqIdx := strings.IndexByte(instanceSeg, '=')
	if eqIdx < 0 {
		return fmt.Errorf("no predicate in %s", instancePath)
	}
	delVals := strings.Split(instanceSeg[eqIdx+1:], ",")

	// Filter: keep all entries except the one matching the deletion key.
	// Both sides are compared unescaped so %40 and '@' match each other.
	var kept []json.RawMessage
	for _, item := range items {
		pred := buildKeyPredicate(item, listNode.Keys)
		itemVals := strings.Split(pred[1:], ",") // strip leading "="
		match := len(delVals) == len(itemVals)
		for i := range delVals {
			if !match {
				break
			}
			d1, _ := url.PathUnescape(delVals[i])
			d2, _ := url.PathUnescape(itemVals[i])
			if d1 != d2 {
				match = false
			}
		}
		if !match {
			b, _ := json.Marshal(item)
			kept = append(kept, b)
		}
	}

	// NOTE(review): when the last entry is removed, kept is a nil slice and
	// marshals to JSON null rather than [] — verify rousette accepts a null
	// list value here, or the key should be deleted from parentObj instead.
	filteredJSON, _ := json.Marshal(kept)
	parentObj[listKey] = json.RawMessage(filteredJSON)

	// Convert parent object back to map[string]any for the PUT body.
	parentAny := make(map[string]any, len(parentObj))
	for k, v := range parentObj {
		var val any
		json.Unmarshal(v, &val) //nolint:errcheck
		parentAny[k] = val
	}

	// Wrap in the module-qualified list-instance envelope expected by RESTCONF.
	_, parentLastSeg := splitLastSegment(listParentPath)
	_, parentBareName := splitModPrefix(stripModPredicate(parentLastSeg))
	parentModName, _ := mgr.ModuleName(listParentPath)
	body := map[string]any{
		parentModName + ":" + parentBareName: []any{parentAny},
	}
	return h.RC.Put(r.Context(), candidateDS+listParentPath, body)
}

// fetchNodeValues fetches a container or list-instance and returns a flat map
// of bare-leaf-name → string for its direct scalar children.
//
// When the path contains characters like '@' that rousette/libyang rejects in
// URL path predicates, a direct GET fails. We fall back to GETting the parent
// path — rousette always returns the full module-root hierarchy, so
// navigateToNode can still walk down to the target node.
func (h *TreeHandler) fetchNodeValues(r *http.Request, path string) map[string]string {
	// If the last segment contains percent-encoded characters (e.g. %40 for @),
	// Go's HTTP client decodes them before sending and libyang then rejects the
	// path with "Syntax error". Skip directly to the parent-path fallback.
	_, lastSeg := splitLastSegment(path)

	var data []byte
	var err error
	if !strings.ContainsRune(lastSeg, '%') {
		data, err = h.fetchData(r, path)
	}
	if data == nil {
		parentPath, _ := splitLastSegment(path)
		if parentPath == "" {
			return nil
		}
		data, err = h.fetchData(r, parentPath)
		if err != nil {
			return nil
		}
	}
	raw := navigateToNode(data, path)
	if raw == nil {
		return nil
	}
	return flattenNodeValues(raw)
}

// SaveGroup serves PUT /configure/tree/group?path=...
// Saves every leaf in the group form to the candidate datastore.
+// Returns HX-Trigger cfgSaved/cfgError so the form shows inline feedback +// without a full re-render (forms use hx-swap="none"). +func (h *TreeHandler) SaveGroup(w http.ResponseWriter, r *http.Request) { + mgr := h.Cache.Manager() + if mgr == nil { + http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable) + return + } + + path := r.URL.Query().Get("path") + if path == "" { + http.Error(w, "path required", http.StatusBadRequest) + return + } + + node, err := mgr.NodeAt(path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + + r.ParseForm() + + children, _ := mgr.Children(path) + var firstErr string + + for _, child := range children { + if child.Kind != "leaf" && child.Kind != "leaf-list" { + continue + } + rawValue := r.FormValue(child.Name) + qualName, qErr := mgr.ModuleQualifiedName(child.Path) + if qErr != nil { + qualName = child.Name + } + body := map[string]any{qualName: coerceLeafValue(rawValue, child)} + if putErr := h.RC.Put(r.Context(), candidateDS+child.Path, body); putErr != nil && firstErr == "" { + firstErr = child.Name + ": " + putErr.Error() + } + } + + if firstErr != "" { + w.Header().Set("HX-Trigger", `{"cfgError":"`+firstErr+`"}`) + w.WriteHeader(http.StatusUnprocessableEntity) + return + } + w.Header().Set("HX-Trigger", `{"cfgSaved":"Saved `+node.Name+` to candidate"}`) + w.WriteHeader(http.StatusNoContent) +} + +// SaveLeaf serves PUT /configure/tree/node?path=... +// Writes the form value to the candidate datastore and re-renders the detail pane. 
func (h *TreeHandler) SaveLeaf(w http.ResponseWriter, r *http.Request) {
	mgr := h.Cache.Manager()
	if mgr == nil {
		http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable)
		return
	}

	path := r.URL.Query().Get("path")
	if path == "" {
		http.Error(w, "path required", http.StatusBadRequest)
		return
	}

	node, err := mgr.NodeAt(path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	rawValue := r.FormValue("value")
	qualName, err := mgr.ModuleQualifiedName(path)
	if err != nil {
		// Fall back to the bare name; the PUT may still succeed for
		// same-module leaves.
		log.Printf("yang: qualified name for %s: %v", path, err)
		qualName = node.Name
	}

	body := map[string]any{qualName: coerceLeafValue(rawValue, node)}
	data := &nodeDetailData{Node: node}

	if putErr := h.RC.Put(r.Context(), candidateDS+path, body); putErr != nil {
		data.Error = putErr.Error()
		data.CurrentValue = rawValue
	} else {
		data.SavedOK = true
		data.CurrentValue = rawValue
	}

	h.FragTmpl.ExecuteTemplate(w, "yang-node-detail", data)
}

// DeleteLeaf serves DELETE /configure/tree/node?path=...
// Removes the node from the candidate datastore; returns 204.
func (h *TreeHandler) DeleteLeaf(w http.ResponseWriter, r *http.Request) {
	path := r.URL.Query().Get("path")
	if path == "" {
		http.Error(w, "path required", http.StatusBadRequest)
		return
	}

	if err := h.RC.Delete(r.Context(), candidateDS+path); err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}

// fetchLeafrefValues resolves a leafref schema path against the candidate
// (fallback: running) datastore and returns the available key/leaf values.
// Returns nil when the leafref cannot be resolved or the datastore is empty.
func (h *TreeHandler) fetchLeafrefValues(r *http.Request, mgr *schema.Manager, node *schema.Node) []string {
	if node.Type == nil || node.Type.Kind != "leafref" || node.Type.Leafref == "" {
		return nil
	}
	absPath := mgr.ResolveLeafref(node.Type.Leafref, node.Path)
	if absPath == "" {
		return nil
	}

	// Split into parent (the list/container) and target leaf name.
	segs := strings.Split(strings.TrimPrefix(absPath, "/"), "/")
	if len(segs) < 2 {
		return nil
	}
	_, targetLeaf := splitModPrefix(segs[len(segs)-1])
	parentPath := "/" + strings.Join(segs[:len(segs)-1], "/")

	data, err := h.fetchData(r, parentPath)
	if err != nil {
		return nil
	}
	return extractFieldValues(data, targetLeaf)
}

// splitModPrefix splits "module:name" into ("module", "name").
// If there is no prefix it returns ("", name).
func splitModPrefix(s string) (string, string) {
	if i := strings.IndexByte(s, ':'); i >= 0 {
		return s[:i], s[i+1:]
	}
	return "", s
}

// extractFieldValues walks a RESTCONF JSON response and collects every string
// value whose key (stripped of module prefix) matches fieldName.
// Handles both single-object and array envelopes.
// extractFieldValues walks a RESTCONF JSON response and collects every string
// value whose key (stripped of its "module:" prefix) matches fieldName,
// returned in natural (numeric-aware) order — e.g. eth2 before eth10.
func extractFieldValues(data []byte, fieldName string) []string {
	var doc interface{}
	if err := json.Unmarshal(data, &doc); err != nil {
		return nil
	}
	var vals []string
	collectField(doc, fieldName, &vals)
	sort.Slice(vals, func(i, j int) bool { return naturalLess(vals[i], vals[j]) })
	return vals
}

// collectField recursively gathers string values for every key whose local
// name (module prefix stripped) equals field. Matching keys with non-string
// values (e.g. leaf-list arrays) are intentionally not descended into,
// mirroring the original behavior.
func collectField(v interface{}, field string, out *[]string) {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, child := range t {
			local := k
			if i := strings.IndexByte(k, ':'); i >= 0 {
				local = k[i+1:]
			}
			if local != field {
				collectField(child, field, out)
				continue
			}
			if s, ok := child.(string); ok {
				*out = append(*out, s)
			}
		}
	case []interface{}:
		for _, item := range t {
			collectField(item, field, out)
		}
	}
}

// naturalLess compares two strings with numeric-aware ordering so that
// "eth2" < "eth10" (matching faux_str_numcmp in klish-plugin-sysrepo).
//
// Digit runs are compared by zero-trimmed length and then lexicographically
// instead of being accumulated into an int — the previous accumulator
// silently overflowed on runs longer than ~18 digits (e.g. serial numbers),
// yielding arbitrary ordering.
func naturalLess(a, b string) bool {
	isDigit := func(c byte) bool { return c >= '0' && c <= '9' }
	for len(a) > 0 && len(b) > 0 {
		if isDigit(a[0]) && isDigit(b[0]) {
			ia := 0
			for ia < len(a) && isDigit(a[ia]) {
				ia++
			}
			ib := 0
			for ib < len(b) && isDigit(b[ib]) {
				ib++
			}
			da := strings.TrimLeft(a[:ia], "0")
			db := strings.TrimLeft(b[:ib], "0")
			// Fewer significant digits → smaller number; same width → lexical.
			if len(da) != len(db) {
				return len(da) < len(db)
			}
			if da != db {
				return da < db
			}
			a, b = a[ia:], b[ib:]
			continue
		}
		if a[0] != b[0] {
			return a[0] < b[0]
		}
		a, b = a[1:], b[1:]
	}
	return len(a) < len(b)
}

// fetchLeafValue reads the current leaf value from the candidate datastore
// (falling back to running) and returns a display string.
// For paths through list instances the server wraps the response in the full
// module hierarchy; navigateToNode is used to reach the value in that case,
// with a fallback to the simpler single-key envelope unwrap.
func (h *TreeHandler) fetchLeafValue(r *http.Request, path string) string {
	var data []byte
	var err error
	// Same '%'-in-last-segment workaround as fetchNodeValues: percent-encoded
	// predicates are decoded by Go's HTTP client and then rejected by libyang.
	_, lastSeg := splitLastSegment(path)
	if !strings.ContainsRune(lastSeg, '%') {
		data, err = h.fetchData(r, path)
	}
	if data == nil {
		parentPath, _ := splitLastSegment(path)
		if parentPath == "" {
			return ""
		}
		data, err = h.fetchData(r, parentPath)
		if err != nil {
			return ""
		}
	}
	if raw := navigateToNode(data, path); raw != nil {
		return extractScalar(raw)
	}
	return extractLeafValue(data)
}

// resolveLeafItem populates CurrentValue, UsingDefault, and IsBinary for a leafGroupItem.
// For boolean leaves with no stored value and no YANG default, absent == false.
// For binary leaves the RESTCONF value is base64; we decode it for display as
// plain text. If decoding fails or the result is not valid UTF-8 the leaf is
// still marked IsBinary=true but CurrentValue is left empty so the template
// can show a "non-text binary data" placeholder instead.
func resolveLeafItem(item *leafGroupItem, val string) {
	node := item.Node
	if node.Type != nil && node.Type.Kind == "binary" {
		item.IsBinary = true
		if val != "" {
			item.HasBinary = true
			if decoded, err := base64.StdEncoding.DecodeString(val); err == nil && utf8.Valid(decoded) {
				item.CurrentValue = string(decoded)
			} else {
				item.RawBase64 = val // non-decodable (DER); show raw base64 for display
			}
		}
		return
	}
	if val == "" && node.Default != "" {
		item.CurrentValue = node.Default
		item.UsingDefault = true
	} else if val == "" && node.Type != nil && node.Type.Kind == "boolean" {
		item.CurrentValue = "false"
		item.UsingDefault = true
	} else {
		item.CurrentValue = val
	}
}

// TogglePresence serves PUT and DELETE /configure/tree/presence?path=...
// PUT creates the presence container with an empty body; DELETE removes it.
// Returns the updated yang-tree-node HTML fragment for HTMX outerHTML swap.
func (h *TreeHandler) TogglePresence(w http.ResponseWriter, r *http.Request) {
	if h.ReadOnly {
		http.Error(w, "read-only tree", http.StatusMethodNotAllowed)
		return
	}
	mgr := h.Cache.Manager()
	if mgr == nil {
		http.Error(w, "schema not yet loaded", http.StatusServiceUnavailable)
		return
	}
	path := r.URL.Query().Get("path")
	if path == "" {
		http.Error(w, "path required", http.StatusBadRequest)
		return
	}

	node, err := mgr.NodeAt(path)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	var exists bool
	if r.Method == http.MethodDelete {
		if delErr := h.RC.Delete(r.Context(), candidateDS+path); delErr != nil {
			http.Error(w, delErr.Error(), http.StatusBadGateway)
			return
		}
		exists = false
	} else {
		// PUT with an empty object creates the presence container.
		qualName, qErr := mgr.ModuleQualifiedName(path)
		if qErr != nil {
			http.Error(w, qErr.Error(), http.StatusBadRequest)
			return
		}
		if putErr := h.RC.Put(r.Context(), candidateDS+path, map[string]any{qualName: map[string]any{}}); putErr != nil {
			http.Error(w, putErr.Error(), http.StatusBadGateway)
			return
		}
		exists = true
	}

	setCfgUnsaved(w)

	// Re-render the right-pane leaf group with the updated presence state.
	gd := h.buildLeafGroup(r, mgr, path, node.Name, "container")
	if gd == nil {
		gd = &leafGroupData{Path: path, Name: node.Name, Kind: "container"}
	}
	gd.Presence = node.Presence
	gd.Exists = exists
	h.FragTmpl.ExecuteTemplate(w, "yang-leaf-group", gd)
}
diff --git a/src/webui/internal/restconf/client.go b/src/webui/internal/restconf/client.go
new file mode 100644
index 000000000..40ff4577f
--- /dev/null
+++ b/src/webui/internal/restconf/client.go
@@ -0,0 +1,289 @@
// SPDX-License-Identifier: MIT

// Package restconf implements a thin HTTP client for the rousette RESTCONF
// server, carrying per-request Basic Auth credentials via context.
package restconf

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// Credentials holds username/password for Basic Auth.
// Stored in request contexts by the auth middleware.
type Credentials struct {
	Username string
	Password string
}

// ctxKey is the unexported context key type — prevents collisions with keys
// from other packages.
type ctxKey struct{}

// ContextWithCredentials returns a child context carrying creds.
func ContextWithCredentials(ctx context.Context, c Credentials) context.Context {
	return context.WithValue(ctx, ctxKey{}, c)
}

// CredentialsFromContext extracts credentials set by the auth middleware.
// Returns the zero Credentials when none are present.
func CredentialsFromContext(ctx context.Context) Credentials {
	c, _ := ctx.Value(ctxKey{}).(Credentials)
	return c
}

// Client talks to the rousette RESTCONF server.
type Client struct {
	baseURL    string
	httpClient *http.Client
}

// NewClient creates a RESTCONF client pointing at baseURL
// (e.g. "https://192.168.1.1/restconf" or "https://127.0.0.1/restconf").
// When insecureTLS is true, TLS certificate verification is disabled.
// NOTE(review): escapeZoneID is defined elsewhere in this package —
// presumably it percent-encodes IPv6 zone IDs ('%') in the URL; confirm.
func NewClient(baseURL string, insecureTLS bool) *Client {
	var tlsConfig *tls.Config
	if insecureTLS {
		tlsConfig = &tls.Config{InsecureSkipVerify: true}
	}
	return &Client{
		baseURL: strings.TrimRight(escapeZoneID(baseURL), "/"),
		httpClient: &http.Client{
			Timeout: 10 * time.Second,
			Transport: &http.Transport{
				TLSClientConfig: tlsConfig,
			},
		},
	}
}

// doRequest builds and executes an HTTP request against the RESTCONF server,
// setting Accept and Basic Auth from the context. The caller must close Body.
func (c *Client) doRequest(ctx context.Context, method, path string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, method, c.baseURL+path, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Accept", "application/yang-data+json")
	creds := CredentialsFromContext(ctx)
	req.SetBasicAuth(creds.Username, creds.Password)
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("restconf request failed: %w", err)
	}
	return resp, nil
}

// Get fetches a RESTCONF resource, decoding the JSON response into target.
// User credentials are taken from the request context (set by auth middleware).
func (c *Client) Get(ctx context.Context, path string, target any) error {
	resp, err := c.doRequest(ctx, http.MethodGet, path)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return parseError(resp)
	}
	return json.NewDecoder(resp.Body).Decode(target)
}

// Post sends a POST request to a RESTCONF RPC endpoint.
// Used for operations like system-restart that return no body.
func (c *Client) Post(ctx context.Context, path string) error {
	resp, err := c.doRequest(ctx, http.MethodPost, path)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return parseError(resp)
	}
	return nil
}

// PostJSON sends a POST request with a JSON body to a RESTCONF RPC endpoint.
// Used for RPCs that require input parameters (e.g. install-bundle).
func (c *Client) PostJSON(ctx context.Context, path string, body any) error {
	return c.writeJSON(ctx, http.MethodPost, path, body)
}

// Put replaces a RESTCONF config resource with the given value.
func (c *Client) Put(ctx context.Context, path string, body any) error {
	return c.writeJSON(ctx, http.MethodPut, path, body)
}

// Patch merges the given value into a RESTCONF config resource.
func (c *Client) Patch(ctx context.Context, path string, body any) error {
	return c.writeJSON(ctx, http.MethodPatch, path, body)
}

// Delete removes a RESTCONF config resource.
func (c *Client) Delete(ctx context.Context, path string) error {
	resp, err := c.doRequest(ctx, http.MethodDelete, path)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
		return parseError(resp)
	}
	return nil
}

// GetDatastore fetches the full contents of the named NMDA datastore as raw JSON.
+// datastore is one of "running", "candidate", "operational", "startup", "factory-default". +func (c *Client) GetDatastore(ctx context.Context, datastore string) (json.RawMessage, error) { + return c.GetRaw(ctx, "/ds/ietf-datastores:"+datastore) +} + +// PutDatastore replaces the named NMDA datastore with the given raw JSON. +func (c *Client) PutDatastore(ctx context.Context, datastore string, body json.RawMessage) error { + return c.writeJSON(ctx, http.MethodPut, "/ds/ietf-datastores:"+datastore, body) +} + +// CopyDatastore copies the full contents of src into dst. +// Mirrors the Infamy test framework's copy() operation. +func (c *Client) CopyDatastore(ctx context.Context, src, dst string) error { + data, err := c.GetDatastore(ctx, src) + if err != nil { + return fmt.Errorf("copy datastore: read %s: %w", src, err) + } + if err := c.PutDatastore(ctx, dst, data); err != nil { + return fmt.Errorf("copy datastore: write %s: %w", dst, err) + } + return nil +} + +// writeJSON encodes body as JSON and sends it with the given HTTP method. +func (c *Client) writeJSON(ctx context.Context, method, path string, body any) error { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(body); err != nil { + return fmt.Errorf("encoding request body: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, method, c.baseURL+path, &buf) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/yang-data+json") + req.Header.Set("Accept", "application/yang-data+json") + creds := CredentialsFromContext(ctx) + req.SetBasicAuth(creds.Username, creds.Password) + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("restconf request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusCreated && + resp.StatusCode != http.StatusNoContent { + return parseError(resp) + } + return nil +} + +// GetYANG downloads a YANG module file. 
YANG files are served outside the +// RESTCONF tree at /yang/{name}@{revision}.yang on the device host — mirroring +// the approach used by the Infamy test framework (yang_url = base_url + "/yang"). +func (c *Client) GetYANG(ctx context.Context, name, revision string) ([]byte, error) { + // Strip the /restconf suffix to get the host-level base URL. + yangBase := c.baseURL + if i := strings.LastIndex(yangBase, "/restconf"); i >= 0 { + yangBase = yangBase[:i] + } + + path := "/yang/" + name + if revision != "" { + path += "@" + revision + } + path += ".yang" + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, yangBase+path, nil) + if err != nil { + return nil, err + } + creds := CredentialsFromContext(ctx) + req.SetBasicAuth(creds.Username, creds.Password) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("restconf request failed: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, parseError(resp) + } + return io.ReadAll(resp.Body) +} + +// GetRaw fetches a RESTCONF resource and returns the raw JSON bytes. +func (c *Client) GetRaw(ctx context.Context, path string) ([]byte, error) { + resp, err := c.doRequest(ctx, http.MethodGet, path) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, parseError(resp) + } + return io.ReadAll(resp.Body) +} + +// CheckAuth verifies that the given credentials are accepted by rousette. +// It does a simple GET against /data/ietf-system:system with Basic Auth. 
+func (c *Client) CheckAuth(username, password string) error { + req, err := http.NewRequestWithContext( + context.Background(), + http.MethodGet, + c.baseURL+"/data/ietf-system:system", + nil, + ) + if err != nil { + return err + } + + req.Header.Set("Accept", "application/yang-data+json") + req.SetBasicAuth(username, password) + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("restconf request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden { + return &AuthError{Code: resp.StatusCode} + } + if resp.StatusCode != http.StatusOK { + return parseError(resp) + } + + ct := resp.Header.Get("Content-Type") + if !strings.Contains(ct, "yang-data+json") { + return fmt.Errorf("unexpected content-type from RESTCONF server: %q", ct) + } + + return nil +} + +// escapeZoneID replaces bare "%" in IPv6 zone IDs with "%25" so that +// Go's url.Parse doesn't reject them as invalid percent-encoding. +// e.g. "https://[ff02::1%qtap1]/restconf" → "https://[ff02::1%25qtap1]/restconf" +func escapeZoneID(rawURL string) string { + open := strings.Index(rawURL, "[") + close := strings.Index(rawURL, "]") + if open < 0 || close < 0 || close < open { + return rawURL + } + + host := rawURL[open:close] + if pct := strings.Index(host, "%"); pct >= 0 && !strings.HasPrefix(host[pct:], "%25") { + return rawURL[:open+pct] + "%25" + rawURL[open+pct+1:] + } + return rawURL +} diff --git a/src/webui/internal/restconf/errors.go b/src/webui/internal/restconf/errors.go new file mode 100644 index 000000000..2741524d0 --- /dev/null +++ b/src/webui/internal/restconf/errors.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT + +package restconf + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" +) + +// AuthError is returned when RESTCONF rejects credentials (401/403). 
+type AuthError struct { + Code int +} + +func (e *AuthError) Error() string { + return fmt.Sprintf("authentication failed (HTTP %d)", e.Code) +} + +// Error represents a RESTCONF error response. +type Error struct { + StatusCode int + Type string + Tag string + Message string +} + +func (e *Error) Error() string { + if e.Message != "" { + return fmt.Sprintf("restconf %d: %s", e.StatusCode, e.Message) + } + return fmt.Sprintf("restconf %d: %s", e.StatusCode, e.Tag) +} + +// parseError reads a RESTCONF error response body and returns an *Error. +func parseError(resp *http.Response) error { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 8192)) + + re := &Error{StatusCode: resp.StatusCode} + + // Try to parse the standard RESTCONF error envelope. + var envelope struct { + Errors struct { + Error []struct { + ErrorType string `json:"error-type"` + ErrorTag string `json:"error-tag"` + ErrorPath string `json:"error-path"` + ErrorMessage string `json:"error-message"` + ErrorInfo any `json:"error-info"` + } `json:"error"` + } `json:"ietf-restconf:errors"` + } + + if json.Unmarshal(body, &envelope) == nil && len(envelope.Errors.Error) > 0 { + var parts []string + for _, e := range envelope.Errors.Error { + msg := e.ErrorMessage + if msg == "" { + msg = e.ErrorTag + } + if e.ErrorPath != "" { + msg += " (path: " + e.ErrorPath + ")" + } + parts = append(parts, msg) + } + re.Type = envelope.Errors.Error[0].ErrorType + re.Tag = envelope.Errors.Error[0].ErrorTag + re.Message = strings.Join(parts, "; ") + } else { + re.Message = http.StatusText(resp.StatusCode) + } + + return re +} diff --git a/src/webui/internal/restconf/fetcher.go b/src/webui/internal/restconf/fetcher.go new file mode 100644 index 000000000..9c5f455d8 --- /dev/null +++ b/src/webui/internal/restconf/fetcher.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MIT + +package restconf + +import ( + "context" + "encoding/json" +) + +// Fetcher is the RESTCONF client interface used by handlers. 
+// *Client satisfies this interface; testutil.MockFetcher provides a test double. +type Fetcher interface { + Get(ctx context.Context, path string, target any) error + GetRaw(ctx context.Context, path string) ([]byte, error) + // GetYANG downloads a YANG module file from /yang/{name}@{revision}.yang + // on the device host (outside the /restconf tree). + GetYANG(ctx context.Context, name, revision string) ([]byte, error) + Post(ctx context.Context, path string) error + PostJSON(ctx context.Context, path string, body any) error + + Put(ctx context.Context, path string, body any) error + Patch(ctx context.Context, path string, body any) error + Delete(ctx context.Context, path string) error + + GetDatastore(ctx context.Context, datastore string) (json.RawMessage, error) + PutDatastore(ctx context.Context, datastore string, body json.RawMessage) error + CopyDatastore(ctx context.Context, src, dst string) error +} + +var _ Fetcher = (*Client)(nil) diff --git a/src/webui/internal/schema/fetch.go b/src/webui/internal/schema/fetch.go new file mode 100644 index 000000000..7f3cc1303 --- /dev/null +++ b/src/webui/internal/schema/fetch.go @@ -0,0 +1,125 @@ +package schema + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + + "github.com/kernelkit/webui/internal/restconf" +) + +// ModuleInfo identifies a YANG module by name and revision. +type ModuleInfo struct { + Name string + Revision string +} + +func (m ModuleInfo) filename() string { + if m.Revision == "" { + return m.Name + ".yang" + } + return m.Name + "@" + m.Revision + ".yang" +} + +// rfc7895ModulesState is the RFC 7895 /modules-state response structure. 
+type rfc7895ModulesState struct { + ModulesState struct { + Module []struct { + Name string `json:"name"` + Revision string `json:"revision"` + Submodule []struct { + Name string `json:"name"` + Revision string `json:"revision"` + } `json:"submodule"` + } `json:"module"` + } `json:"ietf-yang-library:modules-state"` +} + +// rfc8525YangLibrary is the RFC 8525 /yang-library response structure (fallback). +type rfc8525YangLibrary struct { + YangLibrary struct { + ModuleSet []struct { + Module []struct { + Name string `json:"name"` + Revision string `json:"revision"` + } `json:"module"` + } `json:"module-set"` + } `json:"ietf-yang-library:yang-library"` +} + +// FetchModules downloads any YANG files not already cached in cacheDir. +// It first tries the RFC 7895 modules-state endpoint, then falls back to +// the RFC 8525 yang-library endpoint (same as capabilities.go). +// Each module and its submodules are downloaded from /yang/{name}@{rev}.yang. +func FetchModules(ctx context.Context, rc restconf.Fetcher, cacheDir string) ([]ModuleInfo, error) { + if err := os.MkdirAll(cacheDir, 0750); err != nil { + return nil, fmt.Errorf("schema: create cache dir: %w", err) + } + + modules, err := listModules(ctx, rc) + if err != nil { + return nil, err + } + + var downloaded []ModuleInfo + for _, m := range modules { + if err := downloadIfMissing(ctx, rc, cacheDir, m); err != nil { + log.Printf("schema: skip %s: %v", m.filename(), err) + continue + } + downloaded = append(downloaded, m) + } + return downloaded, nil +} + +// listModules queries the device for the list of implemented YANG modules. +func listModules(ctx context.Context, rc restconf.Fetcher) ([]ModuleInfo, error) { + // Try RFC 7895 modules-state first. 
+ var ms rfc7895ModulesState + if err := rc.Get(ctx, "/data/ietf-yang-library:modules-state", &ms); err == nil { + var mods []ModuleInfo + for _, m := range ms.ModulesState.Module { + mods = append(mods, ModuleInfo{Name: m.Name, Revision: m.Revision}) + for _, sub := range m.Submodule { + mods = append(mods, ModuleInfo{Name: sub.Name, Revision: sub.Revision}) + } + } + if len(mods) > 0 { + return mods, nil + } + } + + // Fall back to RFC 8525 yang-library. + var yl rfc8525YangLibrary + if err := rc.Get(ctx, "/data/ietf-yang-library:yang-library", &yl); err != nil { + return nil, fmt.Errorf("schema: list modules: %w", err) + } + var mods []ModuleInfo + for _, ms := range yl.YangLibrary.ModuleSet { + for _, m := range ms.Module { + mods = append(mods, ModuleInfo{Name: m.Name, Revision: m.Revision}) + } + } + return mods, nil +} + +// downloadIfMissing fetches a single YANG file from the device if not cached. +func downloadIfMissing(ctx context.Context, rc restconf.Fetcher, cacheDir string, m ModuleInfo) error { + dest := filepath.Join(cacheDir, m.filename()) + if _, err := os.Stat(dest); err == nil { + return nil // already cached + } + + data, err := rc.GetYANG(ctx, m.Name, m.Revision) + if err != nil { + return fmt.Errorf("GET /yang/%s: %w", m.filename(), err) + } + + if err := os.WriteFile(dest, data, 0640); err != nil { + return fmt.Errorf("write %s: %w", dest, err) + } + log.Printf("schema: cached %s", m.filename()) + return nil +} diff --git a/src/webui/internal/schema/helpers.go b/src/webui/internal/schema/helpers.go new file mode 100644 index 000000000..94d1a85f4 --- /dev/null +++ b/src/webui/internal/schema/helpers.go @@ -0,0 +1,55 @@ +package schema + +import "strings" + +// IdentityOption is a schema-resolved identity/enum value for use in select dropdowns. +type IdentityOption struct { + Value string // full identity, e.g. "infix-system:clish" — submitted to RESTCONF + Label string // display label with module prefix stripped, e.g. 
"clish" + IsDefault bool // true if this matches the leaf's YANG default +} + +// StripModulePrefix strips the "module:" prefix from an identity or enum value. +func StripModulePrefix(v string) string { + if i := strings.LastIndex(v, ":"); i >= 0 { + return v[i+1:] + } + return v +} + +// OptionsFor returns IdentityOption entries for the identityref or enumeration +// leaf at path. Returns nil when schema is unavailable or the leaf has no options. +func OptionsFor(mgr *Manager, path string) []IdentityOption { + if mgr == nil { + return nil + } + node, err := mgr.NodeAt(path) + if err != nil || node == nil || node.Type == nil { + return nil + } + values := node.Type.Identities + if len(values) == 0 { + values = node.Type.Enums + } + opts := make([]IdentityOption, 0, len(values)) + for _, v := range values { + opts = append(opts, IdentityOption{ + Value: v, + Label: StripModulePrefix(v), + IsDefault: node.Default != "" && v == node.Default, + }) + } + return opts +} + +// DescriptionOf returns the YANG description for the leaf at path, or "". +func DescriptionOf(mgr *Manager, path string) string { + if mgr == nil { + return "" + } + node, err := mgr.NodeAt(path) + if err != nil || node == nil { + return "" + } + return node.Description +} diff --git a/src/webui/internal/schema/manager.go b/src/webui/internal/schema/manager.go new file mode 100644 index 000000000..936be428d --- /dev/null +++ b/src/webui/internal/schema/manager.go @@ -0,0 +1,822 @@ +package schema + +import ( + "fmt" + "log" + "os" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + + "github.com/openconfig/goyang/pkg/yang" +) + +// Manager holds a fully processed goyang module set and provides schema queries. +// All modules are loaded before Process() is called so that cross-module +// identityref resolution and augments work correctly. +type Manager struct { + ms *yang.Modules +} + +// Load parses all .yang files in yangDir and returns a Manager. 
+// Errors from Process() that are non-fatal (e.g. unresolved augments for +// modules that were not downloaded) are logged but do not abort loading. +func Load(yangDir string) (*Manager, error) { + ms := yang.NewModules() + ms.Path = []string{yangDir} + + entries, err := os.ReadDir(yangDir) + if err != nil { + return nil, fmt.Errorf("schema: read yang dir: %w", err) + } + + for _, e := range entries { + if e.IsDir() || !strings.HasSuffix(e.Name(), ".yang") { + continue + } + if err := ms.Read(filepath.Join(yangDir, e.Name())); err != nil { + // Non-fatal: goyang may still register the module partially. + log.Printf("schema: parse (non-fatal) %s: %v", e.Name(), err) + } + } + + errs := ms.Process() + for _, err := range errs { + // goyang v1.6.3 has known YANG 1.1 gaps (must/when substatements, + // duplicate augments). These are non-fatal; log at debug level. + log.Printf("schema: process (non-fatal): %v", err) + } + + return &Manager{ms: ms}, nil +} + +// Children returns the direct config-relevant child Nodes of the node at path. +// Use path "/" for the top-level module list. +// choice/case nodes are inlined transparently (their children are promoted). +func (m *Manager) Children(path string) ([]*Node, error) { + if path == "" || path == "/" { + return m.topLevelNodes(), nil + } + e, err := m.entryAt(path) + if err != nil { + return nil, err + } + if e.Dir == nil { + return nil, nil + } + return dirToNodes(e.Dir, path), nil +} + +// ChildrenAll is like Children but includes config:false nodes, making it +// suitable for the operational/status tree where state data reuses read-write +// schema nodes. Only RPCs, notifications, anydata/anyxml, and deprecated or +// obsolete nodes are excluded. 
+func (m *Manager) ChildrenAll(path string) ([]*Node, error) { + if path == "" || path == "/" { + return m.topLevelNodesAll(), nil + } + e, err := m.entryAt(path) + if err != nil { + return nil, err + } + if e.Dir == nil { + return nil, nil + } + return dirToNodesAll(e.Dir, path), nil +} + +// NodeAt returns a Node for the YANG schema node at path (without children). +func (m *Manager) NodeAt(path string) (*Node, error) { + if path == "" || path == "/" { + return &Node{Path: "/", Name: "", Kind: "container", Config: true}, nil + } + e, err := m.entryAt(path) + if err != nil { + return nil, err + } + return entryToNode(e, path), nil +} + +// IdentitiesOf returns the names of all identities derived (directly or +// transitively) from baseName. baseName may be "name" or "module:name". +// Searches across all loaded modules. +func (m *Manager) IdentitiesOf(baseName string) []string { + // Strip module prefix if present; search by identity name. + _, localName := splitPrefix(baseName) + + for _, mod := range m.ms.Modules { + for _, id := range mod.Identities() { + if id.Name == localName { + var names []string + for _, v := range id.Values { + names = append(names, v.Name) + } + sort.Strings(names) + return names + } + } + } + return nil +} + +// ResolveLeafref converts a YANG leafref path expression to an absolute schema +// path that can be fetched via RESTCONF. contextPath is the schema path of the +// leaf that holds the leafref (needed for relative `../` resolution). +// Returns empty string if the path cannot be resolved. +func (m *Manager) ResolveLeafref(leafrefPath, contextPath string) string { + if strings.HasPrefix(leafrefPath, "/") { + return normalizeLeafrefPath(leafrefPath) + } + // Relative path: resolve `../` against the context, stripping the leaf itself. 
+ parts := strings.Split(strings.TrimPrefix(contextPath, "/"), "/") + if len(parts) > 0 { + parts = parts[:len(parts)-1] // remove the leaf; start from its parent + } + for _, seg := range strings.Split(leafrefPath, "/") { + switch seg { + case "..": + if len(parts) > 0 { + parts = parts[:len(parts)-1] + } + case ".", "": + // skip + default: + parts = append(parts, stripPredicate(seg)) + } + } + if len(parts) == 0 { + return "" + } + return "/" + strings.Join(parts, "/") +} + +// normalizeLeafrefPath strips XPath predicates from each segment of an +// absolute YANG leafref path so it can be used as a schema lookup path. +func normalizeLeafrefPath(p string) string { + segs := strings.Split(strings.TrimPrefix(p, "/"), "/") + for i, s := range segs { + segs[i] = stripPredicate(s) + } + return "/" + strings.Join(segs, "/") +} + +// Default returns the YANG default value for the leaf at path, if any. +func (m *Manager) Default(path string) (string, bool) { + e, err := m.entryAt(path) + if err != nil { + return "", false + } + return e.SingleDefaultValue() +} + +// ModuleName returns the module name for the schema node at path. +func (m *Manager) ModuleName(path string) (string, error) { + e, err := m.entryAt(path) + if err != nil { + return "", err + } + return e.InstantiatingModule() +} + +// ModuleQualifiedName returns "module:name" for the schema node at path. +// This is the JSON object key used when PUT/PATCHing that node directly via +// RESTCONF (RFC 7951 §4 — namespace-qualified name at module boundaries). +func (m *Manager) ModuleQualifiedName(path string) (string, error) { + e, err := m.entryAt(path) + if err != nil { + return "", err + } + modName, err := e.InstantiatingModule() + if err != nil { + return "", err + } + return modName + ":" + e.Name, nil +} + +// internalOperationalTopNodes lists bare node names that must not appear at +// the top level of the operational/status tree. 
These are all transport- or +// server-monitoring nodes of no interest to end users. They are already +// absent from the configure tree because they are config:false; this list +// applies the same exclusion to topLevelNodesAll. +var internalOperationalTopNodes = map[string]bool{ + "netconf": true, // NETCONF session/lock monitoring + "netconf-state": true, // ietf-netconf-monitoring (RFC 6022) + "notification": true, // nc-notifications stream container + "restconf-state": true, // ietf-restconf-monitoring (RFC 8527) + "supported-algorithms": true, // SSH/TLS algorithm capability advertisement + "system-capabilities": true, // ietf-system-capabilities (RFC 9196) +} + +// internalModules is the deny-list of YANG modules that are infrastructure-only +// and must not appear in the user-facing configure tree. +// Mirrors sr_module_is_internal() from klish-plugin-sysrepo/src/pline.c. +// Note: ietf-netconf-acm is intentionally NOT listed here — Infix exposes NACM +// configuration to users. 
+var internalModules = map[string]bool{ + // libyang built-ins + "ietf-yang-metadata": true, + "yang": true, + "ietf-inet-types": true, + "ietf-yang-types": true, + // YANG library / schema mount + "ietf-datastores": true, + "ietf-yang-schema-mount": true, + "ietf-yang-library": true, + // NETCONF infrastructure + "ietf-netconf": true, + "ietf-netconf-with-defaults": true, + "ietf-origin": true, + "ietf-netconf-notifications": true, + // sysrepo internals + "sysrepo": true, + "sysrepo-monitoring": true, + "sysrepo-plugind": true, + // Infix test/debug subtree and meta-data — not user-facing + "infix-test": true, + "infix-meta": true, + // NETCONF server / transport infrastructure — managed by the system, not users + "ietf-netconf-server": true, + "libnetconf2-netconf-server": true, + "ietf-truststore": true, + // Notification subscriptions and filters — not user-configurable via WebUI + "ietf-subscribed-notifications": true, + // ACL, key-chains, network-instances — not exposed in WebUI yet + "ietf-access-control-list": true, + "ietf-key-chain": true, + "ietf-network-instance": true, +} + +// topLevelNodes returns a Node for each config-relevant top-level schema node +// across all loaded modules. Submodules, versioned aliases, internal modules, +// and config:false top-level nodes are all excluded. +func (m *Manager) topLevelNodes() []*Node { + var nodes []*Node + for key, mod := range m.ms.Modules { + if strings.Contains(key, "@") { + continue // versioned alias (e.g. 
"ieee802-dot1ab-lldp@2022-03-15") — duplicate + } + if mod.BelongsTo != nil { + continue // submodule — content appears under the parent module + } + if internalModules[key] { + continue + } + e := yang.ToEntry(mod) + if e == nil || e.Dir == nil { + continue + } + for _, child := range sortedEntries(e.Dir) { + if isNonConfigNode(child) { + continue + } + nodePath := "/" + key + ":" + child.Name + if isContainerList(child) { + if lc := listChildOf(child); lc != nil { + nodes = append(nodes, entryToNode(lc, nodePath+"/"+nodeSegment(lc, key))) + } + continue + } + nodes = append(nodes, entryToNode(child, nodePath)) + } + } + // Sort by node name (not module-qualified path) for a clean alphabetical list. + sort.Slice(nodes, func(i, j int) bool { return nodes[i].Name < nodes[j].Name }) + return nodes +} + +// entryAt resolves a RESTCONF-style path to a goyang Entry. +// Path format: /module:top-node/child[key='val']/grandchild +// Key predicates ([key='val']) are stripped — they carry instance identity, +// not schema identity. choice/case nodes are skipped during traversal. +func (m *Manager) entryAt(path string) (*yang.Entry, error) { + path = strings.TrimPrefix(path, "/") + parts := strings.SplitN(path, "/", 2) + + head := stripPredicate(parts[0]) + moduleName, nodeName := splitPrefix(head) + + if moduleName == "" { + return nil, fmt.Errorf("schema: path must start with module prefix: %s", path) + } + + mod, ok := m.ms.Modules[moduleName] + if !ok { + return nil, fmt.Errorf("schema: module not found: %s", moduleName) + } + + root := yang.ToEntry(mod) + if root == nil { + return nil, fmt.Errorf("schema: no entry for module: %s", moduleName) + } + + e := findInDir(root.Dir, nodeName) + if e == nil { + return nil, fmt.Errorf("schema: %s not found in %s", nodeName, moduleName) + } + + if len(parts) == 1 { + return e, nil + } + + // Traverse remaining path segments, stripping key predicates. 
+ for _, seg := range strings.Split(parts[1], "/") { + if seg == "" { + continue + } + _, localName := splitPrefix(stripPredicate(seg)) + child := findInDir(e.Dir, localName) + if child == nil { + return nil, fmt.Errorf("schema: %s not found under %s", localName, e.Name) + } + e = child + } + return e, nil +} + +// stripPredicate removes a key predicate from a path segment. +// Handles both RESTCONF ("interface=eth0" → "interface") and +// XPath ("interface[name='eth0']" → "interface") forms. +func stripPredicate(seg string) string { + if i := strings.IndexByte(seg, '['); i >= 0 { + return seg[:i] + } + if i := strings.IndexByte(seg, '='); i >= 0 { + return seg[:i] + } + return seg +} + +// findInDir looks up name in a dir map, transparently descending into +// choice/case nodes which are not part of the RESTCONF path. +func findInDir(dir map[string]*yang.Entry, name string) *yang.Entry { + if dir == nil { + return nil + } + // Direct match — but if it is a choice/case, descend into it because YANG + // commonly names a case identically to the leaf it contains (e.g. the + // ietf-system timezone-name case wrapping the timezone-name leaf). + if e, ok := dir[name]; ok { + if !e.IsChoice() && !e.IsCase() { + return e + } + if found := findInDir(e.Dir, name); found != nil { + return found + } + } + // Search inside all choice/case children. + for _, e := range dir { + if e.IsChoice() || e.IsCase() { + if found := findInDir(e.Dir, name); found != nil { + return found + } + } + } + return nil +} + +// isContainerList returns true for containers that wrap exactly one list and +// nothing else — the classic YANG container/list idiom (e.g. /interfaces wrapping +// /interfaces/interface). Mirrors klysc_is_container_list() in klish-plugin-sysrepo +// exactly: ALL direct children are inspected, including config:false ones. Any +// non-list child (a leaf, another container, …) prevents collapsing. 
This is why
+// "hardware" does not collapse — it has a config:false "last-change" leaf alongside
+// "component", so the default case fires and returns false.
+//
+// Hard exceptions:
+//   - "static-routes" always collapses (ietf-routing, asymmetric naming)
+//   - "mdb" never collapses
+//   - "ipv4", "ipv6" never collapse (would confuse routing subtrees)
+func isContainerList(e *yang.Entry) bool {
+	if !e.IsContainer() || e.Dir == nil {
+		return false
+	}
+	if e.Name == "mdb" || e.Name == "ipv4" || e.Name == "ipv6" {
+		return false
+	}
+	if e.Name == "static-routes" {
+		return true
+	}
+	listCount := 0
+	for _, child := range e.Dir {
+		if !child.IsList() {
+			return false // any non-list child (even config:false) prevents collapse
+		}
+		listCount++
+	}
+	return listCount == 1
+}
+
+// listChildOf returns the single config-relevant list child of a collapsible
+// container, or nil when no qualifying child exists (e.g. the lone list is
+// filtered out by isNonConfigNode). Callers must guard with isContainerList
+// first and nil-check the result — this function never panics.
+func listChildOf(e *yang.Entry) *yang.Entry {
+	for _, child := range e.Dir {
+		if child.IsList() && !isNonConfigNode(child) {
+			return child
+		}
+	}
+	return nil
+}
+
+// listChildOfAll is like listChildOf but uses the data-node filter (includes
+// config:false lists), used by dirToNodesAll / the operational status tree.
+// Like listChildOf, it returns nil when no qualifying list child exists.
+func listChildOfAll(e *yang.Entry) *yang.Entry {
+	for _, child := range e.Dir {
+		if child.IsList() && !isNonDataNode(child) {
+			return child
+		}
+	}
+	return nil
+}
+
+// topLevelNodesAll is like topLevelNodes but includes config:false top-level
+// nodes, for use by the operational status tree.
+func (m *Manager) topLevelNodesAll() []*Node { + var nodes []*Node + for key, mod := range m.ms.Modules { + if strings.Contains(key, "@") { + continue + } + if mod.BelongsTo != nil { + continue + } + if internalModules[key] { + continue + } + e := yang.ToEntry(mod) + if e == nil || e.Dir == nil { + continue + } + for _, child := range sortedEntries(e.Dir) { + if isNonDataNode(child) { + continue + } + if internalOperationalTopNodes[child.Name] { + continue + } + nodePath := "/" + key + ":" + child.Name + if isContainerList(child) { + if lc := listChildOfAll(child); lc != nil { + nodes = append(nodes, entryToNode(lc, nodePath+"/"+nodeSegment(lc, key))) + } + continue + } + nodes = append(nodes, entryToNode(child, nodePath)) + } + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i].Name < nodes[j].Name }) + return nodes +} + +// dirToNodesAll is like dirToNodes but uses isNonDataNode, including +// config:false leaves and containers in the result. +func dirToNodesAll(dir map[string]*yang.Entry, parentPath string) []*Node { + parentMod := extractModuleFromPath(parentPath) + var nodes []*Node + for _, e := range sortedEntries(dir) { + if e.IsChoice() || e.IsCase() { + caseWhen := extractWhen(e) + for _, child := range dirToNodesAll(e.Dir, parentPath) { + if caseWhen != "" && child.When == "" { + child.When = caseWhen + } + nodes = append(nodes, child) + } + continue + } + if isNonDataNode(e) { + continue + } + nodePath := parentPath + "/" + nodeSegment(e, parentMod) + if isContainerList(e) { + if lc := listChildOfAll(e); lc != nil { + nodes = append(nodes, entryToNode(lc, nodePath+"/"+nodeSegment(lc, parentMod))) + } + continue + } + nodes = append(nodes, entryToNode(e, nodePath)) + } + return nodes +} + +// dirToNodes converts a goyang Dir map to a sorted slice of Nodes. +// choice/case children are inlined (their contents promoted to this level). 
+// Collapsible container-list wrappers are transparent: the list child is surfaced +// directly under the parent with the full (un-collapsed) RESTCONF path. +// RPC, notification, anydata, anyxml and config:false nodes are excluded. +// +// RESTCONF requires module qualification ("module:name") whenever a node's +// module differs from its parent's — the common case being augmented nodes. +// e.g. infix-lldp augments ieee802-dot1ab-lldp:lldp, so the path is +// /ieee802-dot1ab-lldp:lldp/infix-lldp:enabled, not /…/enabled. +func dirToNodes(dir map[string]*yang.Entry, parentPath string) []*Node { + parentMod := extractModuleFromPath(parentPath) + var nodes []*Node + for _, e := range sortedEntries(dir) { + if e.IsChoice() || e.IsCase() { + // Extract when from the choice/case before inlining its children. + // Cases contributed by augments carry the augment's when (e.g. + // lag-port only for ethernetCsmacd, bridge-port only for bridge). + // Propagate that constraint to each promoted child that has no when of + // its own; if the child already has one, leave it alone. + caseWhen := extractWhen(e) + for _, child := range dirToNodes(e.Dir, parentPath) { + if caseWhen != "" && child.When == "" { + child.When = caseWhen + } + nodes = append(nodes, child) + } + continue + } + if isNonConfigNode(e) { + continue + } + nodePath := parentPath + "/" + nodeSegment(e, parentMod) + if isContainerList(e) { + if lc := listChildOf(e); lc != nil { + // Container is collapsed; qualify list child against parentMod. + nodes = append(nodes, entryToNode(lc, nodePath+"/"+nodeSegment(lc, parentMod))) + } + continue + } + nodes = append(nodes, entryToNode(e, nodePath)) + } + return nodes +} + +// nodeSegment returns the path segment for e, qualified as "module:name" when +// e's instantiating module differs from parentMod (RESTCONF RFC 8040 §3.5.3). 
+func nodeSegment(e *yang.Entry, parentMod string) string { + mod, err := e.InstantiatingModule() + if err != nil || mod == parentMod { + return e.Name + } + return mod + ":" + e.Name +} + +// extractModuleFromPath returns the module from the rightmost module-qualified +// segment in a RESTCONF path, e.g. "/ieee802-dot1ab-lldp:lldp/port" → "ieee802-dot1ab-lldp". +func extractModuleFromPath(path string) string { + segs := strings.Split(strings.TrimPrefix(path, "/"), "/") + for i := len(segs) - 1; i >= 0; i-- { + seg := stripPredicate(segs[i]) + if j := strings.IndexByte(seg, ':'); j > 0 { + return seg[:j] + } + } + return "" +} + +// isNonConfigNode returns true for schema nodes that do not belong in the +// configuration tree: RPCs, notifications, anydata/anyxml, config:false subtrees, +// and nodes with YANG status deprecated or obsolete. +// goyang propagates config:false from parent to children during Process(), so a +// single check at each level is sufficient to prune entire read-only subtrees. +func isNonConfigNode(e *yang.Entry) bool { + return e.RPC != nil || + e.Kind == yang.NotificationEntry || + e.Kind == yang.AnyDataEntry || + e.Kind == yang.AnyXMLEntry || + e.Config == yang.TSFalse || + isDeprecatedOrObsolete(e) +} + +// isNonDataNode is like isNonConfigNode but keeps config:false nodes. +// Used by ChildrenAll / the operational status tree which shows all data — +// operational state reuses read-write schema nodes alongside state-only leaves. +func isNonDataNode(e *yang.Entry) bool { + return e.RPC != nil || + e.Kind == yang.NotificationEntry || + e.Kind == yang.AnyDataEntry || + e.Kind == yang.AnyXMLEntry || + isDeprecatedOrObsolete(e) +} + +// entryPresence returns the YANG presence statement string for e, or "". +// Presence is stored on the underlying Container Node via reflection because +// yang.Entry does not expose it directly. 
+func entryPresence(e *yang.Entry) string { + if e.Node == nil { + return "" + } + v := reflect.ValueOf(e.Node) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + f := v.FieldByName("Presence") + if !f.IsValid() || f.Kind() != reflect.Ptr || f.IsNil() { + return "" + } + name := f.Elem().FieldByName("Name") + if !name.IsValid() { + return "" + } + return name.String() +} + +// isDeprecatedOrObsolete returns true when the YANG node carries +// "status deprecated" or "status obsolete". goyang does not expose status +// directly on Entry, so we reach through to the underlying Node via reflection. +// All concrete yang node types (Container, Leaf, List, …) have Status *Value. +func isDeprecatedOrObsolete(e *yang.Entry) bool { + if e.Node == nil { + return false + } + v := reflect.ValueOf(e.Node) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + f := v.FieldByName("Status") + if !f.IsValid() || f.Kind() != reflect.Ptr || f.IsNil() { + return false + } + name := f.Elem().FieldByName("Name") + if !name.IsValid() { + return false + } + s := name.String() + return s == "deprecated" || s == "obsolete" +} + +// entryToNode converts a goyang Entry to a schema Node (no children populated). +func entryToNode(e *yang.Entry, path string) *Node { + n := &Node{ + Path: path, + Name: e.Name, + Kind: entryKind(e), + Description: e.Description, + Config: e.Config != yang.TSFalse, + Mandatory: e.Mandatory == yang.TSTrue, + When: extractWhen(e), + Presence: entryPresence(e), + } + + if def, ok := e.SingleDefaultValue(); ok { + n.Default = def + } + + if strings.Contains(e.Key, " ") || e.Key != "" { + n.Keys = strings.Fields(e.Key) + } + + if e.IsLeaf() || e.IsLeafList() { + n.Type = yangTypeInfo(e) + } + + return n +} + +// prefixInXPath matches a "prefix:X" token where X is a letter or underscore, +// the first character of an identifier. The matched letter is included so +// ReplaceAllStringFunc can reattach it after resolving the prefix. 
var prefixInXPath = regexp.MustCompile(`[a-zA-Z][a-zA-Z0-9_\-]*:[a-zA-Z_]`)

// extractWhen returns the pre-resolved YANG when expression for e, or "".
// It first checks e itself (when directly on the node), then checks parent
// augments — the Infix convention is to put when on the augment, not on the
// top-level container inside the augment.
func extractWhen(e *yang.Entry) string {
	if xpath, ok := e.GetWhenXPath(); ok && xpath != "" {
		return resolveWhenPrefixes(e.Node, xpath)
	}
	// Check parent's augment list: the augment may carry the when expression
	// even though the individual container inside it does not.
	if e.Parent == nil {
		return ""
	}
	for _, aug := range e.Parent.Augmented {
		// Only augments that actually contribute this entry are considered;
		// an augment containing e.Name but carrying no when is skipped, so a
		// later matching augment with a when can still win.
		if _, found := aug.Dir[e.Name]; !found {
			continue
		}
		if xpath, ok := aug.GetWhenXPath(); ok && xpath != "" {
			return resolveWhenPrefixes(aug.Node, xpath)
		}
	}
	return ""
}

// resolveWhenPrefixes replaces "prefix:x" tokens in an XPath expression with
// the canonical "module:x" form using FindModuleByPrefix on the given node.
// Unknown prefixes (not imported by the node's module) are left unchanged.
func resolveWhenPrefixes(node yang.Node, xpath string) string {
	return prefixInXPath.ReplaceAllStringFunc(xpath, func(m string) string {
		// The regex guarantees exactly one ':' with a prefix before it.
		colon := strings.IndexByte(m, ':')
		prefix := m[:colon]
		rest := m[colon+1:] // the single identifier-start character
		mod := yang.FindModuleByPrefix(node, prefix)
		if mod == nil {
			return m
		}
		modName := mod.Name
		// A submodule's prefix must resolve to its parent module's name.
		if mod.BelongsTo != nil {
			modName = mod.BelongsTo.Name
		}
		return modName + ":" + rest
	})
}

// entryKind maps a goyang Entry to a kind string.
+func entryKind(e *yang.Entry) string { + switch { + case e.IsLeaf(): + return "leaf" + case e.IsLeafList(): + return "leaf-list" + case e.IsList(): + return "list" + case e.IsContainer(): + return "container" + case e.IsChoice(): + return "choice" + case e.IsCase(): + return "case" + case e.Kind == yang.AnyDataEntry: + return "anydata" + case e.Kind == yang.AnyXMLEntry: + return "anyxml" + case e.RPC != nil: + return "rpc" + default: + return "unknown" + } +} + +// yangTypeInfo builds a TypeInfo from a leaf's YangType. +func yangTypeInfo(e *yang.Entry) *TypeInfo { + t := e.Type + if t == nil { + return nil + } + + info := &TypeInfo{ + Kind: yang.TypeKindToName[t.Kind], + } + + switch t.Kind { + case yang.Yenum: + if t.Enum != nil { + info.Enums = t.Enum.Names() + sort.Strings(info.Enums) + } + case yang.Yidentityref: + if t.IdentityBase != nil { + for _, v := range t.IdentityBase.Values { + name := v.Name + if root := yang.RootNode(v); root != nil { + modName := root.Name + if root.Kind() == "submodule" && root.BelongsTo != nil { + modName = root.BelongsTo.Name + } + name = modName + ":" + v.Name + } + info.Identities = append(info.Identities, name) + } + sort.Strings(info.Identities) + } + case yang.Yleafref: + info.Leafref = t.Path + case yang.Yunion: + for _, sub := range t.Type { + if sub.Kind == yang.Yenum && sub.Enum != nil { + info.Enums = append(info.Enums, sub.Enum.Names()...) + } + } + if len(info.Enums) > 0 { + sort.Strings(info.Enums) + } + } + + if len(t.Pattern) > 0 { + info.Pattern = strings.Join(t.Pattern, "|") + } + + if len(t.Range) > 0 { + info.Range = t.Range.String() + } + + return info +} + +// splitPrefix splits "module:name" into ("module", "name"). +// If there is no prefix, it returns ("", name). +func splitPrefix(s string) (prefix, name string) { + if i := strings.Index(s, ":"); i >= 0 { + return s[:i], s[i+1:] + } + return "", s +} + +// sortedEntries returns the entries in a Dir map sorted by name. 
func sortedEntries(dir map[string]*yang.Entry) []*yang.Entry {
	// Sort the keys first: Go map iteration order is randomized, and a
	// deterministic order keeps generated paths and UI listings stable.
	keys := make([]string, 0, len(dir))
	for k := range dir {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	result := make([]*yang.Entry, 0, len(keys))
	for _, k := range keys {
		result = append(result, dir[k])
	}
	return result
}
diff --git a/src/webui/internal/schema/refresh.go b/src/webui/internal/schema/refresh.go
new file mode 100644
index 000000000..f9335d1b1
--- /dev/null
+++ b/src/webui/internal/schema/refresh.go
@@ -0,0 +1,126 @@
package schema

import (
	"context"
	"fmt"
	"log"
	"os"
	"sync"

	"github.com/kernelkit/webui/internal/restconf"
)

// Cache holds a lazily-loaded schema Manager and refreshes it at startup.
// All methods are safe for concurrent use.
type Cache struct {
	mu      sync.RWMutex
	manager *Manager
	syncing bool             // guarded by mu
	dir     string           // on-disk YANG module cache directory
	rc      restconf.Fetcher // used by Refresh to fetch modules from the device
}

// NewCache creates a Cache.
// Call LoadFromCacheBackground at startup, then RefreshBackground after login.
func NewCache(rc restconf.Fetcher, dir string) *Cache {
	return &Cache{rc: rc, dir: dir}
}

// LoadFromCache parses whatever .yang files are already in the cache
// directory. It makes no HTTP requests and needs no credentials.
// This is fast — suitable for server startup. If the directory is empty
// or has too few files to form a useful schema, the Manager is left nil.
+func (c *Cache) LoadFromCache() error { + entries, err := os.ReadDir(c.dir) + if err != nil { + if os.IsNotExist(err) { + return nil // nothing cached yet — that is fine + } + return err + } + var count int + for _, e := range entries { + if !e.IsDir() && len(e.Name()) > 5 { // len(".yang") == 5 + if e.Name()[len(e.Name())-5:] == ".yang" { + count++ + } + } + } + if count == 0 { + return nil // empty cache — wait for first Refresh + } + + mgr, err := Load(c.dir) + if err != nil { + return fmt.Errorf("schema: load from cache: %w", err) + } + c.mu.Lock() + c.manager = mgr + c.mu.Unlock() + log.Printf("schema: loaded %d cached YANG file(s) from %s", count, c.dir) + return nil +} + +// LoadFromCacheBackground calls LoadFromCache in a goroutine. Errors are logged. +func (c *Cache) LoadFromCacheBackground() { + go func() { + if err := c.LoadFromCache(); err != nil { + log.Printf("schema: cache load failed: %v", err) + } + }() +} + +// Refresh downloads any missing YANG files from the device (credentials must +// be present in ctx) and then reloads the schema Manager. +// Only one refresh runs at a time; concurrent calls return immediately. +func (c *Cache) Refresh(ctx context.Context) error { + c.mu.Lock() + if c.syncing { + c.mu.Unlock() + return nil + } + c.syncing = true + c.mu.Unlock() + + defer func() { + c.mu.Lock() + c.syncing = false + c.mu.Unlock() + }() + + if _, err := FetchModules(ctx, c.rc, c.dir); err != nil { + return fmt.Errorf("schema refresh: fetch: %w", err) + } + + mgr, err := Load(c.dir) + if err != nil { + return fmt.Errorf("schema refresh: load: %w", err) + } + + c.mu.Lock() + c.manager = mgr + c.mu.Unlock() + + log.Printf("schema: refreshed successfully from %s", c.dir) + return nil +} + +// RefreshBackground calls Refresh in a goroutine. Errors are logged. +// The context's values (credentials) are preserved but its cancellation is +// detached so the goroutine is not killed when the originating HTTP request +// completes. 
func (c *Cache) RefreshBackground(ctx context.Context) {
	// WithoutCancel keeps ctx's values (the RESTCONF credentials) while
	// detaching its cancellation from the originating HTTP request.
	detached := context.WithoutCancel(ctx)
	go func() {
		if err := c.Refresh(detached); err != nil {
			log.Printf("schema: background refresh failed: %v", err)
		}
	}()
}

// Manager returns the current Manager, or nil if not yet loaded.
// Callers must handle the nil case (schema not yet fetched/parsed).
func (c *Cache) Manager() *Manager {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.manager
}
diff --git a/src/webui/internal/schema/types.go b/src/webui/internal/schema/types.go
new file mode 100644
index 000000000..53d719d78
--- /dev/null
+++ b/src/webui/internal/schema/types.go
@@ -0,0 +1,32 @@
package schema

// Node is a JSON-serialisable representation of a single YANG schema node.
// Children are omitted (nil) by default; use Manager.Children to lazy-load them.
type Node struct {
	Path        string    `json:"path"`
	Name        string    `json:"name"`
	Kind        string    `json:"kind"` // container|list|leaf|leaf-list|choice|case|rpc|notification
	Description string    `json:"description,omitempty"`
	Keys        []string  `json:"keys,omitempty"`
	Children    []*Node   `json:"children,omitempty"`
	Config      bool      `json:"config"`
	Mandatory   bool      `json:"mandatory"`
	Default     string    `json:"default,omitempty"`
	Type        *TypeInfo `json:"type,omitempty"`
	// When holds the pre-resolved YANG when expression (prefix aliases replaced
	// by canonical module names). Empty when there is no constraint.
	When string `json:"when,omitempty"`
	// Presence is non-empty for presence containers; its value is the YANG
	// presence statement string (describes what the container's existence means).
	Presence string `json:"presence,omitempty"`
}

// TypeInfo describes the type of a leaf or leaf-list node.
type TypeInfo struct {
	Kind       string   `json:"kind"`                 // string|boolean|int8..uint64|enumeration|identityref|leafref|binary|empty|...
	Enums      []string `json:"enums,omitempty"`      // enumeration values
	Identities []string `json:"identities,omitempty"` // identityref derived identity names
	Range      string   `json:"range,omitempty"`
	Pattern    string   `json:"pattern,omitempty"`
	Leafref    string   `json:"leafref,omitempty"` // target path for leafref
}
diff --git a/src/webui/internal/schema/when.go b/src/webui/internal/schema/when.go
new file mode 100644
index 000000000..4821db629
--- /dev/null
+++ b/src/webui/internal/schema/when.go
@@ -0,0 +1,155 @@
// SPDX-License-Identifier: MIT

package schema

import "strings"

// EvaluateWhen evaluates a pre-resolved YANG when XPath expression against
// the flat key→value map of the context node's parent data.
//
// Handles the practical subset used in Infix YANG models:
// - derived-from-or-self(path, 'module:identity')
// - derived-from(path, 'module:identity')
// - combinations joined with 'or' / 'and'
//
// Conservative: any unrecognised expression or absent data returns true (show).
+func EvaluateWhen(mgr *Manager, expr string, values map[string]string) bool { + if expr == "" || values == nil { + return true + } + return evalOr(mgr, strings.TrimSpace(expr), values) +} + +func evalOr(mgr *Manager, expr string, values map[string]string) bool { + parts := splitOnKeyword(expr, "or") + for _, p := range parts { + if evalAnd(mgr, strings.TrimSpace(p), values) { + return true + } + } + return false +} + +func evalAnd(mgr *Manager, expr string, values map[string]string) bool { + parts := splitOnKeyword(expr, "and") + for _, p := range parts { + if !evalAtom(mgr, strings.TrimSpace(p), values) { + return false + } + } + return true +} + +func evalAtom(mgr *Manager, expr string, values map[string]string) bool { + if expr == "" { + return true // empty: conservative + } + if strings.HasPrefix(expr, "derived-from-or-self(") && strings.HasSuffix(expr, ")") { + inner := expr[len("derived-from-or-self(") : len(expr)-1] + return evalDerivedFrom(mgr, inner, values, true) + } + if strings.HasPrefix(expr, "derived-from(") && strings.HasSuffix(expr, ")") { + inner := expr[len("derived-from(") : len(expr)-1] + return evalDerivedFrom(mgr, inner, values, false) + } + return true // unknown expression: conservative show +} + +func evalDerivedFrom(mgr *Manager, inner string, values map[string]string, orSelf bool) bool { + comma := strings.Index(inner, ",") + if comma < 0 { + return true // malformed: conservative + } + xpathPath := strings.TrimSpace(inner[:comma]) + identity := strings.Trim(strings.TrimSpace(inner[comma+1:]), "'\"") + + leafName := xpathLeafName(xpathPath) + if leafName == "" { + return true // unresolvable path: conservative + } + + current := values[leafName] + if current == "" { + return true // no data: conservative show + } + return checkIdentity(mgr, current, identity, orSelf) +} + +// xpathLeafName extracts the bare leaf name from a simple XPath step. 
+// - "module:name" or "name" → "name" +// - "../module:name" → "name" (single parent step) +// - "../../../…" or path with "/" → "" (multi-level: conservative) +func xpathLeafName(path string) string { + if strings.HasPrefix(path, "../") { + path = path[3:] + // After one parent step, path must be a bare leaf name. + if strings.Contains(path, "/") || strings.HasPrefix(path, "..") { + return "" + } + } else if strings.Contains(path, "/") { + return "" // forward traversal: conservative + } + _, local := splitPrefix(strings.TrimSpace(path)) + return local +} + +// checkIdentity reports whether currentValue is equal to or derived from +// targetIdentity according to the YANG identity hierarchy. +func checkIdentity(mgr *Manager, currentValue, targetIdentity string, orSelf bool) bool { + _, currentLocal := splitPrefix(currentValue) + _, targetLocal := splitPrefix(targetIdentity) + + if orSelf && currentLocal == targetLocal { + return true + } + for _, d := range mgr.IdentitiesOf(targetLocal) { + if d == currentLocal { + return true + } + } + return false +} + +// splitOnKeyword splits expr on the XPath keyword kw at parenthesis depth 0, +// but NOT when kw is part of a compound name (e.g. "derived-from-or-self" +// contains "or"; "derived-from-and-…" would contain "and"). +// A keyword match is suppressed when immediately preceded by a hyphen. +// +// The Infix YANG files contain a concatenation bug where the 'or' keyword is +// immediately followed by the next function name without a separating space +// (e.g. "…') orderived-from-or-self(…)"). This splitter handles that case +// by treating any 'or'/'and' that is not preceded by '-' as a keyword. 
+func splitOnKeyword(expr, kw string) []string { + n := len(kw) + var parts []string + depth := 0 + start := 0 + for i := 0; i < len(expr); i++ { + switch expr[i] { + case '(': + depth++ + case ')': + depth-- + } + if depth != 0 || i+n > len(expr) || expr[i:i+n] != kw { + continue + } + // Suppress if immediately preceded by '-' (part of compound name). + if i > 0 && expr[i-1] == '-' { + continue + } + // Found keyword: save part, trim trailing space before keyword. + end := i + if end > start && expr[end-1] == ' ' { + end-- + } + parts = append(parts, strings.TrimSpace(expr[start:end])) + start = i + n + if start < len(expr) && expr[start] == ' ' { + start++ + } + i = start - 1 // loop will increment + } + parts = append(parts, strings.TrimSpace(expr[start:])) + return parts +} diff --git a/src/webui/internal/schema/when_test.go b/src/webui/internal/schema/when_test.go new file mode 100644 index 000000000..72042bb67 --- /dev/null +++ b/src/webui/internal/schema/when_test.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: MIT + +package schema + +import ( + "reflect" + "testing" +) + +func TestSplitOnKeyword(t *testing.T) { + tests := []struct { + name string + expr string + kw string + want []string + }{ + { + name: "single_term_no_split", + expr: "derived-from-or-self(if:type, 'gre')", + kw: "or", + want: []string{"derived-from-or-self(if:type, 'gre')"}, + }, + { + name: "two_terms_with_space", + expr: "derived-from-or-self(if:type, 'gre') or derived-from-or-self(if:type, 'gretap')", + kw: "or", + want: []string{ + "derived-from-or-self(if:type, 'gre')", + "derived-from-or-self(if:type, 'gretap')", + }, + }, + { + name: "two_terms_no_space_after_or", // Infix YANG concatenation bug + expr: "derived-from-or-self(if:type, 'gre') orderived-from-or-self(if:type, 'gretap')", + kw: "or", + want: []string{ + "derived-from-or-self(if:type, 'gre')", + "derived-from-or-self(if:type, 'gretap')", + }, + }, + { + name: "or_inside_parens_not_split", + expr: 
"derived-from-or-self(if:type, 'foo or bar')", + kw: "or", + want: []string{"derived-from-or-self(if:type, 'foo or bar')"}, + }, + { + name: "and_keyword", + expr: "cond-a and cond-b", + kw: "and", + want: []string{"cond-a", "cond-b"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := splitOnKeyword(tt.expr, tt.kw) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("splitOnKeyword(%q, %q) = %v; want %v", tt.expr, tt.kw, got, tt.want) + } + }) + } +} + +func TestXpathLeafName(t *testing.T) { + tests := []struct { + path string + want string + }{ + {"if:type", "type"}, + {"ietf-interfaces:type", "type"}, + {"type", "type"}, + {"../iehw:class", "class"}, + {"../infix-hardware:class", "class"}, + {"../../something", ""}, + {"../../../rt:address-family", ""}, + {"a/b", ""}, + {"/abs/path", ""}, + } + for _, tt := range tests { + got := xpathLeafName(tt.path) + if got != tt.want { + t.Errorf("xpathLeafName(%q) = %q; want %q", tt.path, got, tt.want) + } + } +} + +func TestEvaluateWhenConservative(t *testing.T) { + // nil values → conservative true + if !EvaluateWhen(nil, "derived-from-or-self(if:type, 'foo')", nil) { + t.Error("nil values should be conservative true") + } + // empty expr → true + if !EvaluateWhen(nil, "", map[string]string{"type": "bar"}) { + t.Error("empty expr should be conservative true") + } + // nil manager with unknown expr → true + if !EvaluateWhen(nil, "unknown-function(x)", map[string]string{"x": "y"}) { + t.Error("unknown expr should be conservative true") + } +} + +func TestEvaluateWhenDerivedFromOrSelf(t *testing.T) { + // Minimal mock manager: IdentitiesOf("gre") → ["gretap"] + mgr := &Manager{ms: nil} // ms is nil, but IdentitiesOf will handle it gracefully + + // We need a real identity hierarchy. Use a Manager with minimal modules. + // Since setting up goyang modules is complex, test via checkIdentity directly. 
+ tests := []struct { + name string + current string + target string + orSelf bool + derived []string // what IdentitiesOf would return; we test checkIdentity + want bool + }{ + {"or-self exact match", "gre", "gre", true, nil, true}, + {"or-self different", "bridge", "gre", true, nil, false}, + {"derived match", "gretap", "gre", false, []string{"gretap"}, true}, + {"derived no match", "bridge", "gre", false, []string{"gretap"}, false}, + {"module-qualified current", "infix-if-type:gre", "gre", true, nil, true}, + {"module-qualified target", "gre", "infix-if-type:gre", true, nil, true}, + {"both qualified exact", "infix-if-type:gre", "infix-if-type:gre", true, nil, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Override IdentitiesOf by testing checkIdentity directly with a mock. + // Build a mini-mgr that returns tt.derived for any base name. + got := checkIdentityTest(tt.current, tt.target, tt.orSelf, tt.derived) + if got != tt.want { + t.Errorf("checkIdentity(%q, %q, orSelf=%v) = %v; want %v", + tt.current, tt.target, tt.orSelf, got, tt.want) + } + }) + } + _ = mgr +} + +// checkIdentityTest is a test helper that bypasses the Manager.IdentitiesOf call. 
+func checkIdentityTest(currentValue, targetIdentity string, orSelf bool, derivedNames []string) bool { + _, currentLocal := splitPrefix(currentValue) + _, targetLocal := splitPrefix(targetIdentity) + if orSelf && currentLocal == targetLocal { + return true + } + for _, d := range derivedNames { + if d == currentLocal { + return true + } + } + return false +} diff --git a/src/webui/internal/security/csrf.go b/src/webui/internal/security/csrf.go new file mode 100644 index 000000000..91a0232ab --- /dev/null +++ b/src/webui/internal/security/csrf.go @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: MIT + +package security + +import ( + "context" + "crypto/rand" + "encoding/base64" + "net/http" + "strings" +) + +const csrfCookieName = "csrf" + +type csrfKey struct{} + +// EnsureToken sets a CSRF cookie if missing (or preferred is provided) +// and returns the current token. +func EnsureToken(w http.ResponseWriter, r *http.Request, preferred string) string { + if token := strings.TrimSpace(preferred); token != "" { + http.SetCookie(w, &http.Cookie{ + Name: csrfCookieName, + Value: token, + Path: "/", + HttpOnly: true, + Secure: IsSecureRequest(r), + SameSite: http.SameSiteStrictMode, + }) + return token + } + + if c, err := r.Cookie(csrfCookieName); err == nil { + if token := strings.TrimSpace(c.Value); validToken(token) { + return token + } + } + + token := randomToken() + http.SetCookie(w, &http.Cookie{ + Name: csrfCookieName, + Value: token, + Path: "/", + HttpOnly: true, + Secure: IsSecureRequest(r), + SameSite: http.SameSiteStrictMode, + }) + return token +} + +// WithToken stores the token in the request context. +func WithToken(ctx context.Context, token string) context.Context { + return context.WithValue(ctx, csrfKey{}, token) +} + +// TokenFromContext returns the CSRF token from context, if set. 
func TokenFromContext(ctx context.Context) string {
	if v, ok := ctx.Value(csrfKey{}).(string); ok {
		return v
	}
	return ""
}

// randomToken returns 32 CSPRNG bytes, base64url-encoded without padding.
// NOTE(review): on crypto/rand failure this returns "" — the caller then sets
// an empty cookie and validCSRF later rejects every state-changing request;
// consider failing loudly instead. TODO confirm desired policy.
func randomToken() string {
	var b [32]byte
	if _, err := rand.Read(b[:]); err != nil {
		return ""
	}
	return base64.RawURLEncoding.EncodeToString(b[:])
}

// validToken reports whether token is non-empty, well-formed base64url.
func validToken(token string) bool {
	if token == "" {
		return false
	}
	_, err := base64.RawURLEncoding.DecodeString(token)
	return err == nil
}

// ClearToken removes the CSRF cookie.
func ClearToken(w http.ResponseWriter, r *http.Request) {
	// MaxAge=-1 tells the browser to delete the cookie immediately.
	http.SetCookie(w, &http.Cookie{
		Name:     csrfCookieName,
		Value:    "",
		Path:     "/",
		MaxAge:   -1,
		HttpOnly: true,
		Secure:   IsSecureRequest(r),
		SameSite: http.SameSiteStrictMode,
	})
}

// IsSecureRequest returns true for TLS or proxy-terminated HTTPS.
// Only the first (client-most) X-Forwarded-Proto entry is consulted.
func IsSecureRequest(r *http.Request) bool {
	if r.TLS != nil {
		return true
	}
	if xf := r.Header.Get("X-Forwarded-Proto"); xf != "" {
		parts := strings.Split(xf, ",")
		return strings.EqualFold(strings.TrimSpace(parts[0]), "https")
	}
	return false
}
diff --git a/src/webui/internal/server/middleware.go b/src/webui/internal/server/middleware.go
new file mode 100644
index 000000000..31dff8e44
--- /dev/null
+++ b/src/webui/internal/server/middleware.go
@@ -0,0 +1,176 @@
// SPDX-License-Identifier: MIT

package server

import (
	"net/http"
	"net/url"
	"strings"

	"github.com/kernelkit/webui/internal/auth"
	"github.com/kernelkit/webui/internal/handlers"
	"github.com/kernelkit/webui/internal/restconf"
	"github.com/kernelkit/webui/internal/security"
)

const cookieName = "session"

// authMiddleware checks the session cookie on every request, looks up
// the session, and attaches decrypted credentials to the context.
// Unauthenticated requests are redirected to /login (or get a 401 if
// the request comes from HTMX).
func authMiddleware(store *auth.SessionStore, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Public routes (/login, /assets/*) bypass authentication entirely.
		if isPublicPath(r.URL.Path) {
			next.ServeHTTP(w, r)
			return
		}

		cookie, err := r.Cookie(cookieName)
		if err != nil {
			deny(w, r)
			return
		}

		username, password, csrf, features, ok := store.Lookup(cookie.Value)
		if !ok {
			deny(w, r)
			return
		}

		// Sliding window: re-issue the cookie with a fresh timestamp.
		// Skip for background polling endpoints so they don't keep
		// the session alive indefinitely.
		if !isPollingPath(r.URL.Path) {
			// Re-issue failure is deliberately non-fatal: the current
			// cookie stays valid until its original expiry.
			if fresh, err := store.CreateWithCSRF(username, password, csrf, features); err == nil {
				http.SetCookie(w, &http.Cookie{
					Name:     cookieName,
					Value:    fresh,
					Path:     "/",
					HttpOnly: true,
					Secure:   security.IsSecureRequest(r),
					SameSite: http.SameSiteLaxMode,
				})
			}
		}

		// Downstream handlers read credentials, CSRF token and feature
		// capabilities from the request context.
		ctx := restconf.ContextWithCredentials(r.Context(), restconf.Credentials{
			Username: username,
			Password: password,
		})
		ctx = security.WithToken(ctx, csrf)
		ctx = handlers.ContextWithCapabilities(ctx, handlers.NewCapabilities(features))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

// csrfMiddleware guarantees a CSRF token exists on every request and enforces
// same-origin + token checks on all state-changing (non-safe) methods.
func csrfMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		token := security.TokenFromContext(r.Context())
		token = security.EnsureToken(w, r, token)
		r = r.WithContext(security.WithToken(r.Context(), token))

		// Safe methods (no server-side state change) skip CSRF enforcement.
		switch r.Method {
		case http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodTrace:
			next.ServeHTTP(w, r)
			return
		}

		if !sameOrigin(r) {
			http.Error(w, "Forbidden", http.StatusForbidden)
			return
		}

		if !validCSRF(r, token) {
			http.Error(w, "Forbidden", http.StatusForbidden)
			return
		}

		next.ServeHTTP(w, r)
	})
}

// sameOrigin checks the Origin (preferred) or Referer host against the
// effective request host; requests carrying neither header are allowed.
func sameOrigin(r *http.Request) bool {
	// Effective host: first X-Forwarded-Host entry when behind a proxy,
	// otherwise the Host header as seen by this server.
	host := r.Host
	if xf := r.Header.Get("X-Forwarded-Host"); xf != "" {
		parts := strings.Split(xf, ",")
		host =
strings.TrimSpace(parts[0])
	}
	if host == "" {
		return false
	}

	// Prefer Origin (sent on cross-origin and most modern same-origin POSTs);
	// fall back to Referer for older clients.
	origin := r.Header.Get("Origin")
	if origin != "" {
		u, err := url.Parse(origin)
		if err != nil {
			return false
		}
		return strings.EqualFold(u.Host, host)
	}

	ref := r.Header.Get("Referer")
	if ref != "" {
		u, err := url.Parse(ref)
		if err != nil {
			return false
		}
		return strings.EqualFold(u.Host, host)
	}

	// Neither header present (e.g. non-browser client): allow; the CSRF
	// token check still applies to state-changing requests.
	return true
}

// validCSRF accepts the token from the X-CSRF-Token header (HTMX) or,
// failing that, the "csrf" form field, compared against the session token.
func validCSRF(r *http.Request, token string) bool {
	if token == "" {
		return false
	}
	if hdr := r.Header.Get("X-CSRF-Token"); hdr != "" {
		return subtleConstantTimeEquals(hdr, token)
	}
	if err := r.ParseForm(); err != nil {
		return false
	}
	return subtleConstantTimeEquals(r.FormValue("csrf"), token)
}

// subtleConstantTimeEquals compares a and b without early exit on the first
// differing byte. NOTE(review): the length check does return early, leaking
// length — harmless for fixed-length tokens; crypto/subtle.ConstantTimeCompare
// is the stock alternative (would need an import here).
func subtleConstantTimeEquals(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	var diff byte
	for i := 0; i < len(a); i++ {
		diff |= a[i] ^ b[i]
	}
	return diff == 0
}

// securityHeadersMiddleware attaches the standard hardening headers (CSP,
// frame denial, sniffing protection, …) to every response.
func securityHeadersMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Content-Type-Options", "nosniff")
		w.Header().Set("X-Frame-Options", "DENY")
		w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
		w.Header().Set("Permissions-Policy", "geolocation=(), microphone=(), camera=()")
		w.Header().Set("Content-Security-Policy", "default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; font-src 'self'; connect-src 'self'; object-src 'none'; base-uri 'self'; frame-ancestors 'none'; form-action 'self'")
		// HSTS only over HTTPS: sending it on plain HTTP is meaningless.
		if security.IsSecureRequest(r) {
			w.Header().Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains")
		}
		next.ServeHTTP(w, r)
	})
}

// isPublicPath lists routes reachable without a session.
func isPublicPath(path string) bool {
	return path == "/login" || strings.HasPrefix(path, "/assets/")
}

// isPollingPath marks endpoints polled in the background; hits on these do
// not refresh the session's sliding expiry window (see authMiddleware).
func isPollingPath(path string) bool {
	return path == "/device-status" || strings.HasSuffix(path, "/counters")
}

// deny rejects an unauthenticated request: 401 for HTMX partials (the client
// script handles it), 303 redirect to /login for full page loads.
func deny(w
http.ResponseWriter, r *http.Request) { + if r.Header.Get("HX-Request") == "true" { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + http.Redirect(w, r, "/login", http.StatusSeeOther) +} diff --git a/src/webui/internal/server/server.go b/src/webui/internal/server/server.go new file mode 100644 index 000000000..f48128bab --- /dev/null +++ b/src/webui/internal/server/server.go @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: MIT + +package server + +import ( + "context" + "html/template" + "io/fs" + "net/http" + + "github.com/kernelkit/webui/internal/auth" + "github.com/kernelkit/webui/internal/handlers" + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" +) + +// New creates a fully wired http.Handler with all routes and middleware. +func New( + store *auth.SessionStore, + rc *restconf.Client, + schemaCache *schema.Cache, + templateFS fs.FS, + staticFS fs.FS, +) (http.Handler, error) { + // Parse templates per page so each can define its own "content" block + // without collisions. 
+ loginTmpl, err := template.ParseFS(templateFS, "pages/login.html") + if err != nil { + return nil, err + } + dashTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/dashboard.html") + if err != nil { + return nil, err + } + fwTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/firewall.html") + if err != nil { + return nil, err + } + ksTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "fragments/configure-toolbar.html", "pages/configure-keystore.html") + if err != nil { + return nil, err + } + ifTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/interfaces.html") + if err != nil { + return nil, err + } + ifDetailTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/iface-detail.html", "fragments/iface-counters.html") + if err != nil { + return nil, err + } + ifCountersTmpl, err := template.ParseFS(templateFS, "fragments/iface-counters.html") + if err != nil { + return nil, err + } + fwrTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/firmware.html") + if err != nil { + return nil, err + } + sysCtrlTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/system-control.html") + if err != nil { + return nil, err + } + backupTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/backup.html") + if err != nil { + return nil, err + } + routingTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/routing.html") + if err != nil { + return nil, err + } + wifiTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/wifi.html") + if err != nil { + return nil, err + } + vpnTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/vpn.html") + if err != nil { + return nil, err + } + dhcpTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/dhcp.html") + if err != nil { + return nil, err + } + ntpTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/ntp.html") + if err != nil { + return nil, err + } + lldpTmpl, 
err := template.ParseFS(templateFS, "layouts/*.html", "pages/lldp.html") + if err != nil { + return nil, err + } + mdnsTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/mdns.html") + if err != nil { + return nil, err + } + nacmTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/nacm.html") + if err != nil { + return nil, err + } + servicesTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/services.html") + if err != nil { + return nil, err + } + containersTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "pages/containers.html") + if err != nil { + return nil, err + } + cfgSysTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "fragments/configure-toolbar.html", "pages/configure-system.html") + if err != nil { + return nil, err + } + cfgUsersTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "fragments/configure-toolbar.html", "pages/configure-users.html") + if err != nil { + return nil, err + } + cfgRoutesTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "fragments/configure-toolbar.html", "pages/configure-routes.html") + if err != nil { + return nil, err + } + cfgFwTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "fragments/configure-toolbar.html", "pages/configure-firewall.html") + if err != nil { + return nil, err + } + cfgIfTmpl, err := template.ParseFS(templateFS, "layouts/*.html", "fragments/configure-toolbar.html", "pages/configure-interfaces.html") + if err != nil { + return nil, err + } + yangTreeTmpl, err := template.ParseFS(templateFS, + "layouts/*.html", + "fragments/configure-toolbar.html", + "fragments/yang-tree-node.html", + "pages/yang-tree.html") + if err != nil { + return nil, err + } + yangFuncs := template.FuncMap{"stripPrefix": schema.StripModulePrefix} + yangFragTmpl, err := template.New("frag").Funcs(yangFuncs).ParseFS(templateFS, + "fragments/yang-tree-node.html", + "fragments/yang-node-detail.html", + "fragments/yang-leaf-group.html", + 
"fragments/yang-list-table.html") + if err != nil { + return nil, err + } + login := &auth.LoginHandler{ + Store: store, + RC: rc, + Template: loginTmpl, + OnLogin: func(ctx context.Context) { + schemaCache.RefreshBackground(ctx) + }, + } + + dash := &handlers.DashboardHandler{ + Template: dashTmpl, + RC: rc, + } + + fw := &handlers.FirewallHandler{ + Template: fwTmpl, + RC: rc, + } + + cfgKs := &handlers.ConfigureKeystoreHandler{ + Template: ksTmpl, + RC: rc, + Schema: schemaCache, + } + + iface := &handlers.InterfacesHandler{ + Template: ifTmpl, + DetailTemplate: ifDetailTmpl, + CountersTemplate: ifCountersTmpl, + RC: rc, + } + + sys := &handlers.SystemHandler{ + RC: rc, + Template: fwrTmpl, + SysCtrlTmpl: sysCtrlTmpl, + BackupTmpl: backupTmpl, + } + + routing := &handlers.RoutingHandler{Template: routingTmpl, RC: rc} + wifi := &handlers.WiFiHandler{Template: wifiTmpl, RC: rc} + vpn := &handlers.VPNHandler{Template: vpnTmpl, RC: rc} + dhcp := &handlers.DHCPHandler{Template: dhcpTmpl, RC: rc} + ntp := &handlers.NTPHandler{Template: ntpTmpl, RC: rc} + lldp := &handlers.LLDPHandler{Template: lldpTmpl, RC: rc} + mdns := &handlers.MDNSHandler{Template: mdnsTmpl, RC: rc} + nacm := &handlers.NACMHandler{Template: nacmTmpl, RC: rc} + services := &handlers.ServicesHandler{Template: servicesTmpl, RC: rc} + containers := &handlers.ContainersHandler{Template: containersTmpl, RC: rc} + cfg := &handlers.ConfigureHandler{RC: rc} + cfgSys := &handlers.ConfigureSystemHandler{Template: cfgSysTmpl, RC: rc, Schema: schemaCache} + cfgUsers := &handlers.ConfigureUsersHandler{Template: cfgUsersTmpl, RC: rc, Schema: schemaCache} + cfgRoutes := &handlers.ConfigureRoutesHandler{Template: cfgRoutesTmpl, RC: rc, Schema: schemaCache} + cfgFw := &handlers.ConfigureFirewallHandler{Template: cfgFwTmpl, RC: rc, Schema: schemaCache} + cfgIf := &handlers.ConfigureInterfacesHandler{Template: cfgIfTmpl, RC: rc, Schema: schemaCache} + schemaH := &handlers.SchemaHandler{Cache: schemaCache} + dataH := 
&handlers.DataHandler{RC: rc, Schema: schemaCache} + treeH := &handlers.TreeHandler{ + Cache: schemaCache, + RC: rc, + PageTmpl: yangTreeTmpl, + FragTmpl: yangFragTmpl, + } + statusTreeH := &handlers.TreeHandler{ + Cache: schemaCache, + RC: rc, + PageTmpl: yangTreeTmpl, + FragTmpl: yangFragTmpl, + ReadOnly: true, + } + + mux := http.NewServeMux() + + // Auth routes (public). + mux.HandleFunc("GET /login", login.ShowLogin) + mux.HandleFunc("POST /login", login.DoLogin) + mux.HandleFunc("POST /logout", login.DoLogout) + + // Static assets (public). + staticServer := http.FileServerFS(staticFS) + mux.Handle("GET /assets/", http.StripPrefix("/assets/", staticServer)) + + // Authenticated routes. + mux.HandleFunc("GET /{$}", dash.Index) + mux.HandleFunc("GET /interfaces", iface.Overview) + mux.HandleFunc("GET /interfaces/{name}", iface.Detail) + mux.HandleFunc("GET /interfaces/{name}/counters", iface.Counters) + mux.HandleFunc("GET /firewall", fw.Overview) + mux.HandleFunc("GET /keystore", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/configure/keystore", http.StatusMovedPermanently) + }) + mux.HandleFunc("GET /firmware", sys.Firmware) + mux.HandleFunc("GET /firmware/progress", sys.FirmwareProgress) + mux.HandleFunc("POST /firmware/install", sys.FirmwareInstall) + mux.HandleFunc("POST /firmware/upload", sys.FirmwareUpload) + mux.HandleFunc("POST /firmware/boot-order", sys.SetBootOrder) + mux.HandleFunc("POST /reboot", sys.Reboot) // kept for firmware page "Reboot to activate" + mux.HandleFunc("GET /device-status", sys.DeviceStatus) + mux.HandleFunc("GET /config", sys.DownloadConfig) + mux.HandleFunc("GET /maintenance/backup", sys.Backup) + mux.HandleFunc("POST /maintenance/backup/restore", sys.RestoreConfig) + mux.HandleFunc("GET /maintenance/system", sys.SystemControl) + mux.HandleFunc("POST /maintenance/system/reboot", sys.Reboot) + mux.HandleFunc("POST /maintenance/system/shutdown", sys.Shutdown) + mux.HandleFunc("POST 
/maintenance/system/factory-default", sys.FactoryDefault) + mux.HandleFunc("POST /maintenance/system/factory-reset", sys.FactoryReset) + mux.HandleFunc("POST /maintenance/system/datetime", sys.SetDatetime) + mux.HandleFunc("GET /routing", routing.Overview) + mux.HandleFunc("GET /wifi", wifi.Overview) + mux.HandleFunc("GET /vpn", vpn.Overview) + mux.HandleFunc("GET /dhcp", dhcp.Overview) + mux.HandleFunc("GET /ntp", ntp.Overview) + mux.HandleFunc("GET /lldp", lldp.Overview) + mux.HandleFunc("GET /mdns", mdns.Overview) + mux.HandleFunc("GET /nacm", nacm.Overview) + mux.HandleFunc("GET /services", services.Overview) + mux.HandleFunc("GET /containers", containers.Overview) + + // Configure routes. + mux.HandleFunc("POST /configure/enter", cfg.Enter) + mux.HandleFunc("POST /configure/apply", cfg.Apply) + mux.HandleFunc("POST /configure/apply-and-save", cfg.ApplyAndSave) + mux.HandleFunc("POST /configure/abort", cfg.Abort) + mux.HandleFunc("POST /configure/save", cfg.Save) + mux.HandleFunc("DELETE /configure/leaf", cfg.DeleteLeaf) + mux.HandleFunc("GET /configure/system", cfgSys.Overview) + mux.HandleFunc("POST /configure/system/identity", cfgSys.SaveIdentity) + mux.HandleFunc("POST /configure/system/clock", cfgSys.SaveClock) + mux.HandleFunc("PUT /configure/system/ntp", cfgSys.SaveNTP) + mux.HandleFunc("PUT /configure/system/dns", cfgSys.SaveDNS) + mux.HandleFunc("POST /configure/system/preferences", cfgSys.SavePreferences) + mux.HandleFunc("GET /configure/interfaces", cfgIf.Overview) + mux.HandleFunc("POST /configure/interfaces", cfgIf.CreateInterface) + mux.HandleFunc("POST /configure/interfaces/{name}", cfgIf.SaveGeneral) + mux.HandleFunc("POST /configure/interfaces/{name}/ipv4", cfgIf.AddIPv4) + mux.HandleFunc("DELETE /configure/interfaces/{name}/ipv4/{ip}", cfgIf.DeleteIPv4) + mux.HandleFunc("POST /configure/interfaces/{name}/ipv4/dhcp", cfgIf.SaveIPv4DHCP) + mux.HandleFunc("POST /configure/interfaces/{name}/ipv4/autoconf", cfgIf.SaveIPv4Autoconf) + 
mux.HandleFunc("POST /configure/interfaces/{name}/ipv6", cfgIf.AddIPv6) + mux.HandleFunc("DELETE /configure/interfaces/{name}/ipv6/{ip}", cfgIf.DeleteIPv6) + mux.HandleFunc("POST /configure/interfaces/{name}/ipv6/autoconf", cfgIf.SaveIPv6SLAAC) + mux.HandleFunc("POST /configure/interfaces/{name}/ipv6/dhcp", cfgIf.SaveIPv6DHCP) + mux.HandleFunc("POST /configure/interfaces/{name}/bridge-port", cfgIf.SaveBridgePort) + mux.HandleFunc("DELETE /configure/interfaces/{name}/bridge-port", cfgIf.DeleteBridgePort) + mux.HandleFunc("POST /configure/interfaces/{name}/bridge", cfgIf.SaveBridge) + mux.HandleFunc("POST /configure/interfaces/{name}/bridge/members", cfgIf.SaveBridgeMembers) + mux.HandleFunc("POST /configure/interfaces/{name}/bridge/vlans", cfgIf.AddVLAN) + mux.HandleFunc("POST /configure/interfaces/{name}/bridge/vlans/{vid}", cfgIf.SaveVLAN) + mux.HandleFunc("DELETE /configure/interfaces/{name}/bridge/vlans/{vid}", cfgIf.DeleteVLAN) + mux.HandleFunc("POST /configure/interfaces/{name}/lag", cfgIf.SaveLAG) + mux.HandleFunc("POST /configure/interfaces/{name}/lag/members", cfgIf.SaveLAGMembers) + mux.HandleFunc("POST /configure/interfaces/{name}/lag-port", cfgIf.SaveLagPort) + mux.HandleFunc("DELETE /configure/interfaces/{name}/lag-port", cfgIf.DeleteLagPort) + mux.HandleFunc("GET /configure/firewall", cfgFw.Overview) + mux.HandleFunc("POST /configure/firewall/enable", cfgFw.Enable) + mux.HandleFunc("POST /configure/firewall/settings", cfgFw.SaveSettings) + mux.HandleFunc("POST /configure/firewall/zones", cfgFw.AddZone) + mux.HandleFunc("POST /configure/firewall/zones/{name}", cfgFw.SaveZone) + mux.HandleFunc("DELETE /configure/firewall/zones/{name}", cfgFw.DeleteZone) + mux.HandleFunc("POST /configure/firewall/policies", cfgFw.AddPolicy) + mux.HandleFunc("POST /configure/firewall/policies/{name}", cfgFw.SavePolicy) + mux.HandleFunc("DELETE /configure/firewall/policies/{name}", cfgFw.DeletePolicy) + mux.HandleFunc("GET /configure/routes", cfgRoutes.Overview) + 
mux.HandleFunc("POST /configure/routes", cfgRoutes.AddRoute) + mux.HandleFunc("PUT /configure/routes", cfgRoutes.UpdateRoute) + mux.HandleFunc("DELETE /configure/routes", cfgRoutes.DeleteRoute) + mux.HandleFunc("GET /configure/users", cfgUsers.Overview) + mux.HandleFunc("POST /configure/users", cfgUsers.AddUser) + mux.HandleFunc("DELETE /configure/users/{name}", cfgUsers.DeleteUser) + mux.HandleFunc("POST /configure/users/{name}/shell", cfgUsers.UpdateShell) + mux.HandleFunc("POST /configure/users/{name}/password", cfgUsers.ChangePassword) + mux.HandleFunc("POST /configure/users/{name}/keys", cfgUsers.AddKey) + mux.HandleFunc("DELETE /configure/users/{name}/keys/{keyname}", cfgUsers.DeleteKey) + mux.HandleFunc("GET /configure/keystore", cfgKs.Overview) + mux.HandleFunc("POST /configure/keystore/symmetric", cfgKs.AddSymKey) + mux.HandleFunc("POST /configure/keystore/symmetric/{name}", cfgKs.UpdateSymKey) + mux.HandleFunc("DELETE /configure/keystore/symmetric/{name}", cfgKs.DeleteSymKey) + mux.HandleFunc("POST /configure/keystore/asymmetric", cfgKs.AddAsymKey) + mux.HandleFunc("POST /configure/keystore/asymmetric/{name}", cfgKs.UpdateAsymKey) + mux.HandleFunc("DELETE /configure/keystore/asymmetric/{name}", cfgKs.DeleteAsymKey) + mux.HandleFunc("POST /configure/keystore/asymmetric/{name}/certs", cfgKs.AddCert) + mux.HandleFunc("POST /configure/keystore/asymmetric/{name}/certs/{certname}", cfgKs.UpdateCert) + mux.HandleFunc("DELETE /configure/keystore/asymmetric/{name}/certs/{certname}", cfgKs.DeleteCert) + + // Schema API routes (authenticated). + mux.HandleFunc("GET /api/schema", schemaH.Schema) + mux.HandleFunc("GET /api/schema/children", schemaH.Children) + + // Data API route (authenticated) — raw RESTCONF JSON passthrough. + mux.HandleFunc("GET /api/data", dataH.Get) + + // YANG tree UI routes (authenticated). 
+ mux.HandleFunc("GET /configure/tree", treeH.Overview) + mux.HandleFunc("GET /configure/tree/children", treeH.TreeChildren) + mux.HandleFunc("GET /configure/tree/node", treeH.TreeNode) + mux.HandleFunc("PUT /configure/tree/node", treeH.SaveLeaf) + mux.HandleFunc("DELETE /configure/tree/node", treeH.DeleteLeaf) + mux.HandleFunc("PUT /configure/tree/group", treeH.SaveGroup) + mux.HandleFunc("GET /configure/tree/list-add", treeH.AddListRowForm) + mux.HandleFunc("POST /configure/tree/list-row", treeH.SaveListRow) + mux.HandleFunc("DELETE /configure/tree/list-row", treeH.DeleteListRow) + mux.HandleFunc("PUT /configure/tree/presence", treeH.TogglePresence) + mux.HandleFunc("DELETE /configure/tree/presence", treeH.TogglePresence) + + // Status tree (read-only operational view). + mux.HandleFunc("GET /status/tree", statusTreeH.Overview) + mux.HandleFunc("GET /status/tree/children", statusTreeH.TreeChildren) + mux.HandleFunc("GET /status/tree/node", statusTreeH.TreeNode) + + handler := authMiddleware(store, mux) + handler = csrfMiddleware(handler) + handler = securityHeadersMiddleware(handler) + return handler, nil +} diff --git a/src/webui/internal/testutil/helpers.go b/src/webui/internal/testutil/helpers.go new file mode 100644 index 000000000..7309bc319 --- /dev/null +++ b/src/webui/internal/testutil/helpers.go @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: MIT + +package testutil + +import ( + "context" + "encoding/json" + "sync" +) + +type mockEntry struct { + body any + err error +} + +type MockFetcher struct { + mu sync.Mutex + responses map[string]mockEntry + errors map[string]error +} + +func NewMockFetcher() *MockFetcher { + return &MockFetcher{ + responses: make(map[string]mockEntry), + errors: make(map[string]error), + } +} + +func (m *MockFetcher) SetResponse(path string, body any) { + m.mu.Lock() + defer m.mu.Unlock() + m.responses[path] = mockEntry{body: body} +} + +func (m *MockFetcher) SetError(path string, err error) { + m.mu.Lock() + defer m.mu.Unlock() 
+ m.errors[path] = err +} + +func (m *MockFetcher) Get(_ context.Context, path string, target any) error { + m.mu.Lock() + defer m.mu.Unlock() + + if err, ok := m.errors[path]; ok { + return err + } + + entry, ok := m.responses[path] + if !ok { + return nil + } + + raw, err := json.Marshal(entry.body) + if err != nil { + return err + } + return json.Unmarshal(raw, target) +} + +func (m *MockFetcher) GetRaw(_ context.Context, path string) ([]byte, error) { + m.mu.Lock() + defer m.mu.Unlock() + + if err, ok := m.errors[path]; ok { + return nil, err + } + + entry, ok := m.responses[path] + if !ok { + return []byte("{}"), nil + } + + return json.Marshal(entry.body) +} + +func (m *MockFetcher) Post(_ context.Context, path string) error { + m.mu.Lock() + defer m.mu.Unlock() + + if err, ok := m.errors[path]; ok { + return err + } + return nil +} + +func (m *MockFetcher) PostJSON(_ context.Context, path string, _ any) error { + m.mu.Lock() + defer m.mu.Unlock() + + if err, ok := m.errors[path]; ok { + return err + } + return nil +} + +func (m *MockFetcher) GetYANG(_ context.Context, _, _ string) ([]byte, error) { return nil, nil } +func (m *MockFetcher) Put(_ context.Context, _ string, _ any) error { return nil } +func (m *MockFetcher) Patch(_ context.Context, _ string, _ any) error { return nil } +func (m *MockFetcher) Delete(_ context.Context, _ string) error { return nil } + +func (m *MockFetcher) GetDatastore(_ context.Context, _ string) (json.RawMessage, error) { + return json.RawMessage("{}"), nil +} + +func (m *MockFetcher) PutDatastore(_ context.Context, _ string, _ json.RawMessage) error { + return nil +} + +func (m *MockFetcher) CopyDatastore(_ context.Context, _, _ string) error { return nil } diff --git a/src/webui/main.go b/src/webui/main.go new file mode 100644 index 000000000..c71515bb2 --- /dev/null +++ b/src/webui/main.go @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: MIT + +package main + +import ( + "embed" + "flag" + "io/fs" + "log" + "net/http" + "os" + 
"strings" + "time" + + "github.com/kernelkit/webui/internal/auth" + "github.com/kernelkit/webui/internal/restconf" + "github.com/kernelkit/webui/internal/schema" + "github.com/kernelkit/webui/internal/server" +) + +//go:embed templates/* +var templateFS embed.FS + +//go:embed static/* +var staticFS embed.FS + +func main() { + defaultRC := "http://localhost:8080/restconf" + if env := os.Getenv("RESTCONF_URL"); env != "" { + defaultRC = env + } + + listen := flag.String("listen", ":8080", "address to listen on") + restconfURL := flag.String("restconf", defaultRC, "RESTCONF base URL") + sessionKey := flag.String("session-key", "/var/lib/misc/webui-session.key", "path to persistent session key file") + insecureTLS := flag.Bool("insecure-tls", envBool("INSECURE_TLS"), "disable TLS certificate verification") + yangCacheDir := flag.String("yang-cache-dir", "/var/cache/webui/yang", "directory for cached YANG schema files") + flag.Parse() + + store, err := auth.NewSessionStore(*sessionKey) + if err != nil { + log.Fatalf("session store: %v", err) + } + + rc := restconf.NewClient(*restconfURL, *insecureTLS) + + schemaCache := schema.NewCache(rc, *yangCacheDir) + schemaCache.LoadFromCacheBackground() // fast, no HTTP — uses whatever is already on disk + + tmplFS, err := fs.Sub(templateFS, "templates") + if err != nil { + log.Fatalf("template fs: %v", err) + } + + stFS, err := fs.Sub(staticFS, "static") + if err != nil { + log.Fatalf("static fs: %v", err) + } + + handler, err := server.New(store, rc, schemaCache, tmplFS, stFS) + if err != nil { + log.Fatalf("server setup: %v", err) + } + + log.Printf("listening on %s (restconf %s)", *listen, *restconfURL) + srv := &http.Server{ + Addr: *listen, + Handler: handler, + ReadHeaderTimeout: 5 * time.Second, + ReadTimeout: 15 * time.Second, + WriteTimeout: 15 * time.Second, + IdleTimeout: 60 * time.Second, + } + if err := srv.ListenAndServe(); err != nil { + log.Fatalf("listen: %v", err) + } +} + +func envBool(key string) bool { + v 
:= strings.TrimSpace(os.Getenv(key)) + if v == "" { + return false + } + switch strings.ToLower(v) { + case "1", "true", "yes", "y", "on": + return true + default: + return false + } +} diff --git a/src/webui/static/css/style.css b/src/webui/static/css/style.css new file mode 100644 index 000000000..fcc136fe4 --- /dev/null +++ b/src/webui/static/css/style.css @@ -0,0 +1,2714 @@ +/* ========================================================================== + Design System: Primitives & Tokens + ========================================================================== */ + +:root { + /* --- Primitives: Slate (Neutral) --- */ + --slate-50: #f8fafc; + --slate-100: #f1f5f9; + --slate-200: #e2e8f0; + --slate-300: #cbd5e1; + --slate-400: #94a3b8; + --slate-500: #64748b; + --slate-600: #475569; + --slate-700: #334155; + --slate-800: #1e293b; + --slate-900: #0f172a; + --slate-950: #020617; + + /* --- Primitives: Colors --- */ + --blue-500: #3b82f6; + --blue-600: #2563eb; + --blue-700: #1d4ed8; + --green-500: #22c55e; + --green-600: #16a34a; + --amber-500: #f59e0b; + --amber-600: #d97706; + --red-500: #ef4444; + --red-600: #dc2626; + + /* --- Semantic Tokens: Light Mode (Default) --- */ + --bg: var(--slate-50); + --surface: #ffffff; + --fg: var(--slate-900); + --fg-muted: var(--slate-500); + --border: var(--slate-200); + --border-subtle: var(--slate-100); + + --primary: var(--blue-500); + --primary-hover: var(--blue-600); + + --success: var(--green-500); + --warning: var(--amber-500); + --danger: var(--red-500); + + --warning-bg: #fffbeb; + --warning-fg: #78350f; + --warning-border: #fcd34d; + + /* Sidebar — Light mode */ + --sidebar-bg: var(--slate-100); + --sidebar-fg: var(--slate-700); + --sidebar-hover: rgba(0, 0, 0, 0.07); + --sidebar-hover-fg: var(--primary); + --sidebar-border: rgba(0, 0, 0, 0.08); + --sidebar-width: 240px; + + /* Login page */ + --dot-color: rgba(0, 0, 0, 0.055); + + /* Layout */ + --topbar-height: 48px; + --radius: 8px; + --radius-sm: 4px; + 
--shadow-sm: 0 1px 2px 0 rgb(0 0 0 / 0.05); + --shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1); + --font-sans: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif; + --font-mono: "SF Mono", "Fira Code", "Fira Mono", Menlo, Consolas, monospace; + + --surface-alt: var(--slate-100); + + /* Legacy variable mapping */ + --card-bg: var(--surface); +} + +/* --- Dark Mode Overrides --- */ +@media (prefers-color-scheme: dark) { + :root { + --bg: var(--slate-900); + --surface: var(--slate-800); + --fg: var(--slate-100); + --fg-muted: var(--slate-400); + --border: var(--slate-700); + --border-subtle: var(--slate-800); + --surface-alt: var(--slate-900); + --sidebar-bg: var(--slate-800); + --sidebar-fg: var(--slate-200); + --sidebar-hover: rgba(255, 255, 255, 0.1); + --sidebar-hover-fg: #fff; + --sidebar-border: rgba(255, 255, 255, 0.1); + --dot-color: rgba(255, 255, 255, 0.045); + --warning-bg: #2d2007; + --warning-fg: #fde68a; + --warning-border: #92400e; + } +} + +/* Manual Dark Mode Toggle */ +.dark { + color-scheme: dark; + --bg: var(--slate-900); + --surface: var(--slate-800); + --fg: var(--slate-100); + --fg-muted: var(--slate-400); + --border: var(--slate-700); + --border-subtle: var(--slate-800); + --surface-alt: var(--slate-900); + --zebra-stripe: rgba(255,255,255,0.03); + --sidebar-bg: var(--slate-800); + --sidebar-fg: var(--slate-200); + --sidebar-hover: rgba(255, 255, 255, 0.1); + --sidebar-hover-fg: #fff; + --sidebar-border: rgba(255, 255, 255, 0.1); + --dot-color: rgba(255, 255, 255, 0.045); + --warning-bg: #2d2007; + --warning-fg: #fde68a; + --warning-border: #92400e; +} + +/* Force Light Mode (override system dark preference) */ +.light { + color-scheme: light; + --bg: var(--slate-50); + --surface: #ffffff; + --fg: var(--slate-900); + --fg-muted: var(--slate-500); + --border: var(--slate-200); + --border-subtle: var(--slate-100); + --surface-alt: var(--slate-100); + --sidebar-bg: var(--slate-100); + 
--sidebar-fg: var(--slate-700); + --sidebar-hover: rgba(0, 0, 0, 0.07); + --sidebar-hover-fg: var(--primary); + --sidebar-border: rgba(0, 0, 0, 0.08); + --dot-color: rgba(0, 0, 0, 0.055); + --warning-bg: #fffbeb; + --warning-fg: #78350f; + --warning-border: #fcd34d; +} +.light .alert-error { background: #fef2f2; color: #991b1b; border-color: #fecaca; } +.light .alert-info { background: #eff6ff; color: #1e40af; border-color: #bfdbfe; } +.light .health-bar-track { background-color: var(--slate-200); } +.light .zone-badge { + background-color: hsl(var(--zone-hue), 80%, 90%); + color: hsl(var(--zone-hue), 80%, 30%); + border-color: hsl(var(--zone-hue), 60%, 80%); +} +.light .matrix-allow { background: #dcfce7; color: #166534; } +.light .matrix-deny { background: #fef2f2; color: #991b1b; } +.light .matrix-self { background: var(--slate-100); color: var(--slate-400); } +.light .badge-accept { background: #dcfce7; color: #166534; } +.light .badge-reject { background: #fef2f2; color: #991b1b; } +.light .badge-drop { background: var(--slate-100); color: var(--slate-600); } +.light .badge-continue { background: #dbeafe; color: #1e40af; } + +/* ========================================================================== + Reset & Base + ========================================================================== */ + +*, *::before, *::after { + box-sizing: border-box; + margin: 0; + padding: 0; +} + +html { + height: 100%; + overflow: hidden; +} +body { + height: 100%; + font-family: var(--font-sans); + font-size: 15px; + line-height: 1.5; + color: var(--fg); + background: var(--bg); + -webkit-font-smoothing: antialiased; +} + +a { + color: var(--primary); + text-decoration: none; + transition: color 0.15s ease; +} + +a:hover { + text-decoration: underline; +} + +/* ========================================================================== + Layout + ========================================================================== */ + +.layout { + display: flex; + height: calc(100vh - 
var(--topbar-height)); + margin-top: var(--topbar-height); + overflow: hidden; +} + +#sidebar { + position: relative; + z-index: 150; + width: var(--sidebar-width); + background: var(--sidebar-bg); + color: var(--sidebar-fg); + display: flex; + flex-direction: column; + flex-shrink: 0; + border-right: 1px solid var(--sidebar-border); + overflow-y: auto; +} + +.main-column { + flex: 1; + display: flex; + flex-direction: column; + overflow: hidden; +} + +#content { + flex: 1; + width: 100%; + padding: 2rem; + overflow-y: auto; + max-width: 1200px; + margin: 0 auto; + box-sizing: border-box; +} + +/* ========================================================================== + Sidebar Components + ========================================================================== */ + +.topbar-logo { + height: calc(var(--topbar-height) - 14px); + width: auto; + display: block; +} + +.sidebar-nav { + flex: 1; + padding: 0.5rem 0; + overflow-y: auto; +} + +/* Standalone nav link (Dashboard) */ +.nav-standalone { + margin: 0.25rem 0.5rem 0.5rem; +} + +/* Top-level accordion groups (Status / Configure / Maintenance) */ +details.nav-group-top { +} + +details.nav-group-top > summary.nav-group-summary-top { + list-style: none; + cursor: pointer; + font-size: 0.7rem; + font-weight: 700; + letter-spacing: 0.07em; + text-transform: uppercase; + color: var(--sidebar-fg); + opacity: 0.7; + padding: 0.75rem 1rem 0.375rem; + display: flex; + align-items: center; + gap: 0.35rem; + user-select: none; + transition: opacity 0.15s; +} +details.nav-group-top > summary.nav-group-summary-top:hover { opacity: 1; } +details.nav-group-top > summary.nav-group-summary-top::-webkit-details-marker { display: none; } +details.nav-group-top > summary.nav-group-summary-top::marker { display: none; } + +details.nav-group-top > summary.nav-group-summary-top::before { + content: '▸'; + font-size: 0.7rem; +} +details[open].nav-group-top > summary.nav-group-summary-top::before { + content: '▾'; +} + +/* Static 
sub-section labels within top-level accordion */ +.nav-section-label { + font-size: 0.6rem; + font-weight: 600; + letter-spacing: 0.08em; + text-transform: uppercase; + color: var(--sidebar-fg); + opacity: 0.45; + padding: 0.75rem 1rem 0.2rem; + user-select: none; +} + +.nav-group-items { + list-style: none; + padding-bottom: 0.25rem; +} + +.nav-icon { + display: inline-block; + width: 1rem; + height: 1rem; + background-color: currentColor; + -webkit-mask-image: var(--icon); + mask-image: var(--icon); + -webkit-mask-size: contain; + mask-size: contain; + -webkit-mask-repeat: no-repeat; + mask-repeat: no-repeat; + -webkit-mask-position: center; + mask-position: center; + flex-shrink: 0; +} + +.nav-link { + display: flex; + align-items: center; + gap: 0.625rem; + padding: 0.5rem 1rem; + color: var(--sidebar-fg); + text-decoration: none; + font-size: 0.875rem; + border-radius: 0.375rem; + margin: 0 0.5rem; + transition: background-color 0.15s, color 0.15s; + font-weight: 500; +} + +.nav-link:hover { + background-color: var(--sidebar-hover); + color: var(--sidebar-hover-fg); + text-decoration: none; +} + +.nav-link.active { + background-color: var(--primary); + color: #fff; + text-decoration: none; +} + +/* Button styled as a nav-link (for actions like Reboot) */ +.nav-link-btn { + width: 100%; + background: none; + border: none; + cursor: pointer; + text-align: left; + font-family: inherit; + font-size: inherit; +} + +.nav-link-danger:hover { + color: var(--danger); + background-color: var(--sidebar-hover); +} + +.sidebar-footer { + padding: 1.25rem; + border-top: 1px solid var(--sidebar-border); +} + +/* ========================================================================== + Buttons + ========================================================================== */ + +.btn { + display: inline-flex; + align-items: center; + justify-content: center; + padding: 0.5rem 1rem; + border: 1px solid transparent; + border-radius: var(--radius); + font-size: 0.9rem; + 
font-weight: 500; + cursor: pointer; + text-decoration: none; + transition: all 0.15s ease; + line-height: 1.25; +} + +.btn-primary { + background: var(--primary); + color: #fff; +} + +.btn-primary:hover { + background: var(--primary-hover); + text-decoration: none; +} + +.btn-secondary { + background: transparent; + color: var(--text-muted); + border-color: var(--border); +} +.btn-secondary:hover { + background: var(--border); + color: var(--text); + text-decoration: none; +} + +.btn-block { + display: flex; + width: 100%; +} + +/* Sidebar Actions */ +.btn-sidebar-action, .btn-logout { + display: flex; + width: 100%; + align-items: center; + justify-content: center; + background: transparent; + color: var(--sidebar-fg); + border: 1px solid rgba(255,255,255,0.2); + padding: 0.5rem 0.75rem; + border-radius: var(--radius); + cursor: pointer; + font-size: 0.85rem; + text-decoration: none; + margin-bottom: 0.5rem; + transition: all 0.15s ease; +} + +.btn-sidebar-action:hover, .btn-logout:hover { + background: var(--sidebar-hover); + border-color: rgba(255,255,255,0.3); + text-decoration: none; +} + +.btn-danger-hover:hover { + border-color: var(--danger); + color: #fca5a5; + background: rgba(220, 38, 38, 0.1); +} + +/* ========================================================================== + Cards & Content Containers + ========================================================================== */ + +.info-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); + gap: 1.5rem; + margin-bottom: 1.5rem; + align-items: stretch; +} + +/* Cards inside a grid don't need their own bottom margin — the grid gap handles spacing */ +.info-grid > .info-card { margin-bottom: 0; } + +/* Full-width card spanning all grid columns */ +.info-grid-full { grid-column: 1 / -1; } + +/* Two-column card for wide tables — only kicks in once the grid has ≥2 columns */ +@media (min-width: 680px) { + .info-grid-span-2 { grid-column: span 2; } +} + +.info-card { + 
background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + box-shadow: var(--shadow-sm); + margin-bottom: 1.5rem; + display: flex; + flex-direction: column; +} + +/* Legacy support: info-card often contains direct h2 */ +.info-card h2 { + font-size: 0.85rem; + text-transform: uppercase; + letter-spacing: 0.05em; + font-weight: 600; + color: var(--fg-muted); + padding: 1rem 1.25rem; + border-bottom: 1px solid var(--border); + margin: 0; + background: var(--bg); + border-radius: var(--radius) var(--radius) 0 0; +} + +.card-header { + padding: 0.75rem 1.25rem; + background: var(--bg); + border-bottom: 1px solid var(--border); + font-size: 0.85rem; + text-transform: uppercase; + border-radius: var(--radius) var(--radius) 0 0; + letter-spacing: 0.05em; + font-weight: 600; + color: var(--fg-muted); +} + +.card-body { + padding: 1.25rem; +} + +/* Collapsible card: content is height-capped; JS adds "Show more" when it overflows */ +.card-collapsible-body { + position: relative; + max-height: 300px; + overflow: hidden; + transition: max-height 0.25s ease; +} +.card-collapsible.is-expanded .card-collapsible-body { + max-height: 2000px; +} +/* Fade hint at the bottom when collapsed */ +.card-collapsible:not(.is-expanded) .card-collapsible-body::after { + content: ''; + position: absolute; + bottom: 0; + left: 0; + right: 0; + height: 2.5rem; + background: linear-gradient(transparent, var(--surface)); + pointer-events: none; +} +/* The "Show more / Show less" button injected by JS */ +.card-expand-btn { + display: none; + width: 100%; + padding: 0.45rem; + text-align: center; + background: var(--bg); + border: none; + border-top: 1px solid var(--border); + color: var(--fg-muted); + font-size: 0.78rem; + font-family: inherit; + cursor: pointer; + transition: color 0.15s; + letter-spacing: 0.02em; +} +.card-expand-btn:hover { color: var(--primary); } + +/* ========================================================================== + Tables + 
========================================================================== */

.info-table, .disk-table, .counters-table {
    width: 100%;
    border-collapse: collapse;
    font-size: 0.9rem;
}

.info-table th, .info-table td,
.disk-table th, .disk-table td,
.counters-table th, .counters-table td {
    padding: 0.75rem 1.25rem;
    border-bottom: 1px solid var(--border);
}

/* Label column of key/value info tables.
   FIX: this selector used to be declared twice with conflicting widths
   (1% first, then 40% further down); the cascade resolved to 40%, so the
   two rules are consolidated here with the winning declarations. */
.info-table th {
    width: 40%;
    white-space: nowrap;
    color: var(--fg-muted);
    font-weight: 500;
    text-align: left;
    background: transparent;
}

/* Disk usage widget (one entry per mount point) */
.disk-item {
    padding: 0.7rem 1.25rem;
    border-bottom: 1px solid var(--border);
}
.disk-item:last-child { border-bottom: none; }
.disk-item-header {
    display: flex;
    justify-content: space-between;
    align-items: baseline;
    margin-bottom: 0.4rem;
}
.disk-mount {
    font-family: var(--font-mono);
    font-size: 0.85rem;
    font-weight: 500;
    color: var(--fg);
}
.disk-pct { font-size: 0.8rem; color: var(--fg-muted); }
.disk-stats {
    font-size: 0.78rem;
    color: var(--fg-muted);
    margin-top: 0.3rem;
    font-variant-numeric: tabular-nums;
}

/* Sensor table group headers and indented child rows */
.sensor-group-hdr th {
    padding: 0.35rem 1.25rem 0.2rem;
    font-size: 0.7rem;
    font-weight: 600;
    text-transform: uppercase;
    letter-spacing: 0.06em;
    color: var(--fg-muted);
    background: var(--bg);
    border-bottom: none;
}
.sensor-child th {
    padding-left: 2rem;
}

.data-table th.num,
.data-table td.num {
    text-align: right;
    font-variant-numeric: tabular-nums;
}

.info-table tr:last-child th, .info-table tr:last-child td,
.disk-table tbody tr:last-child td,
.counters-table tbody tr:last-child td {
    border-bottom: none;
}

.info-table tr:nth-child(even),
.disk-table tbody tr:nth-child(even),
.counters-table tbody tr:nth-child(even) {
    background-color: var(--zebra-stripe, rgba(0,0,0,0.025));
}

.disk-table thead th, .counters-table thead th {
    background: var(--bg);
    font-weight: 600;
    color: var(--fg-muted);
    text-align: left;
    font-size: 0.8rem;
    text-transform: uppercase;
}

.counters-table td {
    font-family: var(--font-mono);
}

/* New Data Table Styles */
.data-table-wrap {
    overflow-x: auto;
    border: 1px solid var(--border);
    border-radius: var(--radius);
    /* Scroll-shadow: a right-edge shadow appears automatically when the table
       overflows, fading away once scrolled to the end. Pure CSS, no JS needed.
       The `local` gradients act as white masks pinned to the scroll content;
       the `scroll` gradients are the actual shadows fixed to the viewport edge. */
    background:
        linear-gradient(to right, var(--surface), var(--surface)) left / 24px 100% no-repeat local,
        linear-gradient(to left, var(--surface), var(--surface)) right / 24px 100% no-repeat local,
        linear-gradient(to right, rgba(0,0,0,0.07), transparent) left / 8px 100% no-repeat scroll,
        linear-gradient(to left, rgba(0,0,0,0.07), transparent) right / 8px 100% no-repeat scroll;
    background-color: var(--surface);
}

/* Remove the nested border when a table lives inside a card — the card already provides the frame */
.info-card .data-table-wrap {
    border: none;
    border-radius: 0;
}

.data-table {
    width: 100%;
    border-collapse: collapse;
    font-size: 0.9rem;
    white-space: nowrap;
}

.data-table th, .data-table td {
    padding: 0.75rem 1rem;
    border-bottom: 1px solid var(--border);
    text-align: left;
    vertical-align: top;
}

.data-table th {
    background: var(--bg);
    color: var(--fg-muted);
    font-weight: 600;
    font-size: 0.8rem;
    text-transform: uppercase;
}

.data-table tbody tr:nth-child(even) {
    background-color: var(--zebra-stripe, rgba(0,0,0,0.025));
}

.data-table tbody tr:hover { background-color: var(--slate-100); }
@media (prefers-color-scheme: dark) {
    .data-table tbody tr:hover { background-color: rgba(255,255,255,0.06); }
}
.dark .data-table tbody tr:hover { background-color: rgba(255,255,255,0.06); }

/* ==========================================================================
   Forms & Inputs
   ========================================================================== */

.form-group {
    margin-bottom: 1.25rem;
}

.form-group label {
    display: block;
    font-size: 0.85rem;
    font-weight: 600;
    margin-bottom: 0.4rem;
    color: var(--fg);
}

.form-group input {
    display: block;
    width: 100%;
    padding: 0.6rem 0.8rem;
    background: var(--surface);
    color: var(--fg);
    border: 1px solid var(--border);
    border-radius: var(--radius);
    font-size: 0.9rem;
    transition: all 0.15s ease;
}

.form-group input:focus {
    outline: none;
    border-color: var(--primary);
    box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.15);
}

/* ==========================================================================
   Authentication
   ========================================================================== */

@keyframes login-rise {
    from { opacity: 0; transform: translateY(14px); }
    to   { opacity: 1; transform: translateY(0); }
}

.login-wrapper {
    display: flex;
    align-items: center;
    justify-content: center;
    min-height: 100vh;
    background-color: var(--bg);
    background-image: radial-gradient(var(--dot-color) 1.5px, transparent 1.5px);
    background-size: 22px 22px;
    padding: 1rem;
}

.login-card {
    background: var(--surface);
    padding: 2.5rem;
    border-radius: var(--radius);
    box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.07), 0 2px 4px -2px rgb(0 0 0 / 0.07),
                0 0 0 1px var(--border);
    width: 100%;
    max-width: 400px;
    border-top: 2px solid var(--primary);
    animation: login-rise 0.4s cubic-bezier(0.16, 1, 0.3, 1) both;
}

.login-logo {
    display: block;
    max-width: 180px;
    height: auto;
    margin: 0 auto 2rem;
}

/* ==========================================================================
   Alerts & Banners
   ========================================================================== */
/* Top progress bar — shown during htmx page navigations */
.page-progress {
    position: fixed;
    top: 0;
    left: 0;
    height: 2px;
    width: 0%;
    background: var(--primary);
    z-index: 9999;
    opacity: 0;
    pointer-events: none;
}

/* Sticky banner announcing loss of backend connectivity */
.conn-banner {
    background: var(--danger);
    color: #fff;
    text-align: center;
    padding: 0.5rem;
    font-size: 0.9rem;
    font-weight: 600;
    position: sticky;
    top: 0;
    z-index: 50;
}

.alert {
    padding: 1rem;
    border-radius: var(--radius);
    margin-bottom: 1.5rem;
    font-size: 0.9rem;
    border: 1px solid transparent;
}

.alert-error {
    background: #fef2f2;
    color: var(--red-600);
    border-color: #fecaca;
}

.alert-info {
    background: #eff6ff;
    color: var(--blue-700);
    border-color: #bfdbfe;
}

/* Dark mode adjustments for alerts */
@media (prefers-color-scheme: dark) {
    .alert-error {
        background: rgba(239, 68, 68, 0.1);
        color: #fca5a5;
        border-color: rgba(239, 68, 68, 0.2);
    }
    .alert-info {
        background: rgba(59, 130, 246, 0.1);
        color: #93c5fd;
        border-color: rgba(59, 130, 246, 0.2);
    }
}
.dark .alert-error {
    background: rgba(239, 68, 68, 0.1);
    color: #fca5a5;
    border-color: rgba(239, 68, 68, 0.2);
}
.dark .alert-info {
    background: rgba(59, 130, 246, 0.1);
    color: #93c5fd;
    border-color: rgba(59, 130, 246, 0.2);
}

/* ==========================================================================
   Status Indicators
   ========================================================================== */

/* Legacy Status Dots */
.status-dot {
    display: inline-block;
    width: 8px;
    height: 8px;
    border-radius: 50%;
    margin-right: 0.5rem;
    vertical-align: middle;
}

.status-up   { background: var(--success); }
.status-down { background: var(--danger); }

/* New Interface Status Indicators */
.iface-status {
    display: inline-block;
    width: 10px;
    height: 10px;
    border-radius: 50%;
    margin-right: 0.5rem;
}

.iface-up {
    background-color: var(--success);
    box-shadow: 0 0 0 rgba(34, 197, 94, 0.4);
    animation: pulse-green 2s infinite;
}

.iface-down {
    background-color: var(--danger);
}

.iface-lower-down {
    background-color: var(--warning);
}

@keyframes pulse-green {
    0%   { box-shadow: 0 0 0 0 rgba(34, 197, 94, 0.7); }
    70%  { box-shadow: 0 0 0 6px rgba(34, 197, 94, 0); }
    100% { box-shadow: 0 0 0 0 rgba(34, 197, 94, 0); }
}


/* ==========================================================================
   Components: Health Bar
   ========================================================================== */

.health-bar-track {
    width: 100%;
    height: 6px;
    background-color: var(--slate-200);
    border-radius: 99px;
    overflow: hidden;
}

.health-bar-fill {
    height: 100%;
    background-color: var(--success);
    border-radius: 99px;
    transition: width 0.5s ease;
}

.health-bar-fill.is-warn { background-color: var(--warning); }
.health-bar-fill.is-crit { background-color: var(--danger); }

/* Dark mode track */
@media (prefers-color-scheme: dark) {
    .health-bar-track { background-color: var(--slate-700); }
}
.dark .health-bar-track { background-color: var(--slate-700); }

/* ==========================================================================
   Components: Zone Badge
   ========================================================================== */

.zone-badge {
    /* Requires --zone-hue to be set inline or via utility */
    --zone-hue: 200; /* Default */
    display: inline-flex;
    align-items: center;
    padding: 0.25rem 0.6rem;
    border-radius: var(--radius-sm);
    font-size: 0.75rem;
    font-weight: 600;
    background-color: hsl(var(--zone-hue), 80%, 90%);
    color: hsl(var(--zone-hue), 80%, 30%);
    border: 1px solid hsl(var(--zone-hue), 60%, 80%);
}

@media (prefers-color-scheme: dark) {
    .zone-badge {
        background-color: hsl(var(--zone-hue), 60%, 20%);
        color: hsl(var(--zone-hue), 80%, 85%);
        border-color: hsl(var(--zone-hue), 60%, 30%);
    }
}
.dark .zone-badge {
    background-color: hsl(var(--zone-hue), 60%, 20%);
    color: hsl(var(--zone-hue), 80%, 85%);
    border-color: hsl(var(--zone-hue), 60%, 30%);
}

/* ==========================================================================
   Utilities
   ========================================================================== */

.num {
    font-variant-numeric: tabular-nums;
    font-family: var(--font-mono);
}

.text-muted { color: var(--fg-muted); }

/* Page title — used at the top of each content page */
.page-title {
    font-size: 1.375rem;
    font-weight: 700;
    color: var(--fg);
    letter-spacing: -0.025em;
    margin-bottom: 1.5rem;
}

/* Removes the redundant wrapping div some pages use */
.page-content { display: contents; }

/* ==========================================================================
   Domain Specific: Firewall, Keystore, Firmware
   ========================================================================== */

/* Zone Matrix */
.matrix-wrap {
    overflow-x: auto;
    border: 1px solid var(--border);
    border-radius: var(--radius);
}

.zone-matrix {
    border-collapse: collapse;
    font-size: 0.9rem;
    width: 100%;
}

.zone-matrix th, .zone-matrix td {
    padding: 0.75rem;
    text-align: center;
    border: 1px solid var(--border);
    min-width: 90px;
}

.zone-matrix thead th {
    font-weight: 600;
    color: var(--fg-muted);
    background: var(--bg);
}

.zone-matrix tbody th {
    font-weight: 600;
    text-align: right;
    background: var(--bg);
    color: var(--fg-muted);
}

.matrix-corner {
    color: var(--fg-muted);
}

.matrix-allow { background: #dcfce7; color: #166534; font-weight: 700; }
.matrix-deny  { background: #fef2f2; color: #991b1b; font-weight: 700; }
.matrix-cond  { background: #fef9c3; color: #854d0e; font-weight: 700; }
.matrix-self  { background: var(--slate-100); color: var(--slate-400); }

/* Clickable cells */
.matrix-cell[data-verdict] {
    cursor: pointer;
}
.matrix-cell[data-verdict]:hover {
    filter: brightness(0.93);
}

@media (prefers-color-scheme: dark) {
    .matrix-allow { background: rgba(22, 163, 74, 0.2); color: #86efac; }
    .matrix-deny  { background: rgba(220, 38, 38, 0.2); color: #fca5a5; }
    .matrix-cond  { background: rgba(234, 179, 8, 0.2); color: #fde047; }
    .matrix-self  { background: var(--slate-800); color: var(--slate-600); }
}
.dark .matrix-allow { background: rgba(22, 163, 74, 0.2); color: #86efac; }
.dark .matrix-deny  { background: rgba(220, 38, 38, 0.2); color: #fca5a5; }
.dark .matrix-cond  { background: rgba(234, 179, 8, 0.2); color: #fde047; }
.dark .matrix-self  { background: var(--slate-800); color: var(--slate-600); }
/* NOTE(review): only .matrix-cond has a forced-light (.light) override here,
   while the badge rules elsewhere override every variant — confirm whether
   .matrix-allow/.matrix-deny/.matrix-self need .light counterparts too. */
.light .matrix-cond { background: #fef9c3; color: #854d0e; }

/* Flow detail panel — shown below matrix when a cell is clicked.
   display:flex overrides HTML [hidden], so we restore that here. */
.fw-flow-detail {
    display: flex;
    align-items: center;
    gap: 0.5rem;
    padding: 0.5rem 1rem;
    background: var(--surface-alt);
    border-top: 1px solid var(--border);
    font-size: 0.875rem;
}
.fw-flow-detail[hidden] { display: none !important; }
.fw-detail-flow { font-weight: 600; }
.fw-detail-sep  { color: var(--fg-muted); }
.fw-detail-text { color: var(--fg-muted); }

/* Policy immutable lock icon */
.policy-lock-col {
    width: 1.5rem;
    min-width: 1.5rem;
    padding: 0 0.25rem !important;
    text-align: center;
}
.policy-lock { color: var(--fg-muted); font-size: 0.85rem; user-select: none; }
.data-table td.policy-lock-col,
.data-table th.policy-lock-col { vertical-align: middle; }

.matrix-legend {
    display: flex;
    gap: 1.5rem;
    font-size: 0.85rem;
    color: var(--fg-muted);
    padding: 0.75rem 1rem;
}

.legend-item {
    display: flex;
    align-items: center;
    gap: 0.5rem;
}

.legend-swatch {
    display: inline-flex;
    align-items: center;
    justify-content: center;
    width: 1.5rem;
    height: 1.25rem;
    border-radius: 4px;
    border: 1px solid var(--border);
    font-weight: 700;
    font-size: 0.75rem;
}

/*
Badges */
.badge {
    display: inline-block;
    padding: 0.25rem 0.6rem;
    border-radius: var(--radius-sm);
    font-size: 0.75rem;
    font-weight: 600;
    text-transform: capitalize;
}

.badge-accept   { background: #dcfce7; color: #166534; }
.badge-reject   { background: #fef2f2; color: #991b1b; }
.badge-drop     { background: var(--slate-100); color: var(--slate-600); }
.badge-continue { background: #dbeafe; color: #1e40af; }

@media (prefers-color-scheme: dark) {
    .badge-accept   { background: rgba(22, 163, 74, 0.2); color: #86efac; }
    .badge-reject   { background: rgba(220, 38, 38, 0.2); color: #fca5a5; }
    .badge-drop     { background: var(--slate-800); color: var(--slate-400); }
    .badge-continue { background: rgba(59, 130, 246, 0.2); color: #93c5fd; }
}
.dark .badge-accept   { background: rgba(22, 163, 74, 0.2); color: #86efac; }
.dark .badge-reject   { background: rgba(220, 38, 38, 0.2); color: #fca5a5; }
.dark .badge-drop     { background: var(--slate-800); color: var(--slate-400); }
.dark .badge-continue { background: rgba(59, 130, 246, 0.2); color: #93c5fd; }

/* General-purpose status badges */
.badge-up      { background: #dcfce7; color: #166534; }
.badge-down    { background: #fef2f2; color: #991b1b; }
.badge-warning { background: #fef9c3; color: #854d0e; }
.badge-info    { background: #dbeafe; color: #1e40af; }
.badge-neutral { background: var(--slate-100); color: var(--slate-600); }

@media (prefers-color-scheme: dark) {
    .badge-up      { background: rgba(22, 163, 74, 0.2); color: #86efac; }
    .badge-down    { background: rgba(220, 38, 38, 0.2); color: #fca5a5; }
    .badge-warning { background: rgba(245, 158, 11, 0.2); color: #fcd34d; }
    .badge-info    { background: rgba(59, 130, 246, 0.2); color: #93c5fd; }
    .badge-neutral { background: var(--slate-800); color: var(--slate-400); }
}
.dark .badge-up      { background: rgba(22, 163, 74, 0.2); color: #86efac; }
.dark .badge-down    { background: rgba(220, 38, 38, 0.2); color: #fca5a5; }
.dark .badge-warning { background: rgba(245, 158, 11, 0.2); color: #fcd34d; }
.dark .badge-info    { background: rgba(59, 130, 246, 0.2); color: #93c5fd; }
.dark .badge-neutral { background: var(--slate-800); color: var(--slate-400); }
.light .badge-up      { background: #dcfce7; color: #166534; }
.light .badge-down    { background: #fef2f2; color: #991b1b; }
.light .badge-warning { background: #fef9c3; color: #854d0e; }
.light .badge-info    { background: #dbeafe; color: #1e40af; }
.light .badge-neutral { background: var(--slate-100); color: var(--slate-600); }

/* mDNS service chips — self-contained, no .badge inheritance */
.mdns-svc {
    display: inline-block;
    font-size: 0.75rem;
    font-weight: 500;
    padding: 0.15rem 0.45rem;
    border-radius: var(--radius-sm);
    white-space: nowrap;
    background: var(--slate-100);
    color: var(--slate-600);
}
.mdns-svc-link {
    background: #2563eb;
    color: #ffffff;
    text-decoration: none;
}
.mdns-svc-link:hover { background: #1d4ed8; color: #ffffff; }
@media (prefers-color-scheme: dark) {
    .mdns-svc { background: var(--slate-800); color: var(--slate-400); }
    .mdns-svc-link { background: rgba(59, 130, 246, 0.2); color: #93c5fd; }
    .mdns-svc-link:hover { background: rgba(59, 130, 246, 0.35); color: #93c5fd; }
}
.dark .mdns-svc { background: var(--slate-800); color: var(--slate-400); }
.dark .mdns-svc-link { background: rgba(59, 130, 246, 0.2); color: #93c5fd; }
.dark .mdns-svc-link:hover { background: rgba(59, 130, 246, 0.35); color: #93c5fd; }
.light .mdns-svc { background: var(--slate-100); color: var(--slate-600); }
.light .mdns-svc-link { background: #2563eb; color: #ffffff; }
.light .mdns-svc-link:hover { background: #1d4ed8; color: #ffffff; }

/* mDNS extra-address rows — hidden until toggle */
.mdns-extra-row { display: none; }
.mdns-extra-addr { color: var(--fg-muted); padding-top: 0.1rem; padding-bottom: 0.1rem; }

/* mDNS toggle column and chevron button */
.mdns-toggle-col {
    width: 1.5rem;
    min-width: 1.5rem;
    padding: 0 0.25rem !important;
    text-align: center;
}
.data-table td.mdns-toggle-col,
.data-table th.mdns-toggle-col { vertical-align: middle; }
.mdns-addr-toggle {
    background: none;
    border: none;
    cursor: pointer;
    padding: 0;
    color: var(--fg-muted);
    font-size: 0.75rem;
    line-height: 1;
}
.mdns-addr-toggle[aria-expanded="true"] .mdns-addr-arrow {
    display: inline-block;
    transform: rotate(90deg);
}

.mdns-lastseen { white-space: nowrap; }

/* Services table */
.svc-name { font-family: var(--font-mono); font-size: 0.85rem; white-space: nowrap; }
.svc-num  { white-space: nowrap; color: var(--fg-muted); font-size: 0.85rem; }
.svc-desc { color: var(--fg-muted); font-size: 0.875rem; }
.svc-status {
    font-size: 0.75rem;
    font-weight: 600;
    text-transform: lowercase;
    white-space: nowrap;
}
.svc-running { color: #16a34a; }
.svc-stopped { color: #ca8a04; }
.svc-error   { color: #dc2626; }
@media (prefers-color-scheme: dark) {
    .svc-running { color: #86efac; }
    .svc-stopped { color: #fde047; }
    .svc-error   { color: #fca5a5; }
}
.dark .svc-running { color: #86efac; }
.dark .svc-stopped { color: #fde047; }
.dark .svc-error   { color: #fca5a5; }
.light .svc-running { color: #16a34a; }
.light .svc-stopped { color: #ca8a04; }
.light .svc-error   { color: #dc2626; }

/* ─── NACM matrix ─────────────────────────────────────────────────────────── */
.nacm-matrix .nacm-col { text-align: center; width: 5rem; }
.nacm-group { font-family: var(--font-mono); font-size: 0.85rem; }
.nacm-cell {
    display: inline-flex;
    align-items: center;
    justify-content: center;
    width: 2rem;
    height: 2rem;
    border-radius: 4px;
    font-size: 1rem;
    font-weight: 700;
}
.nacm-full       { background: rgba(34, 197, 94, 0.2); color: #16a34a; }
.nacm-restricted { background: rgba(234, 179, 8, 0.2); color: #854d0e; }
.nacm-denied     { background: rgba(239, 68, 68, 0.2); color: #dc2626; }
.nacm-restrictions-row td { padding-top: 0; }
.nacm-restrictions { font-size: 0.75rem; color: var(--fg-muted); font-style: italic; }
.nacm-legend {
    padding: 0.5rem 1rem 0.75rem;
    font-size: 0.8rem;
    color: var(--fg-muted);
    display: flex;
    align-items: center;
    gap: 0.35rem;
}
.nacm-legend .nacm-cell { width: 1.4rem; height: 1.4rem; font-size: 0.8rem; }
@media (prefers-color-scheme: dark) {
    .nacm-full       { background: rgba(34, 197, 94, 0.2); color: #86efac; }
    .nacm-restricted { background: rgba(234, 179, 8, 0.2); color: #fde047; }
    .nacm-denied     { background: rgba(239, 68, 68, 0.2); color: #fca5a5; }
}
.dark .nacm-full       { background: rgba(34, 197, 94, 0.2); color: #86efac; }
.dark .nacm-restricted { background: rgba(234, 179, 8, 0.2); color: #fde047; }
.dark .nacm-denied     { background: rgba(239, 68, 68, 0.2); color: #fca5a5; }
.light .nacm-full       { background: rgba(34, 197, 94, 0.15); color: #16a34a; }
.light .nacm-restricted { background: rgba(234, 179, 8, 0.15); color: #854d0e; }
.light .nacm-denied     { background: rgba(239, 68, 68, 0.15); color: #dc2626; }

/* Sub-heading within a card body (e.g. "Active Leases" in DHCP) */
.section-subtitle {
    font-size: 0.75rem;
    font-weight: 600;
    text-transform: uppercase;
    letter-spacing: 0.06em;
    color: var(--fg-muted);
    padding: 1rem 1.25rem 0.5rem;
    margin: 0;
    border-top: 1px solid var(--border);
}

/* Row of stat chips (e.g.
DHCP counters) */
.stats-row {
    display: flex;
    flex-wrap: wrap;
    gap: 0.5rem;
    padding: 0.75rem 1.25rem;
    border-bottom: 1px solid var(--border);
}

/* Stat chip: label + value pair */
.stat-chip {
    display: inline-flex;
    align-items: center;
    gap: 0.375rem;
    padding: 0.3rem 0.65rem;
    background: var(--bg);
    border: 1px solid var(--border);
    border-radius: var(--radius-sm);
    font-size: 0.8rem;
    white-space: nowrap;
}
.stat-chip-label { color: var(--fg-muted); font-weight: 500; }
.stat-chip-value { color: var(--fg); font-weight: 700; font-variant-numeric: tabular-nums; }

/* Interfaces */
.iface-name { white-space: nowrap; font-weight: 500; }
.iface-name a { color: var(--primary); text-decoration: none; }
.iface-name a:hover { text-decoration: underline; }

/* Forwarding flag column */
.iface-fwd-col { width: 1.5rem; min-width: 1.5rem; padding: 0 0.25rem !important; text-align: center; }
.iface-fwd-flag { font-size: 0.8rem; color: var(--primary); user-select: none; }

/* Tree connector column — pseudo-elements draw the continuous vertical/horizontal lines.
   Wide enough to be a comfortable click target for the collapse button on parent rows. */
.iface-tree-col {
    width: 1.75rem;
    min-width: 1.75rem;
    padding: 0 !important;
    position: relative;
    overflow: visible;
    text-align: center;
}
/* These two narrow icon columns must stay middle-aligned even though the rest of the
   table uses vertical-align: top. Use td.class for higher specificity (0-1-1 beats 0-1-0). */
.data-table td.iface-tree-col,
.data-table td.iface-fwd-col,
.data-table th.iface-fwd-col { vertical-align: middle; }

/* Parent row: draw a vertical line from row center down to the bottom edge */
.iface-tree-col.tree-parent::after {
    content: '';
    position: absolute;
    left: 50%; top: 50%; bottom: 0;
    width: 2px; margin-left: -1px;
    background: var(--border);
}

/* Middle member: full-height vertical line + horizontal arm right */
.iface-tree-col.tree-mid::before {
    content: '';
    position: absolute;
    left: 50%; top: 0; bottom: 0;
    width: 2px; margin-left: -1px;
    background: var(--border);
}
.iface-tree-col.tree-mid::after {
    content: '';
    position: absolute;
    left: 50%; top: 50%; right: 0;
    height: 2px; margin-top: -1px;
    background: var(--border);
}

/* Last member: vertical line top-to-center + horizontal arm right */
.iface-tree-col.tree-last::before {
    content: '';
    position: absolute;
    left: 50%; top: 0; bottom: 50%;
    width: 2px; margin-left: -1px;
    background: var(--border);
}
.iface-tree-col.tree-last::after {
    content: '';
    position: absolute;
    left: 50%; top: 50%; right: 0;
    height: 2px; margin-top: -1px;
    background: var(--border);
}

/* Member rows: subtle background tint; name indented to clear the tree arm */
.iface-member { background: var(--surface-alt); }
/* Tighter left padding on the name cell so connector arms read as attached to the text */
.iface-name { padding-left: 0.4rem !important; }
.iface-member-name { padding-left: 1rem !important; }

/* Bridge/LAG collapse toggle — sits inside .iface-tree-col, centred over the connector line.
   z-index: 1 ensures it is clickable above the ::after pseudo-element. */
.bridge-toggle {
    background: none;
    border: none;
    cursor: pointer;
    padding: 0;
    color: var(--fg-muted);
    font-size: 0.65rem;
    line-height: 1;
    display: block;
    width: 100%;
    position: relative;
    z-index: 1;
}
.bridge-toggle-arrow {
    display: inline-block;
    transition: transform 0.15s ease;
}
/* When the parent row is collapsed: rotate arrow and hide the connector line below */
.bridge-collapsed .bridge-toggle-arrow { transform: rotate(-90deg); }
.bridge-collapsed .iface-tree-col.tree-parent::after { display: none; }

.data-detail { color: var(--fg-muted); font-size: 0.85em; }

.addr-origin { color: var(--fg-muted); font-size: 0.75rem; margin-left: 0.5rem; }

.wg-peer {
    padding: 1rem 0;
    border-bottom: 1px solid var(--border);
}
.wg-peer:last-child { border-bottom: none; }

.eth-stats {
    display: grid;
    grid-template-columns: 1fr 1fr;
    font-size: 0.85rem;
    border: 1px solid var(--border);
    border-radius: var(--radius);
    overflow: hidden;
}
.eth-stats-row {
    display: flex;
    justify-content: space-between;
    padding: 0.5rem 1rem;
    border-bottom: 1px solid var(--border);
    background: var(--surface);
}
.eth-stats dt { color: var(--fg-muted); font-family: var(--font-mono); }
.eth-stats dd { text-align: right; font-family: var(--font-mono); font-weight: 600; }

.iface-detail-header { margin-bottom: 1.5rem; border-bottom: 1px solid var(--border); padding-bottom: 1rem; }
.iface-detail-header h2 { font-size: 1.5rem; font-weight: 600; color: var(--fg); }
.back-link { display: inline-flex; align-items: center; margin-bottom: 0.5rem; font-size: 0.9rem; }

.info-card h3 {
    font-size: 0.85rem;
    font-weight: 600;
    text-transform: uppercase;
    color: var(--fg-muted);
    margin: 1.5rem 0 0.75rem;
    padding-left: 0.5rem;
    border-left: 3px solid var(--primary);
}

/* Firmware & Reboot */
.fw-install-grid {
    display: grid;
    grid-template-columns: repeat(auto-fill, minmax(340px, 1fr));
    gap: 1.5rem;
    margin-top: 1.5rem;
}
.fw-install-grid > .info-card { margin: 0; }

.fw-card-muted { opacity: 0.6; pointer-events: none; }

.fw-help-text {
    font-size: 0.875rem;
    color: var(--fg-muted);
    margin-bottom: 1rem;
    line-height: 1.6;
}
.fw-help-text a { color: var(--primary); }
.fw-help-text code, .fw-hint-body code { font-family: var(--font-mono); font-size: 0.8em; }

.fw-hint {
    background: var(--bg);
    border: 1px solid var(--border);
    border-radius: var(--radius-sm);
    margin-bottom: 1.25rem;
    font-size: 0.875rem;
}
.fw-hint summary {
    cursor: pointer;
    padding: 0.55rem 0.8rem;
    color: var(--fg-muted);
    user-select: none;
    list-style: none;
    display: flex;
    align-items: center;
    gap: 0.4rem;
}
.fw-hint summary::-webkit-details-marker { display: none; }
.fw-hint summary::before { content: '›'; display: inline-block; transition: transform 0.15s; }
details.fw-hint[open] summary::before { transform: rotate(90deg); }
.fw-hint-body {
    padding: 0.75rem 0.8rem;
    border-top: 1px solid var(--border);
}
.fw-hint-body p { margin: 0 0 0.5rem; color: var(--fg-muted); font-size: 0.85rem; }
.fw-hint-body p:last-child { margin-bottom: 0; }
.fw-hint-code {
    display: block;
    font-family: var(--font-mono);
    font-size: 0.8rem;
    background: var(--surface);
    border: 1px solid var(--border);
    border-radius: var(--radius-sm);
    padding: 0.6rem 0.75rem;
    white-space: pre;
    overflow-x: auto;
    color: var(--fg);
    margin-bottom: 0.5rem;
}

.fw-boot-order-row {
    display: flex;
    align-items: center;
    gap: 0.4rem;
    padding: 0.55rem 1.25rem;
    border-bottom: 1px solid var(--border);
    font-size: 0.8rem;
}
.fw-boot-order-label {
    color: var(--fg-muted);
    font-size: 0.7rem;
    text-transform: uppercase;
    letter-spacing: 0.06em;
    font-weight: 600;
    margin-right: 0.2rem;
}

.fw-boot-slots { display: flex; gap: 0.3rem; align-items: center; flex: 1; }
.fw-boot-badge { cursor: grab; user-select: none; }
.fw-boot-badge.fw-boot-dragging { opacity: 0.35; }
.fw-boot-badge.fw-boot-drop-before { box-shadow: -3px 0 0 var(--primary); }

.fw-slot-list { display: flex; flex-direction: column; }
.fw-slot-item {
    padding: 0.75rem 1.25rem;
    border-bottom: 1px solid var(--border);
}
.fw-slot-item:last-child { border-bottom: none; }
.fw-slot-primary {
    display: flex;
    align-items: center;
    gap: 0.5rem;
    margin-bottom: 0.2rem;
}
.fw-slot-name { font-weight: 600; font-size: 0.9rem; }
.fw-slot-version {
    font-family: var(--font-mono);
    font-size: 0.8rem;
    color: var(--fg-muted);
}
.fw-slot-date {
    font-size: 0.75rem;
    color: var(--fg-muted);
    opacity: 0.75;
    margin-top: 0.1rem;
}

.fw-upload-placeholder {
    border: 2px dashed var(--border);
    border-radius: var(--radius);
    padding: 2rem 1rem;
    text-align: center;
    color: var(--fg-muted);
    margin-bottom: 1rem;
    display: flex;
    flex-direction: column;
    align-items: center;
    gap: 0.6rem;
    font-size: 0.875rem;
}

.firmware-form .form-group { margin-bottom: 0.75rem; }
.firmware-form .fw-checkbox-row { margin-bottom: 0.75rem; }

.fw-checkbox-row {
    display: flex;
    align-items: center;
    gap: 0.5rem;
    font-size: 0.875rem;
    color: var(--fg-muted);
    cursor: pointer;
    user-select: none;
}
.fw-checkbox-row input[type="checkbox"] { accent-color: var(--primary); cursor: pointer; }

.fw-result {
    display: flex;
    align-items: center;
    gap: 0.75rem;
    font-size: 0.9rem;
    font-weight: 500;
}
.fw-result svg { flex-shrink: 0; }
.fw-result-ok  { color: var(--success); }
.fw-result-err { color: var(--danger); }
.fw-result-body { display: flex; flex-direction: column; }
.fw-result-actions { margin-top: 1rem; }

/* Compact pill badges used in the firmware slot list and boot-order row. */
.fw-install-grid .badge-neutral {
    background: var(--slate-200);
    font-size: 0.65rem;
    font-weight: 600;
    padding: 0.15em 0.5em;
    border-radius: 999px;
    text-transform: uppercase;
    letter-spacing: 0.04em;
    vertical-align: middle;
}
@media (prefers-color-scheme: dark) {
    .fw-install-grid .badge-neutral { background: var(--slate-700); color: var(--slate-300); }
}
.dark .fw-install-grid .badge-neutral { background: var(--slate-700); color: var(--slate-300); }

.progress-bar-wrap--flush { margin-top: 0; }

.progress-bar-wrap {
    background: var(--slate-200);
    border-radius: var(--radius);
    height: 1.25rem;
    overflow: hidden;
    margin-top: 0.5rem;
}
.progress-bar {
    background: var(--primary);
    height: 100%;
    width: 0%;
    transition: width 0.3s ease;
}
.progress-text { font-size: 0.85rem; color: var(--fg-muted); margin-top: 0.5rem; text-align: center; }

.reboot-overlay {
    display: flex;
    flex-direction: column;
    align-items: center;
    justify-content: center;
    min-height: 60vh;
}
.reboot-spinner {
    width: 48px;
    height: 48px;
    border: 4px solid var(--slate-200);
    border-top-color: var(--primary);
    border-radius: 50%;
    animation: spin 1s linear infinite;
    margin-bottom: 1.5rem;
}
@keyframes spin { to { transform: rotate(360deg); } }

.reboot-message { font-size: 1.25rem; font-weight: 600; color: var(--fg); }
.reboot-status { font-size: 1rem; color: var(--fg-muted); margin-top: 0.5rem; }
.reboot-status.is-error { color: var(--danger); }

/* Keystore */
.empty-message { color: var(--fg-muted); font-style: italic; padding: 1rem; text-align: center; }

.key-row-toggle {
    background: none;
    border: none;
    padding: 0;
    cursor: pointer;
    color: var(--fg);
    font: inherit;
    display: inline-flex;
    align-items: center;
    gap: 0.4rem;
    text-align: left;
}
.key-row-arrow {
    font-size: 0.6rem;
    color: var(--fg-muted);
    display: inline-block;
    transition: transform 0.15s ease;
    flex-shrink: 0;
}
.key-row-toggle[aria-expanded="true"] .key-row-arrow { transform: rotate(90deg); }

.key-detail-row { display: none; }
.key-detail-row.is-open { display: table-row; }
.key-detail-cell {
    background: var(--surface-alt);
    padding: 0.75rem 1rem 0.75rem 1.5rem !important;
    border-top: none !important;
    text-align: left !important;
}
.key-detail-body { display: flex; flex-direction: column; gap: 0.6rem; }
.key-field { display: flex; flex-direction: column; gap: 0.2rem; }
.key-field-label { font-size: 0.7rem; font-weight: 600; text-transform: uppercase; letter-spacing: 0.05em; color: var(--fg-muted); }
.key-field-value {
    font-family: var(--font-mono);
    font-size: 0.8rem;
    line-height: 1.6;
    word-break: break-all;
    white-space: pre-wrap;
    color: var(--fg);
    background: var(--surface);
    padding: 0.4rem 0.6rem;
    border-radius: 4px;
    max-width: 72ch;
    display: block;
}

/* Survey */
.survey-chart { max-width: 100%; height: auto; display: block; }
.survey-hint { font-size: 0.8rem; color: var(--fg-muted); margin-bottom: 1rem; }

/* WiFi */
.wifi-radio-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    width: 100%;
}

.wifi-caps {
    display: flex;
    gap: 0.375rem;
    align-items: center;
}

.wifi-survey-body {
    padding: 1rem;
    display: flex;
    align-items: center;
    justify-content: center;
}

.wifi-survey-body .survey-chart {
    max-height: 220px;
    width: auto;
}

.wifi-ssid {
    font-weight: 400;
    color: var(--fg-muted);
    font-size: 0.85rem;
    margin-left: 0.5rem;
    font-family: var(--font-mono);
}

.wifi-band-list {
    display: flex;
    flex-direction: column;
    gap: 0.375rem;
}

.wifi-band-item {
    display: flex;
    align-items: center;
    gap: 0.5rem;
}

.wifi-band-caps {
    display: inline-flex;
    gap: 0.25rem;
}

/* Signal Strength */
.signal-excellent { color: var(--success); font-weight: 600; }
.signal-good      { color: var(--green-600); font-weight: 600; }
.signal-ok        { color: var(--warning); font-weight: 600; }
.signal-poor      { color: var(--danger); font-weight: 600; }

/* ==========================================================================
   Topbar & User Menu
   ========================================================================== */

.topbar {
    position: fixed;
    top: 0;
    left: 0;
    right: 0;
    display: flex;
    align-items: center;
    gap: 0.75rem;
    padding: 0 1rem;
    height: var(--topbar-height);
    background: var(--sidebar-bg);
    border-bottom: 1px solid var(--sidebar-border);
    z-index: 200;
}

.topbar-right {
    display: flex;
    align-items: center;
    gap: 0.5rem;
    margin-left: auto;
}

.hamburger-btn {
    background: none;
    border: 1px solid var(--border);
    border-radius: 0.375rem;
    padding: 0.375rem;
    cursor: pointer;
    color: var(--fg-muted);
    display: none;
    align-items: center;
}
.hamburger-btn:hover {
    color: var(--fg);
    border-color: var(--fg-muted);
}

/* User menu button */
.user-menu {
    position: relative;
}

.user-menu-btn {
    display: flex;
    align-items: center;
    gap: 0.4rem;
    background: none;
    border: none;
    border-radius: var(--radius);
    padding: 0.35rem 0.5rem;
    font-size: 0.875rem;
    font-weight: 500;
    color: var(--fg-muted);
    cursor: pointer;
    transition: color 0.15s;
}
.user-menu-btn:hover {
    color: var(--fg);
}
.user-menu-btn .chevron {
    color: var(--fg-muted);
    transition: transform 0.2s;
}
.user-menu-btn[aria-expanded="true"] .chevron {
    transform: rotate(180deg);
}

/* Dropdown panel — opens on hover */
.user-dropdown {
    position: absolute;
    top: 100%;
    right: 0;
    min-width: 190px;
    background: var(--surface);
    border: 1px solid var(--border);
    border-radius: var(--radius);
    box-shadow: var(--shadow);
    padding: 0.375rem 0;
    z-index: 200;
    /* Hidden by default; shown on hover or when button is aria-expanded */
    opacity: 0;
    pointer-events: none;
    transform: translateY(-4px);
    transition: opacity 0.15s, transform 0.15s;
}
+.user-menu:hover .user-dropdown, +.user-menu-btn[aria-expanded="true"] + .user-dropdown { + opacity: 1; + pointer-events: auto; + transform: translateY(0); +} + +.dropdown-section-label { + font-size: 0.7rem; + font-weight: 600; + letter-spacing: 0.06em; + text-transform: uppercase; + color: var(--fg-muted); + padding: 0.25rem 0.875rem 0.125rem; +} + +.dropdown-divider { + border: none; + border-top: 1px solid var(--border); + margin: 0.375rem 0; +} + +.dropdown-item { + display: flex; + align-items: center; + gap: 0.5rem; + width: 100%; + padding: 0.45rem 0.875rem; + font-size: 0.875rem; + color: var(--fg-muted); + background: none; + border: none; + cursor: pointer; + text-decoration: none; + text-align: left; + transition: background 0.1s, color 0.1s; + box-sizing: border-box; +} +.dropdown-item:hover, +.dropdown-item:focus { + background: var(--border); + color: var(--fg); + text-decoration: none; +} +.dropdown-item svg { flex-shrink: 0; color: var(--fg-muted); transition: color 0.1s; } +.dropdown-item:hover svg, +.dropdown-item:focus svg { color: var(--fg); } +.dropdown-item-danger:hover, +.dropdown-item-danger:focus { color: var(--danger); } +.dropdown-item-danger:hover svg, +.dropdown-item-danger:focus svg { color: var(--danger); } + +/* Theme checkmark — hidden by default, shown on active option */ +.theme-check { margin-left: auto; flex-shrink: 0; opacity: 0; transition: opacity 0.1s; } +.theme-opt.is-active .theme-check { opacity: 1; } + +/* Login page floating theme button */ +.login-theme-btn { + position: fixed; + bottom: 1.25rem; + right: 1.25rem; + background: var(--surface); + border: 1px solid var(--border); + border-radius: var(--radius); + padding: 0.45rem; + cursor: pointer; + color: var(--fg-muted); + display: flex; + align-items: center; + justify-content: center; + box-shadow: var(--shadow-sm); + transition: all 0.15s; +} +.login-theme-btn:hover { color: var(--fg); border-color: var(--fg-muted); } + +.login-header { + display: flex; + 
align-items: center; + justify-content: center; + gap: 0.5rem; + margin-bottom: 1.75rem; + color: var(--fg-muted); +} + +.login-header h1 { + font-size: 1.25rem; + font-weight: 600; + color: var(--fg); + line-height: 1; +} + +/* ========================================================================== + Responsive Layout + ========================================================================== */ + +/* Narrow (≤1024px): Sidebar hidden behind hamburger overlay */ +@media (max-width: 1024px) { + #sidebar { + position: fixed; + top: var(--topbar-height); + left: 0; + height: calc(100vh - var(--topbar-height)); + z-index: 150; + transform: translateX(-100%); + transition: transform 0.25s ease; + width: var(--sidebar-width); + } + + body.sidebar-open #sidebar { + transform: translateX(0); + } + + body.sidebar-open::after { + content: ''; + position: fixed; + top: var(--topbar-height); + left: 0; + right: 0; + bottom: 0; + background: rgba(0,0,0,0.4); + z-index: 140; + } + + #content { + height: auto; + min-height: 100%; + } + + .hamburger-btn { + display: flex; + } +} + +/* Desktop (>1024px): hamburger hidden, sidebar always visible */ +@media (min-width: 1025px) { + .hamburger-btn { + display: none; + } +} + +/* Column priority hiding for data tables */ +@media (max-width: 600px) { + .col-priority-2 { display: none; } +} +@media (max-width: 480px) { + .col-priority-3 { display: none; } +} + +/* ─── Confirm dialog ──────────────────────────────────────────────────────── */ +#confirm-dialog { + border: 1px solid var(--border); + border-radius: var(--radius); + background: var(--surface); + color: var(--fg); + padding: 1.5rem 2rem 1.25rem; + min-width: 20rem; + max-width: 90vw; + box-shadow: var(--shadow); + /* Centre in the viewport regardless of scroll position */ + position: fixed; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + margin: 0; +} +#confirm-dialog::backdrop { + background: rgba(0, 0, 0, 0.45); +} +.dialog-message { + font-size: 0.95rem; + 
margin: 0 0 1.25rem; + line-height: 1.5; +} +.dialog-actions { + display: flex; + gap: 0.75rem; + justify-content: flex-end; +} +.btn-outline { + background: transparent; + color: var(--fg); + border: 1px solid var(--border); +} +.btn-outline:hover { background: var(--border); } +.btn-danger-outline { + background: transparent; + color: var(--danger); + border: 1px solid var(--danger); +} +.btn-danger-outline:hover { background: rgba(220, 38, 38, 0.08); } + +.btn-accept { + background: var(--success); + color: #fff; + border: 1px solid transparent; +} +.btn-accept:hover { background: var(--green-600); } + +/* ─── Configure toolbar ───────────────────────────────────────────────────── */ +.configure-toolbar { + position: fixed; + bottom: 0; + left: var(--sidebar-width); + right: 0; + z-index: 130; + background: var(--surface); + border-top: 1px solid var(--border); + padding: 0.75rem 1.5rem; + box-shadow: 0 -2px 8px rgba(0,0,0,0.08); +} +@media (max-width: 1024px) { + .configure-toolbar { left: 0; } +} +.configure-toolbar-inner { + display: flex; + gap: 0.75rem; +} +/* Push page content above the toolbar */ +.configure-page { padding-bottom: 4rem; } + +/* ─── Unsaved-changes banner ──────────────────────────────────────────────── */ +.cfg-unsaved-banner { + display: flex; + align-items: center; + gap: 0.6rem; + padding: 0.5rem 1.25rem; + background: var(--warning-bg); + border-bottom: 1px solid var(--warning-border); + color: var(--warning-fg); + font-size: 0.875rem; + flex-shrink: 0; +} + +/* cfgError inline feedback */ +.cfg-save-status.error { color: var(--danger); cursor: pointer; } + +/* ─── Configure activity log ─────────────────────────────────────────────── */ +.cfg-log-wrap { position: relative; margin-left: auto; } +.cfg-log-btn { position: relative; } +.cfg-log-badge { + position: absolute; + top: -6px; right: -6px; + min-width: 16px; height: 16px; + padding: 0 3px; + background: var(--danger); + color: #fff; + border-radius: 8px; + font-size: 0.65rem; + 
line-height: 16px; + text-align: center; +} +.cfg-log-panel { + display: none; + position: absolute; + bottom: calc(100% + 8px); + right: 0; + width: 420px; + max-height: 300px; + background: var(--card-bg); + border: 1px solid var(--border); + border-radius: var(--radius); + box-shadow: 0 4px 16px rgba(0,0,0,0.15); + flex-direction: column; + z-index: 300; + font-size: 0.8rem; +} +.cfg-log-panel:not([hidden]) { display: flex; } +.cfg-log-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 0.4rem 0.75rem; + border-bottom: 1px solid var(--border); + font-weight: 600; + font-size: 0.8rem; +} +.cfg-log-close { background: none; border: none; cursor: pointer; color: var(--fg-muted); font-size: 0.9rem; } +.cfg-log-list { list-style: none; margin: 0; padding: 0; overflow-y: auto; flex: 1; } +.cfg-log-entry { padding: 0.3rem 0.75rem; border-bottom: 1px solid var(--border-subtle, var(--border)); } +.cfg-log-empty { padding: 0.5rem 0.75rem; color: var(--fg-muted); } +.cfg-log-ts { color: var(--fg-muted); margin-right: 0.4rem; } +.cfg-log-ok { color: var(--fg); } +.cfg-log-error { color: var(--danger); background: rgba(var(--danger-rgb, 220,38,38), 0.05); } + +/* ─── Editable form inputs inside cards ───────────────────────────────────── */ +.cfg-input { + width: 100%; + padding: 0.4rem 0.6rem; + background: var(--surface); + color: var(--fg); + border: 1px solid var(--border); + border-radius: var(--radius); + font-size: 0.875rem; + font-family: inherit; + transition: border-color 0.15s, box-shadow 0.15s; + box-sizing: border-box; +} +.cfg-input:focus { + outline: none; + border-color: var(--primary); + box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.15); +} +select.cfg-input { + appearance: none; + -webkit-appearance: none; + padding-right: 2rem; + background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 24 24' fill='none' stroke='%236b7280' stroke-width='2.5' 
stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'%3E%3C/polyline%3E%3C/svg%3E"); + background-repeat: no-repeat; + background-position: right 0.6rem center; + cursor: pointer; +} +.cfg-input-sm { max-width: 8rem; } +.cfg-input-mono { font-family: var(--font-mono); font-size: 0.85rem; } +.cfg-reset-col { width: 1%; white-space: nowrap; padding-left: 0.25rem; } +.cfg-section-head { font-size: 0.85rem; font-weight: 600; text-transform: uppercase; letter-spacing: 0.05em; color: var(--text-muted); margin: 0 0 0.4rem; padding-bottom: 0.25rem; border-bottom: 1px solid var(--border); } +.cfg-subsection-head { font-size: 0.8rem; font-weight: 600; color: var(--text-muted); margin: 0.5rem 0 0.25rem; } +.fw-check-scroll { max-height: 12rem; overflow-y: auto; border: 1px solid var(--border); border-radius: 4px; padding: 0.4rem 0.6rem; background: var(--input-bg, var(--surface)); } +.fw-check-grid { display: flex; flex-wrap: wrap; gap: 0.35rem 1.25rem; } +.fw-check-grid label { display: flex; align-items: center; gap: 0.35rem; font-weight: normal; cursor: pointer; } + +/* Editable table rows */ +.cfg-table td { padding: 0.45rem 0.75rem; vertical-align: middle; } +.cfg-table > tbody > tr > td:last-child { width: 3rem; text-align: center; } +/* Leading nav-chevron column for complex (drill-down) list rows */ +.yt-nav-col { width: 1.5rem; padding: 0.25rem !important; text-align: center; color: var(--fg-muted); } +.btn-icon { + display: inline-flex; + align-items: center; + justify-content: center; + width: 1.75rem; + height: 1.75rem; + border: none; + border-radius: var(--radius); + background: transparent; + color: var(--fg-muted); + cursor: pointer; + transition: background 0.1s, color 0.1s; + padding: 0; +} +.btn-icon:hover { background: var(--border); color: var(--fg); } +.btn-icon-danger:hover { color: var(--danger); background: rgba(220,38,38,0.08); } +.btn-add-row { + display: flex; + align-items: center; + gap: 0.4rem; + font-size: 0.8rem; 
+ color: var(--primary); + background: none; + border: none; + cursor: pointer; + padding: 0.5rem 0.75rem; +} +.btn-add-row:hover { text-decoration: underline; } + +/* Inline forms inside users table */ +.user-shell-form, .user-add-inline { + display: flex; + align-items: center; + gap: 0.5rem; + flex-wrap: wrap; +} +.user-shell-form .cfg-input { flex: 1; min-width: 0; } +.user-add-inline .cfg-input { width: auto; flex: 1; min-width: 8rem; } + +/* Card-level save feedback */ +.cfg-card-footer { + display: flex; + align-items: center; + gap: 0.75rem; + padding: 0.6rem 1rem; + border-top: 1px solid var(--border); +} +.cfg-save-status { + font-size: 0.8rem; + color: var(--fg-muted); + min-height: 1.2em; +} +.cfg-save-status.saved { color: #16a34a; } +.cfg-save-error { font-size: 0.8rem; color: var(--danger); } +@media (prefers-color-scheme: dark) { + .cfg-save-status.saved { color: #86efac; } +} +.dark .cfg-save-status.saved { color: #86efac; } +.light .cfg-save-status.saved { color: #16a34a; } + +/* ========================================================================== + System Control page + ========================================================================== */ + +.sc-action-body { + display: flex; + flex-direction: column; + justify-content: space-between; + flex: 1; + gap: 1rem; +} + +.sc-desc { + color: var(--fg-muted); + font-size: 0.875rem; + line-height: 1.5; + margin: 0; +} + +.sc-danger-card { border-color: var(--warning-border); } + +.sc-danger-body { display: flex; flex-direction: column; } + +.sc-danger-item { + display: flex; + align-items: center; + justify-content: space-between; + gap: 1.5rem; + padding: 1.25rem; + flex-wrap: wrap; +} + +.sc-danger-text { flex: 1; min-width: 0; } +.sc-danger-text strong { display: block; margin-bottom: 0.35rem; } + +.sc-danger-sep { height: 1px; background: var(--border); margin: 0 1.25rem; } + +.sc-fd-ok { color: var(--success); font-size: 0.875rem; } +.sc-fd-err { color: var(--danger); font-size: 
0.875rem; } + +.sc-dt-row { + display: flex; + gap: 0.5rem; + align-items: center; + flex-wrap: wrap; + margin-bottom: 0.5rem; +} + +.sc-restore-label { + display: flex; + align-items: center; + gap: 0.4rem; + margin-top: 0.75rem; + cursor: pointer; +} +.sc-restore-warn-text { + color: var(--warning-fg); + font-size: 0.875rem; +} + +/* ========================================================================== + YANG Tree Navigation + ========================================================================== */ + +/* Two-column layout: tree pane + detail pane */ +.yang-layout { + display: flex; + gap: 1.5rem; + align-items: flex-start; +} + +.yang-tree-pane { + width: 260px; + flex-shrink: 0; + overflow-y: auto; + max-height: calc(100vh - 14rem); + margin-bottom: 0; +} + +.yang-detail-pane { + flex: 1; + min-width: 0; +} + +/* Tree list */ +.yang-tree, +.yt-children { + list-style: none; + padding: 0; + margin: 0; +} + +.yang-tree { padding: 0.25rem 0; } + +.yt-children { + padding-left: 0; + margin-left: 1.1rem; +} + +.yt-children > li { + position: relative; + padding-left: 1rem; +} + +/* Horizontal arm */ +.yt-children > li::before { + content: ''; + position: absolute; + left: 0; + top: 0.85rem; + width: 0.75rem; + height: 1px; + background: var(--border); +} + +/* Vertical connector — runs full height for non-last, stops at arm for last */ +.yt-children > li::after { + content: ''; + position: absolute; + left: 0; + top: 0; + width: 1px; + background: var(--border); +} +.yt-children > li:not(:last-child)::after { + bottom: 0; +} +.yt-children > li:last-child::after { + height: calc(0.85rem + 1px); +} + +/* Style for the root "/" node */ +.yt-root { + font-family: var(--font-mono); + font-weight: 600; + color: var(--fg-muted); +} + +/* Expandable node (container / list) */ +.yt-node { + position: relative; +} + +/* Suppress the browser's default triangle on */ +.yt-node > summary { list-style: none; } +.yt-node > summary::-webkit-details-marker { display: none; 
} + +/* Shared label style for both and '; + } + + // Re-index all inputs in a tbody so names stay sequential after add/delete. + function renumber(tbody) { + tbody.querySelectorAll('tr').forEach(function(row, i) { + row.querySelectorAll('input').forEach(function(inp) { + inp.name = inp.name.replace(/_\d+$/, '_' + i); + }); + }); + } + + document.addEventListener('click', function(e) { + var addBtn = e.target.closest('.btn-add-row'); + if (addBtn) { + var tbodyId = addBtn.getAttribute('data-table'); + var tmplKey = addBtn.getAttribute('data-template'); + var tbody = document.getElementById(tbodyId); + if (!tbody || !rowTemplates[tmplKey]) { return; } + var idx = tbody.querySelectorAll('tr').length; + tbody.insertAdjacentHTML('beforeend', rowTemplates[tmplKey](idx)); + var newInput = tbody.querySelector('tr:last-child input'); + if (newInput) { newInput.focus(); } + return; + } + + var delBtn = e.target.closest('.cfg-delete-row'); + if (delBtn) { + var row = delBtn.closest('tr'); + var tbody = row && row.closest('tbody'); + if (row) { row.remove(); } + if (tbody) { renumber(tbody); } + } + }); +})(); + +// ─── YANG tree accordion ─────────────────────────────────────────────────── +// When any node opens, collapse its siblings at the same level so only one +// subtree is expanded at a time. Works at every depth (top-level modules, +// list instances, nested containers). toggle doesn't bubble — use capture. 
+(function() { + document.addEventListener('toggle', function(e) { + var node = e.target; + if (!node.classList || !node.classList.contains('yt-node') || !node.open) return; + var li = node.parentElement; + var ul = li && li.parentElement; + if (!ul) return; + ul.querySelectorAll(':scope > li > details.yt-node').forEach(function(d) { + if (d !== node && d.open) d.removeAttribute('open'); + }); + }, true); +})(); + +// ─── ⓘ field-info tooltip (position:fixed to escape overflow clipping) ─────── +(function() { + var tip = null; + + function getTip() { + if (!tip) { + tip = document.createElement('div'); + tip.id = 'field-tip'; + document.body.appendChild(tip); + } + return tip; + } + + document.addEventListener('mouseover', function(e) { + var el = e.target.closest('.field-info[data-tip]'); + if (!el) return; + var t = getTip(); + t.textContent = el.getAttribute('data-tip'); + t.style.display = 'block'; + var r = el.getBoundingClientRect(); + // Position above the icon, centred; clamp to viewport edges. + var left = r.left + r.width / 2 - t.offsetWidth / 2; + var top = r.top - t.offsetHeight - 6; + if (left < 8) left = 8; + if (left + t.offsetWidth > window.innerWidth - 8) left = window.innerWidth - t.offsetWidth - 8; + if (top < 8) top = r.bottom + 6; // flip below if no room above + t.style.left = left + 'px'; + t.style.top = top + 'px'; + }); + + document.addEventListener('mouseout', function(e) { + var el = e.target.closest('.field-info[data-tip]'); + if (!el) return; + var t = getTip(); + t.style.display = 'none'; + }); +})(); + +// ─── Configure interaction log ───────────────────────────────────────────── +// Persists save/error events in sessionStorage so they survive page reloads +// (Apply/Abort both do HX-Refresh). Cleared on logout. 
+var cfgLogEntries = (function() { + try { return JSON.parse(sessionStorage.getItem('cfgLog') || '[]'); } catch(e) { return []; } +})(); + +function cfgIsoNow() { + var d = new Date(); + return d.getFullYear() + '-' + + String(d.getMonth() + 1).padStart(2, '0') + '-' + + String(d.getDate()).padStart(2, '0') + ' ' + + String(d.getHours()).padStart(2, '0') + ':' + + String(d.getMinutes()).padStart(2, '0') + ':' + + String(d.getSeconds()).padStart(2, '0'); +} + +function cfgLog(level, msg) { + cfgLogEntries.push({ level: level, msg: msg, ts: cfgIsoNow() }); + if (cfgLogEntries.length > 200) cfgLogEntries.shift(); + try { sessionStorage.setItem('cfgLog', JSON.stringify(cfgLogEntries)); } catch(e) {} + var badge = document.getElementById('cfg-log-badge'); + if (badge && level === 'error') { + badge.hidden = false; + badge.textContent = cfgLogEntries.filter(function(e) { return e.level === 'error'; }).length; + } +} + +// Restore error badge count after a page reload (entries came from sessionStorage). +document.addEventListener('DOMContentLoaded', function() { + var errCount = cfgLogEntries.filter(function(e) { return e.level === 'error'; }).length; + if (errCount > 0) { + var badge = document.getElementById('cfg-log-badge'); + if (badge) { badge.hidden = false; badge.textContent = errCount; } + } + // Clear log on logout so the next session starts fresh. + document.addEventListener('submit', function(e) { + if (e.target.closest('form[action="/logout"]')) { + try { sessionStorage.removeItem('cfgLog'); } catch(e2) {} + } + }); +}); + +function renderCfgLog() { + var panel = document.getElementById('cfg-log-panel'); + if (!panel) return; + if (cfgLogEntries.length === 0) { + panel.querySelector('.cfg-log-list').innerHTML = '
  • No activity yet.
  • '; + return; + } + var html = cfgLogEntries.slice().reverse().map(function(e) { + return '
  • ' + + '' + e.ts + ' ' + + '' + e.msg.replace(/
  • '; + }).join(''); + panel.querySelector('.cfg-log-list').innerHTML = html; +} + +// ─── Configure toolbar ───────────────────────────────────────────────────── +// Intercept HTMX's confirm event for toolbar buttons and show the custom modal. +// Apply / Apply & Save / Abort / Save-to-startup all respond with HX-Refresh. +(function () { + var toolbarLabels = { + 'cfg-apply-btn': 'Applied staged changes to running config', + 'cfg-apply-save-btn': 'Applied and saved to startup config', + 'cfg-abort-btn': 'Aborted: candidate reset to running config', + 'cfg-unsaved-save-btn': 'Saved running config to startup', + }; + document.addEventListener('htmx:confirm', function(e) { + var btn = e.detail.elt; + if (!btn) return; + var label = null; + for (var cls in toolbarLabels) { + if (btn.classList.contains(cls)) { label = toolbarLabels[cls]; break; } + } + if (!label) return; + e.preventDefault(); + openModal(e.detail.question, function() { + cfgLog('ok', label); + e.detail.issueRequest(true); + }); + }); + + // Log panel toggle / close. + document.addEventListener('click', function(e) { + if (e.target.closest('.cfg-log-close')) { + var panel = document.getElementById('cfg-log-panel'); + if (panel) panel.hidden = true; + return; + } + var btn = e.target.closest('#cfg-log-btn'); + if (!btn) return; + var panel = document.getElementById('cfg-log-panel'); + if (!panel) return; + panel.hidden = !panel.hidden; + if (!panel.hidden) { + renderCfgLog(); + var badge = document.getElementById('cfg-log-badge'); + if (badge) badge.hidden = true; + } + }); + + // cfgError: show error message in the .cfg-save-status span of the submitting form. + document.addEventListener('cfgError', function(e) { + var msg = e.detail && e.detail.value ? e.detail.value : 'Save failed'; + var form = e.target && e.target.closest('form'); + var span = form ? 
form.querySelector('.cfg-save-status') : null; + if (span) { + span.textContent = '✗ ' + msg; + span.classList.add('error'); + cfgLog('error', msg); + // Keep error visible until dismissed (click) or 30 s timeout. + var tid = setTimeout(function() { + span.textContent = ''; + span.classList.remove('error'); + }, 30000); + span.addEventListener('click', function once() { + clearTimeout(tid); + span.textContent = ''; + span.classList.remove('error'); + span.removeEventListener('click', once); + }); + } + }); + + // Show "Saved ✓" feedback when a card Save succeeds. + (function() { + var LS_KEY = 'fw-url-history'; + var MAX_HIST = 10; + + function loadHistory() { + try { return JSON.parse(localStorage.getItem(LS_KEY) || '[]'); } + catch (e) { return []; } + } + + function saveURL(url) { + var hist = loadHistory().filter(function(u) { return u !== url; }); + hist.unshift(url); + if (hist.length > MAX_HIST) hist = hist.slice(0, MAX_HIST); + localStorage.setItem(LS_KEY, JSON.stringify(hist)); + } + + function populateDatalist() { + var dl = document.getElementById('fw-url-history'); + if (!dl) return; + dl.innerHTML = loadHistory().map(function(u) { + return '