diff --git a/daemon/conman/connection.go b/daemon/conman/connection.go index fe9f671766..c4347a18fc 100644 --- a/daemon/conman/connection.go +++ b/daemon/conman/connection.go @@ -111,7 +111,7 @@ func newConnectionImpl(nfp *netfilter.Packet, c *Connection, protoType string) ( c.Process.ReadCmdline() c.Process.CWD = aevent.ProcDir audit.Lock.RUnlock() - // if the proc dir contains non alhpa-numeric chars the field is empty + // if the proc dir contains non alpha-numeric chars the field is empty if c.Process.CWD == "" { c.Process.ReadCwd() } diff --git a/daemon/core/system.go b/daemon/core/system.go index 3b705affe4..3c8b37db64 100644 --- a/daemon/core/system.go +++ b/daemon/core/system.go @@ -160,7 +160,7 @@ func CheckSysRequirements() { ] ` - reqsFullfiled := true + reqsFulfilled := true dec := json.NewDecoder(strings.NewReader(reqsList)) for { var reqs []ReqsList @@ -187,13 +187,13 @@ func CheckSysRequirements() { if checkOk { fmt.Printf("\n\t* %s\t %s\n", log.Bold(log.Green(req.Item)), log.Bold(log.Green("✔"))) } else { - reqsFullfiled = false + reqsFulfilled = false fmt.Printf("\n\t* %s\t %s\n", log.Bold(log.Red(req.Item)), log.Bold(log.Red("✘"))) } fmt.Println() } } - if !reqsFullfiled { + if !reqsFulfilled { log.Raw("\n%sWARNING:%s Your kernel doesn't support some of the features OpenSnitch needs:\nRead more: https://github.com/evilsocket/opensnitch/issues/774\n", log.FG_WHITE+log.BG_YELLOW, log.RESET) } } diff --git a/daemon/dns/ebpfhook.go b/daemon/dns/ebpfhook.go index 21067e5ff4..3b2b65f83c 100644 --- a/daemon/dns/ebpfhook.go +++ b/daemon/dns/ebpfhook.go @@ -132,7 +132,7 @@ func ListenerEbpf(ebpfModPath string) error { // libbcc resolves the offsets for us. 
without bcc the offset for uprobes must parsed from the elf files // some how 0 must be replaced with the offset of getaddrinfo bcc does this using bcc_resolve_symname - // Attaching to uprobe using perf open might be a better aproach requires https://github.com/iovisor/gobpf/pull/277 + // Attaching to uprobe using perf open might be a better approach requires https://github.com/iovisor/gobpf/pull/277 libcFile, err := findLibc() if err != nil { diff --git a/daemon/dns/systemd/monitor.go b/daemon/dns/systemd/monitor.go index 82fc7d8d68..2a81fac8b2 100644 --- a/daemon/dns/systemd/monitor.go +++ b/daemon/dns/systemd/monitor.go @@ -108,13 +108,13 @@ type ResolvedMonitor struct { // connection with the systemd-resolved unix socket: // /run/systemd/resolve/io.systemd.Resolve.Monitor Conn *varlink.Connection - // channel where all the DNS respones will be sent + // channel where all the DNS responses will be sent ChanResponse chan *MonitorResponse // error channel to signal any problem ChanConnError chan error - // callback that is emited when systemd-resolved resolves a domain name. + // callback that is emitted when systemd-resolved resolves a domain name. receiverCb resolvedCallback mu *sync.RWMutex connected bool diff --git a/daemon/firewall/config/config.go b/daemon/firewall/config/config.go index 12a6b7037b..bd56c3bbdc 100644 --- a/daemon/firewall/config/config.go +++ b/daemon/firewall/config/config.go @@ -193,7 +193,7 @@ func (c *Config) LoadDiskConfiguration(reload bool) error { return nil } -// loadConfigutation reads the system firewall rules from disk. +// loadConfiguration reads the system firewall rules from disk. // Then the rules are added based on the configuration defined. 
func (c *Config) loadConfiguration(rawConfig []byte) error { c.SysConfig.Lock() diff --git a/daemon/firewall/nftables/exprs/nat.go b/daemon/firewall/nftables/exprs/nat.go index 207eb381df..180374905c 100644 --- a/daemon/firewall/nftables/exprs/nat.go +++ b/daemon/firewall/nftables/exprs/nat.go @@ -141,7 +141,7 @@ func NewExprDNAT() *expr.NAT { // NewExprTproxy returns a new tproxy expression. // XXX: is "to x.x.x.x:1234" supported by google/nftables lib? or only "to :1234"? -// it creates an erronous rule. +// it creates an erroneous rule. func NewExprTproxy() *[]expr.Any { return &[]expr.Any{ &expr.TProxy{ diff --git a/daemon/log/loggers/remote.go b/daemon/log/loggers/remote.go index d5079132f7..9fa406504c 100644 --- a/daemon/log/loggers/remote.go +++ b/daemon/log/loggers/remote.go @@ -54,7 +54,7 @@ type Remote struct { // Name of the logger Name string - // channel used to write mesages + // channel used to write messages writerChan chan string Tag string diff --git a/daemon/netlink/procmon/procmon.go b/daemon/netlink/procmon/procmon.go index 15a5a63f49..161481d2a8 100644 --- a/daemon/netlink/procmon/procmon.go +++ b/daemon/netlink/procmon/procmon.go @@ -26,9 +26,9 @@ type ProcEvent struct { } // ProcEventsMonitor listens for process events from kernel. -// We listen for events via netlink, from the Process Events Conector: +// We listen for events via netlink, from the Process Events Connector: // https://lwn.net/Articles/157150/ -// The kernel must have the options CONFIG_CONECTOR and CONFIG_PROC_EVENTS enabled. +// The kernel must have the options CONFIG_CONNECTOR and CONFIG_PROC_EVENTS enabled. 
func ProcEventsMonitor(done <-chan struct{}) { log.Info("ProcEventMonitor started\n") runtime.LockOSThread() diff --git a/daemon/netlink/socket.go b/daemon/netlink/socket.go index 886a2b08d9..17c559937c 100644 --- a/daemon/netlink/socket.go +++ b/daemon/netlink/socket.go @@ -75,7 +75,7 @@ func GetSocketInfo(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPo log.Debug("GetSocketInfo() invalid: %d:%v -> %v:%d", sock.ID.SourcePort, sock.ID.Source, sock.ID.Destination, sock.ID.DestinationPort) } - // handle special cases (see function description): ntp queries (123), broadcasts, incomming connections. + // handle special cases (see function description): ntp queries (123), broadcasts, incoming connections. if len(inodes) == 0 && len(sockList) > 0 { for n, sock := range sockList { if sockList[n].ID.Destination.Equal(net.IPv4zero) || sockList[n].ID.Destination.Equal(net.IPv6zero) { @@ -216,7 +216,7 @@ func FlushConnections() { log.Error("error flushing ConntrackTable %s", err) } if err := netlink.ConntrackTableFlush(netlink.ConntrackExpectTable); err != nil { - log.Error("error flusing ConntrackExpectTable %s", err) + log.Error("error flushing ConntrackExpectTable %s", err) } // Force established connections to reestablish again. diff --git a/daemon/procmon/cache_events_test.go b/daemon/procmon/cache_events_test.go index 62b212c306..b89daafbdb 100644 --- a/daemon/procmon/cache_events_test.go +++ b/daemon/procmon/cache_events_test.go @@ -77,7 +77,7 @@ func TestCacheEvents2(t *testing.T) { }) // This process does not exist, so it should be removed from cache - // inmediately. We wait a couple of seconds, before deleting it. + // immediately. We wait a couple of seconds, before deleting it. // See exitDelay description for more info. 
t.Run("Delete() !isAlive()", func(t *testing.T) { evtsCache.Delete(fakePid) diff --git a/daemon/procmon/details.go b/daemon/procmon/details.go index c9f79e2044..fbda909020 100644 --- a/daemon/procmon/details.go +++ b/daemon/procmon/details.go @@ -334,7 +334,7 @@ func (p *Process) ReadCmdline() { } // CleanArgs applies fixes on the cmdline arguments. -// - AppImages cmdline reports the execuable launched as /proc/self/exe, +// - AppImages cmdline reports the executable launched as /proc/self/exe, // instead of the actual path to the binary. // - For processes launched from a file descriptor, leave them with the orig // path, which usually starts with /proc/*/fd/. @@ -497,7 +497,7 @@ func (p *Process) ResetChecksums() { } // ComputeChecksums calculates the checksums of a the process path to the binary. -// Users may want to use different hashing alogrithms. +// Users may want to use different hashing algorithms. func (p *Process) ComputeChecksums(hashes map[string]uint) { if p.IsAlive() && len(p.Checksums) > 0 { log.Debug("process.ComputeChecksums() already hashed: %d, path: %s, %v", p.ID, p.Path, p.Checksums) diff --git a/daemon/procmon/ebpf/utils.go b/daemon/procmon/ebpf/utils.go index a1693b6d31..149ad2f403 100644 --- a/daemon/procmon/ebpf/utils.go +++ b/daemon/procmon/ebpf/utils.go @@ -96,7 +96,7 @@ func getItems(proto string, isIPv6 bool) (items uint) { } // deleteOldItems deletes maps' elements in order to keep them below maximum capacity. -// If ebpf maps are full they don't allow any more insertions, ending up lossing events. +// If ebpf maps are full they don't allow any more insertions, ending up losing events. 
func deleteOldItems(proto string, isIPv6 bool, maxToDelete uint) (deleted uint) { var lookupKey []byte var nextKey []byte diff --git a/daemon/rule/loader_test.go index 262f231565..9290e5eb51 100644 --- a/daemon/rule/loader_test.go +++ b/daemon/rule/loader_test.go @@ -99,7 +99,7 @@ func TestRuleLoaderInvalidRegexp(t *testing.T) { // Test rules of type operator.list. There're these scenarios: // - Enabled rules: // * operator Data field is ignored if it contains the list of operators as json string. -// * the operarots list is expanded as json objecs under "list": [] +// the operators list is expanded as json objects // For new rules (> v1.6.3), Data field will be empty. // // - Disabled rules diff --git a/daemon/rule/operator_lists.go index 0183809329..b77dfea056 100644 --- a/daemon/rule/operator_lists.go +++ b/daemon/rule/operator_lists.go @@ -67,7 +67,7 @@ func (o *Operator) monitorLists() { if needReload { // we can't reload a single list, because the domains of all lists are added to the same map. // we could have the domains separated by lists/files, but then we'd need to iterate the map in order - // to match a domain. Reloading the lists shoud only occur once a day. + // to match a domain. Reloading the lists should only occur once a day.
if err := o.readLists(); err != nil { log.Warning("%s", err) } diff --git a/daemon/rule/operator_test.go index f7fc27a2b8..b29584efbe 100644 --- a/daemon/rule/operator_test.go +++ b/daemon/rule/operator_test.go @@ -665,7 +665,7 @@ func TestNewOperatorListsDomainsRegexp(t *testing.T) { } // Must be launched with -race to test that we don't cause leaks -// Race occured on operator.go:241 reListCmp().MathString() +// Race occurred on operator.go:241 reListCmp().MatchString() // fixed here: 53419fe func TestRaceNewOperatorListsDomainsRegexp(t *testing.T) { t.Log("Test NewOperator() Lists domains_regexp") diff --git a/daemon/statistics/stats.go index e9832d43b5..2b3801795c 100644 --- a/daemon/statistics/stats.go +++ b/daemon/statistics/stats.go @@ -14,7 +14,7 @@ import ( "github.com/evilsocket/opensnitch/daemon/ui/protocol" ) -// StatsConfig holds the stats confguration +// StatsConfig holds the stats configuration type StatsConfig struct { MaxEvents int `json:"MaxEvents"` MaxStats int `json:"MaxStats"` diff --git a/daemon/tasks/nodemonitor/main_test.go index ae3a03c7ca..ec172603b6 100644 --- a/daemon/tasks/nodemonitor/main_test.go +++ b/daemon/tasks/nodemonitor/main_test.go @@ -54,7 +54,7 @@ func TestNodeMonitor(t *testing.T) { var sysinfo syscall.Sysinfo_t err = json.Unmarshal([]byte(sysinfoRaw), &sysinfo) if err != nil { - t.Error("Error unmarshaling response:", err) + t.Error("Error unmarshalling response:", err) } }) diff --git a/daemon/tasks/pidmonitor/main_test.go index ae9a9d2db5..99bb825d31 100644 --- a/daemon/tasks/pidmonitor/main_test.go +++ b/daemon/tasks/pidmonitor/main_test.go @@ -56,7 +56,7 @@ func TestPIDMonitor(t *testing.T) { var proc procmon.Process err = json.Unmarshal([]byte(procRaw), &proc) if err != nil { - t.Error("Error unmarshaling response:", err) + t.Error("Error unmarshalling response:", err) } if proc.ID
!= ourPID { t.Error("invalid Process object received:", ourPID, proc) diff --git a/daemon/ui/alerts.go index 1496979ae7..adac60021c 100644 --- a/daemon/ui/alerts.go +++ b/daemon/ui/alerts.go @@ -102,7 +102,7 @@ func (c *Client) alertsDispatcher() { case ready := <-c.isConnected: connected = ready if ready { - log.Important("UI connected, dispathing queued alerts: %d", len(c.alertsChan)) + log.Important("UI connected, dispatching queued alerts: %d", len(c.alertsChan)) for { if isQueueEmpty(queuedAlerts) { // no more queued alerts, exit diff --git a/ui/opensnitch/actions/__init__.py index e8827db444..92d8ab6a72 100644 --- a/ui/opensnitch/actions/__init__.py +++ b/ui/opensnitch/actions/__init__.py @@ -66,7 +66,7 @@ class Actions(QObject): "actions" is the list of actions to execute: - the name of the action defines the python plugin to load: - "highligh" -> plugins/highligh/highlight.py + "highlight" -> plugins/highlight/highlight.py "downloader" -> plugins/downloader/downloader.py, etc. - every action has its own plugin (*.py file) which is in charge of parse and compile to configuration if needed.
diff --git a/ui/opensnitch/customwidgets/firewalltableview.py b/ui/opensnitch/customwidgets/firewalltableview.py index b9900fc064..f06c67cf4a 100644 --- a/ui/opensnitch/customwidgets/firewalltableview.py +++ b/ui/opensnitch/customwidgets/firewalltableview.py @@ -126,7 +126,7 @@ def reorderRows(self, action, row): def refresh(self, force=False): self.fillVisibleRows(0, force, *self.lastQueryArgs) - #Some QSqlQueryModel methods must be mimiced so that this class can serve as a drop-in replacement + #Some QSqlQueryModel methods must be mimicked so that this class can serve as a drop-in replacement #mimic QSqlQueryModel.query() def query(self): return self diff --git a/ui/opensnitch/customwidgets/generictableview.py b/ui/opensnitch/customwidgets/generictableview.py index 1ac000726c..c3caca8503 100644 --- a/ui/opensnitch/customwidgets/generictableview.py +++ b/ui/opensnitch/customwidgets/generictableview.py @@ -40,7 +40,7 @@ def __init__(self, tableName, headerLabels): QStandardItemModel.__init__(self, 0, self.lastColumnCount) self.setHorizontalHeaderLabels(self.headerLabels) - #Some QSqlQueryModel methods must be mimiced so that this class can serve as a drop-in replacement + #Some QSqlQueryModel methods must be mimicked so that this class can serve as a drop-in replacement #mimic QSqlQueryModel.query() def query(self): return self @@ -159,7 +159,7 @@ def fillVisibleRows(self, q, upperBound, force=False): for x in range(0, upperBound): q.next() if q.at() < 0: - # if we don't set query to a valid record here, it gets stucked + # if we don't set query to a valid record here, it gets stuck # forever at -2/-1. q.seek(upperBound) break @@ -404,7 +404,7 @@ def onRowCountChanged(self): self.vScrollBar.setVisible(True if totalCount > self.maxRowsInViewport else False) self.vScrollBar.setMinimum(0) - # we need to substract the displayed rows to the total rows, to scroll + # we need to subtract the displayed rows to the total rows, to scroll # down correctly. 
self.vScrollBar.setMaximum(max(0, totalCount - self.maxRowsInViewport+1)) diff --git a/ui/opensnitch/customwidgets/main.py b/ui/opensnitch/customwidgets/main.py index cb86ab0290..4e26cbb9ff 100644 --- a/ui/opensnitch/customwidgets/main.py +++ b/ui/opensnitch/customwidgets/main.py @@ -92,7 +92,7 @@ def __init__(self): QStandardItemModel.__init__(self, 0, len(self.headerLabels)) self.setHorizontalHeaderLabels(self.headerLabels) - #Some QSqlQueryModel methods must be mimiced so that this class can serve as a drop-in replacement + #Some QSqlQueryModel methods must be mimicked so that this class can serve as a drop-in replacement #mimic QSqlQueryModel.query() def query(self): return self diff --git a/ui/opensnitch/dialogs/firewall_rule.py b/ui/opensnitch/dialogs/firewall_rule.py index 9f7335bd67..c5407de7af 100644 --- a/ui/opensnitch/dialogs/firewall_rule.py +++ b/ui/opensnitch/dialogs/firewall_rule.py @@ -1549,7 +1549,7 @@ def _reset_fields(self): self.uuid = "" def _enable_save(self, enable=True): - """Enable Save buton whenever some detail of a route changes. + """Enable Save button whenever some detail of a route changes. The button may or not be hidden. If we're editing a rule it'll be shown but disabled/enabled. 
""" diff --git a/ui/opensnitch/dialogs/stats.py b/ui/opensnitch/dialogs/stats.py index 87bda2e564..40b6f53c4f 100644 --- a/ui/opensnitch/dialogs/stats.py +++ b/ui/opensnitch/dialogs/stats.py @@ -839,7 +839,7 @@ def __init__(self, parent=None, address=None, db=None, dbname="db", appicon=None self._configure_plugins() #Sometimes a maximized window which had been minimized earlier won't unminimize - #To workaround, we explicitely maximize such windows when unminimizing happens + #To workaround, we explicitly maximize such windows when unminimizing happens def changeEvent(self, event): if event.type() == QtCore.QEvent.WindowStateChange: if event.oldState() & QtCore.Qt.WindowMinimized and event.oldState() & QtCore.Qt.WindowMaximized: @@ -1732,7 +1732,7 @@ def _cb_tab_changed(self, index): self._monitor_node_netstat() else: if index == self.TAB_RULES: - # display the clean buton only if not in detail view + # display the clean button only if not in detail view self.TABLES[index]['cmdCleanStats'].setVisible( self.IN_DETAIL_VIEW[index] ) self._add_rulesTree_nodes() @@ -2581,7 +2581,7 @@ def _restore_scroll_value(self): def _restore_details_view_columns(self, header, settings_key): header.blockSignals(True); # In order to resize the last column of a view, we firstly force a - # resizeToContens call. + # resizeToContents call. # Secondly set resizeMode to Interactive (allow to move columns by # users + programmatically) header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents) diff --git a/ui/opensnitch/plugins/__init__.py b/ui/opensnitch/plugins/__init__.py index 0ec7b7301b..a20f6b6bdf 100644 --- a/ui/opensnitch/plugins/__init__.py +++ b/ui/opensnitch/plugins/__init__.py @@ -73,7 +73,7 @@ class PluginBase(PluginsList, ABC): When calling compile(), every plugin must create the python objects needed, so they can be reused later when run() is called. 
- When calling configure(), every plugin is responsable to modify the GUI + When calling configure(), every plugin is responsible to modify the GUI as needed, adding new buttons, modifying existing widgets, behaviour, ... """ name = "PluginBase" diff --git a/ui/opensnitch/plugins/downloader/downloader.py b/ui/opensnitch/plugins/downloader/downloader.py index e3ca58279f..3143d6954f 100644 --- a/ui/opensnitch/plugins/downloader/downloader.py +++ b/ui/opensnitch/plugins/downloader/downloader.py @@ -25,7 +25,7 @@ class Downloader(PluginBase): This plugin may require to create a rule to allow connections to the configured urls, to avoid popups. """ - # fields overriden from parent class + # fields overridden from parent class name = "Downloader" version = 0 author = "opensnitch" diff --git a/ui/opensnitch/plugins/sample/sample.py b/ui/opensnitch/plugins/sample/sample.py index fcd119545a..6537a8c737 100644 --- a/ui/opensnitch/plugins/sample/sample.py +++ b/ui/opensnitch/plugins/sample/sample.py @@ -2,7 +2,7 @@ class Sample(PluginBase): - # fields overriden from parent class + # fields overridden from parent class name = "Sample" version = 0 author = "opensnitch" diff --git a/ui/opensnitch/plugins/virustotal/virustotal.py b/ui/opensnitch/plugins/virustotal/virustotal.py index ac7b6cc634..b540db218f 100644 --- a/ui/opensnitch/plugins/virustotal/virustotal.py +++ b/ui/opensnitch/plugins/virustotal/virustotal.py @@ -347,7 +347,7 @@ def update_popup(self, what, response, parent, config, errmsg=None): #print("[Virustotal] RESULT:\n", conn.dst_host, "\n", result['data']['attributes']['last_analysis_stats']) # XXX: if we analyze multiple objects (domains, ips, hashes...), - # onlye the last response is stored. + # only the last response is stored. 
_popups.add_vt_response(parent, result) malicious = self.is_malicious(verdict['malicious']) diff --git a/ui/opensnitch/rules.py b/ui/opensnitch/rules.py index a0939c422b..fcb3e9ffe6 100644 --- a/ui/opensnitch/rules.py +++ b/ui/opensnitch/rules.py @@ -69,7 +69,7 @@ def new_from_records(records): ]) rule.operator.data = "" except Exception as e: - print("new_from_records exception parsing operartor list:", e) + print("new_from_records exception parsing operator list:", e) return rule