Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion daemon/conman/connection.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ func newConnectionImpl(nfp *netfilter.Packet, c *Connection, protoType string) (
c.Process.ReadCmdline()
c.Process.CWD = aevent.ProcDir
audit.Lock.RUnlock()
// if the proc dir contains non alhpa-numeric chars the field is empty
// if the proc dir contains non alpha-numeric chars the field is empty
if c.Process.CWD == "" {
c.Process.ReadCwd()
}
Expand Down
6 changes: 3 additions & 3 deletions daemon/core/system.go
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ func CheckSysRequirements() {
]
`

reqsFullfiled := true
reqsFulfilled := true
dec := json.NewDecoder(strings.NewReader(reqsList))
for {
var reqs []ReqsList
Expand All @@ -187,13 +187,13 @@ func CheckSysRequirements() {
if checkOk {
fmt.Printf("\n\t* %s\t %s\n", log.Bold(log.Green(req.Item)), log.Bold(log.Green("✔")))
} else {
reqsFullfiled = false
reqsFulfilled = false
fmt.Printf("\n\t* %s\t %s\n", log.Bold(log.Red(req.Item)), log.Bold(log.Red("✘")))
}
fmt.Println()
}
}
if !reqsFullfiled {
if !reqsFulfilled {
log.Raw("\n%sWARNING:%s Your kernel doesn't support some of the features OpenSnitch needs:\nRead more: https://github.com/evilsocket/opensnitch/issues/774\n", log.FG_WHITE+log.BG_YELLOW, log.RESET)
}
}
2 changes: 1 addition & 1 deletion daemon/dns/ebpfhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ func ListenerEbpf(ebpfModPath string) error {
// libbcc resolves the offsets for us. without bcc the offset for uprobes must parsed from the elf files
// some how 0 must be replaced with the offset of getaddrinfo bcc does this using bcc_resolve_symname

// Attaching to uprobe using perf open might be a better aproach requires https://github.com/iovisor/gobpf/pull/277
// Attaching to uprobe using perf open might be a better approach requires https://github.com/iovisor/gobpf/pull/277

libcFile, err := findLibc()
if err != nil {
Expand Down
4 changes: 2 additions & 2 deletions daemon/dns/systemd/monitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,13 +108,13 @@ type ResolvedMonitor struct {
// connection with the systemd-resolved unix socket:
// /run/systemd/resolve/io.systemd.Resolve.Monitor
Conn *varlink.Connection
// channel where all the DNS respones will be sent
// channel where all the DNS responses will be sent
ChanResponse chan *MonitorResponse

// error channel to signal any problem
ChanConnError chan error

// callback that is emited when systemd-resolved resolves a domain name.
// callback that is emitted when systemd-resolved resolves a domain name.
receiverCb resolvedCallback
mu *sync.RWMutex
connected bool
Expand Down
2 changes: 1 addition & 1 deletion daemon/firewall/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,7 @@ func (c *Config) LoadDiskConfiguration(reload bool) error {
return nil
}

// loadConfigutation reads the system firewall rules from disk.
// loadConfiguration reads the system firewall rules from disk.
// Then the rules are added based on the configuration defined.
func (c *Config) loadConfiguration(rawConfig []byte) error {
c.SysConfig.Lock()
Expand Down
2 changes: 1 addition & 1 deletion daemon/firewall/nftables/exprs/nat.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ func NewExprDNAT() *expr.NAT {

// NewExprTproxy returns a new tproxy expression.
// XXX: is "to x.x.x.x:1234" supported by google/nftables lib? or only "to :1234"?
// it creates an erronous rule.
// it creates an erroneous rule.
func NewExprTproxy() *[]expr.Any {
return &[]expr.Any{
&expr.TProxy{
Expand Down
2 changes: 1 addition & 1 deletion daemon/log/loggers/remote.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ type Remote struct {
// Name of the logger
Name string

// channel used to write mesages
// channel used to write messages
writerChan chan string

Tag string
Expand Down
4 changes: 2 additions & 2 deletions daemon/netlink/procmon/procmon.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,9 @@ type ProcEvent struct {
}

// ProcEventsMonitor listens for process events from kernel.
// We listen for events via netlink, from the Process Events Conector:
// We listen for events via netlink, from the Process Events Connector:
// https://lwn.net/Articles/157150/
// The kernel must have the options CONFIG_CONECTOR and CONFIG_PROC_EVENTS enabled.
// The kernel must have the options CONFIG_CONNECTOR and CONFIG_PROC_EVENTS enabled.
func ProcEventsMonitor(done <-chan struct{}) {
log.Info("ProcEventMonitor started\n")
runtime.LockOSThread()
Expand Down
4 changes: 2 additions & 2 deletions daemon/netlink/socket.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ func GetSocketInfo(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPo
log.Debug("GetSocketInfo() invalid: %d:%v -> %v:%d", sock.ID.SourcePort, sock.ID.Source, sock.ID.Destination, sock.ID.DestinationPort)
}

// handle special cases (see function description): ntp queries (123), broadcasts, incomming connections.
// handle special cases (see function description): ntp queries (123), broadcasts, incoming connections.
if len(inodes) == 0 && len(sockList) > 0 {
for n, sock := range sockList {
if sockList[n].ID.Destination.Equal(net.IPv4zero) || sockList[n].ID.Destination.Equal(net.IPv6zero) {
Expand Down Expand Up @@ -216,7 +216,7 @@ func FlushConnections() {
log.Error("error flushing ConntrackTable %s", err)
}
if err := netlink.ConntrackTableFlush(netlink.ConntrackExpectTable); err != nil {
log.Error("error flusing ConntrackExpectTable %s", err)
log.Error("error flushing ConntrackExpectTable %s", err)
}

// Force established connections to reestablish again.
Expand Down
2 changes: 1 addition & 1 deletion daemon/procmon/cache_events_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func TestCacheEvents2(t *testing.T) {
})

// This process does not exist, so it should be removed from cache
// inmediately. We wait a couple of seconds, before deleting it.
// immediately. We wait a couple of seconds, before deleting it.
// See exitDelay description for more info.
t.Run("Delete() !isAlive()", func(t *testing.T) {
evtsCache.Delete(fakePid)
Expand Down
4 changes: 2 additions & 2 deletions daemon/procmon/details.go
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ func (p *Process) ReadCmdline() {
}

// CleanArgs applies fixes on the cmdline arguments.
// - AppImages cmdline reports the execuable launched as /proc/self/exe,
// - AppImages cmdline reports the executable launched as /proc/self/exe,
// instead of the actual path to the binary.
// - For processes launched from a file descriptor, leave them with the orig
// path, which usually starts with /proc/*/fd/<number>.
Expand Down Expand Up @@ -497,7 +497,7 @@ func (p *Process) ResetChecksums() {
}

// ComputeChecksums calculates the checksums of a the process path to the binary.
// Users may want to use different hashing alogrithms.
// Users may want to use different hashing algorithms.
func (p *Process) ComputeChecksums(hashes map[string]uint) {
if p.IsAlive() && len(p.Checksums) > 0 {
log.Debug("process.ComputeChecksums() already hashed: %d, path: %s, %v", p.ID, p.Path, p.Checksums)
Expand Down
2 changes: 1 addition & 1 deletion daemon/procmon/ebpf/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ func getItems(proto string, isIPv6 bool) (items uint) {
}

// deleteOldItems deletes maps' elements in order to keep them below maximum capacity.
// If ebpf maps are full they don't allow any more insertions, ending up lossing events.
// If ebpf maps are full they don't allow any more insertions, ending up losing events.
func deleteOldItems(proto string, isIPv6 bool, maxToDelete uint) (deleted uint) {
var lookupKey []byte
var nextKey []byte
Expand Down
2 changes: 1 addition & 1 deletion daemon/rule/loader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ func TestRuleLoaderInvalidRegexp(t *testing.T) {
// Test rules of type operator.list. There're these scenarios:
// - Enabled rules:
// * operator Data field is ignored if it contains the list of operators as json string.
// * the operarots list is expanded as json objecs under "list": []
// * the operators list is expanded as json objects under "list": []
// For new rules (> v1.6.3), Data field will be empty.
//
// - Disabled rules
Expand Down
2 changes: 1 addition & 1 deletion daemon/rule/operator_lists.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ func (o *Operator) monitorLists() {
if needReload {
// we can't reload a single list, because the domains of all lists are added to the same map.
// we could have the domains separated by lists/files, but then we'd need to iterate the map in order
// to match a domain. Reloading the lists shoud only occur once a day.
// to match a domain. Reloading the lists should only occur once a day.
if err := o.readLists(); err != nil {
log.Warning("%s", err)
}
Expand Down
2 changes: 1 addition & 1 deletion daemon/rule/operator_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -665,7 +665,7 @@ func TestNewOperatorListsDomainsRegexp(t *testing.T) {
}

// Must be launched with -race to test that we don't cause leaks
// Race occured on operator.go:241 reListCmp().MathString()
// Race occurred on operator.go:241 reListCmp().MatchString()
// fixed here: 53419fe
func TestRaceNewOperatorListsDomainsRegexp(t *testing.T) {
t.Log("Test NewOperator() Lists domains_regexp")
Expand Down
2 changes: 1 addition & 1 deletion daemon/statistics/stats.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import (
"github.com/evilsocket/opensnitch/daemon/ui/protocol"
)

// StatsConfig holds the stats confguration
// StatsConfig holds the stats configuration
type StatsConfig struct {
MaxEvents int `json:"MaxEvents"`
MaxStats int `json:"MaxStats"`
Expand Down
2 changes: 1 addition & 1 deletion daemon/tasks/nodemonitor/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ func TestNodeMonitor(t *testing.T) {
var sysinfo syscall.Sysinfo_t
err = json.Unmarshal([]byte(sysinfoRaw), &sysinfo)
if err != nil {
t.Error("Error unmarshaling response:", err)
t.Error("Error unmarshalling response:", err)
}
})

Expand Down
2 changes: 1 addition & 1 deletion daemon/tasks/pidmonitor/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ func TestPIDMonitor(t *testing.T) {
var proc procmon.Process
err = json.Unmarshal([]byte(procRaw), &proc)
if err != nil {
t.Error("Error unmarshaling response:", err)
t.Error("Error unmarshalling response:", err)
}
if proc.ID != ourPID {
t.Error("invalid Process object received:", ourPID, proc)
Expand Down
2 changes: 1 addition & 1 deletion daemon/ui/alerts.go
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ func (c *Client) alertsDispatcher() {
case ready := <-c.isConnected:
connected = ready
if ready {
log.Important("UI connected, dispathing queued alerts: %d", len(c.alertsChan))
log.Important("UI connected, dispatching queued alerts: %d", len(c.alertsChan))
for {
if isQueueEmpty(queuedAlerts) {
// no more queued alerts, exit
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/actions/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ class Actions(QObject):
"actions" is the list of actions to execute:

- the name of the action defines the python plugin to load:
"highligh" -> plugins/highligh/highlight.py
"highlight" -> plugins/highlight/highlight.py
"downloader" -> plugins/downloader/downloader.py, etc.
- every action has its own plugin (*.py file) which is in charge
of parse and compile to configuration if needed.
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/customwidgets/firewalltableview.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ def reorderRows(self, action, row):
def refresh(self, force=False):
self.fillVisibleRows(0, force, *self.lastQueryArgs)

#Some QSqlQueryModel methods must be mimiced so that this class can serve as a drop-in replacement
#Some QSqlQueryModel methods must be mimicked so that this class can serve as a drop-in replacement
#mimic QSqlQueryModel.query()
def query(self):
return self
Expand Down
6 changes: 3 additions & 3 deletions ui/opensnitch/customwidgets/generictableview.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def __init__(self, tableName, headerLabels):
QStandardItemModel.__init__(self, 0, self.lastColumnCount)
self.setHorizontalHeaderLabels(self.headerLabels)

#Some QSqlQueryModel methods must be mimiced so that this class can serve as a drop-in replacement
#Some QSqlQueryModel methods must be mimicked so that this class can serve as a drop-in replacement
#mimic QSqlQueryModel.query()
def query(self):
return self
Expand Down Expand Up @@ -159,7 +159,7 @@ def fillVisibleRows(self, q, upperBound, force=False):
for x in range(0, upperBound):
q.next()
if q.at() < 0:
# if we don't set query to a valid record here, it gets stucked
# if we don't set query to a valid record here, it gets stuck
# forever at -2/-1.
q.seek(upperBound)
break
Expand Down Expand Up @@ -404,7 +404,7 @@ def onRowCountChanged(self):
self.vScrollBar.setVisible(True if totalCount > self.maxRowsInViewport else False)

self.vScrollBar.setMinimum(0)
# we need to substract the displayed rows to the total rows, to scroll
# we need to subtract the displayed rows to the total rows, to scroll
# down correctly.
self.vScrollBar.setMaximum(max(0, totalCount - self.maxRowsInViewport+1))

Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/customwidgets/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ def __init__(self):
QStandardItemModel.__init__(self, 0, len(self.headerLabels))
self.setHorizontalHeaderLabels(self.headerLabels)

#Some QSqlQueryModel methods must be mimiced so that this class can serve as a drop-in replacement
#Some QSqlQueryModel methods must be mimicked so that this class can serve as a drop-in replacement
#mimic QSqlQueryModel.query()
def query(self):
return self
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/dialogs/firewall_rule.py
Original file line number Diff line number Diff line change
Expand Up @@ -1549,7 +1549,7 @@ def _reset_fields(self):
self.uuid = ""

def _enable_save(self, enable=True):
"""Enable Save buton whenever some detail of a route changes.
"""Enable Save button whenever some detail of a route changes.
The button may or not be hidden. If we're editing a rule it'll be shown
but disabled/enabled.
"""
Expand Down
6 changes: 3 additions & 3 deletions ui/opensnitch/dialogs/stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -839,7 +839,7 @@ def __init__(self, parent=None, address=None, db=None, dbname="db", appicon=None
self._configure_plugins()

#Sometimes a maximized window which had been minimized earlier won't unminimize
#To workaround, we explicitely maximize such windows when unminimizing happens
#To workaround, we explicitly maximize such windows when unminimizing happens
def changeEvent(self, event):
if event.type() == QtCore.QEvent.WindowStateChange:
if event.oldState() & QtCore.Qt.WindowMinimized and event.oldState() & QtCore.Qt.WindowMaximized:
Expand Down Expand Up @@ -1732,7 +1732,7 @@ def _cb_tab_changed(self, index):
self._monitor_node_netstat()
else:
if index == self.TAB_RULES:
# display the clean buton only if not in detail view
# display the clean button only if not in detail view
self.TABLES[index]['cmdCleanStats'].setVisible( self.IN_DETAIL_VIEW[index] )
self._add_rulesTree_nodes()

Expand Down Expand Up @@ -2581,7 +2581,7 @@ def _restore_scroll_value(self):
def _restore_details_view_columns(self, header, settings_key):
header.blockSignals(True);
# In order to resize the last column of a view, we firstly force a
# resizeToContens call.
# resizeToContents call.
# Secondly set resizeMode to Interactive (allow to move columns by
# users + programmatically)
header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/plugins/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ class PluginBase(PluginsList, ABC):
When calling compile(), every plugin must create the python objects needed,
so they can be reused later when run() is called.

When calling configure(), every plugin is responsable to modify the GUI
When calling configure(), every plugin is responsible to modify the GUI
as needed, adding new buttons, modifying existing widgets, behaviour, ...
"""
name = "PluginBase"
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/plugins/downloader/downloader.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ class Downloader(PluginBase):
This plugin may require to create a rule to allow connections to the
configured urls, to avoid popups.
"""
# fields overriden from parent class
# fields overridden from parent class
name = "Downloader"
version = 0
author = "opensnitch"
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/plugins/sample/sample.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@


class Sample(PluginBase):
# fields overriden from parent class
# fields overridden from parent class
name = "Sample"
version = 0
author = "opensnitch"
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/plugins/virustotal/virustotal.py
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ def update_popup(self, what, response, parent, config, errmsg=None):
#print("[Virustotal] RESULT:\n", conn.dst_host, "\n", result['data']['attributes']['last_analysis_stats'])

# XXX: if we analyze multiple objects (domains, ips, hashes...),
# onlye the last response is stored.
# only the last response is stored.
_popups.add_vt_response(parent, result)

malicious = self.is_malicious(verdict['malicious'])
Expand Down
2 changes: 1 addition & 1 deletion ui/opensnitch/rules.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ def new_from_records(records):
])
rule.operator.data = ""
except Exception as e:
print("new_from_records exception parsing operartor list:", e)
print("new_from_records exception parsing operator list:", e)


return rule
Expand Down