Skip to content

Commit 5d1cb42

Browse files
feat(demo): Add demo example code file for a simple perceptron implementation.
1 parent 6a87ca8 commit 5d1cb42

2 files changed

Lines changed: 73 additions & 3 deletions

File tree

demo/example.py

Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
"""
2+
Recreation of the simple perceptron algorithm, faithfully following Rosenblatt's original formulation.
3+
Rosenblatt, F. (1958). The perceptron: A probabilistic model for information storage and organization in the brain. Psychological Review, 65(6), 386–408.
4+
"""
5+
6+
class SimplePerceptron:
7+
def __init__(self, seed: float = 42.0) -> None:
8+
self.weights: list[float] = []
9+
self.bias: float = 0.0
10+
self.seed = seed
11+
12+
def train(self, labeled_data: dict[tuple[float | int, ...], int], epochs: int = 30, learning_rate: float = 0.001) -> None:
13+
input_dim = len(list(labeled_data.keys())[0])
14+
self.weights = [self._lcg() for _ in range(input_dim)]
15+
self.bias = self._lcg()
16+
17+
for e in range(epochs):
18+
error_count: int = 0
19+
20+
for inputs, target in labeled_data.items():
21+
model_pred: float = self._net_input(inputs)
22+
23+
if target != self._binary_step(model_pred):
24+
error_count += 1
25+
update = learning_rate * (target - self._binary_step(model_pred)) # Product of learning rate and the difference betwen target and model prediction.
26+
27+
# Update the model bias and weights using the rosenblatt learning rule.
28+
for i in range(len(self.weights)): self.weights[i] += update * inputs[i]
29+
self.bias += update
30+
31+
if error_count == 0: # Simple early stopping mechanism to use less resources.
32+
print(f"The model stopped converging at epoch {e}.")
33+
return
34+
35+
print(f"For epoch {e}, the accuracy for actual model is {(len(labeled_data) - error_count) / len(labeled_data) * 100} percent.")
36+
37+
def inference(self, values: tuple[float | int, ...]) -> int:
38+
return self._binary_step(self._net_input(values))
39+
40+
def _net_input(self, values: tuple[float]) -> float:
41+
return sum(w_i * x_i for w_i, x_i in zip(self.weights, values)) + self.bias # The dot product of the vector of weights and values ​​plus the bias term.
42+
43+
def _binary_step(self, pred: float) -> int:
44+
return 1 if pred >= 0.0 else 0
45+
46+
def _lcg(self) -> float: # Congruential linear generator.
47+
self.seed = (self.seed * 16807) % 2147483647
48+
return (self.seed / 2147483647) - 0.5 # Normalized between -0.5 and 0.5 to avoid data scaling problems.
49+
50+
# OR truth table used as a tiny labeled training set: the target is 1
# whenever at least one input is 1.
or_gate: dict[tuple[int, int], int] = {
    pair: int(any(pair))
    for pair in ((0, 0), (0, 1), (1, 0), (1, 1))
}

# Train a perceptron on the OR gate and run a single prediction.
model = SimplePerceptron()
model.train(or_gate)
pred: int = model.inference((0, 0))
print(f"Prediction for (0,0): {pred}")

demo/main.js

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ import { CodeJar } from 'https://esm.sh/codejar@4';
44

55
const MAX_LINES = 99;
66
const TAB_SIZE = 4;
7+
const EXAMPLE_FILE = "example.py"
78
const DEV = !['demo.edgepython.com'].includes(location.hostname);
89
const FETCH_OPTS = DEV ? { cache: 'no-store' } : undefined;
910

@@ -227,11 +228,20 @@ const Editor = (() => {
227228
jar.onUpdate(syncLines);
228229
jar.updateCode(DEFAULT_CODE);
229230

230-
return { getCode: () => jar.toString() };
231+
return { getCode: () => jar.toString(), setCode: (code) => jar.updateCode(code) };
231232
})();
232233

233234
// Init
234-
235235
el.btn.addEventListener('click', () => PythonWorker.run(Editor.getCode()));
236236
loadIcons();
237-
PythonWorker.load();
237+
PythonWorker.load();
238+
239+
// Best-effort load of the bundled example file into the editor; on any
// failure (network error or non-2xx status) the default code stays in place.
fetch(`./${EXAMPLE_FILE}`, FETCH_OPTS ?? {})
    .then((res) => {
        if (!res.ok) throw new Error(`HTTP ${res.status}`);
        return res.text();
    })
    .then((code) => Editor.setCode(code))
    .catch(() => {
        console.warn(`${EXAMPLE_FILE} could not be loaded, using default code.`);
    });

0 commit comments

Comments
 (0)