|
| 1 | +#pragma once |
| 2 | +#ifndef QUANUX_EGRESS_HPP |
| 3 | +#define QUANUX_EGRESS_HPP |
| 4 | + |
| 5 | +#include "quanux_synapse.hpp" |
| 6 | +#include <atomic> |
| 7 | +#include <cstddef> |
| 8 | +#include <cstdint> |
| 9 | +#include <cstring> |
| 10 | + |
| 11 | +namespace quanux { |
| 12 | +namespace aether { |
| 13 | + |
/**
 * @brief The Kinetic Egress (Solarflare EF_VI Transmit Organelle)
 *
 * This organelle implements the "Strike".
 * The TCP/UDP templates are pre-built into the SovereignState memory space
 * during the STATE_VIGIL (1) phase. When the Nucleus decides to execute,
 * it does not parse logic, compute sizes, or copy buffers.
 * It merely flips the "Intent Bit" to trigger the `ef_vi_transmit_init()` DMA
 * send.
 *
 * Layout contract (enforced by the static_asserts following this struct):
 * exactly two 64-byte cache lines — line 1 holds the packet template and its
 * length, line 2 holds only the intent flag, so the writer of the flag never
 * false-shares with the reader of the template.
 */
struct alignas(64) KineticEgress {
  // Capacity of the pre-built packet template in bytes. Together with the
  // 4-byte packet_length field this fills cache line 1 exactly (60 + 4 = 64).
  // Single source of truth: the array bound and the truncation clamp in
  // template_payload() both derive from this constant, so they cannot drift.
  static constexpr std::size_t kTemplateCapacity = 60;

  // Cache Line 1: Pre-calculated Network Packet Template
  // Includes Ethernet, IP, and TCP/UDP headers and static payload components.
  // Kept strictly within its own boundary to prevent False Sharing during the
  // Strike.
  uint8_t packet_template[kTemplateCapacity];
  uint32_t packet_length; // Valid bytes in packet_template; line totals 64 B.

  // Cache Line 2: The Intent Trigger
  // When the Sovereign Nucleus (Core 3) completes its matrix evaluation
  // and determines an alpha strike condition is met, it asserts this flag.
  alignas(64) std::atomic<bool> intent_bit;

  // Zero the template and arm nothing: a fresh organelle is inert.
  KineticEgress() : packet_length(0), intent_bit(false) {
    std::memset(packet_template, 0, sizeof(packet_template));
  }

  /**
   * @brief The Law of Pre-Calculation (Ritchie Protocol)
   * To be called strictly during STATE_VOID or STATE_VIGIL.
   * All checksums, routing, and sizing math must be finalized here.
   *
   * @param raw_headers Pre-computed wire bytes (Ethernet/IP/TCP-UDP + static
   *                    payload). Must point to at least @p len readable bytes.
   * @param len         Byte count offered; silently truncated to
   *                    kTemplateCapacity to keep the template inside its
   *                    single cache line.
   */
  inline void template_payload(const uint8_t *raw_headers, uint32_t len) {
    // Enforce the capacity limit for direct-hit single-cache-line
    // transmission (was a hard-coded 60 that had to match the array bound).
    const std::size_t copy_len =
        (len < kTemplateCapacity) ? len : kTemplateCapacity;
    std::memcpy(packet_template, raw_headers, copy_len);
    packet_length = static_cast<uint32_t>(copy_len);
  }

  /**
   * @brief The Strike
   * In a live EF_VI environment, flipping this bit prompts the Envoy core
   * to immediately push the pre-loaded Hugepage memory buffer pointer
   * into the ef_vi TX ring. Release ordering publishes the template bytes
   * written before fire() to the core that observes the flag with acquire.
   */
  inline void fire() { intent_bit.store(true, std::memory_order_release); }
};
| 62 | + |
// Compile-time enforcement of the layout contract: the struct must occupy
// exactly two cache lines (template line + intent line) and start on a
// cache-line boundary, or the false-sharing isolation between the Nucleus
// writer and the Envoy reader is lost. Any member added to KineticEgress
// that grows it past 128 bytes fails the build here rather than at runtime.
static_assert(sizeof(KineticEgress) == 128,
              "KineticEgress MUST map to exactly two 64-byte cache lines.");
static_assert(alignof(KineticEgress) == 64,
              "KineticEgress MUST be 64-byte cache aligned.");
| 67 | + |
| 68 | +} // namespace aether |
| 69 | +} // namespace quanux |
| 70 | + |
| 71 | +#endif // QUANUX_EGRESS_HPP |
0 commit comments