Skip to content

Commit c00e96b

Browse files
committed
Fix some build errors in LD_PRELOAD's ring IPC.
1 parent 84ddaf1 commit c00e96b

4 files changed

Lines changed: 173 additions & 138 deletions

File tree

adapter/syscall/Makefile

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,11 +104,19 @@ FSTACK_SRCS= \
104104
ff_so_zone.c \
105105
ff_socket_ops.c
106106

107+
ifdef FF_USE_RING_IPC
108+
FSTACK_SRCS += ff_ring_ipc.c
109+
endif
110+
107111
FF_SYSCALL_SRCS= \
108112
ff_so_zone.c \
109113
ff_hook_syscall.c \
110114
ff_linux_syscall.c
111115

116+
ifdef FF_USE_RING_IPC
117+
FF_SYSCALL_SRCS += ff_ring_ipc.c
118+
endif
119+
112120
FSTACK_OBJS= $(patsubst %.c,%.o,${FSTACK_SRCS})
113121

114122
FF_SYSCALL_OBJS= $(patsubst %.c,%.o,${FF_SYSCALL_SRCS})

adapter/syscall/ff_ring_ipc.c

Lines changed: 157 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,157 @@
1+
/*
2+
* ff_ring_ipc.c — Lock-free ring IPC helper functions.
3+
*
4+
* Shared by both fstack (primary) and libff_syscall.so (LD_PRELOAD).
5+
* Split from ff_socket_ops.c to avoid dragging ff_* API symbols into
6+
* the LD_PRELOAD library.
7+
*/
8+
9+
#include <errno.h>
10+
#include <sched.h>
11+
#include <unistd.h>
12+
13+
#include <rte_ring.h>
14+
#include <rte_cycles.h>
15+
16+
#include "ff_socket_ops.h"
17+
18+
#ifdef FF_USE_RING_IPC
19+
20+
/*
21+
* Batch dequeue requests from req_ring and invoke handler for each.
22+
*/
23+
uint16_t
24+
ff_ring_process_requests(struct ff_sc_ring_zone *ring_zone,
25+
void (*handler)(struct ff_so_context *),
26+
uint16_t max_burst)
27+
{
28+
void *objs[SOCKET_OPS_CONTEXT_MAX_NUM];
29+
unsigned int nb, i;
30+
31+
if (ring_zone == NULL || ring_zone->req_ring == NULL) {
32+
return 0;
33+
}
34+
35+
nb = rte_ring_sc_dequeue_burst(ring_zone->req_ring,
36+
objs, max_burst, NULL);
37+
38+
for (i = 0; i < nb; i++) {
39+
handler((struct ff_so_context *)objs[i]);
40+
}
41+
42+
return (uint16_t)nb;
43+
}
44+
45+
/*
46+
* Enqueue processed sc to response ring.
47+
* If eventfd mode, also write to eventfd to wake up APP.
48+
*/
49+
int
50+
ff_ring_send_response(struct ff_sc_ring_zone *ring_zone,
51+
struct ff_so_context *sc)
52+
{
53+
int ret;
54+
55+
if (ring_zone == NULL || ring_zone->rsp_ring == NULL) {
56+
return -1;
57+
}
58+
59+
ret = rte_ring_sp_enqueue(ring_zone->rsp_ring, sc);
60+
if (ret != 0) {
61+
ERR_LOG("rsp_ring enqueue failed, sc:%p, ret:%d\n", sc, ret);
62+
return -1;
63+
}
64+
65+
if (ring_zone->wait_mode == FF_RING_WAIT_EVENTFD &&
66+
ring_zone->eventfd_rsp >= 0) {
67+
uint64_t val = 1;
68+
if (write(ring_zone->eventfd_rsp, &val, sizeof(val)) < 0) {
69+
ERR_LOG("eventfd_rsp write failed, errno:%d\n", errno);
70+
}
71+
}
72+
73+
return 0;
74+
}
75+
76+
/*
77+
* Timeout-aware ring dequeue using rte_rdtsc for high-precision timing.
78+
* Returns 0 on success, -ETIMEDOUT on timeout.
79+
*/
80+
int
81+
ff_ring_dequeue_wait(struct rte_ring *ring, void **obj_p,
82+
int64_t timeout_us, uint8_t wait_mode)
83+
{
84+
uint64_t tsc_hz, timeout_tsc, start_tsc;
85+
uint32_t spin_count = 0;
86+
87+
if (ring == NULL || obj_p == NULL) {
88+
return -EINVAL;
89+
}
90+
91+
tsc_hz = rte_get_tsc_hz();
92+
start_tsc = rte_rdtsc();
93+
94+
if (timeout_us > 0) {
95+
timeout_tsc = (uint64_t)timeout_us * tsc_hz / 1000000ULL;
96+
} else if (timeout_us == 0) {
97+
/* Non-blocking: single try */
98+
if (rte_ring_sc_dequeue(ring, obj_p) == 0) {
99+
return 0;
100+
}
101+
return -ETIMEDOUT;
102+
} else {
103+
timeout_tsc = UINT64_MAX; /* -1 = wait forever */
104+
}
105+
106+
while (rte_ring_sc_dequeue(ring, obj_p) != 0) {
107+
if (rte_rdtsc() - start_tsc >= timeout_tsc) {
108+
return -ETIMEDOUT;
109+
}
110+
111+
switch (wait_mode) {
112+
case FF_RING_WAIT_BUSY_POLL:
113+
rte_pause();
114+
break;
115+
case FF_RING_WAIT_YIELD_POLL:
116+
if ((++spin_count & 0xFF) == 0) {
117+
sched_yield();
118+
} else {
119+
rte_pause();
120+
}
121+
break;
122+
case FF_RING_WAIT_EVENTFD:
123+
/* Eventfd handled by caller */
124+
rte_pause();
125+
break;
126+
default:
127+
rte_pause();
128+
break;
129+
}
130+
}
131+
132+
return 0;
133+
}
134+
135+
/*
136+
* Wakeup APP by enqueuing a sentinel to rsp_ring.
137+
* Replaces alarm_event_sem() in ring mode.
138+
*/
139+
void
140+
ff_ring_alarm_wakeup(struct ff_sc_ring_zone *ring_zone,
141+
struct ff_so_context *sc)
142+
{
143+
if (ring_zone == NULL || ring_zone->rsp_ring == NULL || sc == NULL) {
144+
return;
145+
}
146+
147+
/* Enqueue sc as sentinel — APP will dequeue and check */
148+
rte_ring_sp_enqueue(ring_zone->rsp_ring, sc);
149+
150+
if (ring_zone->wait_mode == FF_RING_WAIT_EVENTFD &&
151+
ring_zone->eventfd_rsp >= 0) {
152+
uint64_t val = 1;
153+
write(ring_zone->eventfd_rsp, &val, sizeof(val));
154+
}
155+
}
156+
157+
#endif /* FF_USE_RING_IPC */

adapter/syscall/ff_so_zone.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <rte_memzone.h>
66
#ifdef FF_USE_RING_IPC
77
#include <rte_ring.h>
8+
#include <rte_errno.h>
89
#include <sys/eventfd.h>
910
#endif
1011

adapter/syscall/ff_socket_ops.c

Lines changed: 7 additions & 138 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,10 @@ static int sem_flag = 0;
2828
* The event num kevent or epoll_wait returned.
2929
* Use for burst process event in one F-Stack loop to improve performance.
3030
*/
31-
#define EVENT_LOOP_TIMES 32
32-
static int ff_event_loop_nb = 0;
31+
//#define EVENT_LOOP_TIMES 32
32+
//#ifndef FF_USE_RING_IPC
33+
//static int ff_event_loop_nb = 0;
34+
//#endif
3335
//static int ff_next_event_flag = 0;
3436

3537
struct ff_bound_info {
@@ -652,8 +654,6 @@ ff_handle_each_context()
652654
ff_global_cfg.dpdk.pkt_tx_delay, drain_tsc);
653655
}
654656

655-
ff_event_loop_nb = 0;
656-
657657
cur_tsc = rte_rdtsc();
658658

659659
rte_spinlock_lock(&ff_so_zone->lock);
@@ -713,140 +713,9 @@ ff_handle_each_context()
713713

714714
#ifdef FF_USE_RING_IPC
715715
/*
716-
* Batch dequeue requests from req_ring and invoke handler for each.
717-
*/
718-
uint16_t
719-
ff_ring_process_requests(struct ff_sc_ring_zone *ring_zone,
720-
void (*handler)(struct ff_so_context *),
721-
uint16_t max_burst)
722-
{
723-
void *objs[SOCKET_OPS_CONTEXT_MAX_NUM];
724-
unsigned int nb, i;
725-
726-
if (ring_zone == NULL || ring_zone->req_ring == NULL) {
727-
return 0;
728-
}
729-
730-
nb = rte_ring_sc_dequeue_burst(ring_zone->req_ring,
731-
objs, max_burst, NULL);
732-
733-
for (i = 0; i < nb; i++) {
734-
handler((struct ff_so_context *)objs[i]);
735-
}
736-
737-
return (uint16_t)nb;
738-
}
739-
740-
/*
741-
* Enqueue processed sc to response ring.
742-
* If eventfd mode, also write to eventfd to wake up APP.
743-
*/
744-
int
745-
ff_ring_send_response(struct ff_sc_ring_zone *ring_zone,
746-
struct ff_so_context *sc)
747-
{
748-
int ret;
749-
750-
if (ring_zone == NULL || ring_zone->rsp_ring == NULL) {
751-
return -1;
752-
}
753-
754-
ret = rte_ring_sp_enqueue(ring_zone->rsp_ring, sc);
755-
if (ret != 0) {
756-
ERR_LOG("rsp_ring enqueue failed, sc:%p, ret:%d\n", sc, ret);
757-
return -1;
758-
}
759-
760-
if (ring_zone->wait_mode == FF_RING_WAIT_EVENTFD &&
761-
ring_zone->eventfd_rsp >= 0) {
762-
uint64_t val = 1;
763-
if (write(ring_zone->eventfd_rsp, &val, sizeof(val)) < 0) {
764-
ERR_LOG("eventfd_rsp write failed, errno:%d\n", errno);
765-
}
766-
}
767-
768-
return 0;
769-
}
770-
771-
/*
772-
* Timeout-aware ring dequeue using rte_rdtsc for high-precision timing.
773-
* Returns 0 on success, -ETIMEDOUT on timeout.
774-
*/
775-
int
776-
ff_ring_dequeue_wait(struct rte_ring *ring, void **obj_p,
777-
int64_t timeout_us, uint8_t wait_mode)
778-
{
779-
uint64_t tsc_hz, timeout_tsc, start_tsc;
780-
uint32_t spin_count = 0;
781-
782-
if (ring == NULL || obj_p == NULL) {
783-
return -EINVAL;
784-
}
785-
786-
tsc_hz = rte_get_tsc_hz();
787-
start_tsc = rte_rdtsc();
788-
789-
if (timeout_us > 0) {
790-
timeout_tsc = (uint64_t)timeout_us * tsc_hz / 1000000ULL;
791-
} else if (timeout_us == 0) {
792-
/* Non-blocking: single try */
793-
if (rte_ring_sc_dequeue(ring, obj_p) == 0) {
794-
return 0;
795-
}
796-
return -ETIMEDOUT;
797-
} else {
798-
timeout_tsc = UINT64_MAX; /* -1 = wait forever */
799-
}
800-
801-
while (rte_ring_sc_dequeue(ring, obj_p) != 0) {
802-
if (rte_rdtsc() - start_tsc >= timeout_tsc) {
803-
return -ETIMEDOUT;
804-
}
805-
806-
switch (wait_mode) {
807-
case FF_RING_WAIT_BUSY_POLL:
808-
rte_pause();
809-
break;
810-
case FF_RING_WAIT_YIELD_POLL:
811-
if ((++spin_count & 0xFF) == 0) {
812-
sched_yield();
813-
} else {
814-
rte_pause();
815-
}
816-
break;
817-
case FF_RING_WAIT_EVENTFD:
818-
/* Eventfd handled by caller */
819-
rte_pause();
820-
break;
821-
default:
822-
rte_pause();
823-
break;
824-
}
825-
}
826-
827-
return 0;
828-
}
829-
830-
/*
831-
* Wakeup APP by enqueuing a sentinel to rsp_ring.
832-
* Replaces alarm_event_sem() in ring mode.
716+
* Ring helper functions (ff_ring_process_requests, ff_ring_send_response,
717+
* ff_ring_dequeue_wait, ff_ring_alarm_wakeup) have been moved to
718+
* ff_ring_ipc.c for shared use by both fstack and libff_syscall.so.
833719
*/
834-
void
835-
ff_ring_alarm_wakeup(struct ff_sc_ring_zone *ring_zone,
836-
struct ff_so_context *sc)
837-
{
838-
if (ring_zone == NULL || ring_zone->rsp_ring == NULL || sc == NULL) {
839-
return;
840-
}
841-
842-
/* Enqueue sc as sentinel — APP will dequeue and check */
843-
rte_ring_sp_enqueue(ring_zone->rsp_ring, sc);
844-
845-
if (ring_zone->wait_mode == FF_RING_WAIT_EVENTFD &&
846-
ring_zone->eventfd_rsp >= 0) {
847-
uint64_t val = 1;
848-
write(ring_zone->eventfd_rsp, &val, sizeof(val));
849-
}
850-
}
851720
#endif /* FF_USE_RING_IPC */
852721

0 commit comments

Comments
 (0)