@@ -9,7 +9,8 @@ struct multicore_worker __percpu *
 wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 {
 	int cpu;
-	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
+	struct multicore_worker __percpu *worker =
+		alloc_percpu(struct multicore_worker);
 
 	if (!worker)
 		return NULL;
@@ -22,86 +23,33 @@ wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
 }
 
 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
-			 unsigned int len)
+			 bool multicore, unsigned int len)
 {
 	int ret;
 
 	memset(queue, 0, sizeof(*queue));
 	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
 	if (ret)
 		return ret;
-	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
-	if (!queue->worker) {
-		ptr_ring_cleanup(&queue->ring, NULL);
-		return -ENOMEM;
+	if (function) {
+		if (multicore) {
+			queue->worker = wg_packet_percpu_multicore_worker_alloc(
+				function, queue);
+			if (!queue->worker) {
+				ptr_ring_cleanup(&queue->ring, NULL);
+				return -ENOMEM;
+			}
+		} else {
+			INIT_WORK(&queue->work, function);
+		}
 	}
 	return 0;
 }
 
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore)
 {
-	free_percpu(queue->worker);
+	if (multicore)
+		free_percpu(queue->worker);
 	WARN_ON(!__ptr_ring_empty(&queue->ring));
 	ptr_ring_cleanup(&queue->ring, NULL);
 }
-
-#define NEXT(skb) ((skb)->prev)
-#define STUB(queue) ((struct sk_buff *)&queue->empty)
-
-void wg_prev_queue_init(struct prev_queue *queue)
-{
-	NEXT(STUB(queue)) = NULL;
-	queue->head = queue->tail = STUB(queue);
-	queue->peeked = NULL;
-	atomic_set(&queue->count, 0);
-	BUILD_BUG_ON(
-		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
-						  offsetof(struct prev_queue, empty) ||
-		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
-						  offsetof(struct prev_queue, empty));
-}
-
-static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
-{
-	WRITE_ONCE(NEXT(skb), NULL);
-	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
-}
-
-bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
-{
-	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
-		return false;
-	__wg_prev_queue_enqueue(queue, skb);
-	return true;
-}
-
-struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
-{
-	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
-
-	if (tail == STUB(queue)) {
-		if (!next)
-			return NULL;
-		queue->tail = next;
-		tail = next;
-		next = smp_load_acquire(&NEXT(next));
-	}
-	if (next) {
-		queue->tail = next;
-		atomic_dec(&queue->count);
-		return tail;
-	}
-	if (tail != READ_ONCE(queue->head))
-		return NULL;
-	__wg_prev_queue_enqueue(queue, STUB(queue));
-	next = smp_load_acquire(&NEXT(tail));
-	if (next) {
-		queue->tail = next;
-		atomic_dec(&queue->count);
-		return tail;
-	}
-	return NULL;
-}
-
-#undef NEXT
-#undef STUB
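
For context, a minimal caller-side sketch of the changed init/free API. The call sites are not part of this hunk, so the queue fields, worker functions (wg_packet_encrypt_worker, wg_packet_tx_worker) and MAX_QUEUED_PACKETS shown below are assumed from the rest of the driver rather than taken from this diff:

/* Illustrative only: not part of this patch.
 * A shared device queue gets per-CPU workers (multicore == true);
 * a per-peer queue gets a single work item, or just the ptr_ring
 * when no function is passed. */
ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
			   true, MAX_QUEUED_PACKETS);
if (ret < 0)
	return ret;

ret = wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker,
			   false, MAX_QUEUED_PACKETS);

/* Teardown must pass the same multicore flag so free_percpu() only
 * runs when the per-CPU workers were actually allocated. */
wg_packet_queue_free(&wg->encrypt_queue, true);
wg_packet_queue_free(&peer->tx_queue, false);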