33 * Copyright 2023 Red Hat
44 */
55
6- #ifndef UDS_FUNNEL_QUEUE_H
7- #define UDS_FUNNEL_QUEUE_H
6+ #ifndef VDO_FUNNEL_QUEUE_H
7+ #define VDO_FUNNEL_QUEUE_H
88
99#include <linux/atomic.h>
1010#include <linux/cache.h>
2525 * the queue entries, and pointers to those structures are used exclusively by the queue. No macros
2626 * are defined to template the queue, so the offset of the funnel_queue_entry in the records placed
2727 * in the queue must all be the same so the client can derive their structure pointer from the
28- * entry pointer returned by uds_funnel_queue_poll ().
28+ * entry pointer returned by vdo_funnel_queue_poll().
2929 *
3030 * Callers are wholly responsible for allocating and freeing the entries. Entries may be freed as
3131 * soon as they are returned since this queue is not susceptible to the "ABA problem" present in
3232 * many lock-free data structures. The queue is dynamically allocated to ensure cache-line
3333 * alignment, but no other dynamic allocation is used.
3434 *
35- * The algorithm is not actually 100% lock-free. There is a single point in uds_funnel_queue_put ()
35+ * The algorithm is not actually 100% lock-free. There is a single point in vdo_funnel_queue_put()
3636 * at which a preempted producer will prevent the consumers from seeing items added to the queue by
3737 * later producers, and only if the queue is short enough or the consumer fast enough for it to
3838 * reach what was the end of the queue at the time of the preemption.
3939 *
40- * The consumer function, uds_funnel_queue_poll (), will return NULL when the queue is empty. To
40+ * The consumer function, vdo_funnel_queue_poll(), will return NULL when the queue is empty. To
4141 * wait for data to consume, spin (if safe) or combine the queue with a struct event_count to
4242 * signal the presence of new entries.
4343 */
@@ -51,7 +51,7 @@ struct funnel_queue_entry {
5151/*
5252 * The dynamically allocated queue structure, which is allocated on a cache line boundary so the
5353 * producer and consumer fields in the structure will land on separate cache lines. This should be
54- * consider opaque but it is exposed here so uds_funnel_queue_put () can be inlined.
54+ * considered opaque but it is exposed here so vdo_funnel_queue_put() can be inlined.
5555 */
5656struct __aligned (L1_CACHE_BYTES ) funnel_queue {
5757 /*
@@ -67,9 +67,9 @@ struct __aligned(L1_CACHE_BYTES) funnel_queue {
6767 struct funnel_queue_entry stub ;
6868};
6969
70- int __must_check uds_make_funnel_queue (struct funnel_queue * * queue_ptr );
70+ int __must_check vdo_make_funnel_queue (struct funnel_queue * * queue_ptr );
7171
72- void uds_free_funnel_queue (struct funnel_queue * queue );
72+ void vdo_free_funnel_queue (struct funnel_queue * queue );
7373
7474/*
7575 * Put an entry on the end of the queue.
@@ -79,7 +79,7 @@ void uds_free_funnel_queue(struct funnel_queue *queue);
7979 * from the pointer that passed in here, so every entry in the queue must have the struct
8080 * funnel_queue_entry at the same offset within the client's structure.
8181 */
82- static inline void uds_funnel_queue_put (struct funnel_queue * queue ,
82+ static inline void vdo_funnel_queue_put (struct funnel_queue * queue ,
8383 struct funnel_queue_entry * entry )
8484{
8585 struct funnel_queue_entry * previous ;
@@ -101,10 +101,10 @@ static inline void uds_funnel_queue_put(struct funnel_queue *queue,
101101 WRITE_ONCE (previous -> next , entry );
102102}
103103
104- struct funnel_queue_entry * __must_check uds_funnel_queue_poll (struct funnel_queue * queue );
104+ struct funnel_queue_entry * __must_check vdo_funnel_queue_poll (struct funnel_queue * queue );
105105
106- bool __must_check uds_is_funnel_queue_empty (struct funnel_queue * queue );
106+ bool __must_check vdo_is_funnel_queue_empty (struct funnel_queue * queue );
107107
108- bool __must_check uds_is_funnel_queue_idle (struct funnel_queue * queue );
108+ bool __must_check vdo_is_funnel_queue_idle (struct funnel_queue * queue );
109109
110- #endif /* UDS_FUNNEL_QUEUE_H */
110+ #endif /* VDO_FUNNEL_QUEUE_H */
0 commit comments