#ifndef cwNbMpScQueue_h
#define cwNbMpScQueue_h
/*
Non-blocking, Lock-free Queue:
=================================

Push
----
0. Producers go down the list of blocks (nbmpscq.blockL);
   if a block is not already full then the producer atomically
   fetch-add's block->write_idx by the size of the element
   to be inserted.

1. If after the fetch-add the write_idx is <= block->byteN then
   - atomically incr ele-count,
   - copy in ele,
   - place the block,ele-offset,ele-byte-cnt onto the NbMpScQueue().

2. else (the area is invalid) goto 0.

Pop
----
1. copy out next ele.
2. decr. block->ele_count.
3. if the ele-count is 0 and write-offset is invalid
   reset the write-offset to 0.
*/

namespace cw
|
|
|
|
{
|
|
|
|
namespace nbmpscq
|
|
|
|
{
|
|
|
|
typedef handle<struct nbmpscq_str> handle_t;
|
2024-03-25 17:31:58 +00:00
|
|
|
|
2024-02-25 21:19:59 +00:00
|
|
|
rc_t create( handle_t& hRef, unsigned initBlkN, unsigned blkByteN );
|
2024-03-11 23:21:03 +00:00
|
|
|
|
2024-02-25 21:19:59 +00:00
|
|
|
rc_t destroy( handle_t& hRef );
|
|
|
|
|
2024-03-28 23:43:40 +00:00
|
|
|
//
|
|
|
|
// Producer Function
|
|
|
|
//
|
|
|
|
|
2024-03-11 23:21:03 +00:00
|
|
|
// push() is called by multiple producer threads to insert
|
|
|
|
// an element in the queue. Note that the 'blob' is copied into
|
|
|
|
// the queue and therefore can be released by the caller.
|
2024-02-25 21:19:59 +00:00
|
|
|
rc_t push( handle_t h, const void* blob, unsigned blobByteN );
|
|
|
|
|
2024-03-28 23:43:40 +00:00
|
|
|
|
|
|
|
//
|
|
|
|
// Consumer Functions
|
|
|
|
//
|
|
|
|
|
2024-02-25 21:19:59 +00:00
|
|
|
typedef struct blob_str
|
|
|
|
{
|
2024-03-25 17:31:58 +00:00
|
|
|
rc_t rc;
|
2024-02-25 21:19:59 +00:00
|
|
|
const void* blob;
|
|
|
|
unsigned blobByteN;
|
|
|
|
} blob_t;
|
2024-03-11 23:21:03 +00:00
|
|
|
|
|
|
|
// get() is called by the single consumer thread to access the
|
2024-03-25 17:31:58 +00:00
|
|
|
// oldest record in the queue. Note that this call
|
2024-03-11 23:21:03 +00:00
|
|
|
// does not change the state of the queue.
|
|
|
|
blob_t get( handle_t h );
|
|
|
|
|
2024-03-25 17:31:58 +00:00
|
|
|
// advance() disposes of the oldest blob in the
|
2024-03-11 23:21:03 +00:00
|
|
|
// queue and makes the next blob current.
|
2024-03-25 17:31:58 +00:00
|
|
|
blob_t advance( handle_t h );
|
|
|
|
|
|
|
|
// The queue maintains a single internal iterator which the consumer
|
|
|
|
// may use to traverse stored records without removing them.
|
|
|
|
// The first call to peek() will return the oldest stored record.
|
|
|
|
// Each subsequent call to peek() will return the next stored record
|
|
|
|
// until no records are available - at which point blob_t.blob will be
|
|
|
|
// set to 'nullptr'. The following call will then revert to returning
|
|
|
|
// the oldest stored record.
|
|
|
|
blob_t peek( handle_t h );
|
2024-03-28 23:43:40 +00:00
|
|
|
|
|
|
|
// Reset peek to point to the oldest stored record.
|
|
|
|
void peek_reset( handle_t h );
|
|
|
|
|
|
|
|
// Return true if the queue is empty.
|
2024-03-25 17:31:58 +00:00
|
|
|
bool is_empty( handle_t h );
|
2024-02-25 21:19:59 +00:00
|
|
|
|
2024-03-28 23:43:40 +00:00
|
|
|
// Count of elements in the queue.
|
2024-03-25 18:29:25 +00:00
|
|
|
unsigned count( handle_t h );
|
|
|
|
|
2024-03-03 21:35:23 +00:00
|
|
|
rc_t test( const object_t* cfg );
|
2024-02-25 21:19:59 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
#endif // cwNbMpScQueue_h