Jens Axboe | e418bbc | 2022-05-25 08:56:52 -0600 | [diff] [blame] | 1 | #ifndef IOU_REQ_REF_H |
| 2 | #define IOU_REQ_REF_H |
| 3 | |
| 4 | #include <linux/atomic.h> |
Pavel Begunkov | ab1c84d | 2022-06-16 13:57:19 +0100 | [diff] [blame] | 5 | #include <linux/io_uring_types.h> |
Jens Axboe | e418bbc | 2022-05-25 08:56:52 -0600 | [diff] [blame] | 6 | |
/*
 * Shamelessly stolen from the mm implementation of page reference checking,
 * see commit f958d7b528b1 for details.
 *
 * True if the refcount is zero, negative, or within 127 of wrapping —
 * i.e. any state where taking another reference would be a bug.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req)->refs) + 127u <= 127u)
| 13 | |
| 14 | static inline bool req_ref_inc_not_zero(struct io_kiocb *req) |
| 15 | { |
| 16 | WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); |
| 17 | return atomic_inc_not_zero(&req->refs); |
| 18 | } |
| 19 | |
| 20 | static inline bool req_ref_put_and_test(struct io_kiocb *req) |
| 21 | { |
| 22 | if (likely(!(req->flags & REQ_F_REFCOUNT))) |
| 23 | return true; |
| 24 | |
| 25 | WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); |
| 26 | return atomic_dec_and_test(&req->refs); |
| 27 | } |
| 28 | |
| 29 | static inline void req_ref_get(struct io_kiocb *req) |
| 30 | { |
| 31 | WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); |
| 32 | WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); |
| 33 | atomic_inc(&req->refs); |
| 34 | } |
| 35 | |
| 36 | static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) |
| 37 | { |
| 38 | if (!(req->flags & REQ_F_REFCOUNT)) { |
| 39 | req->flags |= REQ_F_REFCOUNT; |
| 40 | atomic_set(&req->refs, nr); |
| 41 | } |
| 42 | } |
| 43 | |
/*
 * Enable refcounting on @req with the common initial count of one
 * (the submitter's reference). No-op if already refcounted.
 */
static inline void io_req_set_refcount(struct io_kiocb *req)
{
	__io_req_set_refcount(req, 1);
}
| 48 | #endif |