#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/list.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HAS_MM	= 2,
	IO_WQ_WORK_HASHED	= 4,
	IO_WQ_WORK_NEEDS_USER	= 8,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
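/*
 * Work flagged IO_WQ_WORK_HASHED is serialised against other work hashed
 * to the same key, and the key itself is carried in the upper 8 bits of
 * work->flags. Rough sketch of the encoding io_wq_enqueue_hashed() is
 * expected to apply internally ('hash' is illustrative):
 *
 *	work->flags |= IO_WQ_WORK_HASHED | (hash << IO_WQ_HASH_SHIFT);
 */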

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_work {
	struct list_head list;
	void (*func)(struct io_wq_work **);
	unsigned flags;
};

#define INIT_IO_WORK(work, _func)		\
	do {					\
		(work)->func = _func;		\
		(work)->flags = 0;		\
	} while (0)
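/*
 * Usage sketch (all names below are illustrative, not part of this API):
 * a work item is normally embedded in a larger request and initialised
 * with INIT_IO_WORK() before it is queued. The handler receives a pointer
 * to the work pointer, which lets it hand back follow-on work to run next,
 * and it should honour IO_WQ_WORK_CANCEL if io-wq flags the item as
 * cancelled.
 *
 *	struct foo_req {
 *		struct io_wq_work	work;
 *		// request-private state ...
 *	};
 *
 *	static void foo_work_handler(struct io_wq_work **workptr)
 *	{
 *		struct io_wq_work *work = *workptr;
 *		struct foo_req *req = container_of(work, struct foo_req, work);
 *
 *		if (work->flags & IO_WQ_WORK_CANCEL) {
 *			// bail out, the request was cancelled
 *			return;
 *		}
 *		// do the actual (possibly blocking) work for req ...
 *	}
 *
 *	INIT_IO_WORK(&req->work, foo_work_handler);
 */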

struct io_wq *io_wq_create(unsigned concurrency, struct mm_struct *mm);
void io_wq_destroy(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
void io_wq_flush(struct io_wq *wq);
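/*
 * Lifecycle sketch. Assumptions: io_wq_create() reports failure via
 * ERR_PTR(), as its io_uring caller checks with IS_ERR(); 'req', 'other',
 * 'file' and the choice of an inode as the hash key are illustrative only.
 * 'concurrency' bounds how many workers may run, and 'mm' is the address
 * space workers attach to for items flagged IO_WQ_WORK_HAS_MM.
 *
 *	struct io_wq *wq;
 *
 *	wq = io_wq_create(32, current->mm);
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 *
 *	io_wq_enqueue(wq, &req->work);
 *	// serialise against other work on the same target, e.g. one inode:
 *	io_wq_enqueue_hashed(wq, &other->work, file_inode(file));
 *
 *	io_wq_flush(wq);	// wait for previously queued work
 *	io_wq_destroy(wq);
 */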

void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
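/*
 * Cancellation sketch ('req' is illustrative): io_wq_cancel_work() reports
 * how far the work had gotten, and the caller acts on that.
 *
 *	switch (io_wq_cancel_work(wq, &req->work)) {
 *	case IO_WQ_CANCEL_OK:
 *		// never ran; safe to complete the request as cancelled
 *		break;
 *	case IO_WQ_CANCEL_RUNNING:
 *		// already running; cancellation was only attempted, so the
 *		// handler still has to finish the item
 *		break;
 *	case IO_WQ_CANCEL_NOTFOUND:
 *		// not queued here (e.g. already completed)
 *		break;
 *	}
 */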

/*
 * Hooks for the scheduler to tell io-wq that one of its workers is about
 * to sleep or has started running again, so io-wq can keep its view of
 * active workers up to date; they compile away when io-wq is not built in.
 */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

#endif