#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/io_uring.h>

struct io_wq;

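/*
 * Work item flags, stored in io_wq_work->flags. The bits from
 * IO_WQ_HASH_SHIFT upwards hold the hash key for IO_WQ_WORK_HASHED
 * work (see io_wq_hash_work() below).
 */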
enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

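/* Outcome of an io_wq_cancel_cb() cancellation attempt */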
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before it started running */
	IO_WQ_CANCEL_RUNNING,	/* found running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

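/* Link @node into @list directly after @pos */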
static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

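/*
 * Append @node to the tail of @list. The WRITE_ONCE() on list->first
 * pairs with the READ_ONCE() in wq_list_empty(), which may peek at the
 * list without holding the lock that protects modifications.
 */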
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
	node->next = NULL;
}

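/*
 * Unlink the chain of nodes following @prev, up to and including @last,
 * from @list. With @prev == NULL the cut starts at the list head.
 */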
static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

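/* Remove a single @node; the caller must track @prev while iterating */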
static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

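/*
 * Iterate the list while keeping the previous node at hand, so an entry
 * can be unlinked in place. A minimal sketch, assuming a hypothetical
 * match() predicate and a list protected by the appropriate lock:
 *
 *	struct io_wq_work_node *pos, *prev;
 *
 *	wq_list_for_each(pos, prev, &list) {
 *		struct io_wq_work *work;
 *
 *		work = container_of(pos, struct io_wq_work, list);
 *		if (match(work)) {
 *			wq_list_del(&list, pos, prev);
 *			break;
 *		}
 *	}
 */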
#define wq_list_for_each(pos, prv, head) \
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {		\
	(list)->first = NULL;			\
	(list)->last = NULL;			\
} while (0)

struct io_wq_work {
	struct io_wq_work_node list;
	const struct cred *creds;
	unsigned flags;
};

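/* Return the work item queued after @work, or NULL if it is the last one */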
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

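/*
 * io_wq_work_fn executes a work item; free_work_fn releases a completed
 * one and, judging by its return type, may hand back a follow-up (e.g.
 * linked) work item for the worker to run next.
 */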
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

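/*
 * Refcounted state for serialising hashed work: @map tracks which hash
 * buckets currently have work in flight, and workers wait on @wait for
 * a busy bucket to clear.
 */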
struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

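/* Creation parameters for an io_wq, consumed by io_wq_create() */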
struct io_wq_data {
	struct io_wq_hash *hash;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

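/*
 * A minimal creation sketch, assuming hypothetical my_do_work() and
 * my_free_work() callbacks and an already-allocated, refcounted @hash:
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.do_work	= my_do_work,
 *		.free_work	= my_free_work,
 *	};
 *	struct io_wq *wq = io_wq_create(bounded_workers, &data);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 */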
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

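/*
 * io_wq_enqueue() queues work for async execution; io_wq_hash_work()
 * marks work as hashed by @val, so that work hashing to the same bucket
 * is serialised rather than run in parallel.
 */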
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

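/* Fork an io-worker style thread that runs fn(arg); returns its pid */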
pid_t io_wq_fork_thread(int (*fn)(void *), void *arg);

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

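/*
 * Cancel queued or running work for which @cancel returns true. With
 * @cancel_all set, keep going after the first match; otherwise stop
 * there. The result is one of the io_wq_cancel states above.
 */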
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

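/* Scheduler hooks, called when an io-wq worker blocks or resumes */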
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

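/* True if the current task is an io-wq worker thread */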
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->pf_io_worker;
}
#endif