/* Internal definitions for the io_uring async offload work queue (io-wq) */
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

/* Opaque handle for a work queue; the definition is private to io-wq.c */
struct io_wq;
5
/*
 * Per-work flag bits stored in io_wq_work.flags.  The low bits are
 * individual feature/state flags; the bits above IO_WQ_HASH_SHIFT carry
 * the hash key for hashed (serialized) work.
 */
enum {
	IO_WQ_WORK_CANCEL	= 1 << 0,
	IO_WQ_WORK_HAS_MM	= 1 << 1,
	IO_WQ_WORK_HASHED	= 1 << 2,
	IO_WQ_WORK_NEEDS_USER	= 1 << 3,
	IO_WQ_WORK_NEEDS_FILES	= 1 << 4,
	IO_WQ_WORK_UNBOUND	= 1 << 5,
	IO_WQ_WORK_INTERNAL	= 1 << 6,
	IO_WQ_WORK_CB		= 1 << 7,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
18
/* Result of a cancellation attempt (io_wq_cancel_work/io_wq_cancel_cb) */
enum io_wq_cancel {
	IO_WQ_CANCEL_OK		= 0,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING	= 1,	/* found, running, and attempted cancelled */
	IO_WQ_CANCEL_NOTFOUND	= 2,	/* work not found */
};
24
/*
 * One unit of deferred work.  Queued via io_wq_enqueue*() and executed by
 * an io-wq worker, which invokes ->func.
 */
struct io_wq_work {
	union {
		struct list_head list;	/* linkage while queued on the wq */
		void *data;		/* NOTE(review): presumably the payload for
					 * IO_WQ_WORK_CB items — confirm in io-wq.c */
	};
	/* handler; takes a double pointer so it can hand back follow-up work */
	void (*func)(struct io_wq_work **);
	unsigned flags;			/* IO_WQ_WORK_* bits (+ hash key in top bits) */
	struct files_struct *files;	/* set when IO_WQ_WORK_NEEDS_FILES is used */
};
34
/*
 * Initialize a work item with the given handler.  Clears the flags and the
 * inherited-files pointer; the list/data union is left for the queueing path.
 *
 * Fix: the original had a stray trailing '\' after "} while (0)", which
 * silently continued the macro onto the following source line — harmless only
 * because that line happened to be blank.  Drop it.
 */
#define INIT_IO_WORK(work, _func)			\
	do {						\
		(work)->func = _func;			\
		(work)->flags = 0;			\
		(work)->files = NULL;			\
	} while (0)
41
/* Caller-supplied hooks to grab/drop a reference on a work item while the
 * wq holds on to it — NOTE(review): exact ref semantics live in io-wq.c */
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);

/*
 * Create a work queue with @bounded max bounded workers, charging worker
 * accounting to @user and running work under @mm where required.
 */
struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
				struct user_struct *user,
				get_work_fn *get_work, put_work_fn *put_work);
void io_wq_destroy(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
/* Queue @work keyed by @val — NOTE(review): presumably serializes items that
 * hash alike (see IO_WQ_HASH_SHIFT); confirm against io-wq.c */
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
void io_wq_flush(struct io_wq *wq);

void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

/* Predicate for io_wq_cancel_cb(): return true for work that should be cancelled */
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data);
61
/*
 * Worker state notifications — NOTE(review): presumably called from the
 * scheduler when an io-wq worker blocks/resumes; confirm at the call sites.
 * Compiled out to no-ops when io-wq is not built in.
 */
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
73
Jens Axboe960e4322019-11-12 07:56:39 -070074static inline bool io_wq_current_is_worker(void)
75{
76 return in_task() && (current->flags & PF_IO_WORKER);
77}
#endif /* INTERNAL_IO_WQ_H */