task_rq(p)

task_rq(p)  获取进程p所在的runqueue的首地址
--------------------------------------------
/* task_rq(p): address of the runqueue of the CPU returned by task_cpu(p). */
#define task_rq(p)      cpu_rq(task_cpu(p))
/* cpu_rq(cpu): address of the per-CPU 'runqueues' instance for @cpu. */
#define cpu_rq(cpu)     (&per_cpu(runqueues, (cpu)))


task_cpu(p) 获取进程p所在CPU的编号
-------------------------------------------
/*
 * task_cpu - return the number of the CPU that task @p is assigned to.
 * Reads the cached cpu field from the task's thread_info
 * (older kernel layout — NOTE(review): modern kernels store this
 * differently; this snippet is quoted from an old source tree).
 */
static inline unsigned int task_cpu(const struct task_struct *p)
{
    return p->thread_info->cpu;
}

cpu_rq(cpu) 获取编号为cpu的处理器的runqueue的首地址
--------------------------------------------
/*
 * per_cpu(var, cpu): access the instance of per-CPU variable @var that
 * belongs to CPU @cpu, by adding that CPU's area offset
 * (__per_cpu_offset[cpu]) to the base symbol per_cpu__<var>.
 *
 * NOTE: the original page garbled the macro continuation characters,
 * turning '\' into '/'; restored here so the macros are valid C.
 */
#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))

/*
 * RELOC_HIDE(ptr, off): compute @ptr + @off (in bytes) while hiding the
 * arithmetic from the compiler — the empty asm makes __ptr opaque, so GCC
 * cannot assume the result still points into the original object and
 * mis-optimize the out-of-object pointer arithmetic.
 */
#define RELOC_HIDE(ptr, off)                    \
  ({ unsigned long __ptr;                   \
    __asm__ ("" : "=g"(__ptr) : "0"(ptr));      \
    (typeof(ptr)) (__ptr + (off)); })



static int 4085 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) 4086 { 4087 unsigned long flags; 4088 int cpu, success = 0; 4089 4090 preempt_disable(); 4091 if (p == current) { 4092 /* 4093 * We're waking current, this means 'p->on_rq' and 'task_cpu(p) 4094 * == smp_processor_id()'. Together this means we can special 4095 * case the whole 'p->on_rq && ttwu_runnable()' case below 4096 * without taking any locks. 4097 * 4098 * In particular: 4099 * - we rely on Program-Order guarantees for all the ordering, 4100 * - we're serialized against set_special_state() by virtue of 4101 * it disabling IRQs (this allows not taking ->pi_lock). 4102 */ 4103 if (!ttwu_state_match(p, state, &success)) 4104 goto out; 4105 4106 trace_sched_waking(p); 4107 WRITE_ONCE(p->__state, TASK_RUNNING); 4108 trace_sched_wakeup(p); 4109 goto out; 4110 } 4111 4112 /* 4113 * If we are going to wake up a thread waiting for CONDITION we 4114 * need to ensure that CONDITION=1 done by the caller can not be 4115 * reordered with p->state check below. This pairs with smp_store_mb() 4116 * in set_current_state() that the waiting thread does. 4117 */ 4118 raw_spin_lock_irqsave(&p->pi_lock, flags); 4119 smp_mb__after_spinlock(); 4120 if (!ttwu_state_match(p, state, &success)) 4121 goto unlock; 4122 4123 #ifdef CONFIG_FREEZER 4124 /* 4125 * If we're going to wake up a thread which may be frozen, then 4126 * we can only do so if we have an active CPU which is capable of 4127 * running it. This may not be the case when resuming from suspend, 4128 * as the secondary CPUs may not yet be back online. See __thaw_task() 4129 * for the actual wakeup. 
4130 */ 4131 if (unlikely(frozen_or_skipped(p)) && 4132 !cpumask_intersects(cpu_active_mask, task_cpu_possible_mask(p))) 4133 goto unlock; 4134 #endif 4135 4136 trace_sched_waking(p); 4137 4138 /* 4139 * Ensure we load p->on_rq _after_ p->state, otherwise it would 4140 * be possible to, falsely, observe p->on_rq == 0 and get stuck 4141 * in smp_cond_load_acquire() below. 4142 * 4143 * sched_ttwu_pending() try_to_wake_up() 4144 * STORE p->on_rq = 1 LOAD p->state 4145 * UNLOCK rq->lock 4146 * 4147 * __schedule() (switch to task 'p') 4148 * LOCK rq->lock smp_rmb(); 4149 * smp_mb__after_spinlock(); 4150 * UNLOCK rq->lock 4151 * 4152 * [task p] 4153 * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq 4154 * 4155 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4156 * __schedule(). See the comment for smp_mb__after_spinlock(). 4157 * 4158 * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). 4159 */ 4160 smp_rmb(); 4161 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) 4162 goto unlock; 4163 4164 if (READ_ONCE(p->__state) & TASK_UNINTERRUPTIBLE) 4165 trace_sched_blocked_reason(p); 4166 4167 #ifdef CONFIG_SMP 4168 /* 4169 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be 4170 * possible to, falsely, observe p->on_cpu == 0. 4171 * 4172 * One must be running (->on_cpu == 1) in order to remove oneself 4173 * from the runqueue. 4174 * 4175 * __schedule() (switch to task 'p') try_to_wake_up() 4176 * STORE p->on_cpu = 1 LOAD p->on_rq 4177 * UNLOCK rq->lock 4178 * 4179 * __schedule() (put 'p' to sleep) 4180 * LOCK rq->lock smp_rmb(); 4181 * smp_mb__after_spinlock(); 4182 * STORE p->on_rq = 0 LOAD p->on_cpu 4183 * 4184 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in 4185 * __schedule(). See the comment for smp_mb__after_spinlock(). 4186 * 4187 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure 4188 * schedule()'s deactivate_task() has 'happened' and p will no longer 4189 * care about it's own p->state. 
See the comment in __schedule(). 4190 */ 4191 smp_acquire__after_ctrl_dep(); 4192 4193 /* 4194 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq 4195 * == 0), which means we need to do an enqueue, change p->state to 4196 * TASK_WAKING such that we can unlock p->pi_lock before doing the 4197 * enqueue, such as ttwu_queue_wakelist(). 4198 */ 4199 WRITE_ONCE(p->__state, TASK_WAKING); 4200 4201 /* 4202 * If the owning (remote) CPU is still in the middle of schedule() with 4203 * this task as prev, considering queueing p on the remote CPUs wake_list 4204 * which potentially sends an IPI instead of spinning on p->on_cpu to 4205 * let the waker make forward progress. This is safe because IRQs are 4206 * disabled and the IPI will deliver after on_cpu is cleared. 4207 * 4208 * Ensure we load task_cpu(p) after p->on_cpu: 4209 * 4210 * set_task_cpu(p, cpu); 4211 * STORE p->cpu = @cpu 4212 * __schedule() (switch to task 'p') 4213 * LOCK rq->lock 4214 * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) 4215 * STORE p->on_cpu = 1 LOAD p->cpu 4216 * 4217 * to ensure we observe the correct CPU on which the task is currently 4218 * scheduling. 4219 */ 4220 if (smp_load_acquire(&p->on_cpu) && 4221 ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) 4222 goto unlock; 4223 4224 /* 4225 * If the owning (remote) CPU is still in the middle of schedule() with 4226 * this task as prev, wait until it's done referencing the task. 4227 * 4228 * Pairs with the smp_store_release() in finish_task(). 4229 * 4230 * This ensures that tasks getting woken will be fully ordered against 4231 * their previous state and preserve Program Order. 
4232 */ 4233 smp_cond_load_acquire(&p->on_cpu, !VAL); 4234 4235 trace_android_rvh_try_to_wake_up(p); 4236 4237 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); 4238 if (task_cpu(p) != cpu) { 4239 if (p->in_iowait) { 4240 delayacct_blkio_end(p); 4241 atomic_dec(&task_rq(p)->nr_iowait); 4242 } 4243 4244 wake_flags |= WF_MIGRATED; 4245 psi_ttwu_dequeue(p); 4246 set_task_cpu(p, cpu); 4247 } 4248 #else 4249 cpu = task_cpu(p); 4250 #endif /* CONFIG_SMP */ 4251 4252 ttwu_queue(p, cpu, wake_flags); 4253 unlock: 4254 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4255 out: 4256 if (success) { 4257 trace_android_rvh_try_to_wake_up_success(p); 4258 ttwu_stat(p, task_cpu(p), wake_flags); 4259 } 4260 preempt_enable(); 4261 4262 return success; 4263 } 逐行解读代码并给出解释
最新发布
07-04
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值