/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_subsys_state() and friends because the cgroup
 * subsystem changes that value before the cgroup_subsys::attach() method
 * is called, therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 * core changes this before calling sched_move_task().
 *
 * (The earlier scheme of task_subsys_state_check() extended with
 * p->pi_lock and rq->lock — relying on cpu_cgroup_attach() taking those
 * locks to pin the task to its cgroup — was racy for exactly this reason.)
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
static inline struct task_group *task_group(struct task_struct *p)
548
struct task_group *tg;
549
struct cgroup_subsys_state *css;
551
css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
552
lockdep_is_held(&p->pi_lock) ||
553
lockdep_is_held(&task_rq(p)->lock));
554
tg = container_of(css, struct task_group, css);
556
return autogroup_task_group(p, tg);
553
return p->sched_task_group;
559
556
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */