cgroup: more naming cleanups
Consistently use @cset for css_set variables and @cgrp for cgroup variables.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 6f4b7e632d
parent e0798ce273
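For reference, a minimal sketch of the convention this patch converges on (illustrative only, not part of the diff; example_fill is a hypothetical helper): css_set pointers are spelled @cset and cgroup pointers @cgrp, instead of the ambiguous @cg used for both before.

	struct task_and_cgroup {
		struct task_struct	*task;
		struct cgroup		*cgrp;	/* cgroup pointer: @cgrp, not @cg */
		struct css_set		*cset;	/* css_set pointer: @cset, not @cg */
	};

	/* hypothetical helper, only to show how the renamed fields read */
	static int example_fill(struct task_and_cgroup *tc,
				struct css_set *old_cset, struct cgroup *cgrp)
	{
		tc->cset = find_css_set(old_cset, cgrp);
		return tc->cset ? 0 : -ENOMEM;
	}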
include/linux/cgroup.h

@@ -394,8 +394,8 @@ struct cgroup_map_cb {
 
 /* cftype->flags */
 enum {
-	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cg */
-	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cg */
+	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
+	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
 	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
 };
 
@@ -513,7 +513,7 @@ struct cftype_set {
 };
 
 struct cgroup_scanner {
-	struct cgroup *cg;
+	struct cgroup *cgrp;
 	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
 	void (*process_task)(struct task_struct *p,
 			struct cgroup_scanner *scan);
kernel/cgroup.c

@@ -466,7 +466,7 @@ static inline void put_css_set_taskexit(struct css_set *cset)
  * @new_cgrp: cgroup that's being entered by the task
  * @template: desired set of css pointers in css_set (pre-calculated)
  *
- * Returns true if "cg" matches "old_cg" except for the hierarchy
+ * Returns true if "cset" matches "old_cset" except for the hierarchy
  * which "new_cgrp" belongs to, for which it should match "new_cgrp".
  */
 static bool compare_css_sets(struct css_set *cset,
@@ -1839,7 +1839,7 @@ EXPORT_SYMBOL_GPL(task_cgroup_path_from_hierarchy);
 struct task_and_cgroup {
 	struct task_struct	*task;
 	struct cgroup		*cgrp;
-	struct css_set		*cg;
+	struct css_set		*cset;
 };
 
 struct cgroup_taskset {
@@ -2057,8 +2057,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
 		tc = flex_array_get(group, i);
 		old_cset = task_css_set(tc->task);
-		tc->cg = find_css_set(old_cset, cgrp);
-		if (!tc->cg) {
+		tc->cset = find_css_set(old_cset, cgrp);
+		if (!tc->cset) {
 			retval = -ENOMEM;
 			goto out_put_css_set_refs;
 		}
@@ -2071,7 +2071,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 	 */
 	for (i = 0; i < group_size; i++) {
 		tc = flex_array_get(group, i);
-		cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
+		cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
 	}
 	/* nothing is sensitive to fork() after this point. */
 
@@ -2091,9 +2091,9 @@ out_put_css_set_refs:
 	if (retval) {
 		for (i = 0; i < group_size; i++) {
 			tc = flex_array_get(group, i);
-			if (!tc->cg)
+			if (!tc->cset)
 				break;
-			put_css_set(tc->cg);
+			put_css_set(tc->cset);
 		}
 	}
 out_cancel_attach:
@@ -2203,9 +2203,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 
 	mutex_lock(&cgroup_mutex);
 	for_each_active_root(root) {
-		struct cgroup *from_cg = task_cgroup_from_root(from, root);
+		struct cgroup *from_cgrp = task_cgroup_from_root(from, root);
 
-		retval = cgroup_attach_task(from_cg, tsk, false);
+		retval = cgroup_attach_task(from_cgrp, tsk, false);
 		if (retval)
 			break;
 	}
@@ -3305,8 +3305,8 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
 	 * guarantees forward progress and that we don't miss any tasks.
 	 */
 	heap->size = 0;
-	cgroup_iter_start(scan->cg, &it);
-	while ((p = cgroup_iter_next(scan->cg, &it))) {
+	cgroup_iter_start(scan->cgrp, &it);
+	while ((p = cgroup_iter_next(scan->cgrp, &it))) {
 		/*
 		 * Only affect tasks that qualify per the caller's callback,
 		 * if he provided one
@@ -3339,7 +3339,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
 		 * the heap and wasn't inserted
 		 */
 	}
-	cgroup_iter_end(scan->cg, &it);
+	cgroup_iter_end(scan->cgrp, &it);
 
 	if (heap->size) {
 		for (i = 0; i < heap->size; i++) {
@@ -3385,7 +3385,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 {
 	struct cgroup_scanner scan;
 
-	scan.cg = from;
+	scan.cgrp = from;
 	scan.test_task = NULL; /* select all tasks in cgroup */
 	scan.process_task = cgroup_transfer_one_task;
 	scan.heap = NULL;
kernel/cpuset.c

@@ -845,7 +845,7 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 {
 	struct cpuset *cpus_cs;
 
-	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
+	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cgrp));
 	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
 }
 
@@ -866,7 +866,7 @@ static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
 	struct cgroup_scanner scan;
 
-	scan.cg = cs->css.cgroup;
+	scan.cgrp = cs->css.cgroup;
 	scan.test_task = NULL;
 	scan.process_task = cpuset_change_cpumask;
 	scan.heap = heap;
@@ -1062,7 +1062,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 static void cpuset_change_nodemask(struct task_struct *p,
 				   struct cgroup_scanner *scan)
 {
-	struct cpuset *cs = cgroup_cs(scan->cg);
+	struct cpuset *cs = cgroup_cs(scan->cgrp);
 	struct mm_struct *mm;
 	int migrate;
 	nodemask_t *newmems = scan->data;
@@ -1102,7 +1102,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 
 	guarantee_online_mems(mems_cs, &newmems);
 
-	scan.cg = cs->css.cgroup;
+	scan.cgrp = cs->css.cgroup;
 	scan.test_task = NULL;
 	scan.process_task = cpuset_change_nodemask;
 	scan.heap = heap;
@@ -1275,7 +1275,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 static void cpuset_change_flag(struct task_struct *tsk,
 				struct cgroup_scanner *scan)
 {
-	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+	cpuset_update_task_spread_flag(cgroup_cs(scan->cgrp), tsk);
 }
 
 /*
@@ -1295,7 +1295,7 @@ static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
 {
 	struct cgroup_scanner scan;
 
-	scan.cg = cs->css.cgroup;
+	scan.cgrp = cs->css.cgroup;
 	scan.test_task = NULL;
 	scan.process_task = cpuset_change_flag;
 	scan.heap = heap;
@@ -1971,7 +1971,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct cpuset *parent = parent_cs(cs);
 	struct cpuset *tmp_cs;
-	struct cgroup *pos_cg;
+	struct cgroup *pos_cgrp;
 
 	if (!parent)
 		return 0;
@@ -2003,7 +2003,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
 	 * (and likewise for mems) to the new cgroup.
 	 */
 	rcu_read_lock();
-	cpuset_for_each_child(tmp_cs, pos_cg, parent) {
+	cpuset_for_each_child(tmp_cs, pos_cgrp, parent) {
 		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
 			rcu_read_unlock();
 			goto out_unlock;