[Devel] [PATCH RHEL7 COMMIT] Use ve init task's css instead of opening cgroup via vfs
Vladimir Davydov
vdavydov at virtuozzo.com
Mon Jun 20 10:05:19 PDT 2016
The commit is pushed to "branch-rh7-3.10.0-327.18.2.vz7.14.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.18.2.vz7.14.16
------>
commit 083ecd8a5051975639669e3349a17e07d299c299
Author: Vladimir Davydov <vdavydov at virtuozzo.com>
Date: Mon Jun 20 19:40:13 2016 +0300
Use ve init task's css instead of opening cgroup via vfs
Currently, whenever we need to get the cpu or devices cgroup corresponding
to a ve, we open it with cgroup_kernel_open(). This is inflexible, because
it relies on all container cgroups living at a fixed location (the top
level) that can never change. Since we want to move container cgroups to
machine.slice, we need to rework this.
This patch does the trick. It makes each ve remember its init task at
container start and use the css of that task whenever we need the
corresponding cgroup. Note that after this patch is applied, we no longer
need to mount the cpu and devices cgroups in the kernel.
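For readers skimming the diff, the new lookup path boils down to the sketch
below, distilled from the helpers this patch adds (ve_get_init_css() and its
cpu-cgroup callers in kernel/ve/ve.c); the _sketch suffix is only for
illustration and is not a symbol added by the patch:

/*
 * Condensed from the diff below: instead of opening the container's
 * cgroup by name via cgroup_kernel_open(), take the css that the ve's
 * init task is attached to, pin it, and use its ->cgroup pointer.
 */
static int ve_show_loadavg_sketch(struct ve_struct *ve, struct seq_file *p)
{
	struct cgroup_subsys_state *css;
	int err;

	/*
	 * ve_get_init_css() (added by this patch) resolves the css of
	 * ve->init_task for the given subsystem under rcu_read_lock()
	 * and pins it with css_tryget(), retrying while the css is dying.
	 */
	css = ve_get_init_css(ve, cpu_cgroup_subsys_id);
	err = cpu_cgroup_proc_loadavg(css->cgroup, NULL, p);
	css_put(css);
	return err;
}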
https://jira.sw.ru/browse/PSBM-48629
Signed-off-by: Vladimir Davydov <vdavydov at virtuozzo.com>
---
fs/proc/loadavg.c             |  3 +-
fs/proc/stat.c                |  3 +-
fs/proc/uptime.c              | 15 ++++----
include/linux/device_cgroup.h |  5 ++-
include/linux/fairsched.h     | 23 ------------
include/linux/ve.h            | 18 ++++++++++
kernel/fairsched.c            | 61 --------------------------------
kernel/ve/ve.c                | 82 ++++++++++++++++++++++++++++++++++++++++++-
kernel/ve/vecalls.c           | 67 ++++-------------------------------
security/device_cgroup.c      | 19 +++++-----
10 files changed, 126 insertions(+), 170 deletions(-)
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index 4cbdeef1aa71..40d8a90b0f13 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -6,7 +6,6 @@
#include <linux/seq_file.h>
#include <linux/seqlock.h>
#include <linux/time.h>
-#include <linux/fairsched.h>
#include <linux/ve.h>
#define LOAD_INT(x) ((x) >> FSHIFT)
@@ -20,7 +19,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v)
ve = get_exec_env();
if (!ve_is_super(ve)) {
int ret;
- ret = fairsched_show_loadavg(ve_name(ve), m);
+ ret = ve_show_loadavg(ve, m);
if (ret != -ENOSYS)
return ret;
}
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index e9991db527e0..7f7e87c855e4 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -10,7 +10,6 @@
#include <linux/irqnr.h>
#include <linux/cputime.h>
#include <linux/tick.h>
-#include <linux/fairsched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/ve.h>
@@ -98,7 +97,7 @@ static int show_stat(struct seq_file *p, void *v)
ve = get_exec_env();
if (!ve_is_super(ve)) {
int ret;
- ret = fairsched_show_stat(ve_name(ve), p);
+ ret = ve_show_cpu_stat(ve, p);
if (ret != -ENOSYS)
return ret;
}
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 6fd56831c796..8fa578e8a553 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -5,7 +5,6 @@
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/cputime.h>
-#include <linux/fairsched.h>
#include <linux/ve.h>
#include <linux/cgroup.h>
@@ -25,11 +24,11 @@ static inline void get_ve0_idle(struct timespec *idle)
idle->tv_nsec = rem;
}
-static inline void get_veX_idle(struct timespec *idle, struct cgroup* cgrp)
+static inline void get_veX_idle(struct ve_struct *ve, struct timespec *idle)
{
struct kernel_cpustat kstat;
- cpu_cgroup_get_stat(cgrp, &kstat);
+ ve_get_cpu_stat(ve, &kstat);
cputime_to_timespec(kstat.cpustat[CPUTIME_IDLE], idle);
}
@@ -37,14 +36,12 @@ static int uptime_proc_show(struct seq_file *m, void *v)
{
struct timespec uptime;
struct timespec idle;
+ struct ve_struct *ve = get_exec_env();
- if (ve_is_super(get_exec_env()))
+ if (ve_is_super(ve))
get_ve0_idle(&idle);
- else {
- rcu_read_lock();
- get_veX_idle(&idle, task_cgroup(current, cpu_cgroup_subsys_id));
- rcu_read_unlock();
- }
+ else
+ get_veX_idle(ve, &idle);
do_posix_clock_monotonic_gettime(&uptime);
monotonic_to_bootbased(&uptime);
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 64c2da27278c..25ea2270aabe 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -16,10 +16,9 @@ extern int devcgroup_device_permission(umode_t mode, dev_t dev, int mask);
extern int devcgroup_device_visible(umode_t mode, int major,
int start_minor, int nr_minors);
-struct cgroup;
-int devcgroup_set_perms_ve(struct cgroup *cgroup, unsigned, dev_t, unsigned);
struct ve_struct;
-int devcgroup_seq_show_ve(struct cgroup *devices_root, struct ve_struct *ve, struct seq_file *m);
+int devcgroup_set_perms_ve(struct ve_struct *, unsigned, dev_t, unsigned);
+int devcgroup_seq_show_ve(struct ve_struct *, struct seq_file *);
#else
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
diff --git a/include/linux/fairsched.h b/include/linux/fairsched.h
index b73f51eadabc..5f2ab0c15fa5 100644
--- a/include/linux/fairsched.h
+++ b/include/linux/fairsched.h
@@ -51,31 +51,8 @@ asmlinkage long sys_fairsched_cpumask(unsigned int id, unsigned int len,
asmlinkage long sys_fairsched_nodemask(unsigned int id, unsigned int len,
unsigned long __user *user_mask_ptr);
-int fairsched_get_cpu_stat(const char *name, struct kernel_cpustat *kstat);
-
-int cpu_cgroup_get_avenrun(struct cgroup *cgrp, unsigned long *avenrun);
-int fairsched_get_cpu_avenrun(const char *name, unsigned long *avenrun);
-
-struct cftype;
-int cpu_cgroup_proc_stat(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *p);
-int fairsched_show_stat(const char *name, struct seq_file *p);
-
-int cpu_cgroup_proc_loadavg(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *p);
-int fairsched_show_loadavg(const char *name, struct seq_file *p);
-
-#else /* CONFIG_VZ_FAIRSCHED */
-
-static inline int fairsched_show_stat(const char *name, struct seq_file *p) { return -ENOSYS; }
-static inline int fairsched_show_loadavg(const char *name, struct seq_file *p) { return -ENOSYS; }
-static inline int fairsched_get_cpu_avenrun(const char *name, unsigned long *avenrun) { return -ENOSYS; }
-static inline int fairsched_get_cpu_stat(const char *name, struct kernel_cpustat *kstat) { return -ENOSYS; }
-
#endif /* CONFIG_VZ_FAIRSCHED */
-void cpu_cgroup_get_stat(struct cgroup *cgrp, struct kernel_cpustat *kstat);
-
#endif /* __KERNEL__ */
#endif /* __LINUX_FAIRSCHED_H__ */
diff --git a/include/linux/ve.h b/include/linux/ve.h
index 459c8bc581d9..cea3a87cb9c0 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -104,6 +104,7 @@ struct ve_struct {
u64 _uevent_seqnum;
struct nsproxy __rcu *ve_ns;
+ struct task_struct *init_task;
struct cred *init_cred;
struct net *ve_netns;
@@ -190,6 +191,8 @@ void do_update_load_avg_ve(void);
extern struct ve_struct *get_ve(struct ve_struct *ve);
extern void put_ve(struct ve_struct *ve);
+struct cgroup_subsys_state *ve_get_init_css(struct ve_struct *ve, int subsys_id);
+
static inline struct ve_struct *cgroup_ve(struct cgroup *cgroup)
{
return container_of(cgroup_subsys_state(cgroup, ve_subsys_id),
@@ -272,4 +275,19 @@ static inline void ve_mount_nr_inc(void) { }
static inline void ve_mount_nr_dec(void) { }
#endif /* CONFIG_VE */
+struct seq_file;
+struct kernel_cpustat;
+
+#if defined(CONFIG_VE) && defined(CONFIG_CGROUP_SCHED)
+int ve_show_cpu_stat(struct ve_struct *ve, struct seq_file *p);
+int ve_show_loadavg(struct ve_struct *ve, struct seq_file *p);
+int ve_get_cpu_avenrun(struct ve_struct *ve, unsigned long *avenrun);
+int ve_get_cpu_stat(struct ve_struct *ve, struct kernel_cpustat *kstat);
+#else
+static inline int ve_show_cpu_stat(struct ve_struct *ve, struct seq_file *p) { return -ENOSYS; }
+static inline int ve_show_loadavg(struct ve_struct *ve, struct seq_file *p) { return -ENOSYS; }
+static inline int ve_get_cpu_avenrun(struct ve_struct *ve, unsigned long *avenrun) { return -ENOSYS; }
+static inline int ve_get_cpu_stat(struct ve_struct *ve, struct kernel_cpustat *kstat) { return -ENOSYS; }
+#endif
+
#endif /* _LINUX_VE_H */
diff --git a/kernel/fairsched.c b/kernel/fairsched.c
index da1a1a90c685..4d96b4a9a3e0 100644
--- a/kernel/fairsched.c
+++ b/kernel/fairsched.c
@@ -665,67 +665,6 @@ static struct file_operations proc_fairsched_operations = {
.release = fairsched_seq_release
};
-int fairsched_show_stat(const char *name, struct seq_file *p)
-{
- struct cgroup *cgrp;
- int err;
-
- cgrp = cgroup_kernel_open(root_node.cpu, 0, name);
- if (IS_ERR_OR_NULL(cgrp))
- return cgrp ? PTR_ERR(cgrp) : -ENOENT;
-
- err = cpu_cgroup_proc_stat(cgrp, NULL, p);
- cgroup_kernel_close(cgrp);
-
- return err;
-}
-
-int fairsched_show_loadavg(const char *name, struct seq_file *p)
-{
- struct cgroup *cgrp;
- int err;
-
- cgrp = cgroup_kernel_open(root_node.cpu, 0, name);
- if (IS_ERR_OR_NULL(cgrp))
- return cgrp ? PTR_ERR(cgrp) : -ENOENT;
-
- err = cpu_cgroup_proc_loadavg(cgrp, NULL, p);
- cgroup_kernel_close(cgrp);
-
- return err;
-}
-
-int fairsched_get_cpu_avenrun(const char *name, unsigned long *avenrun)
-{
- struct cgroup *cgrp;
- int err;
-
- cgrp = cgroup_kernel_open(root_node.cpu, 0, name);
- if (IS_ERR_OR_NULL(cgrp))
- return cgrp ? PTR_ERR(cgrp) : -ENOENT;
-
- err = cpu_cgroup_get_avenrun(cgrp, avenrun);
- cgroup_kernel_close(cgrp);
-
- return 0;
-}
-EXPORT_SYMBOL(fairsched_get_cpu_avenrun);
-
-int fairsched_get_cpu_stat(const char *name, struct kernel_cpustat *kstat)
-{
- struct cgroup *cgrp;
-
- cgrp = cgroup_kernel_open(root_node.cpu, 0, name);
- if (IS_ERR_OR_NULL(cgrp))
- return cgrp ? PTR_ERR(cgrp) : -ENOENT;
-
- cpu_cgroup_get_stat(cgrp, kstat);
- cgroup_kernel_close(cgrp);
-
- return 0;
-}
-EXPORT_SYMBOL(fairsched_get_cpu_stat);
-
#endif /* CONFIG_PROC_FS */
extern int sysctl_sched_rt_runtime;
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 42f3d4a8aa3b..9904a4ae130e 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -104,6 +104,23 @@ void put_ve(struct ve_struct *ve)
}
EXPORT_SYMBOL(put_ve);
+struct cgroup_subsys_state *ve_get_init_css(struct ve_struct *ve, int subsys_id)
+{
+ struct cgroup_subsys_state *css;
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = ve->ve_ns ? ve->init_task : &init_task;
+ while (true) {
+ css = task_subsys_state(task, subsys_id);
+ if (likely(css_tryget(css)))
+ break;
+ cpu_relax();
+ }
+ rcu_read_unlock();
+ return css;
+}
+
static int ve_list_add(struct ve_struct *ve)
{
int err;
@@ -401,6 +418,8 @@ static void ve_grab_context(struct ve_struct *ve)
{
struct task_struct *tsk = current;
+ get_task_struct(tsk);
+ ve->init_task = tsk;
ve->init_cred = (struct cred *)get_current_cred();
rcu_assign_pointer(ve->ve_ns, get_nsproxy(tsk->nsproxy));
ve->ve_netns = get_net(ve->ve_ns->net_ns);
@@ -418,13 +437,17 @@ static void ve_drop_context(struct ve_struct *ve)
put_net(ve->ve_netns);
ve->ve_netns = NULL;
- /* Allows to dereference init_cred if ve_ns is set */
+ /* Allows to dereference init_cred and init_task if ve_ns is set */
rcu_assign_pointer(ve->ve_ns, NULL);
synchronize_rcu();
put_nsproxy(ve_ns);
put_cred(ve->init_cred);
ve->init_cred = NULL;
+
+ put_task_struct(ve->init_task);
+ ve->init_task = NULL;
+
}
static const struct timespec zero_time = { };
@@ -1377,3 +1400,60 @@ static int __init ve_subsys_init(void)
return 0;
}
late_initcall(ve_subsys_init);
+
+#ifdef CONFIG_CGROUP_SCHED
+int cpu_cgroup_proc_stat(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *p);
+
+int ve_show_cpu_stat(struct ve_struct *ve, struct seq_file *p)
+{
+ struct cgroup_subsys_state *css;
+ int err;
+
+ css = ve_get_init_css(ve, cpu_cgroup_subsys_id);
+ err = cpu_cgroup_proc_stat(css->cgroup, NULL, p);
+ css_put(css);
+ return err;
+}
+
+int cpu_cgroup_proc_loadavg(struct cgroup *cgrp, struct cftype *cft,
+ struct seq_file *p);
+
+int ve_show_loadavg(struct ve_struct *ve, struct seq_file *p)
+{
+ struct cgroup_subsys_state *css;
+ int err;
+
+ css = ve_get_init_css(ve, cpu_cgroup_subsys_id);
+ err = cpu_cgroup_proc_loadavg(css->cgroup, NULL, p);
+ css_put(css);
+ return err;
+}
+
+int cpu_cgroup_get_avenrun(struct cgroup *cgrp, unsigned long *avenrun);
+
+int ve_get_cpu_avenrun(struct ve_struct *ve, unsigned long *avenrun)
+{
+ struct cgroup_subsys_state *css;
+ int err;
+
+ css = ve_get_init_css(ve, cpu_cgroup_subsys_id);
+ err = cpu_cgroup_get_avenrun(css->cgroup, avenrun);
+ css_put(css);
+ return err;
+}
+EXPORT_SYMBOL(ve_get_cpu_avenrun);
+
+void cpu_cgroup_get_stat(struct cgroup *cgrp, struct kernel_cpustat *kstat);
+
+int ve_get_cpu_stat(struct ve_struct *ve, struct kernel_cpustat *kstat)
+{
+ struct cgroup_subsys_state *css;
+
+ css = ve_get_init_css(ve, cpu_cgroup_subsys_id);
+ cpu_cgroup_get_stat(css->cgroup, kstat);
+ css_put(css);
+ return 0;
+}
+EXPORT_SYMBOL(ve_get_cpu_stat);
+#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/ve/vecalls.c b/kernel/ve/vecalls.c
index 7ca37e05df8e..47425764e673 100644
--- a/kernel/ve/vecalls.c
+++ b/kernel/ve/vecalls.c
@@ -32,11 +32,8 @@
#include <linux/venet.h>
#include <linux/vzctl.h>
#include <uapi/linux/vzcalluser.h>
-#include <linux/fairsched.h>
#include <linux/device_cgroup.h>
-static struct cgroup *devices_root;
-
static s64 ve_get_uptime(struct ve_struct *ve)
{
struct timespec uptime;
@@ -46,7 +43,7 @@ static s64 ve_get_uptime(struct ve_struct *ve)
return timespec_to_ns(&uptime);
}
-static int ve_get_cpu_stat(envid_t veid, struct vz_cpu_stat __user *buf)
+static int fill_cpu_stat(envid_t veid, struct vz_cpu_stat __user *buf)
{
struct ve_struct *ve;
struct vz_cpu_stat *vstat;
@@ -67,11 +64,11 @@ static int ve_get_cpu_stat(envid_t veid, struct vz_cpu_stat __user *buf)
if (!vstat)
goto out_put_ve;
- retval = fairsched_get_cpu_stat(ve->ve_name, &kstat);
+ retval = ve_get_cpu_stat(ve, &kstat);
if (retval)
goto out_free;
- retval = fairsched_get_cpu_avenrun(ve->ve_name, avenrun);
+ retval = ve_get_cpu_avenrun(ve, avenrun);
if (retval)
goto out_free;
@@ -104,7 +101,6 @@ static int real_setdevperms(envid_t veid, unsigned type,
dev_t dev, unsigned mask)
{
struct ve_struct *ve;
- struct cgroup *cgroup;
int err;
if (!capable_setveid() || veid == 0)
@@ -115,18 +111,10 @@ static int real_setdevperms(envid_t veid, unsigned type,
down_read(&ve->op_sem);
- cgroup = cgroup_kernel_open(devices_root, 0, ve_name(ve));
- if (IS_ERR_OR_NULL(cgroup)) {
- err = PTR_ERR(cgroup) ? : -ESRCH;
- goto out;
- }
-
err = -EAGAIN;
if (ve->is_running)
- err = devcgroup_set_perms_ve(cgroup, type, dev, mask);
+ err = devcgroup_set_perms_ve(ve, type, dev, mask);
- cgroup_kernel_close(cgroup);
-out:
up_read(&ve->op_sem);
put_ve(ve);
return err;
@@ -161,40 +149,6 @@ static int ve_set_meminfo(envid_t veid, unsigned long val)
#endif
}
-static struct vfsmount *ve_cgroup_mnt, *devices_cgroup_mnt;
-
-static int __init init_vecalls_cgroups(void)
-{
- struct cgroup_sb_opts devices_opts = {
- .subsys_mask =
- (1ul << devices_subsys_id),
- };
-
- struct cgroup_sb_opts ve_opts = {
- .subsys_mask =
- (1ul << ve_subsys_id),
- };
-
- devices_cgroup_mnt = cgroup_kernel_mount(&devices_opts);
- if (IS_ERR(devices_cgroup_mnt))
- return PTR_ERR(devices_cgroup_mnt);
- devices_root = cgroup_get_root(devices_cgroup_mnt);
-
- ve_cgroup_mnt = cgroup_kernel_mount(&ve_opts);
- if (IS_ERR(ve_cgroup_mnt)) {
- kern_unmount(devices_cgroup_mnt);
- return PTR_ERR(ve_cgroup_mnt);
- }
-
- return 0;
-}
-
-static void fini_vecalls_cgroups(void)
-{
- kern_unmount(ve_cgroup_mnt);
- kern_unmount(devices_cgroup_mnt);
-}
-
/**********************************************************************
**********************************************************************
*
@@ -340,7 +294,7 @@ static int vestat_seq_show(struct seq_file *m, void *v)
if (ve == get_ve0())
return 0;
- ret = fairsched_get_cpu_stat(ve->ve_name, &kstat);
+ ret = ve_get_cpu_stat(ve, &kstat);
if (ret)
return ret;
@@ -430,7 +384,7 @@ static int devperms_seq_show(struct seq_file *m, void *v)
if (ve_is_super(ve))
seq_printf(m, "%10u b 016 *:*\n%10u c 006 *:*\n", 0, 0);
else
- devcgroup_seq_show_ve(devices_root, ve, m);
+ devcgroup_seq_show_ve(ve, m);
return 0;
}
@@ -697,7 +651,7 @@ int vzcalls_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = -EFAULT;
if (copy_from_user(&s, (void __user *)arg, sizeof(s)))
break;
- err = ve_get_cpu_stat(s.veid, s.cpustat);
+ err = fill_cpu_stat(s.veid, s.cpustat);
}
break;
case VZCTL_VE_MEMINFO: {
@@ -811,10 +765,6 @@ static int __init vecalls_init(void)
{
int err;
- err = init_vecalls_cgroups();
- if (err)
- goto out_cgroups;
-
err = init_vecalls_proc();
if (err < 0)
goto out_proc;
@@ -833,8 +783,6 @@ static int __init vecalls_init(void)
out_ioctls:
fini_vecalls_proc();
out_proc:
- fini_vecalls_cgroups();
-out_cgroups:
return err;
}
@@ -842,7 +790,6 @@ static void __exit vecalls_exit(void)
{
fini_vecalls_ioctls();
fini_vecalls_proc();
- fini_vecalls_cgroups();
}
MODULE_AUTHOR("SWsoft <devel at openvz.org>");
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 96dca988ad85..d98002006e12 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -1046,11 +1046,12 @@ static unsigned encode_ve_perms(unsigned mask)
return perm;
}
-int devcgroup_set_perms_ve(struct cgroup *cgroup,
+int devcgroup_set_perms_ve(struct ve_struct *ve,
unsigned type, dev_t dev, unsigned mask)
{
int err = -EINVAL;
struct dev_exception_item new;
+ struct cgroup_subsys_state *css;
if ((type & S_IFMT) == S_IFBLK)
new.type = DEV_BLOCK;
@@ -1072,23 +1073,23 @@ int devcgroup_set_perms_ve(struct cgroup *cgroup,
}
mutex_lock(&devcgroup_mutex);
- err = dev_exception_add(cgroup_to_devcgroup(cgroup), &new);
+ css = ve_get_init_css(ve, devices_subsys_id);
+ err = dev_exception_add(cgroup_to_devcgroup(css->cgroup), &new);
+ css_put(css);
mutex_unlock(&devcgroup_mutex);
return err;
}
EXPORT_SYMBOL(devcgroup_set_perms_ve);
-int devcgroup_seq_show_ve(struct cgroup *devices_root, struct ve_struct *ve, struct seq_file *m)
+int devcgroup_seq_show_ve(struct ve_struct *ve, struct seq_file *m)
{
struct dev_exception_item *wh;
struct dev_cgroup *devcgroup;
- struct cgroup *cgroup;
+ struct cgroup_subsys_state *css;
- cgroup = cgroup_kernel_open(devices_root, 0, ve_name(ve));
- if (IS_ERR(cgroup))
- return PTR_ERR(cgroup);
- devcgroup = cgroup_to_devcgroup(cgroup);
+ css = ve_get_init_css(ve, devices_subsys_id);
+ devcgroup = cgroup_to_devcgroup(css->cgroup);
rcu_read_lock();
list_for_each_entry_rcu(wh, &devcgroup->exceptions, list) {
@@ -1112,7 +1113,7 @@ int devcgroup_seq_show_ve(struct cgroup *devices_root, struct ve_struct *ve, str
}
rcu_read_unlock();
- cgroup_kernel_close(cgroup);
+ css_put(css);
return 0;
}
EXPORT_SYMBOL(devcgroup_seq_show_ve);