[CRIU] [PATCH 06/19] pstree: Bind CoreEntry to pstree item
Pavel Emelyanov
xemul at parallels.com
Wed Feb 27 06:55:17 EST 2013
On 02/26/2013 05:08 PM, Cyrill Gorcunov wrote:
>
> In the future we will need to gather task registers quite early, which
> implies preliminary allocation of the CoreEntry entries. So we bind a
> CoreEntry array to the pstree item and allocate/free it with the pstree
> item's lifetime.
>
> (This is because, once parasite daemon mode is implemented, we will no
> longer be able to fetch registers this way, since the task will be
> running in the daemon code.)
We don't fetch registers for the master thread even now:
int get_task_regs(pid_t pid, CoreEntry *core, const struct parasite_ctl *ctl)
{
	struct xsave_struct xsave = { };
	user_regs_struct_t regs = {-1};
	struct iovec iov;
	int ret = -1;

	pr_info("Dumping GP/FPU registers ... ");

	if (ctl)
		regs = ctl->regs_orig;
	else {
		if (ptrace(PTRACE_GETREGS, pid, NULL, &regs)) {
			pr_err("Can't obtain GP registers for %d\n", pid);
			goto err;
		}
	}
Why not simply pull the threads' registers in the same manner?
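Something along these lines should be enough, I think (just a rough sketch;
the fetch_thread_regs_early() helper is made up here, it only relies on the
item->core array from this patch and on get_task_regs() falling back to
ptrace when ctl is NULL):

/*
 * Sketch: non-leader threads stay seized while only the leader executes
 * the parasite/daemon code, so their GP/FPU registers can still be pulled
 * with plain ptrace via the existing ctl == NULL path.
 */
static int fetch_thread_regs_early(struct pstree_item *item)
{
	int i;

	for (i = 0; i < item->nr_threads; i++) {
		pid_t tid = item->threads[i].real;

		/* the leader's regs are saved in ctl->regs_orig at seize time */
		if (tid == item->pid.real)
			continue;

		if (get_task_regs(tid, item->core[i], NULL))
			return -1;
	}

	return 0;
}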
Other than this, comments are inline.
> Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
> ---
> cr-dump.c | 94 ++++++++++++++++----------------------------------------
> include/pstree.h | 5 ++-
> pstree.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++++-
> 3 files changed, 115 insertions(+), 69 deletions(-)
>
> 0006-pstree-Bind-CoreEntry-to-pstree-item.patch
>
> diff --git a/cr-dump.c b/cr-dump.c
> index 4c3d064..98c439f 100644
> --- a/cr-dump.c
> +++ b/cr-dump.c
> @@ -540,39 +540,6 @@ static int dump_task_kobj_ids(struct pstree_item *item)
> return 0;
> }
>
> -static CoreEntry *core_entry_alloc(int alloc_thread_info,
> - int alloc_tc)
> -{
> - CoreEntry *core;
> - TaskCoreEntry *tc;
> -
> - core = xmalloc(sizeof(*core));
> - if (!core)
> - return NULL;
> - core_entry__init(core);
> -
> - core->mtype = CORE_ENTRY__MARCH;
> -
> - if (alloc_thread_info) {
> - if (arch_alloc_thread_info(core))
> - goto err;
> - }
> -
> - if (alloc_tc) {
> - tc = xzalloc(sizeof(*tc) + TASK_COMM_LEN);
> - if (!tc)
> - goto err;
> - task_core_entry__init(tc);
> - tc->comm = (void *)tc + sizeof(*tc);
> - core->tc = tc;
> - }
> -
> - return core;
> -err:
> - core_entry_free(core);
> - return NULL;
> -}
> -
> int get_task_ids(struct pstree_item *item)
> {
> int ret;
> @@ -607,18 +574,15 @@ static int dump_task_ids(struct pstree_item *item, const struct cr_fdset *cr_fds
> return pb_write_one(fdset_fd(cr_fdset, CR_FD_IDS), item->ids, PB_IDS);
> }
>
> -static int dump_task_core_all(pid_t pid, const struct proc_pid_stat *stat,
> +static int dump_task_core_all(pid_t pid, CoreEntry *core, const struct proc_pid_stat *stat,
> const struct parasite_dump_misc *misc, const struct parasite_ctl *ctl,
> const struct cr_fdset *cr_fdset,
> struct list_head *vma_area_list)
> {
> int fd_core = fdset_fd(cr_fdset, CR_FD_CORE);
> - CoreEntry *core;
> int ret = -1;
>
> - core = core_entry_alloc(1, 1);
> - if (!core)
> - return -1;
> + BUG_ON(!core);
unneeded
>
> pr_info("\n");
> pr_info("Dumping core (pid: %d)\n", pid);
> @@ -626,21 +590,21 @@ static int dump_task_core_all(pid_t pid, const struct proc_pid_stat *stat,
>
> ret = dump_task_mm(pid, stat, misc, cr_fdset);
> if (ret)
> - goto err_free;
> + goto err;
>
> ret = get_task_regs(pid, core, ctl);
> if (ret)
> - goto err_free;
> + goto err;
>
> mark_stack_vma(CORE_THREAD_ARCH_INFO(core)->gpregs->sp, vma_area_list);
>
> ret = get_task_futex_robust_list(pid, core->thread_core);
> if (ret)
> - goto err_free;
> + goto err;
>
> ret = get_task_personality(pid, &core->tc->personality);
> if (ret)
> - goto err_free;
> + goto err;
>
> strncpy((char *)core->tc->comm, stat->comm, TASK_COMM_LEN);
> core->tc->flags = stat->flags;
> @@ -652,16 +616,15 @@ static int dump_task_core_all(pid_t pid, const struct proc_pid_stat *stat,
>
> ret = dump_sched_info(pid, core->thread_core);
> if (ret)
> - goto err_free;
> + goto err;
>
> core_put_tls(core, misc->tls);
>
> ret = pb_write_one(fd_core, core, PB_CORE);
> if (ret < 0)
> - goto err_free;
> + goto err;
>
> -err_free:
> - core_entry_free(core);
> +err:
> pr_info("----------------------------------------\n");
>
> return ret;
> @@ -909,6 +872,9 @@ static int collect_task(struct pstree_item *item)
> goto err_close;
> }
>
> + if (pstree_alloc_cores(item))
> + goto err_close;
Why not allocate the cores at the same place where the respective pstree item is allocated?
> +
> close_pid_proc();
>
> pr_info("Collected %d in %d state\n", item->pid.real, item->state);
> @@ -1060,49 +1026,45 @@ static int collect_file_locks(const struct cr_options *opts)
>
> }
>
> -static int dump_task_thread(struct parasite_ctl *parasite_ctl, struct pid *tid)
> +static int dump_task_thread(struct parasite_ctl *parasite_ctl,
> + struct pid *tid, CoreEntry *core)
> {
> - CoreEntry *core;
> int ret = -1, fd_core;
> pid_t pid = tid->real;
>
> + BUG_ON(!core);
unneeded
> +
> pr_info("\n");
> pr_info("Dumping core for thread (pid: %d)\n", pid);
> pr_info("----------------------------------------\n");
>
> - core = core_entry_alloc(1, 0);
> - if (!core)
> - goto err;
> -
> ret = get_task_regs(pid, core, NULL);
> if (ret)
> - goto err_free;
> + goto err;
>
> ret = get_task_futex_robust_list(pid, core->thread_core);
> if (ret)
> - goto err_free;
> + goto err;
>
> ret = parasite_dump_thread_seized(parasite_ctl, tid, core);
> if (ret) {
> pr_err("Can't dump thread for pid %d\n", pid);
> - goto err_free;
> + goto err;
> }
>
> core->thread_core->has_blk_sigset = true;
>
> ret = dump_sched_info(pid, core->thread_core);
> if (ret)
> - goto err_free;
> + goto err;
>
> fd_core = open_image(CR_FD_CORE, O_DUMP, tid->virt);
> if (fd_core < 0)
> - goto err_free;
> + goto err;
>
> ret = pb_write_one(fd_core, core, PB_CORE);
>
> close(fd_core);
> -err_free:
> - core_entry_free(core);
> err:
> pr_info("----------------------------------------\n");
> return ret;
> @@ -1114,21 +1076,18 @@ static int dump_one_zombie(const struct pstree_item *item,
> CoreEntry *core;
> int ret = -1, fd_core;
>
> - core = core_entry_alloc(0, 1);
> - if (core == NULL)
> - goto err;
> + core = item->core[0];
> + BUG_ON(!core || !core->tc);
>
> core->tc->task_state = TASK_DEAD;
> core->tc->exit_code = pps->exit_code;
>
> fd_core = open_image(CR_FD_CORE, O_DUMP, item->pid.virt);
> if (fd_core < 0)
> - goto err_free;
> + goto err;
>
> ret = pb_write_one(fd_core, core, PB_CORE);
> close(fd_core);
> -err_free:
> - core_entry_free(core);
> err:
> return ret;
> }
> @@ -1147,7 +1106,8 @@ static int dump_task_threads(struct parasite_ctl *parasite_ctl,
> continue;
> }
>
> - if (dump_task_thread(parasite_ctl, &item->threads[i]))
> + if (dump_task_thread(parasite_ctl, &item->threads[i],
> + item->core[i]))
> return -1;
> }
>
> @@ -1354,7 +1314,7 @@ static int dump_one_task(struct pstree_item *item)
> goto err_cure;
> }
>
> - ret = dump_task_core_all(pid, &pps_buf, &misc,
> + ret = dump_task_core_all(pid, pstree_find_core(item, pid), &pps_buf, &misc,
Linear search is bad here. The pstree_item can point directly to the desired core.
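For instance (purely illustrative, not part of this patch), pstree_alloc_cores()
could keep the leader in slot 0, and the call above would then simply pass
item->core[0]:

/*
 * Sketch: swap the leader into threads[0] before pstree_alloc_cores()
 * runs, so core[0] always holds the leader's entry and dump_one_task()
 * can do
 *
 *	dump_task_core_all(pid, item->core[0], &pps_buf, &misc, ...);
 *
 * without any lookup by pid.
 */
static void pstree_put_leader_first(struct pstree_item *item)
{
	unsigned int i;

	for (i = 1; i < item->nr_threads; i++) {
		if (item->threads[i].real == item->pid.real) {
			struct pid tmp = item->threads[0];

			item->threads[0] = item->threads[i];
			item->threads[i] = tmp;
			break;
		}
	}
}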
> parasite_ctl, cr_fdset, &vma_area_list);
> if (ret) {
> pr_err("Dump core (pid: %d) failed with %d\n", pid, ret);
> diff --git a/include/pstree.h b/include/pstree.h
> index ed6495c..52b742b 100644
> --- a/include/pstree.h
> +++ b/include/pstree.h
> @@ -41,6 +41,7 @@ struct pstree_item {
>
> int nr_threads; /* number of threads */
> struct pid *threads; /* array of threads */
> + CoreEntry **core;
> TaskKobjIdsEntry *ids;
>
> struct rst_info rst[0];
> @@ -76,6 +77,8 @@ extern struct task_entries *task_entries;
> int get_task_ids(struct pstree_item *);
> extern struct _TaskKobjIdsEntry *root_ids;
>
> -extern void core_entry_free(CoreEntry *core);
> +CoreEntry *pstree_find_core(struct pstree_item *item, pid_t pid);
> +extern int pstree_alloc_cores(struct pstree_item *item);
> +extern void pstree_free_cores(struct pstree_item *item);
>
> #endif /* __CR_PSTREE_H__ */
> diff --git a/pstree.c b/pstree.c
> index 5bfcbdc..c2ce429 100644
> --- a/pstree.c
> +++ b/pstree.c
> @@ -16,7 +16,7 @@
>
> struct pstree_item *root_item;
>
> -void core_entry_free(CoreEntry *core)
> +static void core_entry_free(CoreEntry *core)
> {
> if (core) {
> arch_free_thread_info(core);
> @@ -26,6 +26,88 @@ void core_entry_free(CoreEntry *core)
> }
> }
>
> +static CoreEntry *core_entry_alloc(int alloc_thread_info, int alloc_tc)
> +{
> + CoreEntry *core;
> + TaskCoreEntry *tc;
> +
> + core = xmalloc(sizeof(*core));
> + if (!core)
> + return NULL;
> + core_entry__init(core);
> +
> + core->mtype = CORE_ENTRY__MARCH;
> +
> + if (alloc_thread_info) {
> + if (arch_alloc_thread_info(core))
> + goto err;
> + }
> +
> + if (alloc_tc) {
> + tc = xzalloc(sizeof(*tc) + TASK_COMM_LEN);
> + if (!tc)
> + goto err;
> + task_core_entry__init(tc);
> + tc->comm = (void *)tc + sizeof(*tc);
> + core->tc = tc;
> + }
> +
> + return core;
> +err:
> + core_entry_free(core);
> + return NULL;
> +}
> +
> +CoreEntry *pstree_find_core(struct pstree_item *item, pid_t pid)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < item->nr_threads; i++) {
> + if (item->threads[i].real == pid)
> + return item->core[i];
> + }
> +
> + return NULL;
> +}
> +
> +int pstree_alloc_cores(struct pstree_item *item)
> +{
> + unsigned int i;
> + bool is_alive;
> +
> + is_alive = (item->state != TASK_DEAD);
> + BUG_ON(!is_alive && item->nr_threads > 1);
If we allocate the cores together with the pstree items, this BUG_ON becomes excessive.
> +
> + item->core = xzalloc(sizeof(*item->core) * item->nr_threads);
> + if (!item->core)
> + return -1;
> +
> + for (i = 0; i < item->nr_threads; i++) {
> + int is_leader = (item->threads[i].real == item->pid.real);
> +
> + item->core[i] = core_entry_alloc(is_alive, is_leader);
> + if (!item->core[i])
> + goto err;
> + }
> +
> + return 0;
> +err:
> + pstree_free_cores(item);
> + return -1;
> +}
> +
> +void pstree_free_cores(struct pstree_item *item)
> +{
> + unsigned int i;
> +
> + if (item->core) {
> + for (i = 1; i < item->nr_threads; i++)
> + core_entry_free(item->core[i]);
> + xfree(item->core);
> + item->core = NULL;
> + }
> +}
> +
> void free_pstree(struct pstree_item *root_item)
> {
> struct pstree_item *item = root_item, *parent;
> @@ -38,6 +120,7 @@ void free_pstree(struct pstree_item *root_item)
>
> parent = item->parent;
> list_del(&item->sibling);
> + pstree_free_cores(item);
> xfree(item->threads);
> xfree(item);
> item = parent;