[CRIU] [PATCH 10/18] x86: cpu -- Add support for extended xsave area
Dmitry Safonov
0x7f454c46 at gmail.com
Wed Jul 25 22:48:39 MSK 2018
2018-07-19 13:47 GMT+01:00 Cyrill Gorcunov <gorcunov at gmail.com>:
> CPU extensions (such as AVX-512) require a bigger xsave
> area to keep the FPU register set, so we allocate a page
> per process to keep them all. On checkpoint we parse the
> runtime FPU features and dump them into an image, and do
> the reverse on restore.
>
> Signed-off-by: Cyrill Gorcunov <gorcunov at gmail.com>
Reviewed-by: Dmitry Safonov <0x7f454c46 at gmail.com>
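For anyone reading along: the per-feature offsets and sizes that
compel_fpu_feature_offset()/compel_fpu_feature_size() report are
discovered at runtime, presumably via CPUID leaf 0xD with the sub-leaf
set to the feature number, which is how the SDM documents it. A minimal
standalone sketch of that probing (an illustration only, not the actual
compel code):

	/* Sketch: probe offset/size of one xsave state component via
	 * CPUID leaf 0xD, sub-leaf = feature number (per the Intel SDM).
	 * Assumes the CPU supports XSAVE and the requested feature. */
	#include <cpuid.h>
	#include <stdint.h>
	#include <stdio.h>

	static void xfeature_probe(unsigned int feature_nr,
				   uint32_t *off, uint32_t *size)
	{
		uint32_t eax, ebx, ecx, edx;

		__cpuid_count(0xd, feature_nr, eax, ebx, ecx, edx);

		*size = eax;	/* size of the component's save area, bytes */
		*off  = ebx;	/* offset from the start of the xsave area  */
	}

	int main(void)
	{
		uint32_t off, size;

		xfeature_probe(2 /* AVX/YMM state */, &off, &size);
		printf("YMM state: offset %u, size %u\n", off, size);
		return 0;
	}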
> @@ -17,6 +17,21 @@ int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpre
>
> #define assign_reg(dst, src, e) do { dst->e = (__typeof__(dst->e))src.e; } while (0)
> #define assign_array(dst, src, e) memcpy(dst->e, &src.e, sizeof(src.e))
> +#define assign_xsave(feature, xsave, member, area) \
> + do { \
> + if (compel_fpu_has_feature(feature)) { \
> + uint32_t off = compel_fpu_feature_offset(feature); \
> + void *from = &area[off]; \
> + size_t size = pb_repeated_size(xsave, member); \
> + size_t xsize = (size_t)compel_fpu_feature_size(feature); \
> + if (xsize != size) { \
> + pr_err("%s reported %zu bytes (expecting %zu)\n", \
> + # feature, xsize, size); \
> + return -1; \
> + } \
> + memcpy(xsave->member, from, size); \
> + } \
> + } while (0)
Lovely helper; sadly, I also can't find an easy way to convert it into
an inline function.
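For the record, the closest shape I can think of is something like the
sketch below (the helper name and signature are just for illustration,
not from the patch):

	static inline int copy_xsave_feature(unsigned int feature, const char *name,
					     void *dst, size_t dst_size,
					     uint8_t *area)
	{
		uint32_t off;
		size_t xsize;

		if (!compel_fpu_has_feature(feature))
			return 0;

		off = compel_fpu_feature_offset(feature);
		xsize = (size_t)compel_fpu_feature_size(feature);
		if (xsize != dst_size) {
			pr_err("%s reported %zu bytes (expecting %zu)\n",
			       name, xsize, dst_size);
			return -1;
		}

		memcpy(dst, &area[off], dst_size);
		return 0;
	}

but then every call site has to spell out the feature name string and
the destination size by hand, e.g.

	if (copy_xsave_feature(XFEATURE_YMM, "XFEATURE_YMM", xsave->ymmh_space,
			       pb_repeated_size(xsave, ymmh_space), area))
		return -1;

which loses the free stringification and ends up noisier, so keeping the
macro seems fine to me.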
> @@ -113,6 +141,62 @@ static void alloc_tls(ThreadInfoX86 *ti, void **mempool)
> }
> }
>
> +static int alloc_xsave_extends(UserX86XsaveEntry *xsave)
> +{
> + if (compel_fpu_has_feature(XFEATURE_YMM)) {
> + xsave->n_ymmh_space = 64;
> + xsave->ymmh_space = xzalloc(pb_repeated_size(xsave, ymmh_space));
> + if (!xsave->ymmh_space)
> + goto err;
> + }
> +
> + if (compel_fpu_has_feature(XFEATURE_BNDREGS)) {
> + xsave->n_bndreg_state = 4 * 2;
> + xsave->bndreg_state = xzalloc(pb_repeated_size(xsave, bndreg_state));
> + if (!xsave->bndreg_state)
> + goto err;
> + }
> +
> + if (compel_fpu_has_feature(XFEATURE_BNDCSR)) {
> + xsave->n_bndcsr_state = 2;
> + xsave->bndcsr_state = xzalloc(pb_repeated_size(xsave, bndcsr_state));
> + if (!xsave->bndcsr_state)
> + goto err;
> + }
> +
> + if (compel_fpu_has_feature(XFEATURE_OPMASK)) {
> + xsave->n_opmask_reg = 8;
> + xsave->opmask_reg = xzalloc(pb_repeated_size(xsave, opmask_reg));
> + if (!xsave->opmask_reg)
> + goto err;
> + }
> +
> + if (compel_fpu_has_feature(XFEATURE_ZMM_Hi256)) {
> + xsave->n_zmm_upper = 16 * 4;
> + xsave->zmm_upper = xzalloc(pb_repeated_size(xsave, zmm_upper));
> + if (!xsave->zmm_upper)
> + goto err;
> + }
> +
> + if (compel_fpu_has_feature(XFEATURE_Hi16_ZMM)) {
> + xsave->n_hi16_zmm = 16 * 8;
> + xsave->hi16_zmm = xzalloc(pb_repeated_size(xsave, hi16_zmm));
> + if (!xsave->hi16_zmm)
> + goto err;
> + }
> +
> + if (compel_fpu_has_feature(XFEATURE_PKRU)) {
> + xsave->n_pkru = 2;
> + xsave->pkru = xzalloc(pb_repeated_size(xsave, pkru));
> + if (!xsave->pkru)
> + goto err;
> + }
> +
> + return 0;
> +err:
> + return -1;
Coverity will probably be unhappy about not freeing them on the error path.
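A small cleanup helper would keep that path tidy; a rough sketch (the
helper name is just for illustration, and it assumes xfree() tolerates
NULL and that the entry's pointers start out zeroed):

	static void free_xsave_extends(UserX86XsaveEntry *xsave)
	{
		xfree(xsave->ymmh_space);
		xfree(xsave->bndreg_state);
		xfree(xsave->bndcsr_state);
		xfree(xsave->opmask_reg);
		xfree(xsave->zmm_upper);
		xfree(xsave->hi16_zmm);
		xfree(xsave->pkru);
	}

and the error path would just call it before returning -1 (or the caller
could, if it owns the entry).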
Thanks,
Dmitry