[CRIU] [PATCH 1/4] ppc64: Fix assembly code
Laurent Dufour
ldufour at linux.vnet.ibm.com
Wed May 13 09:45:08 PDT 2015
Add various register definitions, copied from the kernel's arch/powerpc/include/asm/ppc_asm.h, so the assembly code can use named registers instead of bare register numbers. This cleans up the assembly code.
Signed-off-by: Laurent Dufour <ldufour at linux.vnet.ibm.com>
---
arch/ppc64/include/asm/linkage.h | 281 ++++++++++++++++++++++++++++++++++++++
arch/ppc64/parasite-head.S | 22 +--
arch/ppc64/restorer-trampoline.S | 16 +--
arch/ppc64/syscall-common-ppc64.S | 16 +--
arch/ppc64/vdso-trampoline.S | 8 +-
5 files changed, 312 insertions(+), 31 deletions(-)
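As a minimal sketch of what the renaming buys (the two instructions below mirror the parasite-head.S hunk): once linkage.h defines r0-r31 as %r0-%r31, an operand written rN is unambiguously a register, while a bare number can be read as either a register or an immediate depending on its position.

	// before: bare register numbers, registers and immediates look alike
	mflr	2		// "2" here is the general purpose register r2
	lwz	3,0(3)		// both "3"s are registers, "0" is an offset

	// after: named registers via the new linkage.h defines (r2 -> %r2, ...)
	mflr	r2		// unambiguously a register
	lwz	r3,0(r3)	// register vs. immediate is obvious at a glance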
diff --git a/arch/ppc64/include/asm/linkage.h b/arch/ppc64/include/asm/linkage.h
index 03e01dc96543..506edc7114d4 100644
--- a/arch/ppc64/include/asm/linkage.h
+++ b/arch/ppc64/include/asm/linkage.h
@@ -1,3 +1,10 @@
+/*
+ * Various PowerPC assembly definitions
+ *
+ * Copied from the kernel file arch/powerpc/include/asm/ppc_asm.h
+ *
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ */
#ifndef __CR_LINKAGE_H__
#define __CR_LINKAGE_H__
@@ -15,6 +22,280 @@
#define END(sym) \
.size sym, . - sym
+
+#define STACKFRAMESIZE 256
+#define __STK_REG(i) (112 + ((i)-14)*8)
+#define STK_REG(i) __STK_REG(__REG_##i)
+
+/* The boring bits... */
+
+/* Condition Register Bit Fields */
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+
+/*
+ * General Purpose Registers (GPRs)
+ *
+ * The lower case r0-r31 should be used in preference to the upper
+ * case R0-R31 as they provide more error checking in the assembler.
+ * Use R0-R31 only when really necessary.
+ */
+
+#define r0 %r0
+#define r1 %r1
+#define r2 %r2
+#define r3 %r3
+#define r4 %r4
+#define r5 %r5
+#define r6 %r6
+#define r7 %r7
+#define r8 %r8
+#define r9 %r9
+#define r10 %r10
+#define r11 %r11
+#define r12 %r12
+#define r13 %r13
+#define r14 %r14
+#define r15 %r15
+#define r16 %r16
+#define r17 %r17
+#define r18 %r18
+#define r19 %r19
+#define r20 %r20
+#define r21 %r21
+#define r22 %r22
+#define r23 %r23
+#define r24 %r24
+#define r25 %r25
+#define r26 %r26
+#define r27 %r27
+#define r28 %r28
+#define r29 %r29
+#define r30 %r30
+#define r31 %r31
+
+
+/* Floating Point Registers (FPRs) */
+
+#define fr0 0
+#define fr1 1
+#define fr2 2
+#define fr3 3
+#define fr4 4
+#define fr5 5
+#define fr6 6
+#define fr7 7
+#define fr8 8
+#define fr9 9
+#define fr10 10
+#define fr11 11
+#define fr12 12
+#define fr13 13
+#define fr14 14
+#define fr15 15
+#define fr16 16
+#define fr17 17
+#define fr18 18
+#define fr19 19
+#define fr20 20
+#define fr21 21
+#define fr22 22
+#define fr23 23
+#define fr24 24
+#define fr25 25
+#define fr26 26
+#define fr27 27
+#define fr28 28
+#define fr29 29
+#define fr30 30
+#define fr31 31
+
+/* AltiVec Registers (VPRs) */
+
+#define vr0 0
+#define vr1 1
+#define vr2 2
+#define vr3 3
+#define vr4 4
+#define vr5 5
+#define vr6 6
+#define vr7 7
+#define vr8 8
+#define vr9 9
+#define vr10 10
+#define vr11 11
+#define vr12 12
+#define vr13 13
+#define vr14 14
+#define vr15 15
+#define vr16 16
+#define vr17 17
+#define vr18 18
+#define vr19 19
+#define vr20 20
+#define vr21 21
+#define vr22 22
+#define vr23 23
+#define vr24 24
+#define vr25 25
+#define vr26 26
+#define vr27 27
+#define vr28 28
+#define vr29 29
+#define vr30 30
+#define vr31 31
+
+/* VSX Registers (VSRs) */
+
+#define vsr0 0
+#define vsr1 1
+#define vsr2 2
+#define vsr3 3
+#define vsr4 4
+#define vsr5 5
+#define vsr6 6
+#define vsr7 7
+#define vsr8 8
+#define vsr9 9
+#define vsr10 10
+#define vsr11 11
+#define vsr12 12
+#define vsr13 13
+#define vsr14 14
+#define vsr15 15
+#define vsr16 16
+#define vsr17 17
+#define vsr18 18
+#define vsr19 19
+#define vsr20 20
+#define vsr21 21
+#define vsr22 22
+#define vsr23 23
+#define vsr24 24
+#define vsr25 25
+#define vsr26 26
+#define vsr27 27
+#define vsr28 28
+#define vsr29 29
+#define vsr30 30
+#define vsr31 31
+#define vsr32 32
+#define vsr33 33
+#define vsr34 34
+#define vsr35 35
+#define vsr36 36
+#define vsr37 37
+#define vsr38 38
+#define vsr39 39
+#define vsr40 40
+#define vsr41 41
+#define vsr42 42
+#define vsr43 43
+#define vsr44 44
+#define vsr45 45
+#define vsr46 46
+#define vsr47 47
+#define vsr48 48
+#define vsr49 49
+#define vsr50 50
+#define vsr51 51
+#define vsr52 52
+#define vsr53 53
+#define vsr54 54
+#define vsr55 55
+#define vsr56 56
+#define vsr57 57
+#define vsr58 58
+#define vsr59 59
+#define vsr60 60
+#define vsr61 61
+#define vsr62 62
+#define vsr63 63
+
+/* SPE Registers (EVPRs) */
+
+#define evr0 0
+#define evr1 1
+#define evr2 2
+#define evr3 3
+#define evr4 4
+#define evr5 5
+#define evr6 6
+#define evr7 7
+#define evr8 8
+#define evr9 9
+#define evr10 10
+#define evr11 11
+#define evr12 12
+#define evr13 13
+#define evr14 14
+#define evr15 15
+#define evr16 16
+#define evr17 17
+#define evr18 18
+#define evr19 19
+#define evr20 20
+#define evr21 21
+#define evr22 22
+#define evr23 23
+#define evr24 24
+#define evr25 25
+#define evr26 26
+#define evr27 27
+#define evr28 28
+#define evr29 29
+#define evr30 30
+#define evr31 31
+
+/* some stab codes */
+#define N_FUN 36
+#define N_RSYM 64
+#define N_SLINE 68
+#define N_SO 100
+
+#define __REG_R0 0
+#define __REG_R1 1
+#define __REG_R2 2
+#define __REG_R3 3
+#define __REG_R4 4
+#define __REG_R5 5
+#define __REG_R6 6
+#define __REG_R7 7
+#define __REG_R8 8
+#define __REG_R9 9
+#define __REG_R10 10
+#define __REG_R11 11
+#define __REG_R12 12
+#define __REG_R13 13
+#define __REG_R14 14
+#define __REG_R15 15
+#define __REG_R16 16
+#define __REG_R17 17
+#define __REG_R18 18
+#define __REG_R19 19
+#define __REG_R20 20
+#define __REG_R21 21
+#define __REG_R22 22
+#define __REG_R23 23
+#define __REG_R24 24
+#define __REG_R25 25
+#define __REG_R26 26
+#define __REG_R27 27
+#define __REG_R28 28
+#define __REG_R29 29
+#define __REG_R30 30
+#define __REG_R31 31
+
+
+
#endif /* __ASSEMBLY__ */
#endif /* __CR_LINKAGE_H__ */
diff --git a/arch/ppc64/parasite-head.S b/arch/ppc64/parasite-head.S
index c7e5bdc66c52..e7163f0a5081 100644
--- a/arch/ppc64/parasite-head.S
+++ b/arch/ppc64/parasite-head.S
@@ -11,23 +11,23 @@ ENTRY(__export_parasite_head_start)
// args = r4 = @parasite_args_ptr + @pc
bl 0f
-0: mflr 2
+0: mflr r2
#define LOAD_REG_ADDR(reg, name) \
- addis reg,2,(name - 0b)@ha; \
- addi reg,2,(name - 0b)@l;
+ addis reg,r2,(name - 0b)@ha; \
+ addi reg,r2,(name - 0b)@l;
- LOAD_REG_ADDR(3,__export_parasite_cmd)
- lwz 3,0(3)
+ LOAD_REG_ADDR(r3,__export_parasite_cmd)
+ lwz r3,0(r3)
- LOAD_REG_ADDR(4,parasite_args_ptr)
- lwz 4,0(4)
- add 4,4,2 // Fix up ptr
+ LOAD_REG_ADDR(r4,parasite_args_ptr)
+ lwz r4,0(r4)
+ add r4,r4,r2 // Fix up ptr
// Set the TOC pointer
- LOAD_REG_ADDR(5,parasite_toc_ptr)
- ld 5,0(5)
- add 2,2,5 // Fix up ptr
+ LOAD_REG_ADDR(r5,parasite_toc_ptr)
+ ld r5,0(r5)
+ add r2,r2,r5 // Fix up ptr
bl parasite_service
twi 31,0,0 // Should generate SIGTRAP
diff --git a/arch/ppc64/restorer-trampoline.S b/arch/ppc64/restorer-trampoline.S
index 5e15615ae1aa..4c870b907599 100644
--- a/arch/ppc64/restorer-trampoline.S
+++ b/arch/ppc64/restorer-trampoline.S
@@ -8,14 +8,14 @@
// This trampoline is there to restore r2 before jumping back to the
// C code.
#define LOAD_REG_ADDR(reg, name) \
- addis reg,7,(name - 0b)@ha; \
- addi reg,7,(name - 0b)@l;
+ addis reg,r7,(name - 0b)@ha; \
+ addi reg,r7,(name - 0b)@l;
ENTRY(__export_unmap_trampoline)
bl 0f
-0: mflr 7
- LOAD_REG_ADDR(8,restorer_r2)
- ld 2,0(8)
+0: mflr r7
+ LOAD_REG_ADDR(r8,restorer_r2)
+ ld r2,0(r8)
b __export_unmap
//END(__export_restore_unmap_trampoline)
@@ -24,9 +24,9 @@ ENTRY(__export_unmap_trampoline)
// Assuming up to 4 parameters here since we are using r7 and r8.
ENTRY(__export_restore_task_trampoline)
bl 0f
-0: mflr 7
- LOAD_REG_ADDR(8,restorer_r2)
- std 2,0(8)
+0: mflr r7
+ LOAD_REG_ADDR(r8,restorer_r2)
+ std r2,0(r8)
b __export_restore_task
restorer_r2:
diff --git a/arch/ppc64/syscall-common-ppc64.S b/arch/ppc64/syscall-common-ppc64.S
index 78bc1b7e6e85..d8521e39ca81 100644
--- a/arch/ppc64/syscall-common-ppc64.S
+++ b/arch/ppc64/syscall-common-ppc64.S
@@ -3,7 +3,7 @@
#define SYSCALL(name, opcode) \
ENTRY(name); \
- li 0, opcode; \
+ li r0, opcode; \
b __syscall_common; \
END(name)
@@ -13,20 +13,20 @@
ENTRY(__syscall_common)
sc
bnslr+ /* if no error return to LR */
- neg 3,3 /* r3 = -r3 to return -errno value */
+ neg r3,r3 /* r3 = -r3 to return -errno value */
blr
END(__syscall_common)
ENTRY(__cr_restore_rt)
- li 0, __NR_rt_sigreturn
+ li r0, __NR_rt_sigreturn
b __syscall_common
END(__cr_restore_rt)
- # On Power, shmat is done through the ipc system call.
+# On Power, shmat is done through the ipc system call.
ENTRY(sys_shmat)
- mr 7, 4 # shmaddr -> ptr
- mr 4, 3 # shmid -> first
- li 3, 21 # call = SHMAT
- li 0, __NR_ipc
+ mr r7, r4 # shmaddr -> ptr
+ mr r4, r3 # shmid -> first
+ li r3, 21 # call = SHMAT
+ li r0, __NR_ipc
b __syscall_common
END(sys_shmat)
diff --git a/arch/ppc64/vdso-trampoline.S b/arch/ppc64/vdso-trampoline.S
index 54a22453701a..e910e7ab99a4 100644
--- a/arch/ppc64/vdso-trampoline.S
+++ b/arch/ppc64/vdso-trampoline.S
@@ -3,9 +3,9 @@
.section .text
GLOBAL(vdso_trampoline)
- mflr 12 /* r12 vdso_ptr's address */
- mtlr 0 /* restore lr */
- ld 12,0(12) /* read value store in vdso_ptr */
- mtctr 12 /* branch to it */
+ mflr r12 /* r12 vdso_ptr's address */
+ mtlr r0 /* restore lr */
+	ld r12,0(r12) /* read value stored in vdso_ptr */
+ mtctr r12 /* branch to it */
bctr
GLOBAL(vdso_trampoline_end)
--
1.9.1