[Xen-devel] [PATCH, RFC 3/17] 32-on-64 hypercall and exception handling infrastructure

To: <xen-devel@xxxxxxxxxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH, RFC 3/17] 32-on-64 hypercall and exception handling infrastructure
From: "Jan Beulich" <jbeulich@xxxxxxxxxx>
Date: 04 Oct 2006 16:28:09 +0100
Index: 2006-10-04/xen/arch/x86/traps.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/traps.c 2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/arch/x86/traps.c 2006-10-04 15:06:22.000000000 +0200
@@ -137,6 +137,12 @@ static void show_guest_stack(struct cpu_
 if ( hvm_guest(current) )
 return;
 
+ if ( IS_COMPAT(container_of(regs, struct cpu_info, guest_cpu_user_regs)->current_vcpu->domain) )
+ {
+ compat_show_guest_stack(regs, debug_stack_lines);
+ return;
+ }
+
 if ( vm86_mode(regs) )
 {
 stack = (unsigned long *)((regs->ss << 4) + (regs->esp & 0xffff));
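
A note on the IS_COMPAT() check added above: the saved guest register block is embedded in the per-CPU struct cpu_info that sits at the top of each hypervisor stack, so container_of() recovers the cpu_info block from the register pointer and, from there, the current vcpu and its domain. The CPUINFO_current_vcpu offset added to asm-offsets.c below gives the assembly code the same hook. A rough C sketch of the relationship (illustrative only; the helper name is invented and the cpu_info layout is abbreviated):

/* Illustrative sketch only -- not part of the patch. */
struct cpu_info {
    struct cpu_user_regs guest_cpu_user_regs;  /* saved guest state            */
    unsigned int         processor_id;
    struct vcpu         *current_vcpu;         /* exported as CPUINFO_current_vcpu */
};

static inline int guest_regs_are_compat(struct cpu_user_regs *regs)
{
    struct cpu_info *info =
        container_of(regs, struct cpu_info, guest_cpu_user_regs);

    /* A 32-on-64 domain has _DOMF_compat set, which IS_COMPAT() tests. */
    return IS_COMPAT(info->current_vcpu->domain);
}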
Index: 2006-10-04/xen/arch/x86/x86_64/Makefile
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/Makefile 2006-09-21 10:56:11.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/Makefile 2006-10-04 15:06:22.000000000 +0200
@@ -2,3 +2,9 @@ obj-y += entry.o
 obj-y += io.o
 obj-y += mm.o
 obj-y += traps.o
+
+ifeq ($(CONFIG_COMPAT),y)
+# extra dependencies
+entry.o: compat/entry.S
+traps.o: compat/traps.c
+endif
Index: 2006-10-04/xen/arch/x86/x86_64/asm-offsets.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/asm-offsets.c 2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/asm-offsets.c 2006-10-04 15:06:22.000000000 +0200
@@ -53,6 +53,7 @@ void __dummy__(void)
 BLANK();
 
 OFFSET(VCPU_processor, struct vcpu, processor);
+ OFFSET(VCPU_domain, struct vcpu, domain);
 OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
 OFFSET(VCPU_trap_bounce, struct vcpu, arch.trap_bounce);
 OFFSET(VCPU_thread_flags, struct vcpu, arch.flags);
@@ -86,6 +87,10 @@ void __dummy__(void)
 OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
 BLANK();
 
+ OFFSET(DOMAIN_domain_flags, struct domain, domain_flags);
+ DEFINE(_DOMF_compat, _DOMF_compat);
+ BLANK();
+
 OFFSET(VMCB_rax, struct vmcb_struct, rax);
 OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
 BLANK();
@@ -94,6 +99,7 @@ void __dummy__(void)
 OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
 BLANK();
 
+ OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
 DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
 BLANK();
 
Index: 2006-10-04/xen/arch/x86/x86_64/compat/entry.S
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ 2006-10-04/xen/arch/x86/x86_64/compat/entry.S 2006-10-04 15:09:26.000000000 +0200
@@ -0,0 +1,395 @@
+/*
+ * Compatibility hypercall routines.
+ *
+ * Copyright (c) 2005, K A Fraser
+ */
+
+#include <asm/desc.h>
+
+.text
+
+ENTRY(compat_hypercall)
+ pushq 0ドル
+ movl $TRAP_syscall,4(%rsp)
+ SAVE_ALL
+ GET_CURRENT(%rbx)
+
+ cmpl $NR_hypercalls,%eax
+ jae compat_bad_hypercall
+#ifndef NDEBUG
+ /* Deliberately corrupt parameter regs not used by this hypercall. */
+ pushq UREGS_rbx(%rsp); pushq %rcx; pushq %rdx; pushq %rsi; pushq %rdi; pushq UREGS_rbp+5*8(%rsp)
+ leaq compat_hypercall_args_table(%rip),%r10
+ movq 6,ドル%rcx
+ subb (%r10,%rax,1),%cl
+ movq %rsp,%rdi
+ movl 0ドルxDEADBEEF,%eax
+ rep stosq
+ popq %r9 ; popq %r8 ; popq %rcx; popq %rdx; popq %rsi; popq %rdi
+ movl UREGS_rax(%rsp),%eax
+ pushq %rax
+ pushq UREGS_rip+8(%rsp)
+#else
+ movl %eax,%eax
+ movl %ebp,%r9d
+ movl %edi,%r8d
+ xchgl %ecx,%esi
+ movl UREGS_rbx(%rsp),%edi
+#endif
+ leaq compat_hypercall_table(%rip),%r10
+ PERFC_INCR(PERFC_hypercalls, %rax)
+ callq *(%r10,%rax,8)
+#ifndef NDEBUG
+ /* Deliberately corrupt parameter regs used by this hypercall. */
+ popq %r10 # Shadow RIP
+ cmpq %r10,UREGS_rip+8(%rsp)
+ popq %rcx # Shadow hypercall index
+ jne compat_skip_clobber /* If RIP has changed then don't clobber. */
+ leaq compat_hypercall_args_table(%rip),%r10
+ movb (%r10,%rcx,1),%cl
+ movl 0ドルxDEADBEEF,%r10d
+ testb %cl,%cl; jz compat_skip_clobber; movl %r10d,UREGS_rbx(%rsp)
+ cmpb 2,ドル %cl; jb compat_skip_clobber; movl %r10d,UREGS_rcx(%rsp)
+ cmpb 3,ドル %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdx(%rsp)
+ cmpb 4,ドル %cl; jb compat_skip_clobber; movl %r10d,UREGS_rsi(%rsp)
+ cmpb 5,ドル %cl; jb compat_skip_clobber; movl %r10d,UREGS_rdi(%rsp)
+ cmpb 6,ドル %cl; jb compat_skip_clobber; movl %r10d,UREGS_rbp(%rsp)
+compat_skip_clobber:
+#endif
+ movl %eax,UREGS_rax(%rsp) # save the return value
+
+/* %rbx: struct vcpu */
+compat_test_all_events:
+ cli # tests must not race interrupts
+/*compat_test_softirqs:*/
+ movl VCPU_processor(%rbx),%eax
+ shlq $IRQSTAT_shift,%rax
+ leaq irq_stat(%rip),%rcx
+ testl $~0,(%rcx,%rax,1)
+ jnz compat_process_softirqs
+ btrq $_VCPUF_nmi_pending,VCPU_flags(%rbx)
+ jc compat_process_nmi
+compat_test_guest_events:
+ movq VCPU_vcpu_info(%rbx),%rax
+ testb 0ドルxFF,VCPUINFO_upcall_mask(%rax)
+ jnz compat_restore_all_guest
+ testb 0ドルxFF,VCPUINFO_upcall_pending(%rax)
+ jz compat_restore_all_guest
+/*compat_process_guest_events:*/
+ sti
+ leaq VCPU_trap_bounce(%rbx),%rdx
+ movl VCPU_event_addr(%rbx),%eax
+ movl %eax,TRAPBOUNCE_eip(%rdx)
+ movl VCPU_event_sel(%rbx),%eax
+ movl %eax,TRAPBOUNCE_cs(%rdx)
+ movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+ call compat_create_bounce_frame
+ jmp compat_test_all_events
+
+ ALIGN
+/* %rbx: struct vcpu */
+compat_process_softirqs:
+ sti
+ call do_softirq
+ jmp compat_test_all_events
+
+ ALIGN
+/* %rbx: struct vcpu */
+compat_process_nmi:
+ movl VCPU_nmi_addr(%rbx),%eax
+ testl %eax,%eax
+ jz compat_test_all_events
+ btsq $_VCPUF_nmi_masked,VCPU_flags(%rbx)
+ jc 1f
+ sti
+ leaq VCPU_trap_bounce(%rbx),%rdx
+ movl %eax,TRAPBOUNCE_eip(%rdx)
+ movl $FLAT_COMPAT_KERNEL_CS,TRAPBOUNCE_cs(%rdx)
+ movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+ call compat_create_bounce_frame
+ jmp compat_test_all_events
+1:
+ btsq $_VCPUF_nmi_pending,VCPU_flags(%rbx)
+ jmp compat_test_guest_events
+
+compat_bad_hypercall:
+ movl $-ENOSYS,UREGS_rax(%rsp)
+ jmp compat_test_all_events
+
+/* %rbx: struct vcpu, interrupts disabled */
+compat_restore_all_guest:
+ RESTORE_ALL
+ addq 8,ドル%rsp
+CFLT0: iretq
+
+.section .fixup,"ax"
+CFIX0: popq -15*8-8(%rsp) # error_code/entry_vector
+ SAVE_ALL # 15*8 bytes pushed
+ movq -8(%rsp),%rsi # error_code/entry_vector
+ sti # after stack abuse (-1024(%rsp))
+ pushq $__HYPERVISOR_DS # SS
+ leaq 8(%rsp),%rax
+ pushq %rax # RSP
+ pushfq # RFLAGS
+ pushq $__HYPERVISOR_CS # CS
+ leaq CDBLFLT0(%rip),%rax
+ pushq %rax # RIP
+ pushq %rsi # error_code/entry_vector
+ jmp handle_exception
+CDBLFLT0:GET_CURRENT(%rbx)
+ jmp compat_test_all_events
+compat_failsafe_callback:
+ GET_CURRENT(%rbx)
+ leaq VCPU_trap_bounce(%rbx),%rdx
+ movl VCPU_failsafe_addr(%rbx),%eax
+ movl %eax,TRAPBOUNCE_eip(%rdx)
+ movl VCPU_failsafe_sel(%rbx),%eax
+ movl %eax,TRAPBOUNCE_cs(%rdx)
+ movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%rdx)
+ btq $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%rbx)
+ jnc 1f
+ orw $TBF_INTERRUPT,TRAPBOUNCE_flags(%rdx)
+1:
+ call compat_create_bounce_frame
+ jmp compat_test_all_events
+.previous
+.section __pre_ex_table,"a"
+ .quad CFLT0,CFIX0
+.previous
+.section __ex_table,"a"
+ .quad CDBLFLT0,compat_failsafe_callback
+.previous
+
+/* %rdx: trap_bounce, %rbx: struct vcpu */
+compat_post_handle_exception:
+ testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
+ jz compat_test_all_events
+ call compat_create_bounce_frame
+ jmp compat_test_all_events
+
+/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
+/* {[ERRCODE,] EIP, CS, EFLAGS, [ESP, SS]} */
+/* %rdx: trap_bounce, %rbx: struct vcpu */
+/* On return only %rbx is guaranteed non-clobbered. */
+compat_create_bounce_frame:
+ mov %fs,%edi
+ testb 2,ドルUREGS_cs+8(%rsp)
+ jz 1f
+ /* Push new frame at registered guest-OS stack base. */
+ movl VCPU_kernel_sp(%rbx),%esi
+CFLT1: mov VCPU_kernel_ss(%rbx),%fs
+ subl 2ドル*4,%esi
+ movl UREGS_rsp+8(%rsp),%eax
+CFLT2: movl %eax,%fs:(%rsi)
+ movl UREGS_ss+8(%rsp),%eax
+CFLT3: movl %eax,%fs:4(%rsi)
+ jmp 2f
+1: /* In kernel context already: push new frame at existing %rsp. */
+ movl UREGS_rsp+8(%rsp),%esi
+CFLT4: mov UREGS_ss+8(%rsp),%fs
+2:
+ movb TRAPBOUNCE_flags(%rdx),%cl
+ subl 3ドル*4,%esi
+ movq VCPU_vcpu_info(%rbx),%rax
+ pushq VCPUINFO_upcall_mask(%rax)
+ testb $TBF_INTERRUPT,%cl
+ setnz %ch # TBF_INTERRUPT -> set upcall mask
+ orb %ch,VCPUINFO_upcall_mask(%rax)
+ popq %rax
+ shll 16,ドル%eax # Bits 16-23: saved_upcall_mask
+ movw UREGS_cs+8(%rsp),%ax # Bits 0-15: CS
+CFLT5: movl %eax,%fs:4(%rsi) # CS / saved_upcall_mask
+ shrl 16,ドル%eax
+ testb %al,%al # Bits 0-7: saved_upcall_mask
+ setz %ch # %ch == !saved_upcall_mask
+ movl UREGS_eflags+8(%rsp),%eax
+ andl $~X86_EFLAGS_IF,%eax
+ shlb 1,ドル%ch # Bit 9 (EFLAGS.IF)
+ orb %ch,%ah # Fold EFLAGS.IF into %eax
+CFLT6: movl %eax,%fs:2*4(%rsi) # EFLAGS
+ movl UREGS_rip+8(%rsp),%eax
+CFLT7: movl %eax,%fs:(%rsi) # EIP
+ testb $TBF_EXCEPTION_ERRCODE,%cl
+ jz 1f
+ subl 4,ドル%esi
+ movl TRAPBOUNCE_error_code(%rdx),%eax
+CFLT8: movl %eax,%fs:(%rsi) # ERROR CODE
+1:
+ testb $TBF_FAILSAFE,%cl
+ jz 2f
+ subl 4ドル*4,%esi
+ movl %gs,%eax
+CFLT9: movl %eax,%fs:3*4(%rsi) # GS
+CFLT10: movl %edi,%fs:2*4(%rsi) # FS
+ movl %es,%eax
+CFLT11: movl %eax,%fs:1*4(%rsi) # ES
+ movl %ds,%eax
+CFLT12: movl %eax,%fs:0*4(%rsi) # DS
+2:
+ /* Rewrite our stack frame and return to guest-OS mode. */
+ /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
+ movl $TRAP_syscall,UREGS_entry_vector+8(%rsp)
+ andl 0ドルxfffcbeff,UREGS_eflags+8(%rsp)
+ mov %fs,UREGS_ss+8(%rsp)
+ movl %esi,UREGS_rsp+8(%rsp)
+CFLT13: mov %edi,%fs
+ movzwl TRAPBOUNCE_cs(%rdx),%eax
+ movl %eax,UREGS_cs+8(%rsp)
+ movl TRAPBOUNCE_eip(%rdx),%eax
+ testl %eax,%eax
+ jz domain_crash_synchronous
+ movl %eax,UREGS_rip+8(%rsp)
+ movb 0,ドルTRAPBOUNCE_flags(%rdx)
+ ret
+.section .fixup,"ax"
+CFIX13:
+ xorl %edi,%edi
+ jmp CFLT13
+.previous
+.section __ex_table,"a"
+ .quad CFLT1,domain_crash_synchronous, CFLT2,domain_crash_page_fault
+ .quad CFLT3,domain_crash_page_fault_4, CFLT4,domain_crash_synchronous
+ .quad CFLT5,domain_crash_page_fault_4, CFLT6,domain_crash_page_fault_8
+ .quad CFLT7,domain_crash_page_fault, CFLT8,domain_crash_page_fault
+ .quad CFLT9,domain_crash_page_fault_12, CFLT10,domain_crash_page_fault_8
+ .quad CFLT11,domain_crash_page_fault_4, CFLT12,domain_crash_page_fault
+ .quad CFLT13,CFIX13
+.previous
+
+domain_crash_page_fault_12:
+ addl 4,ドル%esi
+domain_crash_page_fault_8:
+ addl 4,ドル%esi
+domain_crash_page_fault_4:
+ addl 4,ドル%esi
+domain_crash_page_fault:
+CFLT14: mov %edi,%fs
+ movl %esi,%edi
+ call show_page_walk
+ jmp domain_crash_synchronous
+.section .fixup,"ax"
+CFIX14:
+ xorl %edi,%edi
+ jmp CFLT14
+.previous
+.section __ex_table,"a"
+ .quad CFLT14,CFIX14
+.previous
+
+.section .rodata, "a", @progbits
+
+#define compat_set_trap_table domain_crash_synchronous
+#define compat_mmu_update domain_crash_synchronous
+#define compat_set_gdt domain_crash_synchronous
+#define compat_stack_switch domain_crash_synchronous
+#define compat_fpu_taskswitch domain_crash_synchronous
+#define compat_arch_sched_op_compat domain_crash_synchronous
+#define compat_platform_op domain_crash_synchronous
+#define compat_set_debugreg domain_crash_synchronous
+#define compat_get_debugreg domain_crash_synchronous
+#define compat_update_descriptor domain_crash_synchronous
+#define compat_memory_op domain_crash_synchronous
+#define compat_multicall domain_crash_synchronous
+#define compat_update_va_mapping domain_crash_synchronous
+#define compat_set_timer_op domain_crash_synchronous
+#define compat_event_channel_op_compat domain_crash_synchronous
+#define compat_xen_version domain_crash_synchronous
+#define compat_console_io domain_crash_synchronous
+#define compat_physdev_op_compat domain_crash_synchronous
+#define compat_grant_table_op domain_crash_synchronous
+#define compat_vm_assist domain_crash_synchronous
+#define compat_update_va_mapping_otherdomain domain_crash_synchronous
+#define compat_vcpu_op domain_crash_synchronous
+#define compat_mmuext_op domain_crash_synchronous
+#define compat_acm_op domain_crash_synchronous
+#define compat_nmi_op domain_crash_synchronous
+#define compat_arch_sched_op domain_crash_synchronous
+#define compat_xenoprof_op domain_crash_synchronous
+#define compat_event_channel_op domain_crash_synchronous
+#define compat_physdev_op domain_crash_synchronous
+#define compat_sysctl domain_crash_synchronous
+#define compat_domctl domain_crash_synchronous
+
+ENTRY(compat_hypercall_table)
+ .quad compat_set_trap_table /* 0 */
+ .quad compat_mmu_update
+ .quad compat_set_gdt
+ .quad compat_stack_switch
+ .quad compat_set_callbacks
+ .quad compat_fpu_taskswitch /* 5 */
+ .quad compat_arch_sched_op_compat
+ .quad compat_platform_op
+ .quad compat_set_debugreg
+ .quad compat_get_debugreg
+ .quad compat_update_descriptor /* 10 */
+ .quad do_ni_hypercall
+ .quad compat_memory_op
+ .quad compat_multicall
+ .quad compat_update_va_mapping
+ .quad compat_set_timer_op /* 15 */
+ .quad compat_event_channel_op_compat
+ .quad compat_xen_version
+ .quad compat_console_io
+ .quad compat_physdev_op_compat
+ .quad compat_grant_table_op /* 20 */
+ .quad compat_vm_assist
+ .quad compat_update_va_mapping_otherdomain
+ .quad compat_iret
+ .quad compat_vcpu_op
+ .quad do_ni_hypercall /* 25 */
+ .quad compat_mmuext_op
+ .quad compat_acm_op
+ .quad compat_nmi_op
+ .quad compat_arch_sched_op
+ .quad compat_callback_op /* 30 */
+ .quad compat_xenoprof_op
+ .quad compat_event_channel_op
+ .quad compat_physdev_op
+ .quad do_ni_hypercall
+ .quad compat_sysctl /* 35 */
+ .quad compat_domctl
+ .rept NR_hypercalls-((.-compat_hypercall_table)/8)
+ .quad do_ni_hypercall
+ .endr
+
+ENTRY(compat_hypercall_args_table)
+ .byte 1 /* compat_set_trap_table */ /* 0 */
+ .byte 4 /* compat_mmu_update */
+ .byte 2 /* compat_set_gdt */
+ .byte 2 /* compat_stack_switch */
+ .byte 4 /* compat_set_callbacks */
+ .byte 1 /* compat_fpu_taskswitch */ /* 5 */
+ .byte 2 /* compat_arch_sched_op_compat */
+ .byte 1 /* compat_platform_op */
+ .byte 2 /* compat_set_debugreg */
+ .byte 1 /* compat_get_debugreg */
+ .byte 4 /* compat_update_descriptor */ /* 10 */
+ .byte 0 /* do_ni_hypercall */
+ .byte 2 /* compat_memory_op */
+ .byte 2 /* compat_multicall */
+ .byte 4 /* compat_update_va_mapping */
+ .byte 2 /* compat_set_timer_op */ /* 15 */
+ .byte 1 /* compat_event_channel_op_compat */
+ .byte 2 /* compat_xen_version */
+ .byte 3 /* compat_console_io */
+ .byte 1 /* compat_physdev_op_compat */
+ .byte 3 /* compat_grant_table_op */ /* 20 */
+ .byte 2 /* compat_vm_assist */
+ .byte 5 /* compat_update_va_mapping_otherdomain */
+ .byte 0 /* compat_iret */
+ .byte 3 /* compat_vcpu_op */
+ .byte 0 /* do_ni_hypercall */ /* 25 */
+ .byte 4 /* compat_mmuext_op */
+ .byte 1 /* compat_acm_op */
+ .byte 2 /* compat_nmi_op */
+ .byte 2 /* compat_arch_sched_op */
+ .byte 2 /* compat_callback_op */ /* 30 */
+ .byte 2 /* compat_xenoprof_op */
+ .byte 2 /* compat_event_channel_op */
+ .byte 2 /* compat_physdev_op */
+ .byte 0 /* do_ni_hypercall */
+ .byte 1 /* compat_sysctl */ /* 35 */
+ .byte 1 /* compat_domctl */
+ .rept NR_hypercalls-(.-compat_hypercall_args_table)
+ .byte 0 /* do_ni_hypercall */
+ .endr
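
For reference, the frame that compat_create_bounce_frame lays out on the 32-bit guest kernel stack is the 4-byte-slot analogue of what create_bounce_frame builds for 64-bit guests. A rough C view of the layout, lowest address first (illustrative only; this struct is not part of the patch, and the optional members are present only when the corresponding TBF_ flag is set):

/* Illustrative sketch only -- not part of the patch. */
struct compat_bounce_frame {
    /* pushed only when TBF_FAILSAFE is set */
    uint32_t ds, es, fs, gs;
    /* pushed only when TBF_EXCEPTION_ERRCODE is set */
    uint32_t error_code;
    /* always present */
    uint32_t eip;
    uint32_t cs;        /* bits 16-23 carry saved_upcall_mask */
    uint32_t eflags;    /* saved EFLAGS, IF folded from the upcall mask */
    /* present only when entering from ring 2/3 */
    uint32_t esp, ss;
};

As with the native path, the failsafe case additionally records the data segment selectors so the guest's failsafe callback can recover them.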
Index: 2006-10-04/xen/arch/x86/x86_64/compat/traps.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ 2006-10-04/xen/arch/x86/x86_64/compat/traps.c 2006-10-04 15:06:22.000000000 +0200
@@ -0,0 +1,264 @@
+#ifdef CONFIG_COMPAT
+
+#if 0 /* XXX */
+#include <compat/callback.h>
+#else
+struct compat_xen_callback {
+ unsigned int cs;
+ unsigned int eip;
+};
+typedef struct compat_xen_callback xen_callback_compat_t;
+
+struct compat_callback_register {
+ uint16_t type;
+ uint16_t flags;
+ xen_callback_compat_t address;
+};
+
+struct compat_callback_unregister {
+ uint16_t type;
+ uint16_t _unused;
+};
+#endif
+
+void compat_show_guest_stack(struct cpu_user_regs *regs, int debug_stack_lines)
+{
+ int i;
+ unsigned int *stack, addr;
+
+ stack = (unsigned int *)(unsigned long)regs->_esp;
+ printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
+
+ for ( i = 0; i < debug_stack_lines * 8; i++ )
+ {
+ if ( (((long)stack + 3) & (STACK_SIZE - 4)) == 0 )
+ break;
+ if ( get_user(addr, stack) )
+ {
+ if ( i != 0 )
+ printk("\n ");
+ printk("Fault while accessing guest memory.");
+ i = 1;
+ break;
+ }
+ if ( (i != 0) && ((i % 8) == 0) )
+ printk("\n ");
+ printk(" %08x", addr);
+ stack++;
+ }
+ if ( i == 0 )
+ printk("Stack empty.");
+ printk("\n");
+}
+
+unsigned long compat_iret(void)
+{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+ u32 eflags;
+
+ /* Check worst-case stack frame for overlap with Xen protected area. */
+ if ( unlikely(regs->_esp != regs->rsp) )
+ domain_crash_synchronous();
+
+ /* Pop and restore EAX (clobbered by hypercall). */
+ if ( unlikely(__copy_from_user(&regs->_eax, (void __user *)regs->rsp, 4)) )
+ domain_crash_synchronous();
+ regs->_esp += 4;
+
+ /* Pop and restore CS and EIP. */
+ if ( unlikely(__copy_from_user(&regs->_eip, (void __user *)regs->rsp, 8)) )
+ domain_crash_synchronous();
+ regs->_esp += 8;
+
+ /*
+ * Pop, fix up and restore EFLAGS. We fix up in a local staging area
+ * to avoid firing the BUG_ON(IOPL) check in arch_getdomaininfo_ctxt.
+ */
+ if ( unlikely(__copy_from_user(&eflags, (void __user *)regs->rsp, 4)) )
+ domain_crash_synchronous();
+ regs->_esp += 4;
+ if ( unlikely(eflags & X86_EFLAGS_VM) )
+ domain_crash_synchronous(); /* XXX */
+ regs->_eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+
+ if ( unlikely(ring_0(regs)) )
+ domain_crash_synchronous();
+ if ( !ring_1(regs) )
+ {
+ /* Return to ring 2/3: pop and restore ESP and SS. */
+ if ( __copy_from_user(&regs->_esp, (void __user *)regs->rsp, 8) )
+ domain_crash_synchronous();
+ }
+
+ /* No longer in NMI context. */
+ clear_bit(_VCPUF_nmi_masked, &current->vcpu_flags);
+
+ /* Restore upcall mask from supplied EFLAGS.IF. */
+ current->vcpu_info->evtchn_upcall_mask = !(eflags & X86_EFLAGS_IF);
+
+ /*
+ * The hypercall exit path will overwrite EAX with this return
+ * value.
+ */
+ return regs->_eax;
+}
+
+static long compat_register_guest_callback(struct compat_callback_register *reg)
+{
+ long ret = 0;
+ struct vcpu *v = current;
+
+ fixup_guest_code_selector(v->domain, reg->address.cs);
+
+ switch ( reg->type )
+ {
+ case CALLBACKTYPE_event:
+ v->arch.guest_context.event_callback_cs = reg->address.cs;
+ v->arch.guest_context.event_callback_eip = reg->address.eip;
+ break;
+
+ case CALLBACKTYPE_failsafe:
+ v->arch.guest_context.failsafe_callback_cs = reg->address.cs;
+ v->arch.guest_context.failsafe_callback_eip = reg->address.eip;
+ if ( reg->flags & CALLBACKF_mask_events )
+ set_bit(_VGCF_failsafe_disables_events,
+ &v->arch.guest_context.flags);
+ else
+ clear_bit(_VGCF_failsafe_disables_events,
+ &v->arch.guest_context.flags);
+ break;
+
+ case CALLBACKTYPE_nmi:
+ ret = register_guest_nmi_callback(reg->address.eip);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static long compat_unregister_guest_callback(struct compat_callback_unregister *unreg)
+{
+ long ret;
+
+ switch ( unreg->type )
+ {
+ case CALLBACKTYPE_nmi:
+ ret = unregister_guest_nmi_callback();
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+
+long compat_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
+{
+ long ret;
+
+ switch ( cmd )
+ {
+ case CALLBACKOP_register:
+ {
+ struct compat_callback_register reg;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&reg, arg, 1) )
+ break;
+
+ ret = compat_register_guest_callback(&reg);
+ }
+ break;
+
+ case CALLBACKOP_unregister:
+ {
+ struct compat_callback_unregister unreg;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&unreg, arg, 1) )
+ break;
+
+ ret = compat_unregister_guest_callback(&unreg);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+long compat_set_callbacks(unsigned long event_selector,
+ unsigned long event_address,
+ unsigned long failsafe_selector,
+ unsigned long failsafe_address)
+{
+ struct compat_callback_register event = {
+ .type = CALLBACKTYPE_event,
+ .address = {
+ .cs = event_selector,
+ .eip = event_address
+ }
+ };
+ struct compat_callback_register failsafe = {
+ .type = CALLBACKTYPE_failsafe,
+ .address = {
+ .cs = failsafe_selector,
+ .eip = failsafe_address
+ }
+ };
+
+ compat_register_guest_callback(&event);
+ compat_register_guest_callback(&failsafe);
+
+ return 0;
+}
+
+#endif /* CONFIG_COMPAT */
+
+static void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
+{
+ char *p;
+ int i;
+
+ /* Fill in all the transfer points with template machine code. */
+
+ for ( i = 0; i < (PAGE_SIZE / 32); i++ )
+ {
+ p = (char *)(hypercall_page + (i * 32));
+ *(u8 *)(p+ 0) = 0xb8; /* mov $<i>,%eax */
+ *(u32 *)(p+ 1) = i;
+ *(u16 *)(p+ 5) = 0x82cd; /* int 0ドルx82 */
+ *(u8 *)(p+ 7) = 0xc3; /* ret */
+ }
+
+ /*
+ * HYPERVISOR_iret is special because it doesn't return and expects a
+ * special stack frame. Guests jump at this transfer point instead of
+ * calling it.
+ */
+ p = (char *)(hypercall_page + (__HYPERVISOR_iret * 32));
+ *(u8 *)(p+ 0) = 0x50; /* push %eax */
+ *(u8 *)(p+ 1) = 0xb8; /* mov $__HYPERVISOR_iret,%eax */
+ *(u32 *)(p+ 2) = __HYPERVISOR_iret;
+ *(u16 *)(p+ 6) = 0x82cd; /* int 0ドルx82 */
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
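
The ring-1 hypercall page set up by hypercall_page_initialise_ring1_kernel() gives a 32-bit guest one 32-byte stub per hypercall: mov $<nr>,%eax; int 0ドルx82; ret, with __HYPERVISOR_iret getting the special push-%eax variant that is jumped to rather than called. From the guest side, a call through such a stub is roughly equivalent to the sketch below; it assumes the usual 32-bit PV argument registers ebx/ecx/edx/esi/edi/ebp (which the compat entry code above reshuffles into the 64-bit C calling convention) and GCC-style inline assembly. Guests would normally go through the hypercall page rather than open-coding the trap.

/* Illustrative sketch only -- not part of the patch. */
static inline long compat_hypercall2(unsigned int nr,
                                     unsigned long arg1, unsigned long arg2)
{
    long ret;

    /* eax = hypercall number; ebx, ecx = first two arguments. */
    asm volatile ( "int 0ドルx82"              /* HYPERCALL_VECTOR, DPL-1 gate */
                   : "=a" (ret)
                   : "0" (nr), "b" (arg1), "c" (arg2)
                   : "memory" );
    return ret;
}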
Index: 2006-10-04/xen/arch/x86/x86_64/entry.S
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/entry.S 2006-10-04 09:17:44.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/entry.S 2006-10-04 15:06:22.000000000 +0200
@@ -324,7 +324,16 @@ domain_crash_synchronous:
 GET_GUEST_REGS(%rax)
 movq %rax,%rsp
 # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
+#ifdef CONFIG_COMPAT
+ movq CPUINFO_current_vcpu(%rax),%rax
+ movq VCPU_domain(%rax),%rax
+ btl $_DOMF_compat,DOMAIN_domain_flags(%rax)
+ setnc %al
+ leal (%rax,%rax,2),%eax
+ orb %al,UREGS_cs(%rsp)
+#else
 orb 3,ドルUREGS_cs(%rsp)
+#endif
 # printk(domain_crash_synchronous_string)
 leaq domain_crash_synchronous_string(%rip),%rdi
 xorl %eax,%eax
@@ -336,8 +345,15 @@ domain_crash_synchronous:
 ENTRY(ret_from_intr)
 GET_CURRENT(%rbx)
 testb 3,ドルUREGS_cs(%rsp)
- jnz test_all_events
- jmp restore_all_xen
+ jz restore_all_xen
+#ifndef CONFIG_COMPAT
+ jmp test_all_events
+#else
+ movq VCPU_domain(%rbx),%rax
+ btl $_DOMF_compat,DOMAIN_domain_flags(%rax)
+ jnc test_all_events
+ jmp compat_test_all_events
+#endif
 
 ALIGN
 /* No special register assumptions. */
@@ -355,6 +371,11 @@ handle_exception:
 testb 3,ドルUREGS_cs(%rsp)
 jz restore_all_xen
 leaq VCPU_trap_bounce(%rbx),%rdx
+#ifdef CONFIG_COMPAT
+ movq VCPU_domain(%rbx),%rax
+ btl $_DOMF_compat,DOMAIN_domain_flags(%rax)
+ jc compat_post_handle_exception
+#endif
 testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
 jz test_all_events
 call create_bounce_frame
@@ -618,3 +639,7 @@ ENTRY(hypercall_args_table)
 .rept NR_hypercalls-(.-hypercall_args_table)
 .byte 0 /* do_ni_hypercall */
 .endr
+
+#ifdef CONFIG_COMPAT
+#include "compat/entry.S"
+#endif
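
The pattern added to entry.S above repeats at each guest exit point: load the current vcpu's domain, test _DOMF_compat, and branch to the compat_* flavour of the path. In C terms the selection amounts to the sketch below (illustrative only; the real targets are assembly labels, not C functions, and the helper name is invented):

/* Illustrative sketch only -- not part of the patch. */
static void return_to_guest(struct vcpu *v)
{
    if ( IS_COMPAT(v->domain) )        /* tests _DOMF_compat            */
        compat_test_all_events(v);     /* 32-bit frames, ring-1 kernel  */
    else
        test_all_events(v);            /* native 64-bit exit path       */
}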
Index: 2006-10-04/xen/arch/x86/x86_64/traps.c
===================================================================
--- 2006-10-04.orig/xen/arch/x86/x86_64/traps.c 2006-10-04 15:03:07.000000000 +0200
+++ 2006-10-04/xen/arch/x86/x86_64/traps.c 2006-10-04 15:06:22.000000000 +0200
@@ -248,6 +248,7 @@ unsigned long do_iret(void)
 }
 
 asmlinkage void syscall_enter(void);
+asmlinkage void compat_hypercall(void);
 void __init percpu_traps_init(void)
 {
 char *stack_bottom, *stack;
@@ -259,6 +260,11 @@ void __init percpu_traps_init(void)
 set_intr_gate(TRAP_double_fault, &double_fault);
 idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
 idt_table[TRAP_nmi].a |= 2UL << 32; /* IST2 */
+
+#ifdef CONFIG_COMPAT
+ /* The hypercall entry vector is only accessible from ring 1. */
+ _set_gate(idt_table+HYPERCALL_VECTOR, 15, 1, &compat_hypercall);
+#endif
 }
 
 stack_bottom = (char *)get_stack_bottom();
@@ -522,12 +528,16 @@ static void hypercall_page_initialise_ri
 *(u16 *)(p+ 9) = 0x050f; /* syscall */
 }
 
+#include "compat/traps.c"
+
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
 if ( hvm_guest(d->vcpu[0]) )
 hvm_hypercall_page_initialise(d, hypercall_page);
- else
+ else if ( !IS_COMPAT(d) )
 hypercall_page_initialise_ring3_kernel(hypercall_page);
+ else
+ hypercall_page_initialise_ring1_kernel(hypercall_page);
 }
 
 /*
Index: 2006-10-04/xen/include/asm-x86/processor.h
===================================================================
--- 2006-10-04.orig/xen/include/asm-x86/processor.h 2006-09-11 09:06:11.000000000 +0200
+++ 2006-10-04/xen/include/asm-x86/processor.h 2006-10-04 15:06:22.000000000 +0200
@@ -559,6 +559,12 @@ void show_execution_state(struct cpu_use
 void show_page_walk(unsigned long addr);
 asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
 
+#ifdef CONFIG_COMPAT
+void compat_show_guest_stack(struct cpu_user_regs *, int lines);
+#else
+#define compat_show_guest_stack(regs, lines) ((void)0)
+#endif
+
 /* Dumps current register and stack state. */
 #define dump_execution_state() \
 /* NB. Needs interrupts enabled else we end up in fatal_trap(). */ \