WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
Xen

xen-devel

[Top] [All Lists]

[Xen-devel] [PATCH] 2nd try: 1/2 VCPU creation and allocation

To: Keir Fraser <Keir.Fraser@xxxxxxxxxxxx>
Subject: [Xen-devel] [PATCH] 2nd try: 1/2 VCPU creation and allocation
From: Ryan Harper <ryanh@xxxxxxxxxx>
Date: Tue, 11 Oct 2005 10:15:53 -0500
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Tue, 11 Oct 2005 15:13:43 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <e03426a13ac8e7d1aa04b6d34258d661@xxxxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20051010151651.GK17358@xxxxxxxxxx> <bf24a942f6b45d1ca73cf693b4cdf11d@xxxxxxxxxxxx> <20051010152821.GN17358@xxxxxxxxxx> <3f428c67eaeb9dd543c246e55a156fd3@xxxxxxxxxxxx> <20051010160538.GO17358@xxxxxxxxxx> <f2c38fb2e629d2867f0a00e083c5813a@xxxxxxxxxxxx> <20051010162354.GP17358@xxxxxxxxxx> <e03426a13ac8e7d1aa04b6d34258d661@xxxxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.5.6+20040907i
New dom0_op DOM0_VCPUSINCREASE.
-- 
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253 T/L: 678-9253
ryanh@xxxxxxxxxx
diffstat output:
 tools/libxc/xc_domain.c | 9 +++++++++
 tools/libxc/xenctrl.h | 11 +++++++++++
 tools/python/xen/lowlevel/xc/xc.c | 28 ++++++++++++++++++++++++++++
 tools/python/xen/xend/XendDomainInfo.py | 4 ++++
 xen/arch/x86/domain_build.c | 6 ++++++
 xen/common/dom0_ops.c | 27 +++++++++++++++++++++++++++
 xen/common/domain.c | 25 ++++++++++---------------
 xen/include/public/dom0_ops.h | 8 ++++++++
 xen/include/xen/domain.h | 2 ++
 9 files changed, 105 insertions(+), 15 deletions(-)
Signed-off-by: Ryan Harper <ryanh@xxxxxxxxxx>
---
diff -r 015f8ae81276 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Mon Oct 10 14:38:01 2005
+++ b/tools/libxc/xc_domain.c Mon Oct 10 17:26:54 2005
@@ -329,6 +329,15 @@
 return err;
 }
 
+int xc_domain_vcpus_increase(int xc_handle, u32 domid, unsigned int limit)
+{
+ dom0_op_t op;
+ op.cmd = DOM0_VCPUSINCREASE;
+ op.u.vcpusincrease.domain = (domid_t)domid;
+ op.u.vcpusincrease.limit = (u8)limit;
+ return do_dom0_op(xc_handle, &op);
+}
+
 /*
 * Local variables:
 * mode: C
diff -r 015f8ae81276 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Mon Oct 10 14:38:01 2005
+++ b/tools/libxc/xenctrl.h Mon Oct 10 17:26:54 2005
@@ -152,6 +152,17 @@
 u32 ssidref,
 u32 *pdomid);
 
+/*
+ * This function increases the number of vcpus in a domain to limit.
+ *
+ * @parm xc_handle a handle to an open hypervisor interface.
+ * @parm domid the domain id in which vcpus are to be created.
+ * @parm limit the total number of vcpus to run in domain.
+ * @return 0 on success, -1 on failure.
+ */
+int xc_domain_vcpus_increase(int xc_handle,
+ u32 domid, 
+ unsigned int limit);
 
 int xc_domain_dumpcore(int xc_handle, 
 u32 domid,
diff -r 015f8ae81276 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Mon Oct 10 14:38:01 2005
+++ b/tools/python/xen/lowlevel/xc/xc.c Mon Oct 10 17:26:54 2005
@@ -91,6 +91,26 @@
 return PyErr_SetFromErrno(xc_error);
 
 return PyInt_FromLong(dom);
+}
+
+static PyObject *pyxc_domain_vcpus_increase(PyObject *self,
+ PyObject *args,
+ PyObject *kwds)
+{
+ XcObject *xc = (XcObject *)self;
+
+ u32 dom, limit;
+
+ static char *kwd_list[] = { "dom", "limit", NULL };
+
+ if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, &dom, 
&limit) )
+ return NULL;
+
+ if ( xc_domain_vcpus_increase(xc->xc_handle, dom, limit) != 0 )
+ return PyErr_SetFromErrno(xc_error);
+ 
+ Py_INCREF(zero);
+ return zero;
 }
 
 static PyObject *pyxc_domain_pause(PyObject *self,
@@ -783,6 +803,14 @@
 " dom [int, 0]: Domain identifier to use (allocated if 
zero).\n"
 "Returns: [int] new domain identifier; -1 on error.\n" },
 
+ { "domain_vcpus_increase", 
+ (PyCFunction)pyxc_domain_vcpus_increase,
+ METH_VARARGS | METH_KEYWORDS, "\n"
+ "Increase the number of VCPUs in domain.\n"
+ " dom [int, 0]: Domain identifier to use.\n"
+ " limit [int, 0]: New total number of VCPUs in domain.\n"
+ "Returns: [int] 0 on success; -1 on error.\n" },
+
 { "domain_dumpcore", 
 (PyCFunction)pyxc_domain_dumpcore, 
 METH_VARARGS | METH_KEYWORDS, "\n"
diff -r 015f8ae81276 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py Mon Oct 10 14:38:01 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py Mon Oct 10 17:26:54 2005
@@ -1018,6 +1018,10 @@
 self.image.handleBootloading()
 
 xc.domain_setcpuweight(self.domid, self.info['cpu_weight'])
+
+ # increase the total number of vcpus in domain
+ xc.domain_vcpus_increase(self.domid, int(self.info['vcpus']));
+
 # XXX Merge with configure_maxmem?
 m = self.image.getDomainMemory(self.info['memory_KiB'])
 xc.domain_setmaxmem(self.domid, m)
diff -r 015f8ae81276 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c Mon Oct 10 14:38:01 2005
+++ b/xen/arch/x86/domain_build.c Mon Oct 10 17:26:54 2005
@@ -14,6 +14,7 @@
 #include <xen/event.h>
 #include <xen/elf.h>
 #include <xen/kernel.h>
+#include <xen/domain.h>
 #include <asm/regs.h>
 #include <asm/system.h>
 #include <asm/io.h>
@@ -559,6 +560,11 @@
 d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
 d->shared_info->n_vcpu = num_online_cpus();
 
+ /* create extra vcpus */
+ if ( (d->shared_info->n_vcpu > 1) ) 
+ for ( i = 1; i < d->shared_info->n_vcpu; i++ )
+ do_createvcpu(d, i);
+
 /* Set up monitor table */
 update_pagetables(v);
 
diff -r 015f8ae81276 xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c Mon Oct 10 14:38:01 2005
+++ b/xen/common/dom0_ops.c Mon Oct 10 17:26:54 2005
@@ -227,6 +227,33 @@
 }
 break;
 
+ case DOM0_VCPUSINCREASE:
+ {
+ struct domain *d;
+ unsigned int i, limit = op->u.vcpusincrease.limit;
+
+ if ( limit >= MAX_VIRT_CPUS )
+ ret = -EINVAL;
+
+ d = find_domain_by_id(op->u.vcpusincrease.domain);
+
+ ret = -ESRCH;
+ if ( d != NULL )
+ {
+ ret = -EINVAL;
+ LOCK_BIGLOCK(d);
+ /* NB: VCPU0 is allocated on domain creation */
+ for ( i=1; i<limit; i++ ) {
+ if ( d->vcpu[i] == NULL )
+ if ( (ret = do_createvcpu(d, i)) != 0 )
+ break;
+ }
+ UNLOCK_BIGLOCK(d);
+ put_domain(d);
+ }
+ }
+ break;
+
 case DOM0_DESTROYDOMAIN:
 {
 struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
diff -r 015f8ae81276 xen/common/domain.c
--- a/xen/common/domain.c Mon Oct 10 14:38:01 2005
+++ b/xen/common/domain.c Mon Oct 10 17:26:54 2005
@@ -368,10 +368,9 @@
 return rc;
 }
 
-int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt) 
-{
- struct vcpu *v;
- int rc;
+int do_createvcpu(struct domain *d, int vcpuid)
+{
+ struct vcpu *v;
 
 ASSERT(d->vcpu[vcpuid] == NULL);
 
@@ -387,20 +386,11 @@
 
 arch_do_boot_vcpu(v);
 
- if ( (rc = arch_set_info_guest(v, ctxt)) != 0 )
- goto out;
-
 sched_add_domain(v);
 
 set_bit(_VCPUF_down, &v->vcpu_flags);
- clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
 
 return 0;
-
- out:
- arch_free_vcpu_struct(d->vcpu[vcpuid]);
- d->vcpu[vcpuid] = NULL;
- return rc;
 }
 
 long do_vcpu_op(int cmd, int vcpuid, void *arg)
@@ -413,7 +403,7 @@
 if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
 return -EINVAL;
 
- if ( ((v = d->vcpu[vcpuid]) == NULL) && (cmd != VCPUOP_initialise) )
+ if ( ((v = d->vcpu[vcpuid]) == NULL) )
 return -ENOENT;
 
 switch ( cmd )
@@ -433,7 +423,12 @@
 }
 
 LOCK_BIGLOCK(d);
- rc = (d->vcpu[vcpuid] == NULL) ? boot_vcpu(d, vcpuid, ctxt) : -EEXIST;
+ clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
+ if ( (rc = arch_set_info_guest(v, ctxt)) != 0 ) {
+ sched_rem_domain(v);
+ arch_free_vcpu_struct(d->vcpu[vcpuid]);
+ d->vcpu[vcpuid] = NULL;
+ }
 UNLOCK_BIGLOCK(d);
 
 xfree(ctxt);
diff -r 015f8ae81276 xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h Mon Oct 10 14:38:01 2005
+++ b/xen/include/public/dom0_ops.h Mon Oct 10 17:26:54 2005
@@ -386,6 +386,13 @@
 int is_ram;
 } *memory_map;
 } dom0_physical_memory_map_t;
+
+#define DOM0_VCPUSINCREASE 41
+typedef struct {
+ domid_t domain; /* domain to be affected */
+ u8 limit; /* total number of vcpus to run */
+} dom0_vcpusincrease_t;
+
 
 typedef struct {
 u32 cmd;
@@ -422,6 +429,7 @@
 dom0_getdomaininfolist_t getdomaininfolist;
 dom0_platform_quirk_t platform_quirk;
 dom0_physical_memory_map_t physical_memory_map;
+ dom0_vcpusincrease_t vcpusincrease;
 } u;
 } dom0_op_t;
 
diff -r 015f8ae81276 xen/include/xen/domain.h
--- a/xen/include/xen/domain.h Mon Oct 10 14:38:01 2005
+++ b/xen/include/xen/domain.h Mon Oct 10 17:26:54 2005
@@ -1,6 +1,8 @@
 
 #ifndef __XEN_DOMAIN_H__
 #define __XEN_DOMAIN_H__
+
+extern int do_createvcpu(struct domain *d, int vcpuid);
 
 /*
 * Arch-specifics.
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
<Prev in Thread] Current Thread [Next in Thread>
Previous by Date: [Xen-devel] [PATCH] 2nd try: 0/2 VCPU creation and allocation , Ryan Harper
Next by Date: [Xen-devel] [PATCH] 2nd try: 2/2 VCPU creation and allocation , Ryan Harper
Previous by Thread: [Xen-devel] [PATCH] 2nd try: 0/2 VCPU creation and allocation , Ryan Harper
Next by Thread: [Xen-devel] [PATCH] 2nd try: 2/2 VCPU creation and allocation , Ryan Harper
Indexes: [Date] [Thread] [Top] [All Lists]

Copyright ©, Citrix Systems Inc. All rights reserved. Legal and Privacy
Citrix This site is hosted by Citrix

AltStyle によって変換されたページ (->オリジナル) /