WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
Xen

xen-devel

[Top] [All Lists]

[Xen-devel] [PATCH 4/7] xencomm take 3: xen side multiple page support

To: xen-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-devel] [PATCH 4/7] xencomm take 3: xen side multiple page support
From: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
Date: Tue, 14 Aug 2007 18:50:23 +0900
Cc: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>, xen-ia64-devel@xxxxxxxxxxxxxxxxxxx, xen-ppc-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Tue, 14 Aug 2007 02:55:05 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Mutt/1.4.2.1i
# HG changeset patch
# User yamahata@xxxxxxxxxxxxx
# Date 1187077969 -32400
# Node ID b53a87fec6dc7a9f2cc3b3b7df72fa8b2c7fe081
# Parent cf7a9141e7f884a14cfab8d3785c95dea85749be
[xen, xencomm] xencomm multiple page support
Current implementation doesn't allow struct xencomm_desc::address array
to be more than single page. On IA64 it causes 64GB+ domain creation
failure. This patch generalizes xencomm to allow multipage
xencomm_desc::address array.
PATCHNAME: xencomm_multiple_page
Signed-off-by: Isaku Yamahata <yamahata@xxxxxxxxxxxxx>
diff -r cf7a9141e7f8 -r b53a87fec6dc xen/common/xencomm.c
--- a/xen/common/xencomm.c Tue Aug 14 17:17:01 2007 +0900
+++ b/xen/common/xencomm.c Tue Aug 14 16:52:49 2007 +0900
@@ -17,6 +17,7 @@
 *
 * Authors: Hollis Blanchard <hollisb@xxxxxxxxxx>
 * Tristan Gingold <tristan.gingold@xxxxxxxx>
+ * Isaku Yamahata <yamahata@xxxxxxxxxxxxx> multiple page support
 */
 
 #include <xen/config.h>
@@ -81,10 +82,11 @@ xencomm_desc_cross_page_boundary(unsigne
 }
 
 struct xencomm_ctxt {
- struct xencomm_desc *desc;
-
+ struct xencomm_desc __user *desc_in_paddr;
 uint32_t nr_addrs;
+
 struct page_info *page;
+ unsigned long *address;
 };
 
 static uint32_t
@@ -94,21 +96,9 @@ xencomm_ctxt_nr_addrs(const struct xenco
 }
 
 static unsigned long*
-xencomm_ctxt_address(struct xencomm_ctxt *ctxt, int i)
-{
- return &ctxt->desc->address[i];
-}
-
-/* check if struct xencomm_desc and address array cross page boundary */
-static int
-xencomm_ctxt_cross_page_boundary(struct xencomm_ctxt *ctxt)
-{
- unsigned long saddr = (unsigned long)ctxt->desc;
- unsigned long eaddr =
- (unsigned long)&ctxt->desc->address[ctxt->nr_addrs] - 1;
- if ((saddr >> PAGE_SHIFT) != (eaddr >> PAGE_SHIFT))
- return 1;
- return 0;
+xencomm_ctxt_address(struct xencomm_ctxt *ctxt)
+{
+ return ctxt->address;
 }
 
 static int
@@ -136,15 +126,38 @@ xencomm_ctxt_init(const void* handle, st
 return -EINVAL;
 }
 
- ctxt->desc = desc;
 ctxt->nr_addrs = desc->nr_addrs; /* copy before use.
 * It is possible for a guest domain to
 * modify concurrently.
 */
+ ctxt->desc_in_paddr = (struct xencomm_desc*)handle;
 ctxt->page = page;
- if (xencomm_ctxt_cross_page_boundary(ctxt)) {
- put_page(page);
- return -EINVAL;
+ ctxt->address = &desc->address[0];
+ return 0;
+}
+
+static int
+xencomm_ctxt_next(struct xencomm_ctxt *ctxt, int i)
+{
+ BUG_ON(i >= ctxt->nr_addrs);
+ /* in i == 0 case, we already calculated in xecomm_addr_init() */
+ if (i != 0)
+ ctxt->address++;
+ 
+ /* When crossing page boundary, machine address must be calculated. */
+ if (((unsigned long)ctxt->address & ~PAGE_MASK) == 0) {
+ unsigned long paddr =
+ (unsigned long)&(ctxt->desc_in_paddr->address[i]);
+ struct page_info *page;
+ int ret;
+
+ ret = xencomm_get_page(paddr, &page);
+ if (ret == 0) {
+ put_page(ctxt->page);
+ ctxt->page = page;
+ ctxt->address = xencomm_vaddr(paddr, page);
+ }
+ return ret;
 }
 return 0;
 }
@@ -236,11 +249,14 @@ xencomm_copy_from_guest(void *to, const 
 
 /* iterate through the descriptor, copying up to a page at a time */
 while ((to_pos < n) && (i < xencomm_ctxt_nr_addrs(&ctxt))) {
- unsigned long src_paddr = *xencomm_ctxt_address(&ctxt, i);
+ unsigned long src_paddr;
 unsigned int pgoffset;
 unsigned int chunksz;
 unsigned int chunk_skip;
 
+ if (xencomm_ctxt_next(&ctxt, i))
+ goto out;
+ src_paddr = *xencomm_ctxt_address(&ctxt);
 if (src_paddr == XENCOMM_INVALID) {
 i++;
 continue;
@@ -353,11 +369,14 @@ xencomm_copy_to_guest(void *to, const vo
 
 /* iterate through the descriptor, copying up to a page at a time */
 while ((from_pos < n) && (i < xencomm_ctxt_nr_addrs(&ctxt))) {
- unsigned long dest_paddr = *xencomm_ctxt_address(&ctxt, i);
+ unsigned long dest_paddr;
 unsigned int pgoffset;
 unsigned int chunksz;
 unsigned int chunk_skip;
 
+ if (xencomm_ctxt_next(&ctxt, i))
+ goto out;
+ dest_paddr = *xencomm_ctxt_address(&ctxt);
 if (dest_paddr == XENCOMM_INVALID) {
 i++;
 continue;
@@ -401,21 +420,28 @@ int xencomm_add_offset(void **handle, un
 {
 struct xencomm_ctxt ctxt;
 int i = 0;
+ int res = 0;
 
 if (xencomm_is_inline(*handle))
 return xencomm_inline_add_offset(handle, bytes);
 
- if (xencomm_ctxt_init(handle, &ctxt))
- return -1;
+ res = xencomm_ctxt_init(handle, &ctxt);
+ if (res != 0)
+ return res;
 
 /* iterate through the descriptor incrementing addresses */
 while ((bytes > 0) && (i < xencomm_ctxt_nr_addrs(&ctxt))) {
- unsigned long *address = xencomm_ctxt_address(&ctxt, i);
- unsigned long dest_paddr = *address;
+ unsigned long *address;
+ unsigned long dest_paddr;
 unsigned int pgoffset;
 unsigned int chunksz;
 unsigned int chunk_skip;
 
+ res = xencomm_ctxt_next(&ctxt, i);
+ if (res)
+ goto out;
+ address = xencomm_ctxt_address(&ctxt);
+ dest_paddr = *address;
 if (dest_paddr == XENCOMM_INVALID) {
 i++;
 continue;
@@ -435,8 +461,10 @@ int xencomm_add_offset(void **handle, un
 
 i++;
 }
+
+out:
 xencomm_ctxt_done(&ctxt);
- return 0;
+ return res;
 }
 
 int xencomm_handle_is_null(void *handle)
@@ -452,7 +480,9 @@ int xencomm_handle_is_null(void *handle)
 return 1;
 
 for (i = 0; i < xencomm_ctxt_nr_addrs(&ctxt); i++) {
- if (*xencomm_ctxt_address(&ctxt, i) != XENCOMM_INVALID) {
+ if (xencomm_ctxt_next(&ctxt, i))
+ goto out;
+ if (*xencomm_ctxt_address(&ctxt) != XENCOMM_INVALID) {
 res = 0;
 goto out;
 }

Attachment: 15728_b53a87fec6dc_xencomm_multiple_page.patch
Description: Text Data

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
[Prev in Thread] Current Thread [Next in Thread]
  • [Xen-devel] [PATCH 4/7] xencomm take 3: xen side multiple page support, Isaku Yamahata <=
Previous by Date: [Xen-devel] [PATCH 7/7] xencomm take 3: linux side remove xencomm page size limit , Isaku Yamahata
Next by Date: [Xen-devel] [PATCH 5/7] xencomm take 3: linux side various fixes and preparation for consolidation , Isaku Yamahata
Previous by Thread: [Xen-devel] [PATCH 7/7] xencomm take 3: linux side remove xencomm page size limit , Isaku Yamahata
Next by Thread: [Xen-devel] [PATCH 5/7] xencomm take 3: linux side various fixes and preparation for consolidation , Isaku Yamahata
Indexes: [Date] [Thread] [Top] [All Lists]

Copyright ©, Citrix Systems Inc. All rights reserved. Legal and Privacy
Citrix This site is hosted by Citrix

Page converted by AltStyle (-> original) /