On Tue, Jul 31, 2007 at 02:25:17PM -0600, Alex Williamson wrote: > Should we make the same changes to the common xencomm files? > (xen-ppc-devel CC'd) I don't see why we wouldn't, at least for this > first one.
Yes, that's right. I attached the updated one and the new patch for common xencomm code. I did only compile test for common xencomm. It needs review and test by PPC developers before commit. > How much memory have you been able to assign to a domain > with theses patches? My only nit on this patch is that the > from/to_ulong variables are actually a little uglier than the handful of > casts they remove (imho). Thanks, In fact Akio tested it. I heard it worked with 100GB. Akio, did you try more memory? thanks. -- yamahata
# HG changeset patch # User [EMAIL PROTECTED] # Date 1185948714 -32400 # Node ID c4d697f2367e389f3a2f1b6866ae971206198efc # Parent 039f2ccb1e383a52eb44ba3ed80859548932b95e remove xencomm page size limit. Currently xencomm has page size limit so that a domain with many memory (e.g. 100GB+) can't be created. This patch allows that the address array of struct xencomm_desc to cross page boundary so that the size of struct xencomm_desc can exceed page size. Note that struct xencomm_desc itself can't page boundary. PATCHNAME: remove_xencomm_page_size_limit_xen_side Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]> diff -r 039f2ccb1e38 -r c4d697f2367e xen/arch/ia64/xen/xencomm.c --- a/xen/arch/ia64/xen/xencomm.c Tue Jul 31 10:30:40 2007 -0600 +++ b/xen/arch/ia64/xen/xencomm.c Wed Aug 01 15:11:54 2007 +0900 @@ -34,6 +34,15 @@ static int xencomm_debug = 1; /* extreme #else #define xencomm_debug 0 #endif + +static int +xencomm_desc_cross_page_boundary(unsigned long paddr) +{ + unsigned long offset = paddr & ~PAGE_MASK; + if (offset > PAGE_SIZE - sizeof(struct xencomm_desc)) + return 1; + return 0; +} static int xencomm_copy_chunk_from( @@ -87,6 +96,8 @@ xencomm_copy_from_guest( { struct xencomm_desc *desc; unsigned long desc_addr; + struct xencomm_desc *desc_paddr; + unsigned long *address; unsigned int from_pos = 0; unsigned int to_pos = 0; unsigned int i = 0; @@ -121,6 +132,9 @@ xencomm_copy_from_guest( return 0; } + /* check if struct desc doesn't cross page boundry */ + if (xencomm_desc_cross_page_boundary((unsigned long)from)) + return -EINVAL; /* first we need to access the descriptor */ desc_addr = xencomm_paddr_to_maddr((unsigned long)from); if (desc_addr == 0) @@ -132,18 +146,26 @@ xencomm_copy_from_guest( __func__, desc, desc->magic); return -EFAULT; } + desc_paddr = (struct xencomm_desc *)from; + address = &desc->address[i]; /* iterate through the descriptor, copying up to a page at a time */ while ((to_pos < n) && (i < desc->nr_addrs)) { - unsigned long src_paddr 
= desc->address[i]; + unsigned long src_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; - if (src_paddr == XENCOMM_INVALID) { - i++; - continue; - } + /* When crossing page boundary, machine address must be calculated. */ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -EFAULT; + } + src_paddr = *address; + if (src_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = src_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -170,7 +192,9 @@ xencomm_copy_from_guest( to_pos += bytes; } + skip_to_next: i++; + address++; } return n - to_pos; @@ -228,6 +252,8 @@ xencomm_copy_to_guest( { struct xencomm_desc *desc; unsigned long desc_addr; + struct xencomm_desc *desc_paddr; + unsigned long *address; unsigned int from_pos = 0; unsigned int to_pos = 0; unsigned int i = 0; @@ -263,6 +289,9 @@ xencomm_copy_to_guest( return 0; } + /* check if struct desc doesn't cross page boundry */ + if (xencomm_desc_cross_page_boundary((unsigned long)to)) + return -EINVAL; /* first we need to access the descriptor */ desc_addr = xencomm_paddr_to_maddr((unsigned long)to); if (desc_addr == 0) @@ -273,18 +302,26 @@ xencomm_copy_to_guest( printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); return -EFAULT; } + desc_paddr = (struct xencomm_desc*)to; + address = &desc->address[i]; /* iterate through the descriptor, copying up to a page at a time */ while ((from_pos < n) && (i < desc->nr_addrs)) { - unsigned long dest_paddr = desc->address[i]; + unsigned long dest_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; - if (dest_paddr == XENCOMM_INVALID) { - i++; - continue; - } + /* When crossing page boundary, machine address must be calculated. 
*/ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -EFAULT; + } + dest_paddr = *address; + if (dest_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = dest_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -308,7 +345,9 @@ xencomm_copy_to_guest( to_pos += bytes; } + skip_to_next: i++; + address++; } return n - from_pos; } @@ -322,11 +361,16 @@ xencomm_add_offset( { struct xencomm_desc *desc; unsigned long desc_addr; + struct xencomm_desc *desc_paddr; + unsigned long *address; int i = 0; if (XENCOMM_IS_INLINE(handle)) return (void *)((unsigned long)handle + bytes); + /* check if struct desc doesn't cross page boundry */ + if (xencomm_desc_cross_page_boundary((unsigned long)handle)) + return NULL; /* first we need to access the descriptor */ desc_addr = xencomm_paddr_to_maddr((unsigned long)handle); if (desc_addr == 0) @@ -337,18 +381,26 @@ xencomm_add_offset( printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); return NULL; } + desc_paddr = (struct xencomm_desc*)handle; + address = &desc->address[i]; /* iterate through the descriptor incrementing addresses */ while ((bytes > 0) && (i < desc->nr_addrs)) { - unsigned long dest_paddr = desc->address[i]; + unsigned long dest_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; - if (dest_paddr == XENCOMM_INVALID) { - i++; - continue; - } + /* When crossing page boundary, machine address must be calculated. 
*/ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return NULL; + } + dest_paddr = *address; + if (dest_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = dest_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -356,13 +408,15 @@ xencomm_add_offset( chunk_skip = min(chunksz, bytes); if (chunk_skip == chunksz) { /* exhausted this page */ - desc->address[i] = XENCOMM_INVALID; + *address = XENCOMM_INVALID; } else { - desc->address[i] += chunk_skip; + *address += chunk_skip; } bytes -= chunk_skip; - - i++; + + skip_to_next: + i++; + address++; } return handle; }
# HG changeset patch # User [EMAIL PROTECTED] # Date 1185949522 -32400 # Node ID dc2dfa31d0fefbf0b6112d993f7f889659de6355 # Parent c4d697f2367e389f3a2f1b6866ae971206198efc remove xencomm page size limit. Currently xencomm has page size limit so that a domain with many memory (e.g. 100GB+) can't be created. This patch allows that the address array of struct xencomm_desc to cross page boundary so that the size of struct xencomm_desc can exceed page size. Note that struct xencomm_desc itself can't page boundary. PATCHNAME: remove_xencomm_page_size_limit_xen_side Signed-off-by: Isaku Yamahata <[EMAIL PROTECTED]> diff -r c4d697f2367e -r dc2dfa31d0fe xen/common/xencomm.c --- a/xen/common/xencomm.c Wed Aug 01 15:11:54 2007 +0900 +++ b/xen/common/xencomm.c Wed Aug 01 15:25:22 2007 +0900 @@ -33,6 +33,15 @@ static int xencomm_debug = 1; /* extreme #else #define xencomm_debug 0 #endif + +static int +xencomm_desc_cross_page_boundary(unsigned long paddr) +{ + unsigned long offset = paddr & ~PAGE_MASK; + if (offset > PAGE_SIZE - sizeof(struct xencomm_desc)) + return 1; + return 0; +} static unsigned long xencomm_inline_from_guest(void *to, const void *from, unsigned int n, @@ -81,6 +90,8 @@ xencomm_copy_from_guest(void *to, const unsigned int skip) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; unsigned int from_pos = 0; unsigned int to_pos = 0; unsigned int i = 0; @@ -88,6 +99,9 @@ xencomm_copy_from_guest(void *to, const if (xencomm_is_inline(from)) return xencomm_inline_from_guest(to, from, n, skip); + /* check if struct desc doesn't cross page boundry */ + if (xencomm_desc_cross_page_boundary((unsigned long)from)) + return -EINVAL; /* first we need to access the descriptor */ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)from); if (desc == NULL) @@ -98,18 +112,26 @@ xencomm_copy_from_guest(void *to, const __func__, desc, desc->magic); return n; } + desc_paddr = (struct xencomm_desc *)from; + address = &desc->address[i]; 
/* iterate through the descriptor, copying up to a page at a time */ while ((to_pos < n) && (i < desc->nr_addrs)) { - unsigned long src_paddr = desc->address[i]; + unsigned long src_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; - if (src_paddr == XENCOMM_INVALID) { - i++; - continue; - } + /* When crossing page boundary, machine address must be calculated. */ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -EFAULT; + } + src_paddr = *address; + if (src_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = src_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -135,7 +157,9 @@ xencomm_copy_from_guest(void *to, const to_pos += bytes; } + skip_to_next: i++; + address++; } return n - to_pos; @@ -188,6 +212,8 @@ xencomm_copy_to_guest(void *to, const vo unsigned int skip) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; unsigned int from_pos = 0; unsigned int to_pos = 0; unsigned int i = 0; @@ -195,6 +221,9 @@ xencomm_copy_to_guest(void *to, const vo if (xencomm_is_inline(to)) return xencomm_inline_to_guest(to, from, n, skip); + /* check if struct desc doesn't cross page boundry */ + if (xencomm_desc_cross_page_boundary((unsigned long)to)) + return -EINVAL; /* first we need to access the descriptor */ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)to); if (desc == NULL) @@ -204,18 +233,26 @@ xencomm_copy_to_guest(void *to, const vo printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); return n; } + desc_paddr = (struct xencomm_desc*)to; + address = &desc->address[i]; /* iterate through the descriptor, copying up to a page at a time */ while ((from_pos < n) && (i < desc->nr_addrs)) { - unsigned long dest_paddr = desc->address[i]; + unsigned long dest_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; - if 
(dest_paddr == XENCOMM_INVALID) { - i++; - continue; - } + /* When crossing page boundary, machine address must be calculated. */ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -EFAULT; + } + dest_paddr = *address; + if (dest_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = dest_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -241,7 +278,9 @@ xencomm_copy_to_guest(void *to, const vo to_pos += bytes; } + skip_to_next: i++; + address++; } return n - from_pos; @@ -258,11 +297,16 @@ int xencomm_add_offset(void **handle, un int xencomm_add_offset(void **handle, unsigned int bytes) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; int i = 0; if (xencomm_is_inline(*handle)) return xencomm_inline_add_offset(handle, bytes); + /* check if struct desc doesn't cross page boundry */ + if (xencomm_desc_cross_page_boundary((unsigned long)handle)) + return -1; /* first we need to access the descriptor */ desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)*handle); if (desc == NULL) @@ -272,13 +316,26 @@ int xencomm_add_offset(void **handle, un printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic); return -1; } + desc_paddr = (struct xencomm_desc*)handle; + address = &desc->address[i]; /* iterate through the descriptor incrementing addresses */ while ((bytes > 0) && (i < desc->nr_addrs)) { - unsigned long dest_paddr = desc->address[i]; + unsigned long dest_paddr; unsigned int pgoffset; unsigned int chunksz; unsigned int chunk_skip; + + /* When crossing page boundary, machine address must be calculated. 
*/ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return -1; + } + dest_paddr = *address; + if (dest_paddr == XENCOMM_INVALID) + goto skip_to_next; pgoffset = dest_paddr % PAGE_SIZE; chunksz = PAGE_SIZE - pgoffset; @@ -286,11 +343,15 @@ int xencomm_add_offset(void **handle, un chunk_skip = min(chunksz, bytes); if (chunk_skip == chunksz) { /* exhausted this page */ - desc->address[i] = XENCOMM_INVALID; + *address = XENCOMM_INVALID; } else { - desc->address[i] += chunk_skip; + *address += chunk_skip; } bytes -= chunk_skip; + + skip_to_next: + i++; + address++; } return 0; } @@ -298,6 +359,8 @@ int xencomm_handle_is_null(void *handle) int xencomm_handle_is_null(void *handle) { struct xencomm_desc *desc; + struct xencomm_desc *desc_paddr; + unsigned long *address; int i; if (xencomm_is_inline(handle)) @@ -306,10 +369,21 @@ int xencomm_handle_is_null(void *handle) desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)handle); if (desc == NULL) return 1; - - for (i = 0; i < desc->nr_addrs; i++) - if (desc->address[i] != XENCOMM_INVALID) + desc_paddr = (struct xencomm_desc*)handle; + address = &desc->address[0]; + + for (i = 0; i < desc->nr_addrs; i++) { + /* When crossing page boundary, machine address must be calculated. */ + if (((unsigned long)address & ~PAGE_MASK) == 0) { + address = (unsigned long*)xencomm_paddr_to_maddr( + (unsigned long)&desc_paddr->address[i]); + if (address == NULL) + return 1; /* EFAULT? */ + } + + if (*address != XENCOMM_INVALID) return 0; + } return 1; }
_______________________________________________ Xen-ppc-devel mailing list Xen-ppc-devel@lists.xensource.com http://lists.xensource.com/xen-ppc-devel