
commit 8a287b6ad3b0fed62786278fbd5fd85a76b9b4e8
Author: Jaren Johnston <jaren's email>
Date:   Fri Jun 8 15:49:03 2012 -0700

    adjustment to adjust_sg_array

    Replaced the exponential requesting of memory with something more linear.

    Change-Id: I9a41805fc73b17596b4047c19307de6a460cb163

diff --git a/zc.c b/zc.c
index 96a011d..76f5369 100644
--- a/zc.c
+++ b/zc.c
@@ -82,16 +82,15 @@ int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
 int adjust_sg_array(struct csession * ses, int pagecount)
 {
-struct scatterlist *sg;
-struct page **pages;
-int array_size;
+              struct scatterlist *sg;
+              struct page **pages;
+              int array_size;
               for (array_size = ses->array_size; array_size < pagecount;
-                   array_size *= 2)
+                   array_size += DEFAULT_PREALLOC_PAGES)
                              ;
-
-              dprintk(2, KERN_DEBUG, "%s: reallocating to %d elements\n",
-                                              __func__, array_size);
+              dprintk(2, KERN_DEBUG, "%s: reallocating from %d to %d pages\n",
+                                              __func__, ses->array_size, array_size);
               pages = krealloc(ses->pages, array_size * sizeof(struct page *),
                                GFP_KERNEL);
               if (unlikely(!pages))
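
For readers skimming the patch, here is a small stand-alone user-space sketch contrasting the old doubling loop with the new fixed-increment loop. This is not the kernel code itself: DEFAULT_PREALLOC_PAGES is assumed to be 32 purely for illustration (the real value comes from cryptodev's headers), and "current" stands in for ses->array_size.

/*
 * Stand-alone illustration of the old vs. new growth policy, not part of
 * the patch.  DEFAULT_PREALLOC_PAGES is assumed to be 32 here; the real
 * value is defined in cryptodev's headers.
 */
#include <stdio.h>

#define DEFAULT_PREALLOC_PAGES 32

/* Old behaviour: double the array size until it covers pagecount. */
static int grow_exponential(int current, int pagecount)
{
	int array_size;

	for (array_size = current; array_size < pagecount; array_size *= 2)
		;
	return array_size;
}

/* New behaviour: grow in fixed DEFAULT_PREALLOC_PAGES steps. */
static int grow_linear(int current, int pagecount)
{
	int array_size;

	for (array_size = current; array_size < pagecount;
	     array_size += DEFAULT_PREALLOC_PAGES)
		;
	return array_size;
}

int main(void)
{
	int current = 32;     /* stands in for ses->array_size */
	int pagecount = 300;  /* pages the request actually needs */

	/* Doubling jumps 32 -> 512; fixed 32-page steps stop at 320. */
	printf("exponential: %d -> %d\n", current,
	       grow_exponential(current, pagecount));
	printf("linear:      %d -> %d\n", current,
	       grow_linear(current, pagecount));
	return 0;
}

The difference is the overshoot: for a 300-page request the doubling loop reallocates to 512 entries, while the fixed-step loop stops at 320.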

 

_______________________________________________
Cryptodev-linux-devel mailing list
Cryptodev-linux-devel@gna.org
https://mail.gna.org/listinfo/cryptodev-linux-devel
