[f2fs-dev] [PATCH 02/10] fs crypto: add crypto.c for encrypt/decrypt functions

2016-03-02, Jaegeuk Kim
This patch adds fs/crypto/crypto.c, which implements the encryption and decryption functions. The exported helpers fall into three groups (a usage sketch follows the list):

1. IO preparation:
  - fscrypt_get_ctx / fscrypt_release_ctx

2. before IOs:
  - fscrypt_encrypt_page
  - fscrypt_decrypt_page
  - fscrypt_zeroout_range

3. after IOs:
  - fscrypt_decrypt_bio_pages
  - fscrypt_pullback_bio_page
  - fscrypt_restore_control_page
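
As a usage sketch of the write path (not part of this patch): the helper
example_write_page() and the elided bio submission are hypothetical, and
error handling is abbreviated.

    #include <linux/err.h>
    #include <linux/fscrypto.h>

    /* Hypothetical write-path sketch (illustration only). */
    static int example_write_page(struct inode *inode, struct page *page)
    {
            struct page *ciphertext_page;

            /* "2. before IOs": takes a ctx and a bounce page internally,
             * then encrypts the locked pagecache page into the bounce
             * page. */
            ciphertext_page = fscrypt_encrypt_page(inode, page);
            if (IS_ERR(ciphertext_page))
                    return PTR_ERR(ciphertext_page);

            /* ...point the write bio at ciphertext_page and submit it... */

            /* "3. after IOs": free the bounce page and recover the
             * original pagecache page once the write has completed. */
            fscrypt_restore_control_page(ciphertext_page);
            return 0;
    }

fscrypt_encrypt_page() returns the bounce page that must be written out in
place of the original, so the caller holds on to it until the I/O has
completed.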

Signed-off-by: Michael Halcrow 
Signed-off-by: Ildar Muslukhov 
Signed-off-by: Theodore Ts'o 
Signed-off-by: Jaegeuk Kim 
---
 fs/crypto/crypto.c   | 507 +++
 include/linux/fscrypto.h |  60 ++
 2 files changed, 567 insertions(+)
 create mode 100644 fs/crypto/crypto.c

diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
new file mode 100644
index 0000000..928a34b
--- /dev/null
+++ b/fs/crypto/crypto.c
@@ -0,0 +1,507 @@
+/*
+ * This contains encryption functions for per-file encryption.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ * Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ * Ildar Muslukhov, 2014
+ * Add fscrypt_pullback_bio_page()
+ * Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <linux/fscrypto.h>
+#include <linux/ecryptfs.h>
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+   "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+   "Number of crypto contexts to preallocate");
+
+static mempool_t *fscrypt_bounce_page_pool = NULL;
+
+static LIST_HEAD(fscrypt_free_ctxs);
+static DEFINE_SPINLOCK(fscrypt_ctx_lock);
+
+static struct workqueue_struct *fscrypt_read_workqueue;
+static DEFINE_MUTEX(fscrypt_init_mutex);
+
+static struct kmem_cache *fscrypt_ctx_cachep;
+struct kmem_cache *fscrypt_info_cachep;
+
+/**
+ * fscrypt_release_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If there's a bounce page in the context, this frees that.
+ */
+void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+   unsigned long flags;
+
+   if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
+   mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
+   ctx->w.bounce_page = NULL;
+   }
+   ctx->w.control_page = NULL;
+   if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+   kmem_cache_free(fscrypt_ctx_cachep, ctx);
+   } else {
+   spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+   list_add(&ctx->free_list, &fscrypt_free_ctxs);
+   spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+   }
+}
+EXPORT_SYMBOL(fscrypt_release_ctx);
+
+/**
+ * fscrypt_get_ctx() - Gets an encryption context
+ * @inode:   The inode for which we are doing the crypto
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success; error
+ * value or NULL otherwise.
+ */
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
+{
+   struct fscrypt_ctx *ctx = NULL;
+   struct fscrypt_info *ci = inode->i_crypt_info;
+   unsigned long flags;
+
+   if (ci == NULL)
+   return ERR_PTR(-ENOKEY);
+
+   /*
+* We first try getting the ctx from a free list because in
+* the common case the ctx will have an allocated and
+* initialized crypto tfm, so it's probably a worthwhile
+* optimization. For the bounce page, we first try getting it
+* from the kernel allocator because that's just about as fast
+* as getting it from a list and because a cache of free pages
+* should generally be a "last resort" option for a filesystem
+* to be able to do its job.
+*/
+   spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+   ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
+   struct fscrypt_ctx, free_list);
+   if (ctx)
+   list_del(&ctx->free_list);
+   spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+   if (!ctx) {
+   ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+   if (!ctx)
+   return ERR_PTR(-ENOMEM);
+   ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+   } else {
+   ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+   }
+   ctx->flags &= ~FS_WRITE_PATH_FL;
+   return ctx;
+}
+EXPORT_SYMBOL(fscrypt_get_ctx);
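
A matching sketch of the read path driving the two ctx helpers above, where
decryption is deferred to the fscrypt read workqueue: example_submit_read(),
example_read_end_io(), and the bio plumbing are hypothetical, not part of
this patch, and error paths are abbreviated.

    #include <linux/bio.h>
    #include <linux/err.h>
    #include <linux/fscrypto.h>

    /* Hypothetical bio completion handler (illustration only). */
    static void example_read_end_io(struct bio *bio)
    {
            struct fscrypt_ctx *ctx = bio->bi_private;

            /* "3. after IOs": queues the bio for in-place decryption on
             * the fscrypt read workqueue; the ctx is released when that
             * work finishes. */
            fscrypt_decrypt_bio_pages(ctx, bio);
    }

    /* Hypothetical read submission path (illustration only). */
    static int example_submit_read(struct inode *inode, struct bio *bio)
    {
            /* "1. IO preparation": fails with -ENOKEY when no key has
             * been set up for the inode. */
            struct fscrypt_ctx *ctx = fscrypt_get_ctx(inode);

            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);

            bio->bi_private = ctx;
            bio->bi_end_io = example_read_end_io;
            submit_bio(READ, bio);
            return 0;
    }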
