
[RFC v2 5/5] tpm2-sessions: NOT FOR COMMITTING add sessions testing

2018-03-07 Thread James Bottomley
>From f69d2ec1bdddefa87c7130699c797cd5e24fcaf2 Mon Sep 17 00:00:00 2001
This runs through a preset sequence using sessions to demonstrate that
the session handling code functions.  It exercises HMAC, encryption and
decryption by testing an encrypted sealing operation with authority and
proving that the same sealed data comes back again via an HMAC and
response encryption.

Signed-off-by: James Bottomley 
---
 drivers/char/tpm/Makefile |   1 +
 drivers/char/tpm/tpm-chip.c   |   1 +
 drivers/char/tpm/tpm2-sessions-test.c | 177 ++
 3 files changed, 179 insertions(+)
 create mode 100644 drivers/char/tpm/tpm2-sessions-test.c

diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index b83737ccaa81..1ac7a4046630 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_TCG_TPM) += tpm.o
 tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
 tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
  tpm2-space.o tpm-buf.o tpm2-sessions.o
+obj-m +=  tpm2-sessions-test.o
 tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
 tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
 tpm-$(CONFIG_OF) += tpm_eventlog_of.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0a62c19937b6..ca174ee1e670 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -118,6 +118,7 @@ struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
 
return res;
 }
+EXPORT_SYMBOL(tpm_chip_find_get);
 
 /**
  * tpm_dev_release() - free chip memory and the device number
diff --git a/drivers/char/tpm/tpm2-sessions-test.c 
b/drivers/char/tpm/tpm2-sessions-test.c
new file mode 100644
index ..bd599648c971
--- /dev/null
+++ b/drivers/char/tpm/tpm2-sessions-test.c
@@ -0,0 +1,177 @@
+/* run a set of tests of the sessions code */
+#include "tpm.h"
+#include "tpm2-sessions.h"
+
+#include 
+
+int tpm2_sessions_test(void)
+{
+   struct tpm2_auth *auth;
+   struct tpm_buf buf, b1;
+   struct tpm_buf t2b;
+   struct tpm_chip *chip;
+   int rc;
+   char payload[29];
+   char *password = "Passw0Rd";
+   const u8 *p;
+   u32 h;
+   u8 name[34];
+   u16 len;
+   int ret = -EINVAL;
+
+   chip = tpm_chip_find_get(NULL);
+   if (!chip)
+   return -ENODEV;
+
+   if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+   return -ENODEV;
+
+   get_random_bytes(payload, sizeof(payload));
+
+   /* precursor: get a session */
+   rc = tpm2_start_auth_session(chip, &auth);
+   dev_info(&chip->dev, "TPM: start auth session returned %d\n", rc);
+   if (rc)
+   goto out;
+
+   /* first test: get random bytes from TPM */
+   tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
+   tpm_buf_append_hmac_session(&buf, auth, TPM2_SA_ENCRYPT
+   | TPM2_SA_CONTINUE_SESSION, NULL, 0);
+   tpm_buf_append_u16(&buf, 29);
+   tpm_buf_fill_hmac_session(&buf, auth);
+   rc = tpm_transmit_cmd(chip, &chip->kernel_space, buf.data, PAGE_SIZE,
+ 0, 0, "get random");
+   rc = tpm_buf_check_hmac_response(&buf, auth, rc);
+   dev_info(&chip->dev, "TPM: check hmac response returned %d\n", rc);
+   tpm_buf_destroy(&buf);
+
+   /*
+* second test, seal random data protecting sensitive by
+* encryption and also doing response encryption (not
+* necessary).  The encrypted payload has two components: an
+* authorization password which must be presented on unseal and
+* the actual data (the random payload)
+*/
+   tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+   tpm_buf_append_name(&buf, auth, chip->tpmkey, chip->tpmkeyname);
+   tpm_buf_append_hmac_session(&buf, auth, TPM2_SA_DECRYPT
+   | TPM2_SA_ENCRYPT
+   | TPM2_SA_CONTINUE_SESSION, NULL, 0);
+   /* sensitive */
+   tpm_buf_init_2b(&t2b);
+   /* the authorization */
+   tpm_buf_append_u16(&t2b, strlen(password));
+   tpm_buf_append(&t2b, password, strlen(password));
+   /* the payload */
+   tpm_buf_append_u16(&t2b, sizeof(payload));
+   tpm_buf_append(&t2b, payload, sizeof(payload));
+   tpm_buf_append_2b(&buf, &t2b);
+   /* the public */
+   /* type */
+   tpm_buf_append_u16(&t2b, TPM2_ALG_KEYEDHASH);
+   /* name hash */
+   tpm_buf_append_u16(&t2b, TPM2_ALG_SHA256);
+   /* object properties */
+   tpm_buf_append_u32(&t2b, TPM2_OA_USER_WITH_AUTH | TPM2_OA_NO_DA);
+   /* auth policy (empty) */
+   tpm_buf_append_u16(&t2b, 0);
+   /* keyed hash parameters (we're null for a non-HMAC data blob) */
+   tpm_buf_append_u16(&t2b, TPM2_ALG_NULL);
+   /* unique */
+   tpm_buf_append_u16(&t2b, 0);
+   tpm_buf_append_2b(&buf, &t2b);
+   /* outside info (also empty) */
+   tpm_buf_append_u16(&buf, 0);
+   /* 

[RFC v2 3/5] tpm2: add hmac checks to tpm2_pcr_extend()

2018-03-07 Thread James Bottomley
We use tpm2_pcr_extend() in trusted keys to extend a PCR to prevent a
key from being re-loaded until the next reboot.  To use this
functionality securely, that extend must be protected by a session
hmac.

Signed-off-by: James Bottomley 
---
 drivers/char/tpm/tpm2-cmd.c | 31 +++
 1 file changed, 11 insertions(+), 20 deletions(-)

diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 2042d4008b9c..a56cdd5d55ff 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -247,13 +247,6 @@ int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 
*res_buf)
return rc;
 }
 
-struct tpm2_null_auth_area {
-   __be32  handle;
-   __be16  nonce_size;
-   u8  attributes;
-   __be16  auth_size;
-} __packed;
-
 /**
  * tpm2_pcr_extend() - extend a PCR value
  *
@@ -268,7 +261,7 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, u32 
count,
struct tpm2_digest *digests)
 {
struct tpm_buf buf;
-   struct tpm2_null_auth_area auth_area;
+   struct tpm2_auth *auth;
int rc;
int i;
int j;
@@ -276,20 +269,17 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, 
u32 count,
if (count > ARRAY_SIZE(chip->active_banks))
return -EINVAL;
 
-   rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+   rc = tpm2_start_auth_session(chip, &auth);
if (rc)
return rc;
 
-   tpm_buf_append_u32(&buf, pcr_idx);
+   rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND);
+   if (rc)
+   return rc;
 
-   auth_area.handle = cpu_to_be32(TPM2_RS_PW);
-   auth_area.nonce_size = 0;
-   auth_area.attributes = 0;
-   auth_area.auth_size = 0;
+   tpm_buf_append_name(&buf, auth, pcr_idx, NULL);
+   tpm_buf_append_hmac_session(&buf, auth, 0, NULL, 0);
 
-   tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area));
-   tpm_buf_append(&buf, (const unsigned char *)&auth_area,
-  sizeof(auth_area));
tpm_buf_append_u32(&buf, count);
 
for (i = 0; i < count; i++) {
@@ -302,9 +292,10 @@ int tpm2_pcr_extend(struct tpm_chip *chip, int pcr_idx, 
u32 count,
   hash_digest_size[tpm2_hash_map[j].crypto_id]);
}
}
-
-   rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
- "attempting extend a PCR value");
+   tpm_buf_fill_hmac_session(&buf, auth);
+   rc = tpm_transmit_cmd(chip, &chip->kernel_space, buf.data, PAGE_SIZE,
+ 0, 0, "attempting extend a PCR value");
+   rc = tpm_buf_check_hmac_response(&buf, auth, rc);
 
tpm_buf_destroy(&buf);
 
-- 
2.12.3


[RFC v2 2/5] tpm2-sessions: Add full HMAC and encrypt/decrypt session handling

2018-03-07 Thread James Bottomley
This code adds true session based HMAC authentication plus parameter
decryption and response encryption using AES.

The basic design of this code is to segregate all the nasty crypto,
hash and hmac code into tpm2-sessions.c and export a usable API.

The API first of all starts off by gaining a session with

tpm2_start_auth_session()

which initiates a session with the TPM and allocates an opaque
tpm2_auth structure to handle the session parameters.  Then usage is
simply:

* tpm_buf_append_name() in place of the tpm_buf_append_u32 for the
  handles

* tpm_buf_append_hmac_session() where tpm2_append_auth() would go

* tpm_buf_fill_hmac_session() called after the entire command buffer
  is finished but before tpm_transmit_cmd() is called which computes
  the correct HMAC and places it in the command at the correct
  location.

Finally, after tpm_transmit_cmd() is called,
tpm_buf_check_hmac_response() is called to check that the returned
HMAC matched and collect the new state for the next use of the
session, if any.

The features of the session are controlled by the session attributes
set in tpm_buf_append_hmac_session().  If TPM2_SA_CONTINUE_SESSION is
not specified, the session will be flushed and the tpm2_auth structure
freed in tpm_buf_check_hmac_response(); otherwise the session may be
used again.  Parameter encryption is specified by or'ing the flag
TPM2_SA_DECRYPT and response encryption by or'ing the flag
TPM2_SA_ENCRYPT.  The various encryptions will be taken care of by
tpm_buf_fill_hmac_session() and tpm_buf_check_hmac_response()
respectively.
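
For illustration, a caller following this sequence looks roughly like the
sketch below (mirroring the get-random test in the final patch of the
series; num_bytes is a placeholder and most error handling is elided):

	struct tpm2_auth *auth;
	struct tpm_buf buf;
	int rc;

	rc = tpm2_start_auth_session(chip, &auth);
	if (rc)
		return rc;

	tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
	/* HMAC session with response encryption, kept alive for reuse */
	tpm_buf_append_hmac_session(&buf, auth,
				    TPM2_SA_ENCRYPT | TPM2_SA_CONTINUE_SESSION,
				    NULL, 0);
	tpm_buf_append_u16(&buf, num_bytes);
	/* compute the command HMAC and do any parameter encryption */
	tpm_buf_fill_hmac_session(&buf, auth);
	rc = tpm_transmit_cmd(chip, &chip->kernel_space, buf.data, PAGE_SIZE,
			      0, 0, "get random");
	/* check the response HMAC, decrypt it and update the session state */
	rc = tpm_buf_check_hmac_response(&buf, auth, rc);
	tpm_buf_destroy(&buf);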

To get all of this to work securely, the Kernel now needs a primary
key to encrypt the session salt to, so we derive an EC key from the
NULL seed and store it in the tpm_chip structure.  We also make sure
that this seed remains for the kernel by using a kernel space to take
it out of the TPM when userspace wants to use it.

Signed-off-by: James Bottomley 

---

v2: Added docbook and improved response check API
---
 drivers/char/tpm/Kconfig |3 +
 drivers/char/tpm/Makefile|2 +-
 drivers/char/tpm/tpm.h   |   26 +
 drivers/char/tpm/tpm2-cmd.c  |   22 +-
 drivers/char/tpm/tpm2-sessions.c | 1030 ++
 drivers/char/tpm/tpm2-sessions.h |   56 +++
 6 files changed, 1126 insertions(+), 13 deletions(-)
 create mode 100644 drivers/char/tpm/tpm2-sessions.c
 create mode 100644 drivers/char/tpm/tpm2-sessions.h

diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 0aee88df98d1..8c714d8550c4 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -8,6 +8,9 @@ menuconfig TCG_TPM
select SECURITYFS
select CRYPTO
select CRYPTO_HASH_INFO
+   select CRYPTO_ECDH
+   select CRYPTO_AES
+   select CRYPTO_CFB
---help---
  If you have a TPM security chip in your system, which
  implements the Trusted Computing Group's specification,
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 41b2482b97c3..b83737ccaa81 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_TCG_TPM) += tpm.o
 tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
 tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
- tpm2-space.o tpm-buf.o
+ tpm2-space.o tpm-buf.o tpm2-sessions.o
 tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
 tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
 tpm-$(CONFIG_OF) += tpm_eventlog_of.o
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 51c1fab2354c..1b495d748f90 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -42,6 +42,9 @@
 #include 
 #endif
 
+/* fixed define for the curve we use which is NIST_P256 */
+#define EC_PT_SZ   32
+
 enum tpm_const {
TPM_MINOR = 224,/* officially assigned */
TPM_BUFSIZE = 4096,
@@ -93,6 +96,7 @@ enum tpm2_const {
 enum tpm2_structures {
TPM2_ST_NO_SESSIONS = 0x8001,
TPM2_ST_SESSIONS= 0x8002,
+   TPM2_ST_CREATION= 0x8021,
 };
 
 /* Indicates from what layer of the software stack the error comes from */
@@ -114,16 +118,25 @@ enum tpm2_return_codes {
 enum tpm2_algorithms {
TPM2_ALG_ERROR  = 0x,
TPM2_ALG_SHA1   = 0x0004,
+   TPM2_ALG_AES= 0x0006,
TPM2_ALG_KEYEDHASH  = 0x0008,
TPM2_ALG_SHA256 = 0x000B,
TPM2_ALG_SHA384 = 0x000C,
TPM2_ALG_SHA512 = 0x000D,
TPM2_ALG_NULL   = 0x0010,
TPM2_ALG_SM3_256= 0x0012,
+   TPM2_ALG_ECC= 0x0023,
+   TPM2_ALG_CFB= 0x0043,
+};
+
+enum tpm2_curves {
+   TPM2_ECC_NONE   = 0x,
+   TPM2_ECC_NIST_P256  = 0x0003,
 };
 
 enum tpm2_command_codes {
TPM2_CC_FIRST   = 0x011F,
+   TPM2_CC_CREATE_PRIMARY  = 

[RFC v2 1/5] tpm-buf: create new functions for handling TPM buffers

2018-03-07 Thread James Bottomley
This separates the old tpm_buf_... handling functions out of their
static inlines in tpm.h and moves them into their own tpm-buf.c file.
It also adds handling for tpm2b structures and incremental
pointer-advancing parsers.

Signed-off-by: James Bottomley 

---

v2: added this patch to separate out the API changes
---
 drivers/char/tpm/Makefile  |   2 +-
 drivers/char/tpm/tpm-buf.c | 184 +
 drivers/char/tpm/tpm.h |  94 ---
 3 files changed, 200 insertions(+), 80 deletions(-)
 create mode 100644 drivers/char/tpm/tpm-buf.c

diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index d37c4a1748f5..41b2482b97c3 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_TCG_TPM) += tpm.o
 tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
 tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
- tpm2-space.o
+ tpm2-space.o tpm-buf.o
 tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
 tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
 tpm-$(CONFIG_OF) += tpm_eventlog_of.o
diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c
new file mode 100644
index ..8e674f410823
--- /dev/null
+++ b/drivers/char/tpm/tpm-buf.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Handling for tpm2b structures to facilitate the building of commands
+ */
+
+#include "tpm.h"
+
+#include 
+
+#include 
+
+static int __tpm_buf_init(struct tpm_buf *buf)
+{
+   buf->data_page = alloc_page(GFP_HIGHUSER);
+   if (!buf->data_page)
+   return -ENOMEM;
+
+   buf->flags = 0;
+   buf->data = kmap(buf->data_page);
+
+   return 0;
+}
+
+int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+   struct tpm_input_header *head;
+   int rc;
+
+   rc = __tpm_buf_init(buf);
+   if (rc)
+   return rc;
+
+   head = (struct tpm_input_header *) buf->data;
+
+   head->tag = cpu_to_be16(tag);
+   head->length = cpu_to_be32(sizeof(*head));
+   head->ordinal = cpu_to_be32(ordinal);
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_init);
+
+int tpm_buf_init_2b(struct tpm_buf *buf)
+{
+   struct tpm_input_header *head;
+   int rc;
+
+   rc = __tpm_buf_init(buf);
+   if (rc)
+   return rc;
+
+   head = (struct tpm_input_header *) buf->data;
+
+   head->length = cpu_to_be32(sizeof(*head));
+
+   buf->flags = TPM_BUF_2B;
+   return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_init_2b);
+
+void tpm_buf_destroy(struct tpm_buf *buf)
+{
+   kunmap(buf->data_page);
+   __free_page(buf->data_page);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_destroy);
+
+static void *tpm_buf_data(struct tpm_buf *buf)
+{
+   if (buf->flags & TPM_BUF_2B)
+   return buf->data + TPM_HEADER_SIZE;
+   return buf->data;
+}
+
+u32 tpm_buf_length(struct tpm_buf *buf)
+{
+   struct tpm_input_header *head = (struct tpm_input_header *)buf->data;
+   u32 len;
+
+   len = be32_to_cpu(head->length);
+   if (buf->flags & TPM_BUF_2B)
+   len -= sizeof(*head);
+   return len;
+}
+EXPORT_SYMBOL_GPL(tpm_buf_length);
+
+u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+   struct tpm_input_header *head = (struct tpm_input_header *)buf->data;
+
+   return be16_to_cpu(head->tag);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_tag);
+
+void tpm_buf_append(struct tpm_buf *buf,
+   const unsigned char *new_data,
+   unsigned int new_len)
+{
+   struct tpm_input_header *head = (struct tpm_input_header *) buf->data;
+   u32 len = be32_to_cpu(head->length);
+
+   /* Return silently if overflow has already happened. */
+   if (buf->flags & TPM_BUF_OVERFLOW)
+   return;
+
+   if ((len + new_len) > PAGE_SIZE) {
+   WARN(1, "tpm_buf: overflow\n");
+   buf->flags |= TPM_BUF_OVERFLOW;
+   return;
+   }
+
+   memcpy(&buf->data[len], new_data, new_len);
+   head->length = cpu_to_be32(len + new_len);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append);
+
+void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+   tpm_buf_append(buf, &value, 1);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u8);
+
+void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+   __be16 value2 = cpu_to_be16(value);
+
+   tpm_buf_append(buf, (u8 *)&value2, 2);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u16);
+
+void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+   __be32 value2 = cpu_to_be32(value);
+
+   tpm_buf_append(buf, (u8 *)&value2, 4);
+}
+EXPORT_SYMBOL_GPL(tpm_buf_append_u32);
+
+static void tpm_buf_reset(struct tpm_buf *buf)
+{
+   struct tpm_input_header *head;
+
+   head = (struct tpm_input_header *)buf->data;
+   head->length = cpu_to_be32(sizeof(*head));
+}
+
+void tpm_buf_append_2b(struct tpm_buf *buf, struct tpm_buf *tpm2b)
+{
+   

[RFC 0/5] add integrity and security to TPM2 transactions

2018-03-07 Thread James Bottomley
By now, everybody knows we have a problem with the TPM2_RS_PW easy
button on TPM2 in that transactions on the TPM bus can be intercepted
and altered.  The way to fix this is to use real sessions for HMAC
capabilities to ensure integrity and to use parameter and response
encryption to ensure confidentiality of the data flowing over the TPM
bus.

This RFC is about adding a simple API which can ensure the above
properties as a layered addition to the existing TPM handling code.
 Eventually we can add this to the random number generator, the PCR
extensions and the trusted key handling, but this all depends on the
conversion to tpm_buf which is not yet upstream, so I've constructed a
second patch which demonstrates the new API in a test module for those
who wish to play with it.

This series is also dependent on additions to the crypto subsystem to
fix problems in the elliptic curve key handling and add the Cipher
FeedBack encryption scheme:

https://marc.info/?l=linux-crypto-vger&m=151994371015475

In the second version, I added a security HMAC to our PCR extend,
encryption to the returned random numbers, and also extracted the
parsing and tpm2b construction API into a new file.

James

---

James Bottomley (5):
  tpm-buf: create new functions for handling TPM buffers
  tpm2-sessions: Add full HMAC and encrypt/decrypt session handling
  tpm2: add hmac checks to tpm2_pcr_extend()
  tpm2: add session encryption protection to tpm2_get_random()
  tpm2-sessions: NOT FOR COMMITTING add sessions testing

 drivers/char/tpm/Kconfig  |3 +
 drivers/char/tpm/Makefile |3 +-
 drivers/char/tpm/tpm-buf.c|  184 ++
 drivers/char/tpm/tpm-chip.c   |1 +
 drivers/char/tpm/tpm.h|  116 ++--
 drivers/char/tpm/tpm2-cmd.c   |  129 ++---
 drivers/char/tpm/tpm2-sessions-test.c |  177 ++
 drivers/char/tpm/tpm2-sessions.c  | 1030 +
 drivers/char/tpm/tpm2-sessions.h  |   56 ++
 9 files changed, 1548 insertions(+), 151 deletions(-)
 create mode 100644 drivers/char/tpm/tpm-buf.c
 create mode 100644 drivers/char/tpm/tpm2-sessions-test.c
 create mode 100644 drivers/char/tpm/tpm2-sessions.c
 create mode 100644 drivers/char/tpm/tpm2-sessions.h

-- 
2.12.3


[PATCH] crypto/ecc: Remove stack VLA usage

2018-03-07 Thread Kees Cook
On the quest to remove all VLAs from the kernel[1], this switches to
a pair of kmalloc regions instead of using the stack. This also moves
the get_random_bytes() after all allocations (and drops the needless
"nbytes" variable).

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kees Cook 
---
 crypto/ecc.c | 23 +--
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/crypto/ecc.c b/crypto/ecc.c
index 18f32f2a5e1c..5bfa63603da0 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1025,9 +1025,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, 
unsigned int ndigits,
 {
int ret = 0;
struct ecc_point *product, *pk;
-   u64 priv[ndigits];
-   u64 rand_z[ndigits];
-   unsigned int nbytes;
+   u64 *priv, *rand_z;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
 
if (!private_key || !public_key || !curve) {
@@ -1035,14 +1033,22 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, 
unsigned int ndigits,
goto out;
}
 
-   nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+   priv = kmalloc_array(ndigits, sizeof(*priv), GFP_KERNEL);
+   if (!priv) {
+   ret = -ENOMEM;
+   goto out;
+   }
 
-   get_random_bytes(rand_z, nbytes);
+   rand_z = kmalloc_array(ndigits, sizeof(*rand_z), GFP_KERNEL);
+   if (!rand_z) {
+   ret = -ENOMEM;
+   goto kfree_out;
+   }
 
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
-   goto out;
+   goto kfree_out;
}
 
product = ecc_alloc_point(ndigits);
@@ -1051,6 +1057,8 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, 
unsigned int ndigits,
goto err_alloc_product;
}
 
+   get_random_bytes(rand_z, ndigits << ECC_DIGITS_TO_BYTES_SHIFT);
+
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ecc_swap_digits(private_key, priv, ndigits);
@@ -1065,6 +1073,9 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, 
unsigned int ndigits,
ecc_free_point(product);
 err_alloc_product:
ecc_free_point(pk);
+kfree_out:
+   kfree(priv);
+   kfree(rand_z);
 out:
return ret;
 }
-- 
2.7.4


-- 
Kees Cook
Pixel Security


Re: [PATCH v3 3/4] crypto: jz4780-rng: Add RNG node to jz4780.dtsi

2018-03-07 Thread Rob Herring
On Wed, Mar 7, 2018 at 8:54 AM, PrasannaKumar Muralidharan
 wrote:
> Hi Rob,
>
> On 6 March 2018 at 19:25, Rob Herring  wrote:
>> On Tue, Mar 6, 2018 at 3:32 AM, James Hogan  wrote:
>>> On Mon, Sep 18, 2017 at 07:32:40PM +0530, PrasannaKumar Muralidharan wrote:
 Add RNG node to jz4780 dtsi. This driver uses registers that are part of
 the register set used by Ingenic CGU driver. Use regmap in RNG driver to
 access its register. Create 'simple-bus' node, make CGU and RNG node as
 child of it so that both the nodes are visible without changing CGU
 driver code.
>>
>> The goal should be to avoid changing the DT (because the h/w hasn't
>> changed), not avoid changing a driver.
>
> Please have a look at the discussion happened at
> https://patchwork.linux-mips.org/patch/14094/. Looks like there is a
> difference in thought process between you and mips folks. I am not an
> expert in DT so please suggest me the correct way to go about this.

Those comments are correct too.

Simply, there is no reason you have to add a rng node to add an RNG
driver. The driver that probes the existing node can register both
clocks and as an RNG provider.

Rob


Re: [PATCH v3 3/4] crypto: jz4780-rng: Add RNG node to jz4780.dtsi

2018-03-07 Thread Paul Cercueil

Hi PrasannaKumar,

Le 2018-03-07 15:51, PrasannaKumar Muralidharan a écrit :
> Hi Paul,
>
> On 7 March 2018 at 04:31, Paul Cercueil  wrote:
>> Le 2018-03-06 10:32, James Hogan a écrit :
>>> On Mon, Sep 18, 2017 at 07:32:40PM +0530, PrasannaKumar Muralidharan wrote:
>>>> Add RNG node to jz4780 dtsi. This driver uses registers that are part of
>>>> the register set used by Ingenic CGU driver. Use regmap in RNG driver to
>>>> access its register. Create 'simple-bus' node, make CGU and RNG node as
>>>> child of it so that both the nodes are visible without changing CGU
>>>> driver code.
>>>>
>>>> Signed-off-by: PrasannaKumar Muralidharan 
>>>
>>> Better late than never:
>>> Acked-by: James Hogan 
>>>
>>> (I presume its okay for the reg ranges to overlap, ISTR that being an
>>> issue a few years ago, but maybe thats fixed now).
>>>
>>> Cheers
>>> James
>>
>> What bothers me is that the CGU code has not been modified to use regmap,
>> so the registers area is actually mapped twice (once in the CGU driver,
>> once with regmap).
>
> One of my previous versions changed CGU code to use regmap. I got a
> review comment saying that is not required
> (https://patchwork.kernel.org/patch/9906889/). The points in the
> comment were valid so I reverted the change. Please have a look at the
> discussion.

I don't know, the point of regmap is for when a register area is shared.
It does not make sense to me to have one driver use regmap and not the
other one.

>> Besides, regmap would be useful if the RNG registers were actually
>> located in the middle of the register area used by the CGU driver,
>> which is not the case here.
>> The CGU block does have some registers after the RNG ones on the X1000
>> SoC, but I don't think they will ever be used (and if they are it won't
>> be by the CGU driver).
>>
>> Regards,
>> -Paul
>
> Ingenic M200 SoC's CGU has clock and power related registers after the
> RNG registers. Paul Burton suggested using regmap to expose registers
> to CGU and RNG drivers
> (https://patchwork.linux-mips.org/patch/14094/).

Where can I find the M200 programming manual? The M200's CGU might have
some registers located after the RNG ones, but that does not mean that
they will be used by the clocks driver.

Thanks,
-Paul


Re: [PATCH] X.509: unpack RSA signatureValue field from BIT STRING

2018-03-07 Thread Maciej S. Szmigiero
On 07.03.2018 16:44, David Howells wrote:
> Maciej S. Szmigiero  wrote:
> 
>> +if (!strcmp(ctx->cert->sig->pkey_algo, "rsa")) {
> 
> I'm going to change this to '== 0' rather than '!'.

No problem. 
> David
> 

Thanks,
Maciej


[PATCH] crypto/ccp - Fill the result buffer only on digest, finup, and final ops

2018-03-07 Thread Gary R Hook
Any change to the result buffer should only happen on final, finup
and digest operations. Changes to the buffer for update, import, export,
etc, are not allowed.

Fixes: 66d7b9f6175e ("crypto: testmgr - test misuse of result in ahash")
Signed-off-by: Gary R Hook 
Cc: 
---
 0 files changed

diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c 
b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 60fc0fa26fd3..26687f318de6 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request 
*async_req,
}
 
/* Update result area if supplied */
-   if (req->result)
+   if (req->result && rctx->final)
memcpy(req->result, rctx->iv, digest_size);
 
 e_free:
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c 
b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8b9b16d433f7..871c9628a2ee 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -47,7 +47,7 @@ static int ccp_sha_complete(struct crypto_async_request 
*async_req, int ret)
}
 
/* Update result area if supplied */
-   if (req->result)
+   if (req->result && rctx->final)
memcpy(req->result, rctx->ctx, digest_size);
 
 e_free:



[PATCH] crypto/ccp: Validate buffer lengths for copy operations

2018-03-07 Thread Gary R Hook
The CCP driver copies data between scatter/gather lists and DMA buffers.
The length of the requested copy operation must be checked against
the available destination buffer length.

Reported-by: Maciej S. Szmigiero 
Signed-off-by: Gary R Hook 
---
 0 files changed

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 406b95329b3d..0ea43cdeb05f 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea 
*wa,
return 0;
 }
 
-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
-   struct scatterlist *sg, unsigned int sg_offset,
-   unsigned int len)
+static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+  struct scatterlist *sg, unsigned int sg_offset,
+  unsigned int len)
 {
WARN_ON(!wa->address);
 
+   if (len > (wa->length - wa_offset))
+   return -EINVAL;
+
scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
 0);
+   return 0;
 }
 
 static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea 
*wa,
   unsigned int len)
 {
u8 *p, *q;
+   int rc;
 
-   ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+   rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+   if (rc)
+   return rc;
 
p = wa->address + wa_offset;
q = p + len - 1;
@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
return ret;
 
dm_offset = CCP_SB_BYTES - aes->key_len;
-   ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+   ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+   if (ret)
+   goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
goto e_key;
 
dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
-   ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+   ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+   if (ret)
+   goto e_ctx;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue 
*cmd_q,
goto e_src;
}
 
-   ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
-   aes->cmac_key_len);
+   ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+ aes->cmac_key_len);
+   if (ret)
+   goto e_src;
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -666,7 +679,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
return ret;
 
dm_offset = CCP_SB_BYTES - aes->key_len;
-   ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+   ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+   if (ret)
+   goto e_key;
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -685,7 +700,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
goto e_key;
 
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
-   ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+   ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+   if (ret)
+   goto e_ctx;
 
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -777,7 +794,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
goto e_dst;
}
 
-   ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+   ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+   if (ret)
+   goto e_dst;
 
ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
 CCP_PASSTHRU_BYTESWAP_256BIT);
@@ -820,7 +839,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
   DMA_BIDIRECTIONAL);
if (ret)
goto e_tag;
-   ccp_set_dm_area(, 

Re: [PATCH v2 2/2] hwrng: mxc-rnga - add driver support on boards with device tree

2018-03-07 Thread Kim Phillips
On Tue, 6 Mar 2018 00:21:00 +0200
Vladimir Zapolskiy  wrote:

> The driver works well on i.MX31 powered boards with device description
> taken from board device tree, the only change to add to the driver is
> the missing OF device id, the affected list of included headers and
> indentation in platform driver struct are beautified a little.
> 
> Signed-off-by: Vladimir Zapolskiy 
> ---
> Changes from v1 to v2:
> * a kernel for iMX boards is always built with multiplatform support,
>   thus CONFIG_OF guards were removed, thanks to Kim Phillips for review,

That's not necessarily the reason, e.g., of_match_table is available to
be assigned even if CONFIG_OF is not set.  Recall, I tested building
without CONFIG_OF by removing the SOC_IMX31 dependency in Kconfig, and
building with netwinder_defconfig as a base.

Nevertheless, this v2 is much easier to review without the ifdef
CONFIG_OF, so:

Reviewed-by: Kim Phillips 

Thanks,

Kim

p.s. my responses typically have lower latencies when I'm added to cc.


Re: [PATCH] X.509: unpack RSA signatureValue field from BIT STRING

2018-03-07 Thread David Howells
Maciej S. Szmigiero  wrote:

> + if (!strcmp(ctx->cert->sig->pkey_algo, "rsa")) {

I'm going to change this to '== 0' rather than '!'.

David


Re: [PATCH v9 crypto 08/12] chtls: Key program

2018-03-07 Thread Sabrina Dubroca
2018-03-06, 21:09:27 +0530, Atul Gupta wrote:
[snip]
> +static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
> +{
> + struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> + struct sk_buff *skb;
> + struct cpl_set_tcb_field *req;
> + struct ulptx_idata *sc;
> + unsigned int wrlen = roundup(sizeof(*req) + sizeof(*sc), 16);
> + unsigned int credits_needed = DIV_ROUND_UP(wrlen, 16);
> +
> + skb = alloc_skb(wrlen, GFP_ATOMIC);

I haven't followed the whole call chain, but does this really need to
be an atomic allocation?
Should this skb be charged to the socket's memory accounting?

> + if (!skb)
> + return -ENOMEM;
> +
> + __set_tcb_field(sk, skb, word, mask, val, 0, 1);
> + set_queue(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA, sk);
> + csk->wr_credits -= credits_needed;
> + csk->wr_unacked += credits_needed;
> + enqueue_wr(csk, skb);
> + cxgb4_ofld_send(csk->egress_dev, skb);

Should the return value be checked?

> + return 0;
> +}


[snip]
> +static void *chtls_alloc_mem(unsigned long size)
> +{
> + void *p = kmalloc(size, GFP_KERNEL);
> +
> + if (!p)
> + p = vmalloc(size);
> + if (p)
> + memset(p, 0, size);
> + return p;
> +}

I think you replace this with kvzalloc().

> +static void chtls_free_mem(void *addr)
> +{
> + unsigned long p = (unsigned long)addr;
> +
> + if (p >= VMALLOC_START && p < VMALLOC_END)
> + vfree(addr);
> + else
> + kfree(addr);
> +}

Use kvfree().
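
(Sketching what those two replacements would look like; this is not code
from the posted patch, and at that point the wrappers could arguably be
dropped entirely in favour of calling kvzalloc()/kvfree() at the call
sites:)

	static void *chtls_alloc_mem(unsigned long size)
	{
		return kvzalloc(size, GFP_KERNEL);
	}

	static void chtls_free_mem(void *addr)
	{
		kvfree(addr);
	}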

> +/* TLS Key bitmap processing */
> +int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
> +{
> + unsigned int num_key_ctx, bsize;
> +
> + num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);
> + bsize = BITS_TO_LONGS(num_key_ctx);
> +
> + cdev->kmap.size = num_key_ctx;
> + cdev->kmap.available = bsize;
> + cdev->kmap.addr = chtls_alloc_mem(sizeof(*cdev->kmap.addr) *
> +   bsize);
> + if (!cdev->kmap.addr)
> + return -1;

The return value of this function is never checked.

> +
> + cdev->kmap.start = lldi->vr->key.start;
> + spin_lock_init(&cdev->kmap.lock);
> + return 0;
> +}
> +
> +void chtls_free_kmap(struct chtls_dev *cdev)
> +{
> + if (cdev->kmap.addr)
> + chtls_free_mem(cdev->kmap.addr);
> +}

I think this wrapper function is not necessary.

> +static int get_new_keyid(struct chtls_sock *csk, u32 optname)
> +{
> + struct chtls_dev *cdev = csk->cdev;
> + struct chtls_hws *hws = &csk->tlshws;
> + struct net_device *dev = csk->egress_dev;
> + struct adapter *adap = netdev2adap(dev);
> + int keyid;
> +
> + spin_lock_bh(&cdev->kmap.lock);
> + keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size);
> + if (keyid < cdev->kmap.size) {
> + __set_bit(keyid, cdev->kmap.addr);
> + if (optname == TLS_RX)

TLS_RX only gets defined in patch 11, so the only reason you're not
breaking the build is because all these new files aren't getting
compiled until patch 12.

> + hws->rxkey = keyid;
> + else
> + hws->txkey = keyid;
> + atomic_inc(&adap->chcr_stats.tls_key);
> + } else {
> + keyid = -1;
> + }
> + spin_unlock_bh(&cdev->kmap.lock);
> + return keyid;
> +}
> +

[snip]
> +int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 optname)
> +{
...
>+  skb = alloc_skb(len, GFP_KERNEL);
>+  if (!skb)
>+  return -ENOMEM;

This return code is also ignored by its caller.  Please review the
whole patchset for that problem.

Same question as before, does this need to be accounted?


> + /* ulptx command */
> + kwr->req.cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
> + T5_ULP_MEMIO_ORDER_V(1) |
> + T5_ULP_MEMIO_IMM_V(1));
> + kwr->req.len16 = cpu_to_be32((csk->tid << 8) |
> +   DIV_ROUND_UP(len - sizeof(kwr->wr), 16));
> + kwr->req.dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(klen >> 5));
> + kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(keyid_to_addr
> + (cdev->kmap.start, keyid)));

Breaking this line in that way makes it really hard to read for
humans.
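
(One possible restructuring, purely as a sketch with an assumed u32 local,
is to compute the address first so the assignment stays on one line:)

	u32 lock_addr = keyid_to_addr(cdev->kmap.start, keyid);

	kwr->req.lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(lock_addr));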

-- 
Sabrina


Re: [PATCH v3 3/4] crypto: jz4780-rng: Add RNG node to jz4780.dtsi

2018-03-07 Thread PrasannaKumar Muralidharan
Hi Rob,

On 6 March 2018 at 19:25, Rob Herring  wrote:
> On Tue, Mar 6, 2018 at 3:32 AM, James Hogan  wrote:
>> On Mon, Sep 18, 2017 at 07:32:40PM +0530, PrasannaKumar Muralidharan wrote:
>>> Add RNG node to jz4780 dtsi. This driver uses registers that are part of
>>> the register set used by Ingenic CGU driver. Use regmap in RNG driver to
>>> access its register. Create 'simple-bus' node, make CGU and RNG node as
>>> child of it so that both the nodes are visible without changing CGU
>>> driver code.
>
> The goal should be to avoid changing the DT (because the h/w hasn't
> changed), not avoid changing a driver.

Please have a look at the discussion happened at
https://patchwork.linux-mips.org/patch/14094/. Looks like there is a
difference in thought process between you and mips folks. I am not an
expert in DT so please suggest me the correct way to go about this.

>>>
>>> Signed-off-by: PrasannaKumar Muralidharan 
>>
>> Better late than never:
>> Acked-by: James Hogan 
>>
>> (I presume its okay for the reg ranges to overlap, ISTR that being an
>> issue a few years ago, but maybe thats fixed now).
>
> No, that should be avoided. It does work because we have existing
> cases that have to be supported.

I am sorry but I require guidance here. Do you have any suggestion on
how this should be done?

>>
>> Cheers
>> James

Thanks and regards,
PrasannaKumar


Re: [PATCH v3 3/4] crypto: jz4780-rng: Add RNG node to jz4780.dtsi

2018-03-07 Thread PrasannaKumar Muralidharan
Hi Paul,

On 7 March 2018 at 04:31, Paul Cercueil  wrote:
> Le 2018-03-06 10:32, James Hogan a écrit :
>>
>> On Mon, Sep 18, 2017 at 07:32:40PM +0530, PrasannaKumar Muralidharan
>> wrote:
>>>
>>> Add RNG node to jz4780 dtsi. This driver uses registers that are part of
>>> the register set used by Ingenic CGU driver. Use regmap in RNG driver to
>>> access its register. Create 'simple-bus' node, make CGU and RNG node as
>>> child of it so that both the nodes are visible without changing CGU
>>> driver code.
>>>
>>> Signed-off-by: PrasannaKumar Muralidharan 
>>
>>
>> Better late than never:
>> Acked-by: James Hogan 
>>
>> (I presume its okay for the reg ranges to overlap, ISTR that being an
>> issue a few years ago, but maybe thats fixed now).
>>
>> Cheers
>> James
>
>
> What bothers me is that the CGU code has not been modified to use regmap, so
> the
> registers area is actually mapped twice (once in the CGU driver, once with
> regmap).

One of my previous versions changed CGU code to use regmap. I got a
review comment saying that is not required
(https://patchwork.kernel.org/patch/9906889/). The points in the
comment were valid so I reverted the change. Please have a look at the
discussion.

> Besides, regmap would be useful if the RNG registers were actually located
> in the
> middle of the register area used by the CGU driver, which is not the case
> here.
> The CGU block does have some registers after the RNG ones on the X1000 SoC,
> but
> I don't think they will ever be used (and if they are it won't be by the CGU
> driver).
>
> Regards,
> -Paul

Ingenic M200 SoC's CGU has clock and power related registers after the
RNG registers. Paul Burton suggested using regmap to expose registers
to CGU and RNG drivers
(https://patchwork.linux-mips.org/patch/14094/).

Regards,
PrasannaKumar


Re: [PATCH v3 0/4] crypto: AF_ALG AIO improvements

2018-03-07 Thread Herbert Xu
On Tue, Feb 27, 2018 at 03:08:58PM +0100, Stephan Müller wrote:
> Am Freitag, 23. Februar 2018, 13:00:26 CET schrieb Herbert Xu:
> 
> Hi Herbert,
> 
> > On Fri, Feb 23, 2018 at 09:33:33AM +0100, Stephan Müller wrote:
> > > A simple copy operation, however, will imply that in one AIO recvmsg
> > > request, only *one* IOCB can be set and processed.
> > 
> > Sure, but the recvmsg will return as soon as the crypto API encrypt
> > or decrypt function returns.  It's still fully async.  It's just
> > that the setup part needs to be done with sendmsg/recvmsg.
> 
> Wouldn't a copy of the ctx->iv into a per-request buffer change the behaviour 
> of the AF_ALG interface significantly?
> 
> Today, if multiple IOCBs are submitted, most cipher implementations would 
> serialize the requests (e.g. all implementations that are synchronous in 
> nature, like all software implementations).

No there is no such guarantee.  In fact I'm pretty sure such
users would be totally broken if cryptd was used.

Cheers,
-- 
Email: Herbert Xu 
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


Re: [PATCH v9 crypto 06/12] cxgb4: LLD driver changes to enable TLS

2018-03-07 Thread Sabrina Dubroca
2018-03-06, 21:09:25 +0530, Atul Gupta wrote:
> Read FW capability. Read key area size. Dump the TLS record count.

That's not a really helpful commit message. Have a look at other
commit messages and try to be more descriptive.
It's also not clear if those changes belong together in one patch, but
I can't judge because the commit message is really too terse.

> Signed-off-by: Atul Gupta 
> ---
>  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 32 +---
>  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h  |  7 ++
>  drivers/net/ethernet/chelsio/cxgb4/sge.c| 98 
> -
>  3 files changed, 126 insertions(+), 11 deletions(-)

[snip]
> +int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
> +const void *src, unsigned int len)
> +{
> + struct adapter *adap = netdev2adap(dev);
> + struct sge_uld_txq_info *txq_info;
> + struct sge_uld_txq *txq;
> + int ret;
> +
> + local_bh_disable();
> + txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
> + if (unlikely(!txq_info)) {
> + WARN_ON(true);
> + return NET_XMIT_DROP;

Don't you need to local_bh_enable() before returning? If not, this
needs a comment to explain why.

> + }
> + txq = &txq_info->uldtxq[idx];
> +
> + ret = ofld_xmit_direct(txq, src, len);
> + local_bh_enable();
> + return net_xmit_eval(ret);
> +}
> +EXPORT_SYMBOL(cxgb4_immdata_send);
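
(Sketching the early-return path with bottom halves re-enabled, per the
question above; not code from the posted patch:)

	if (unlikely(!txq_info)) {
		WARN_ON(true);
		local_bh_enable();
		return NET_XMIT_DROP;
	}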

-- 
Sabrina


Re: [PATCH v9 crypto 02/12] ethtool: enable Inline TLS in HW

2018-03-07 Thread Atul Gupta


On 3/7/2018 6:05 PM, Sabrina Dubroca wrote:
> Since you're saying the driver supports offloading TLS records to the
> HW, why not call the feature "record offloading"?  With, for example,
> NETIF_F_HW_TLS_RECORD as the feature, and maybe "tls-hw-record" for
> the ethtool string.  This "Inline TLS" name sounds rather like
> marketing to me.
Sure, will use record.
>
> 2018-03-06, 21:07:22 +0530, Atul Gupta wrote:
>> Signed-off-by: Atul Gupta 
> There should be a description here of what the feature covers, so that
> other drivers can decide if it matches what their HW supports.
Will add

Thanks
Atul
>



Re: [PATCH v9 crypto 02/12] ethtool: enable Inline TLS in HW

2018-03-07 Thread Sabrina Dubroca
Since you're saying the driver supports offloading TLS records to the
HW, why not call the feature "record offloading"?  With, for example,
NETIF_F_HW_TLS_RECORD as the feature, and maybe "tls-hw-record" for
the ethtool string.  This "Inline TLS" name sounds rather like
marketing to me.

2018-03-06, 21:07:22 +0530, Atul Gupta wrote:
> Signed-off-by: Atul Gupta 

There should be a description here of what the feature covers, so that
other drivers can decide if it matches what their HW supports.

-- 
Sabrina


Re: [RESEND PATCH v3] crypto: add zBeWalgo compression for zram

2018-03-07 Thread Benjamin Warnke
Hi Eric,


On 06.03.2018 at 23:13, Eric Biggers wrote:
> 
> Hi Benjamin,
> 
> On Tue, Mar 06, 2018 at 09:23:08PM +0100, Benjamin Warnke wrote:
>> Currently ZRAM uses compression-algorithms from the crypto-api. ZRAM
>> compresses each page individually. As a result the compression algorithm is
>> forced to use a very small sliding window. None of the available compression
>> algorithms is designed to achieve high compression ratios with small inputs.
>> 
>> This patch adds a new compression algorithm 'zBeWalgo' to the crypto api. 
>> This
>> algorithm focusses on increasing the capacity of the compressed block-device
>> created by ZRAM. The choice of compression algorithms is always a tradeoff
>> between speed and compression ratio.
>> 
> [...]
>> 
>> Zstd is not in the list of compared algorithms, because it is not available
>> for Zram in the currently available kernel trees.
>> 
> 
> This still isn't a valid excuse for not comparing it to Zstandard.  Zstandard 
> is
> already in the kernel in lib/.  It would only need glue code to wire it up as 
> an
> scomp_alg to the crypto API.  

I'll add benchmarks with Zstandard.

> In contrast you're proposing an entirely new
> algorithm.  Ultimately, by proposing a new algorithm you're on the hook to
> demonstrate that existing algorithms aren't good enough.  Note that many of 
> the
> existing algorithms including Zstandard also have multiple compression levels
> available to choose from.

The crypto api exposes each algorithm only once with its default compression 
level.
Currently there is no switch/option/mechanism/code or something to switch
those levels.
Of course I could benchmark the algorithms with multiple compression levels.
How many / which compression levels would you like to see compared with
each other?

> It's also rare to add a compression or encryption algorithm to the Linux 
> kernel
> that isn't already used somewhere else.

The kernel is the only place where so many small pieces of data need to be
compressed.
In user space nobody cares that the data is split into pages, and therefore
compression is usually applied to large datasets.

>  Have you at least written a formal
> specification of the 'zBeWalgo' data format?

Not yet.

>  Zstandard, for example, had a
> well-tested and optimized userspace library and a specification of the data
> format before it was added to the kernel.  And even before that it took a 
> couple
> years of development to stabilize the Zstandard data format.
> 
> Now, it's true that you don't strictly need a stable data format if the
> algorithm will only ever be used for RAM compression.  But, if you want to 
> take
> that shortcut, then you're on the hook to justify it, and perhaps to enlighten
> the crypto API about algorithms that don't have a stable data format (e.g. 
> using
> a new CRYPTO_ALG_* flag) so that users have to more explicitly opt-in to using
> them.

I could add CRYPTO_ALG_TYPE_COMPRESS_UNSTABLE as an alias for 
CRYPTO_ALG_TYPE_COMPRESS

> You cannot just ignore fuzz-safety in the decompressor either. The existing
> decompressors exposed through the crypto API are fuzz-safe, so it's not valid 
> to
> compare an unsafe decompressor to them.  If you really do want to add an 
> unsafe
> decompressor then you'd likely need to look into adding crypto API support for
> users to request unsafe decompression -- which could also be used to expose 
> the
> unsafe version of the LZ4 decompressor (LZ4_decompress_fast() instead of
> LZ4_decompress_safe()).  Or if you do make the decompressor safe, then of 
> course
> you'd actually need to fuzz it; I suggest porting the code to userspace and
> running american fuzzy lop (afl-fuzz) on it: http://lcamtuf.coredump.cx/afl/

The current version of the decompressor is fuzz-safe. I tested it with lots of
data and made sure that each possible branch in the code is executed multiple
times in different combinations.

> Separately, given that this is a brand new algorithm and it seems to have
> various corner cases, it would be a good idea to demonstrate a higher 
> guarantee
> of the correctness of the compression-decompression round trip.  afl-fuzz can 
> be
> good for that too: you could port the code to userspace and write a simple
> program which compresses and decompresses a file and aborts if it was 
> corrupted.
> Then by passing the program to afl-fuzz it will eventually cover most of the
> compressor and decompressor.  I've done that when testing compression code in
> the past.

I will port my code to user space and use afl-fuzz to test it.
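
A minimal shape for such a round-trip harness might be the sketch below;
zbewalgo_compress()/zbewalgo_decompress() stand in for the user-space port
of the kernel code and are assumptions, not existing APIs:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* hypothetical user-space ports of the kernel compressor */
	extern int zbewalgo_compress(const unsigned char *in, size_t in_len,
				     unsigned char *out, size_t *out_len);
	extern int zbewalgo_decompress(const unsigned char *in, size_t in_len,
				       unsigned char *out, size_t *out_len);

	int main(void)
	{
		unsigned char in[4096], comp[2 * 4096], out[4096];
		size_t in_len, comp_len = sizeof(comp), out_len = sizeof(out);

		in_len = fread(in, 1, sizeof(in), stdin);
		if (zbewalgo_compress(in, in_len, comp, &comp_len))
			return 0;	/* incompressible input is not a bug */
		if (zbewalgo_decompress(comp, comp_len, out, &out_len))
			abort();	/* the round trip must never fail */
		if (out_len != in_len || memcmp(in, out, in_len))
			abort();	/* corruption: afl-fuzz keeps the input */
		return 0;
	}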

Benjamin



Re: [PATCH v9 crypto 00/12] Chelsio Inline TLS

2018-03-07 Thread Atul Gupta


On 3/7/2018 3:53 PM, Sabrina Dubroca wrote:
> 2018-03-06, 21:05:23 +0530, Atul Gupta wrote:
>> Series for Chelsio Inline TLS driver (chtls)
>>
>> Use tls ULP infrastructure to register chtls as Inline TLS driver.
>> Chtls use TCP Sockets to transmit and receive TLS record. TCP proto_ops is 
>> extended to offload TLS record.
>>
>> T6 adapter provides the following features:
>> -TLS record offload, TLS header, encrypt, digest and transmit
>> -TLS record receive and decrypt
>> -TLS keys store
>> -TCP/IP engine
>> -TLS engine
>> -GCM crypto engine [support CBC also]
>>
>> TLS provides security at the transport layer. It uses TCP to provide 
>> reliable end-to-end transport of application data. It relies on TCP for any 
>> retransmission. TLS session comprises of three parts:
> Please wrap long lines.
will edit in next ver
>
> [snip]
>
>> v9: corrected __u8 and similar usage
> That's not the only changes since v8, actually. There's also some new
> code in patch 3.
tls_hw_prot is done before the sk_state != TCP_ESTABLISHED check; this check
was introduced in net-next and pulled into the crypto tree later.
>



Re: [PATCH v9 crypto 01/12] tls: tls_device struct to register TLS drivers

2018-03-07 Thread Atul Gupta


On 3/7/2018 3:46 PM, Sabrina Dubroca wrote:
> Hello Atul,
>
> One quick note before you start replying: please fix your email
> client, as you've been told before. Quoting has to add a quoting
> marker (the '>' character) at the beginning of the line, otherwise
> it's impossible to separate your reply from the email you're quoting.
>
> 2018-03-06, 21:06:20 +0530, Atul Gupta wrote:
>> tls_device structure to register Inline TLS drivers
>> with net/tls
>>
>> Signed-off-by: Atul Gupta 
>> ---
>>  include/net/tls.h | 26 ++
>>  1 file changed, 26 insertions(+)
>>
>> diff --git a/include/net/tls.h b/include/net/tls.h
>> index 4913430..9bfb91f 100644
>> --- a/include/net/tls.h
>> +++ b/include/net/tls.h
>> @@ -55,6 +55,28 @@
>>  #define TLS_RECORD_TYPE_DATA0x17
>>  
>>  #define TLS_AAD_SPACE_SIZE  13
>> +#define TLS_DEVICE_NAME_MAX 32
> Why 32 characters?
This was chosen to accommodate the registered device name; should it be bigger?
>
>
>> +enum {
>> +TLS_BASE_TX,
>> +TLS_SW_TX,
>> +TLS_FULL_HW, /* TLS record processed Inline */
>> +TLS_NUM_CONFIG,
>> +};
>> +
>> +extern struct proto tls_prots[TLS_NUM_CONFIG];
> Don't break bisection. The code has to compile after every
> patch. These are already defined in net/tls/tls_main.c.
Will take care
>
>> +struct tls_device {
>> +char name[TLS_DEVICE_NAME_MAX];
> I could find only one use of it, in chtls_register_dev. Is this
> actually needed?
help to identify Inline TLS drivers registered with net/tls
>
>> +struct list_head dev_list;
>> +
>> +/* netdev present in registered inline tls driver */
>> +int (*netdev)(struct tls_device *device,
>> +  struct net_device *netdev);
> I was trying to figure out what this did, because the name is really
> not explicit, and the comment doesn't make sense, but noticed it's
> never actually called.
It was used initially but removed in the last few cleanups [thanks for pointing it out]
>
>> +int (*feature)(struct tls_device *device);
>> +int (*hash)(struct tls_device *device, struct sock *sk);
>> +void (*unhash)(struct tls_device *device, struct sock *sk);
> I think you should add a kerneldoc comment, like all the ndo_*
> methods have.
Will take care
>
>> +};
>>  
>>  struct tls_sw_context {
>>  struct crypto_aead *aead_send;
>> @@ -115,6 +137,8 @@ struct tls_context {
>>  int  (*getsockopt)(struct sock *sk, int level,
>> int optname, char __user *optval,
>> int __user *optlen);
>> +int (*hash)(struct sock *sk);
>> +void (*unhash)(struct sock *sk);
>>  };
>>  
>>  int wait_on_pending_writer(struct sock *sk, long *timeo);
>> @@ -256,5 +280,7 @@ static inline struct tls_offload_context 
>> *tls_offload_ctx(
>>  
>>  int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
>>unsigned char *record_type);
>> +void tls_register_device(struct tls_device *device);
>> +void tls_unregister_device(struct tls_device *device);
> Prototype without implementation, please don't do that.  This happens
> because you split your patchset so that each patch has all the changes
> for exactly one file.
will have declaration and definition together
>



Re: [RESEND PATCH v3] crypto: add zBeWalgo compression for zram

2018-03-07 Thread Benjamin Warnke
Hello,

On (07/03/2018 03:12), Sergey Senozhatsky wrote:
> 
> Hello,
> 
> On (03/06/18 20:59), Benjamin Warnke wrote:
>>   Currently ZRAM uses compression-algorithms from the crypto-api. ZRAM
>>   compresses each page individually. As a result the compression algorithm
>>   is
>>   forced to use a very small sliding window. None of the available
>>   compression
>>   algorithms is designed to achieve high compression ratios with small
>>   inputs.
> 
> I think you first need to merge zBeWalgo (looks like a long way to go)
> And then add ZRAM support, as a separate patch.

I'll split my patch into 2 parts

1st: add zBeWalgo compression algorithm
2nd: enable zBeWalgo to be used by ZRAM

> 
>>   - 'ecoham' (100 MiB) This dataset is one of the input files for the
>>   scientific
>>   application ECOHAM which runs an ocean simulation. This dataset contains a
>>   lot of zeros. Where the data is not zero there are arrays of floating
>>   point
>>   values, adjacent float values are likely to be similar to each other,
>>   allowing for high compression ratios.
>> 
>>   algorithm | ratio   | compression| decompression
>>   zbewalgo  |   12.94 |  294.10 MBit/s | 1242.59 MBit/s
>>   deflate   |   12.54 |   75.51 MBit/s |  736.39 MBit/s
>>   842   |   12.26 |  182.59 MBit/s |  683.61 MBit/s
>>   lz4hc |   12.00 |   51.23 MBit/s | 1524.73 MBit/s
>>   lz4   |   10.68 | 1334.37 MBit/s | 1603.54 MBit/s
>>   lzo   |9.79 | 1333.76 MBit/s | 1534.63 MBit/s
>> 
>>   - 'source-code' (800 MiB) This dataset is a tarball of the source-code
>>   from a
>>   linux-kernel.
>> 
>>   algorithm | ratio   | compression| decompression
>>   deflate   |3.27 |   42.48 MBit/s |  250.36 MBit/s
>>   lz4hc |2.40 |  104.14 MBit/s | 1150.53 MBit/s
>>   lzo   |2.27 |  444.77 MBit/s |  886.97 MBit/s
>>   lz4   |2.18 |  453.08 MBit/s | 1101.45 MBit/s
>>   842   |1.65 |   64.10 MBit/s |  158.40 MBit/s
>>   zbewalgo  |1.19 |   52.89 MBit/s |  197.58 MBit/s
>> 
>>   - 'hpcg' (8 GiB) This dataset is a (partial) memory-snapshot of the
>>   running hpcg-benchmark. At the time of the snapshot, that application
>>   performed a sparse matrix - vector multiplication.
>> 
>>   algorithm | ratio   | compression| decompression
>>   zbewalgo  |   16.16 |  179.97 MBit/s |  468.36 MBit/s
>>   deflate   |9.52 |   65.11 MBit/s |  632.69 MBit/s
>>   lz4hc |4.96 |  193.33 MBit/s | 1607.12 MBit/s
>>   842   |4.20 |  150.99 MBit/s |  316.22 MBit/s
>>   lzo   |4.14 |  922.74 MBit/s |  865.32 MBit/s
>>   lz4   |3.79 |  908.39 MBit/s | 1375.33 MBit/s
>> 
>>   - 'partdiff' (8 GiB) Array of double values. Adjacent doubles are similar,
>>   but
>>   not equal. This array is produced by a partial differential equation
>>   solver
>>   using a Jakobi-implementation.
>> 
>>   algorithm | ratio   | compression| decompression
>>   zbewalgo  |1.30 |  203.30 MBit/s |  530.87 MBit/s
>>   deflate   |1.02 |   37.06 MBit/s | 1131.88 MBit/s
>>   lzo   |1.00 | 1741.46 MBit/s | 2012.78 MBit/s
>>   lz4   |1.00 | 1458.08 MBit/s | 2013.88 MBit/s
>>   lz4hc |1.00 |  173.19 MBit/s | 2012.37 MBit/s
>>   842   |1.00 |   64.10 MBit/s | 2013.64 MBit/s
> 
> Hm, mixed feelings.

As Eric Biggers suggested, I'll add Zstandard to the set of algorithms being
compared. What else should I add to the benchmarks?

Benjamin



[PATCH v2] crypto: hash.h: Prevent use of req->result in ahash update

2018-03-07 Thread Kamil Konieczny
Prevent improper use of req->result field in ahash update, init, export and
import functions in drivers code. A driver should use ahash request context
if it needs to save internal state.

Signed-off-by: Kamil Konieczny 
---
version 2:
 Change req->digest to req->result, as pointed out by Tom Lendacky

 include/crypto/hash.h | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2d1849dffb80..76e432cab75d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -74,7 +74,8 @@ struct ahash_request {
  * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
  *   state of the HASH transformation at the beginning. This shall fill in
  *   the internal structures used during the entire duration of the whole
- *   transformation. No data processing happens at this point.
+ *   transformation. No data processing happens at this point. Driver code
+ *   implementation must not use req->result.
  * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
  *function actually pushes blocks of data from upper layers into the
  *driver, which then passes those to the hardware as seen fit. This
@@ -83,7 +84,8 @@ struct ahash_request {
  *transformation. This function shall not modify the transformation
  *context, as this function may be called in parallel with the same
  *transformation object. Data processing can happen synchronously
- *[SHASH] or asynchronously [AHASH] at this point.
+ *[SHASH] or asynchronously [AHASH] at this point. Driver must not use
+ *req->result.
  * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
  *transformation and retrieves the resulting hash from the driver and
  *pushes it back to upper layers. No data processing happens at this
@@ -120,11 +122,12 @@ struct ahash_request {
  * you want to save partial result of the transformation after
  * processing certain amount of data and reload this partial result
  * multiple times later on for multiple re-use. No data processing
- * happens at this point.
+ * happens at this point. Driver must not use req->result.
  * @import: Import partial state of the transformation. This function loads the
  * entire state of the ongoing transformation from a provided block of
  * data so the transformation can continue from this point onward. No
- * data processing happens at this point.
+ * data processing happens at this point. Driver must not use
+ * req->result.
  * @halg: see struct hash_alg_common
  */
 struct ahash_alg {
-- 
2.16.2
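
For illustration, here is a hedged sketch of the driver-side pattern the
updated kerneldoc asks for. It is not taken from any in-tree driver; the
mydrv_* names, the state layout, and the helper functions are invented for
this example. The point is that intermediate hash state lives in the ahash
request context (sized via crypto_ahash_set_reqsize() in the driver's
cra_init), and req->result is written only when the hash is finalized.

#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

struct mydrv_state {
	u8  partial[SHA256_DIGEST_SIZE];	/* running digest state */
	u64 byte_count;
};

/* hypothetical helper: feed nbytes from sg into the hardware, update st */
static int mydrv_hash_sg(struct mydrv_state *st, struct scatterlist *sg,
			 unsigned int nbytes)
{
	st->byte_count += nbytes;
	return 0;
}

/* hypothetical helper: copy the final digest to out */
static int mydrv_emit_digest(struct mydrv_state *st, u8 *out)
{
	memcpy(out, st->partial, sizeof(st->partial));
	return 0;
}

static int mydrv_init(struct ahash_request *req)
{
	struct mydrv_state *st = ahash_request_ctx(req);

	/* all intermediate state goes into the request context ... */
	memset(st, 0, sizeof(*st));
	return 0;
}

static int mydrv_update(struct ahash_request *req)
{
	struct mydrv_state *st = ahash_request_ctx(req);

	/* ... never into req->result, which may not be valid here */
	return mydrv_hash_sg(st, req->src, req->nbytes);
}

static int mydrv_final(struct ahash_request *req)
{
	struct mydrv_state *st = ahash_request_ctx(req);

	/* req->result is only written in final/finup/digest */
	return mydrv_emit_digest(st, req->result);
}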



Re: [PATCH] crypto: hash.h: Prevent use of req->digest in ahash update

2018-03-07 Thread Kamil Konieczny
On 06.03.2018 19:04, Tom Lendacky wrote:
> On 3/6/2018 5:45 AM, Kamil Konieczny wrote:
>> Prevent improper use of req->digest field in ahash update, init, export and
> 
> Shouldn't that be req->result (here and below)?

Yes, it should, I will send version 2 soon,
thank you.

Best regards,
Kamil Konieczny

>> import functions in drivers code. A driver should use ahash request context
>> if it needs to save internal state.
>>
>> Signed-off-by: Kamil Konieczny 
>> ---
>>  include/crypto/hash.h | 11 +++
>>  1 file changed, 7 insertions(+), 4 deletions(-)
>>
>> diff --git a/include/crypto/hash.h b/include/crypto/hash.h
>> index 2d1849dffb80..e97c2e662d6a 100644
>> --- a/include/crypto/hash.h
>> +++ b/include/crypto/hash.h
>> @@ -74,7 +74,8 @@ struct ahash_request {
>>   * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
>>   *state of the HASH transformation at the beginning. This shall fill in
>>   *the internal structures used during the entire duration of the whole
>> - *transformation. No data processing happens at this point.
>> + *transformation. No data processing happens at this point. Driver code
>> + *implementation must not use req->digest.
>>   * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
>>   * function actually pushes blocks of data from upper layers into the
>>   * driver, which then passes those to the hardware as seen fit. This
>> @@ -83,7 +84,8 @@ struct ahash_request {
>>   * transformation. This function shall not modify the transformation
>>   * context, as this function may be called in parallel with the same
>>   * transformation object. Data processing can happen synchronously
>> - * [SHASH] or asynchronously [AHASH] at this point.
>> + * [SHASH] or asynchronously [AHASH] at this point. Driver must not use
>> + * req->digest.
>>   * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
>>   * transformation and retrieves the resulting hash from the driver and
>>   * pushes it back to upper layers. No data processing happens at this
>> @@ -120,11 +122,12 @@ struct ahash_request {
>>   *  you want to save partial result of the transformation after
>>   *  processing certain amount of data and reload this partial result
>>   *  multiple times later on for multiple re-use. No data processing
>> - *  happens at this point.
>> + *  happens at this point. Driver must not use req->digest.
>>   * @import: Import partial state of the transformation. This function loads the
>>   *  entire state of the ongoing transformation from a provided block of
>>   *  data so the transformation can continue from this point onward. No
>> - *  data processing happens at this point.
>> + *  data processing happens at this point. Driver must not use
>> + *  req->digest.
>>   * @halg: see struct hash_alg_common
>>   */
>>  struct ahash_alg {
>>
> 
> 
> 


Re: [PATCH v9 crypto 01/12] tls: tls_device struct to register TLS drivers

2018-03-07 Thread Sabrina Dubroca
Hello Atul,

One quick note before you start replying: please fix your email
client, as you've been told before. Quoting has to add a quoting
marker (the '>' character) at the beginning of the line, otherwise
it's impossible to separate your reply from the email you're quoting.

2018-03-06, 21:06:20 +0530, Atul Gupta wrote:
> tls_device structure to register Inline TLS drivers
> with net/tls
> 
> Signed-off-by: Atul Gupta 
> ---
>  include/net/tls.h | 26 ++
>  1 file changed, 26 insertions(+)
> 
> diff --git a/include/net/tls.h b/include/net/tls.h
> index 4913430..9bfb91f 100644
> --- a/include/net/tls.h
> +++ b/include/net/tls.h
> @@ -55,6 +55,28 @@
>  #define TLS_RECORD_TYPE_DATA 0x17
>  
>  #define TLS_AAD_SPACE_SIZE   13
> +#define TLS_DEVICE_NAME_MAX  32

Why 32 characters?


> +enum {
> + TLS_BASE_TX,
> + TLS_SW_TX,
> + TLS_FULL_HW, /* TLS record processed Inline */
> + TLS_NUM_CONFIG,
> +};
> +
> +extern struct proto tls_prots[TLS_NUM_CONFIG];

Don't break bisection. The code has to compile after every
patch. These are already defined in net/tls/tls_main.c.

> +struct tls_device {
> + char name[TLS_DEVICE_NAME_MAX];

I could find only one use of it, in chtls_register_dev. Is this
actually needed?

> + struct list_head dev_list;
> +
> + /* netdev present in registered inline tls driver */
> + int (*netdev)(struct tls_device *device,
> +   struct net_device *netdev);

I was trying to figure out what this did, because the name is really
not explicit and the comment doesn't make sense, but then I noticed it's
never actually called.

> + int (*feature)(struct tls_device *device);
> + int (*hash)(struct tls_device *device, struct sock *sk);
> + void (*unhash)(struct tls_device *device, struct sock *sk);

I think you should add a kerneldoc comment, like all the ndo_*
methods have.
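
A kerneldoc block for the proposed struct could look roughly like this; the
wording below is only an illustration, the exact semantics are for the
submitter to document:

/**
 * struct tls_device - inline TLS offload driver registration
 * @name:     driver name (informational)
 * @dev_list: entry in the list of registered inline TLS drivers
 * @netdev:   check whether a net_device belongs to this driver
 * @feature:  report whether the device can handle inline TLS records
 * @hash:     driver-specific hash operation for offloaded sockets
 * @unhash:   driver-specific unhash operation for offloaded sockets
 */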

> +};
>  
>  struct tls_sw_context {
>   struct crypto_aead *aead_send;
> @@ -115,6 +137,8 @@ struct tls_context {
>   int  (*getsockopt)(struct sock *sk, int level,
>  int optname, char __user *optval,
>  int __user *optlen);
> + int (*hash)(struct sock *sk);
> + void (*unhash)(struct sock *sk);
>  };
>  
>  int wait_on_pending_writer(struct sock *sk, long *timeo);
> @@ -256,5 +280,7 @@ static inline struct tls_offload_context *tls_offload_ctx(
>  
>  int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
> unsigned char *record_type);
> +void tls_register_device(struct tls_device *device);
> +void tls_unregister_device(struct tls_device *device);

Prototype without implementation, please don't do that.  This happens
because you split your patchset so that each patch has all the changes
for exactly one file.

-- 
Sabrina
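
For reference, a hypothetical sketch of how an inline TLS driver would use
these hooks, assuming the API lands as proposed and once
tls_register_device()/tls_unregister_device() gain an implementation. All
mydrv_* identifiers are made up for illustration, and the unused ->netdev
hook is left out, matching the observation above that nothing calls it.

#include <linux/module.h>
#include <net/tls.h>

static int mydrv_feature(struct tls_device *dev)
{
	/* report that inline TLS record processing is available */
	return 1;
}

static int mydrv_hash(struct tls_device *dev, struct sock *sk)
{
	/* insert sk into the driver's offload lookup tables */
	return 0;
}

static void mydrv_unhash(struct tls_device *dev, struct sock *sk)
{
	/* remove sk from the driver's offload lookup tables */
}

static struct tls_device mydrv_tls_dev = {
	.name	 = "mydrv-inline-tls",
	.feature = mydrv_feature,
	.hash	 = mydrv_hash,
	.unhash	 = mydrv_unhash,
};

static int __init mydrv_tls_init(void)
{
	tls_register_device(&mydrv_tls_dev);
	return 0;
}

static void __exit mydrv_tls_exit(void)
{
	tls_unregister_device(&mydrv_tls_dev);
}

module_init(mydrv_tls_init);
module_exit(mydrv_tls_exit);
MODULE_LICENSE("GPL");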