[PATCH 01/12] dmaengine: add base support for the async_tx api

2007-01-22 Thread Dan Williams
From: Dan Williams <[EMAIL PROTECTED]>

* introduce struct dma_async_tx_descriptor as a common field for all dmaengine
software descriptors
* convert the device_memcpy_* methods into separate prep, set src/dest, and
submit stages
* support capabilities beyond memcpy (xor, memset, xor zero sum, completion
interrupts)
* convert ioatdma to the new semantics

Signed-off-by: Dan Williams <[EMAIL PROTECTED]>
---

 drivers/dma/dmaengine.c   |   44 ++--
 drivers/dma/ioatdma.c |  256 ++--
 drivers/dma/ioatdma.h |8 +
 include/linux/dmaengine.h |  263 ++---
 4 files changed, 394 insertions(+), 177 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 1527804..8d203ad 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -210,7 +210,8 @@ static void dma_chans_rebalance(void)
	mutex_lock(&dma_list_mutex);
 
	list_for_each_entry(client, &dma_client_list, global_node) {
-   while (client->chans_desired > client->chan_count) {
+   while (client->chans_desired < 0 ||
+   client->chans_desired > client->chan_count) {
chan = dma_client_chan_alloc(client);
if (!chan)
break;
@@ -219,7 +220,8 @@ static void dma_chans_rebalance(void)
   chan,
   DMA_RESOURCE_ADDED);
}
-   while (client->chans_desired < client->chan_count) {
+   while (client->chans_desired >= 0 &&
+   client->chans_desired < client->chan_count) {
			spin_lock_irqsave(&client->lock, flags);
chan = list_entry(client->channels.next,
  struct dma_chan,
@@ -294,12 +296,12 @@ void dma_async_client_unregister(struct dma_client 
*client)
  * @number: count of DMA channels requested
  *
  * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
+ * DMA channels they need, 0 to free all currently allocated. A request
+ * < 0 indicates the client wants to handle all engines in the system.
  * The resulting allocations/frees are indicated to the client via the
  * event callback.
  */
-void dma_async_client_chan_request(struct dma_client *client,
-   unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client, int number)
 {
client->chans_desired = number;
dma_chans_rebalance();
@@ -318,6 +320,31 @@ int dma_async_device_register(struct dma_device *device)
if (!device)
return -ENODEV;
 
+   /* validate device routines */
+   BUG_ON(test_bit(DMA_MEMCPY, &device->capabilities) &&
+   !device->device_prep_dma_memcpy);
+   BUG_ON(test_bit(DMA_XOR, &device->capabilities) &&
+   !device->device_prep_dma_xor);
+   BUG_ON(test_bit(DMA_ZERO_SUM, &device->capabilities) &&
+   !device->device_prep_dma_zero_sum);
+   BUG_ON(test_bit(DMA_MEMSET, &device->capabilities) &&
+   !device->device_prep_dma_memset);
+   BUG_ON(test_bit(DMA_INTERRUPT, &device->capabilities) &&
+   !device->device_prep_dma_interrupt);
+
+   BUG_ON(!device->device_alloc_chan_resources);
+   BUG_ON(!device->device_free_chan_resources);
+   BUG_ON(!device->device_tx_submit);
+   BUG_ON(!device->device_set_dest);
+   BUG_ON(!device->device_set_src);
+   BUG_ON(!device->device_dependency_added);
+   BUG_ON(!device->device_is_tx_complete);
+   BUG_ON(!device->map_page);
+   BUG_ON(!device->map_single);
+   BUG_ON(!device->unmap_page);
+   BUG_ON(!device->unmap_single);
+   BUG_ON(!device->device_issue_pending);
+
	init_completion(&device->done);
	kref_init(&device->refcount);
device->dev_id = id++;
@@ -402,11 +429,8 @@ subsys_initcall(dma_bus_init);
 EXPORT_SYMBOL(dma_async_client_register);
 EXPORT_SYMBOL(dma_async_client_unregister);
 EXPORT_SYMBOL(dma_async_client_chan_request);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_complete);
-EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
+EXPORT_SYMBOL(dma_async_is_tx_complete);
+EXPORT_SYMBOL(dma_async_issue_pending);
 EXPORT_SYMBOL(dma_async_device_register);
 EXPORT_SYMBOL(dma_async_device_unregister);
 EXPORT_SYMBOL(dma_chan_cleanup);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 8e87261..70bdd18 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -31,6 +31,7 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/async_tx.h>
 #include "ioatdma.h"
 #include "ioatdma_io.h"
 #include "ioatdma_registers.h"
@@ -39,6 +40,7 @@
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define 

[PATCH 01/12] dmaengine: add base support for the async_tx api

2007-01-22 Thread Dan Williams
From: Dan Williams [EMAIL PROTECTED]

* introduce struct dma_async_tx_descriptor as a common field for all dmaengine
software descriptors
* convert the device_memcpy_* methods into separate prep, set src/dest, and
submit stages
* support capabilities beyond memcpy (xor, memset, xor zero sum, completion
interrupts)
* convert ioatdma to the new semantics

Signed-off-by: Dan Williams [EMAIL PROTECTED]
---

 drivers/dma/dmaengine.c   |   44 ++--
 drivers/dma/ioatdma.c |  256 ++--
 drivers/dma/ioatdma.h |8 +
 include/linux/dmaengine.h |  263 ++---
 4 files changed, 394 insertions(+), 177 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 1527804..8d203ad 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -210,7 +210,8 @@ static void dma_chans_rebalance(void)
	mutex_lock(&dma_list_mutex);
 
	list_for_each_entry(client, &dma_client_list, global_node) {
-		while (client->chans_desired > client->chan_count) {
+		while (client->chans_desired < 0 ||
+			client->chans_desired > client->chan_count) {
			chan = dma_client_chan_alloc(client);
			if (!chan)
				break;
@@ -219,7 +220,8 @@ static void dma_chans_rebalance(void)
			   chan,
			   DMA_RESOURCE_ADDED);
		}
-		while (client->chans_desired < client->chan_count) {
+		while (client->chans_desired >= 0 &&
+			client->chans_desired < client->chan_count) {
			spin_lock_irqsave(&client->lock, flags);
			chan = list_entry(client->channels.next,
				  struct dma_chan,
@@ -294,12 +296,12 @@ void dma_async_client_unregister(struct dma_client 
*client)
  * @number: count of DMA channels requested
  *
  * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
+ * DMA channels they need, 0 to free all currently allocated. A request
+ * < 0 indicates the client wants to handle all engines in the system.
  * The resulting allocations/frees are indicated to the client via the
  * event callback.
  */
-void dma_async_client_chan_request(struct dma_client *client,
-		unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client, int number)
 {
	client->chans_desired = number;
	dma_chans_rebalance();
@@ -318,6 +320,31 @@ int dma_async_device_register(struct dma_device *device)
if (!device)
return -ENODEV;
 
+	/* validate device routines */
+	BUG_ON(test_bit(DMA_MEMCPY, &device->capabilities) &&
+		!device->device_prep_dma_memcpy);
+	BUG_ON(test_bit(DMA_XOR, &device->capabilities) &&
+		!device->device_prep_dma_xor);
+	BUG_ON(test_bit(DMA_ZERO_SUM, &device->capabilities) &&
+		!device->device_prep_dma_zero_sum);
+	BUG_ON(test_bit(DMA_MEMSET, &device->capabilities) &&
+		!device->device_prep_dma_memset);
+	BUG_ON(test_bit(DMA_INTERRUPT, &device->capabilities) &&
+		!device->device_prep_dma_interrupt);
+
+	BUG_ON(!device->device_alloc_chan_resources);
+	BUG_ON(!device->device_free_chan_resources);
+	BUG_ON(!device->device_tx_submit);
+	BUG_ON(!device->device_set_dest);
+	BUG_ON(!device->device_set_src);
+	BUG_ON(!device->device_dependency_added);
+	BUG_ON(!device->device_is_tx_complete);
+	BUG_ON(!device->map_page);
+	BUG_ON(!device->map_single);
+	BUG_ON(!device->unmap_page);
+	BUG_ON(!device->unmap_single);
+	BUG_ON(!device->device_issue_pending);
+
	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;
@@ -402,11 +429,8 @@ subsys_initcall(dma_bus_init);
 EXPORT_SYMBOL(dma_async_client_register);
 EXPORT_SYMBOL(dma_async_client_unregister);
 EXPORT_SYMBOL(dma_async_client_chan_request);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_complete);
-EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
+EXPORT_SYMBOL(dma_async_is_tx_complete);
+EXPORT_SYMBOL(dma_async_issue_pending);
 EXPORT_SYMBOL(dma_async_device_register);
 EXPORT_SYMBOL(dma_async_device_unregister);
 EXPORT_SYMBOL(dma_chan_cleanup);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 8e87261..70bdd18 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -31,6 +31,7 @@
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/async_tx.h>
 #include "ioatdma.h"
 #include "ioatdma_io.h"
 #include "ioatdma_registers.h"
@@ -39,6 +40,7 @@
 #define 

[PATCH 01/12] dmaengine: add base support for the async_tx api

2006-11-30 Thread Dan Williams
From: Dan Williams <[EMAIL PROTECTED]>

* introduce struct dma_async_tx_descriptor as a common field for all dmaengine
software descriptors
* convert the device_memcpy_* methods into separate prep, set src/dest, and
submit stages
* support capabilities beyond memcpy (xor, memset, xor zero sum, completion
interrupts)
* convert ioatdma to the new semantics

Signed-off-by: Dan Williams <[EMAIL PROTECTED]>
---

 drivers/dma/dmaengine.c   |   44 ++--
 drivers/dma/ioatdma.c |  256 ++--
 drivers/dma/ioatdma.h |8 +
 include/linux/dmaengine.h |  263 ++---
 4 files changed, 394 insertions(+), 177 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 1527804..8d203ad 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -210,7 +210,8 @@ static void dma_chans_rebalance(void)
	mutex_lock(&dma_list_mutex);
 
	list_for_each_entry(client, &dma_client_list, global_node) {
-   while (client->chans_desired > client->chan_count) {
+   while (client->chans_desired < 0 ||
+   client->chans_desired > client->chan_count) {
chan = dma_client_chan_alloc(client);
if (!chan)
break;
@@ -219,7 +220,8 @@ static void dma_chans_rebalance(void)
   chan,
   DMA_RESOURCE_ADDED);
}
-   while (client->chans_desired < client->chan_count) {
+   while (client->chans_desired >= 0 &&
+   client->chans_desired < client->chan_count) {
			spin_lock_irqsave(&client->lock, flags);
chan = list_entry(client->channels.next,
  struct dma_chan,
@@ -294,12 +296,12 @@ void dma_async_client_unregister(struct 
  * @number: count of DMA channels requested
  *
  * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
+ * DMA channels they need, 0 to free all currently allocated. A request
+ * < 0 indicates the client wants to handle all engines in the system.
  * The resulting allocations/frees are indicated to the client via the
  * event callback.
  */
-void dma_async_client_chan_request(struct dma_client *client,
-   unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client, int number)
 {
client->chans_desired = number;
dma_chans_rebalance();
@@ -318,6 +320,31 @@ int dma_async_device_register(struct dma
if (!device)
return -ENODEV;
 
+   /* validate device routines */
+   BUG_ON(test_bit(DMA_MEMCPY, &device->capabilities) &&
+   !device->device_prep_dma_memcpy);
+   BUG_ON(test_bit(DMA_XOR, &device->capabilities) &&
+   !device->device_prep_dma_xor);
+   BUG_ON(test_bit(DMA_ZERO_SUM, &device->capabilities) &&
+   !device->device_prep_dma_zero_sum);
+   BUG_ON(test_bit(DMA_MEMSET, &device->capabilities) &&
+   !device->device_prep_dma_memset);
+   BUG_ON(test_bit(DMA_INTERRUPT, &device->capabilities) &&
+   !device->device_prep_dma_interrupt);
+
+   BUG_ON(!device->device_alloc_chan_resources);
+   BUG_ON(!device->device_free_chan_resources);
+   BUG_ON(!device->device_tx_submit);
+   BUG_ON(!device->device_set_dest);
+   BUG_ON(!device->device_set_src);
+   BUG_ON(!device->device_dependency_added);
+   BUG_ON(!device->device_is_tx_complete);
+   BUG_ON(!device->map_page);
+   BUG_ON(!device->map_single);
+   BUG_ON(!device->unmap_page);
+   BUG_ON(!device->unmap_single);
+   BUG_ON(!device->device_issue_pending);
+
	init_completion(&device->done);
	kref_init(&device->refcount);
device->dev_id = id++;
@@ -402,11 +429,8 @@ subsys_initcall(dma_bus_init);
 EXPORT_SYMBOL(dma_async_client_register);
 EXPORT_SYMBOL(dma_async_client_unregister);
 EXPORT_SYMBOL(dma_async_client_chan_request);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_complete);
-EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
+EXPORT_SYMBOL(dma_async_is_tx_complete);
+EXPORT_SYMBOL(dma_async_issue_pending);
 EXPORT_SYMBOL(dma_async_device_register);
 EXPORT_SYMBOL(dma_async_device_unregister);
 EXPORT_SYMBOL(dma_chan_cleanup);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 0358419..ff7377d 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -31,6 +31,7 @@ #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/async_tx.h>
 #include "ioatdma.h"
 #include "ioatdma_io.h"
 #include "ioatdma_registers.h"
@@ -39,6 +40,7 @@ #include "ioatdma_hw.h"
 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
 #define 

[PATCH 01/12] dmaengine: add base support for the async_tx api

2006-11-30 Thread Dan Williams
From: Dan Williams [EMAIL PROTECTED]

* introduce struct dma_async_tx_descriptor as a common field for all dmaengine
software descriptors
* convert the device_memcpy_* methods into separate prep, set src/dest, and
submit stages
* support capabilities beyond memcpy (xor, memset, xor zero sum, completion
interrupts)
* convert ioatdma to the new semantics

Signed-off-by: Dan Williams [EMAIL PROTECTED]
---

 drivers/dma/dmaengine.c   |   44 ++--
 drivers/dma/ioatdma.c |  256 ++--
 drivers/dma/ioatdma.h |8 +
 include/linux/dmaengine.h |  263 ++---
 4 files changed, 394 insertions(+), 177 deletions(-)

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 1527804..8d203ad 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -210,7 +210,8 @@ static void dma_chans_rebalance(void)
	mutex_lock(&dma_list_mutex);
 
	list_for_each_entry(client, &dma_client_list, global_node) {
-		while (client->chans_desired > client->chan_count) {
+		while (client->chans_desired < 0 ||
+			client->chans_desired > client->chan_count) {
			chan = dma_client_chan_alloc(client);
			if (!chan)
				break;
@@ -219,7 +220,8 @@ static void dma_chans_rebalance(void)
			   chan,
			   DMA_RESOURCE_ADDED);
		}
-		while (client->chans_desired < client->chan_count) {
+		while (client->chans_desired >= 0 &&
+			client->chans_desired < client->chan_count) {
			spin_lock_irqsave(&client->lock, flags);
			chan = list_entry(client->channels.next,
				  struct dma_chan,
@@ -294,12 +296,12 @@ void dma_async_client_unregister(struct 
  * @number: count of DMA channels requested
  *
  * Clients call dma_async_client_chan_request() to specify how many
- * DMA channels they need, 0 to free all currently allocated.
+ * DMA channels they need, 0 to free all currently allocated. A request
+ * < 0 indicates the client wants to handle all engines in the system.
  * The resulting allocations/frees are indicated to the client via the
  * event callback.
  */
-void dma_async_client_chan_request(struct dma_client *client,
-		unsigned int number)
+void dma_async_client_chan_request(struct dma_client *client, int number)
 {
	client->chans_desired = number;
	dma_chans_rebalance();
@@ -318,6 +320,31 @@ int dma_async_device_register(struct dma
if (!device)
return -ENODEV;
 
+   /* validate device routines */
+   BUG_ON(test_bit(DMA_MEMCPY, &device->capabilities) &&
+   !device->device_prep_dma_memcpy);
+   BUG_ON(test_bit(DMA_XOR, &device->capabilities) &&
+   !device->device_prep_dma_xor);
+   BUG_ON(test_bit(DMA_ZERO_SUM, &device->capabilities) &&
+   !device->device_prep_dma_zero_sum);
+   BUG_ON(test_bit(DMA_MEMSET, &device->capabilities) &&
+   !device->device_prep_dma_memset);
+   BUG_ON(test_bit(DMA_INTERRUPT, &device->capabilities) &&
+   !device->device_prep_dma_interrupt);
+
+   BUG_ON(!device->device_alloc_chan_resources);
+   BUG_ON(!device->device_free_chan_resources);
+   BUG_ON(!device->device_tx_submit);
+   BUG_ON(!device->device_set_dest);
+   BUG_ON(!device->device_set_src);
+   BUG_ON(!device->device_dependency_added);
+   BUG_ON(!device->device_is_tx_complete);
+   BUG_ON(!device->map_page);
+   BUG_ON(!device->map_single);
+   BUG_ON(!device->unmap_page);
+   BUG_ON(!device->unmap_single);
+   BUG_ON(!device->device_issue_pending);
+
	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;
@@ -402,11 +429,8 @@ subsys_initcall(dma_bus_init);
 EXPORT_SYMBOL(dma_async_client_register);
 EXPORT_SYMBOL(dma_async_client_unregister);
 EXPORT_SYMBOL(dma_async_client_chan_request);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-EXPORT_SYMBOL(dma_async_memcpy_complete);
-EXPORT_SYMBOL(dma_async_memcpy_issue_pending);
+EXPORT_SYMBOL(dma_async_is_tx_complete);
+EXPORT_SYMBOL(dma_async_issue_pending);
 EXPORT_SYMBOL(dma_async_device_register);
 EXPORT_SYMBOL(dma_async_device_unregister);
 EXPORT_SYMBOL(dma_chan_cleanup);
diff --git a/drivers/dma/ioatdma.c b/drivers/dma/ioatdma.c
index 0358419..ff7377d 100644
--- a/drivers/dma/ioatdma.c
+++ b/drivers/dma/ioatdma.c
@@ -31,6 +31,7 @@ #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/async_tx.h>
 #include "ioatdma.h"
 #include "ioatdma_io.h"
 #include "ioatdma_registers.h"
@@ -39,6 +40,7 @@ #include "ioatdma_hw.h"
 #define