This patch converts deeply nested if statements into goto-based error unwinding in a few places in fs/cifs.
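
For reference, the conversion follows the usual kernel error-unwinding idiom: each
initialization step that fails jumps to a label that undoes only the steps already
completed, with the cleanup labels falling through in reverse order. A minimal
standalone sketch of the idiom (the init_a()/exit_a()/... helpers here are
hypothetical placeholders, not the CIFS functions touched below):

	/*
	 * Minimal sketch of goto-based unwinding; init_* return 0 on
	 * success, negative on failure, and exit_* undo them.
	 */
	static int init_a(void) { return 0; }
	static void exit_a(void) { }
	static int init_b(void) { return 0; }
	static void exit_b(void) { }
	static int init_c(void) { return 0; }

	int example_init(void)
	{
		int rc;

		rc = init_a();
		if (rc)
			goto out;		/* nothing to undo yet */

		rc = init_b();
		if (rc)
			goto out_exit_a;	/* undo only init_a() */

		rc = init_c();
		if (rc)
			goto out_exit_b;	/* undo init_b(), then fall through */

		return 0;			/* success: skip all cleanup */

	 out_exit_b:
		exit_b();
	 out_exit_a:
		exit_a();
	 out:
		return rc;
	}

The cifsfs.c hunk below applies exactly this shape to cifs_init_inodecache(),
cifs_init_mids(), cifs_init_request_bufs(), register_filesystem() and the oplock
thread creation.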

Signed-off-by: Pekka Enberg <[EMAIL PROTECTED]>
---

 cifsfs.c  |   51 +++++++++++++++++++++++++++++++--------------------
 connect.c |   44 ++++++++++++++++++++------------------------
 2 files changed, 51 insertions(+), 44 deletions(-)

Index: linux/fs/cifs/cifsfs.c
===================================================================
--- linux.orig/fs/cifs/cifsfs.c 2005-01-11 08:04:55.007622032 +0200
+++ linux/fs/cifs/cifsfs.c      2005-01-11 08:04:57.454250088 +0200
@@ -839,26 +839,37 @@
        }
 
        rc = cifs_init_inodecache();
-       if (!rc) {
-               rc = cifs_init_mids();
-               if (!rc) {
-                       rc = cifs_init_request_bufs();
-                       if (!rc) {
-                               rc = register_filesystem(&cifs_fs_type);
-                               if (!rc) {                
-                                       rc = (int)kernel_thread(cifs_oplock_thread, NULL, 
-                                               CLONE_FS | CLONE_FILES | CLONE_VM);
-                                       if(rc > 0)
-                                               return 0;
-                                       else 
-                                               cERROR(1,("error %d create oplock thread",rc));
-                               }
-                               cifs_destroy_request_bufs();
-                       }
-                       cifs_destroy_mids();
-               }
-               cifs_destroy_inodecache();
-       }
+       if (rc)
+               goto failed_init_inodecache;
+
+       rc = cifs_init_mids();
+       if (rc)
+               goto failed_init_mids;
+
+       rc = cifs_init_request_bufs();
+       if (rc)
+               goto failed_init_request_bufs;
+
+       rc = register_filesystem(&cifs_fs_type);
+       if (rc)
+               goto failed_register_filesystem;
+
+       rc = kernel_thread(cifs_oplock_thread, NULL,
+                          CLONE_FS | CLONE_FILES | CLONE_VM);
+       if (rc <= 0)
+               goto failed_kernel_thread;
+
+       return 0;
+
+ failed_kernel_thread:
+       cERROR(1,("error %d create oplock thread",rc));
+ failed_register_filesystem:
+       cifs_destroy_request_bufs();
+ failed_init_request_bufs:
+       cifs_destroy_mids();
+ failed_init_mids:
+       cifs_destroy_inodecache();
+ failed_init_inodecache:
 #ifdef CONFIG_PROC_FS
        cifs_proc_clean();
 #endif
Index: linux/fs/cifs/connect.c
===================================================================
--- linux.orig/fs/cifs/connect.c        2005-01-11 08:04:50.326333696 +0200
+++ linux/fs/cifs/connect.c     2005-01-11 08:04:57.457249632 +0200
@@ -122,11 +122,9 @@
        read_lock(&GlobalSMBSeslock);
        list_for_each(tmp, &GlobalSMBSessionList) {
                ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
-               if (ses->server) {
-                       if (ses->server == server) {
-                               ses->status = CifsNeedReconnect;
-                               ses->ipc_tid = 0;
-                       }
+               if (ses->server && ses->server == server) {
+                       ses->status = CifsNeedReconnect;
+                       ses->ipc_tid = 0;
                }
                /* else tcp and smb sessions need reconnection */
        }
@@ -857,33 +855,31 @@
                 char *userName, struct TCP_Server_Info **psrvTcp)
 {
        struct list_head *tmp;
-       struct cifsSesInfo *ses;
+       struct cifsSesInfo *ret = NULL;
        *psrvTcp = NULL;
        read_lock(&GlobalSMBSeslock);
 
        list_for_each(tmp, &GlobalSMBSessionList) {
-               ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
-               if (ses->server) {
-                       if((target_ip_addr && 
-                               (ses->server->addr.sockAddr.sin_addr.s_addr
-                                 == target_ip_addr->s_addr)) || (target_ip6_addr
-                               && memcmp(&ses->server->addr.sockAddr6.sin6_addr,
-                                       target_ip6_addr,sizeof(*target_ip6_addr)))){
-                               /* BB lock server and tcp session and increment use count here?? */
-                               *psrvTcp = ses->server; /* found a match on the TCP session */
-                               /* BB check if reconnection needed */
-                               if (strncmp
-                                   (ses->userName, userName,
-                                    MAX_USERNAME_SIZE) == 0){
-                                       read_unlock(&GlobalSMBSeslock);
-                                       return ses;     /* found exact match on both tcp and SMB sessions */
-                               }
+               struct cifsSesInfo * ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList);
+               if (!ses->server)
+                       continue;
+               if((target_ip_addr && 
+                       (ses->server->addr.sockAddr.sin_addr.s_addr
+                         == target_ip_addr->s_addr)) || (target_ip6_addr
+                       && memcmp(&ses->server->addr.sockAddr6.sin6_addr,
+                               target_ip6_addr,sizeof(*target_ip6_addr)))){
+                       /* BB lock server and tcp session and increment use count here?? */
+                       *psrvTcp = ses->server; /* found a match on the TCP session */
+                       /* BB check if reconnection needed */
+                       if (strncmp(ses->userName, userName, MAX_USERNAME_SIZE) == 0) {
+                               ret = ses;
+                               goto out;
                        }
                }
-               /* else tcp and smb sessions need reconnection */
        }
+ out:
        read_unlock(&GlobalSMBSeslock);
-       return NULL;
+       return ret;
 }
 
 static struct cifsTconInfo *

