[PATCH 06/13] GFS: logging and recovery

2005-09-01, David Teigland
A per-node on-disk log is used for recovery.

Signed-off-by: Ken Preslan <[EMAIL PROTECTED]>
Signed-off-by: David Teigland <[EMAIL PROTECTED]>

---

 fs/gfs2/log.c  |  670 +
 fs/gfs2/log.h  |   68 +
 fs/gfs2/recovery.c |  561 
 fs/gfs2/recovery.h |   32 ++
 4 files changed, 1331 insertions(+)

--- a/fs/gfs2/log.c 1970-01-01 07:30:00.0 +0730
+++ b/fs/gfs2/log.c 2005-09-01 17:36:55.338111976 +0800
@@ -0,0 +1,670 @@
+/*
+ * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
+ * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/buffer_head.h>
+#include <asm/semaphore.h>
+
+#include "gfs2.h"
+#include "bmap.h"
+#include "glock.h"
+#include "log.h"
+#include "lops.h"
+#include "meta_io.h"
+
+#define PULL 1
+
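+/*
+ * Transactions and log flushes exclude each other.  A transaction waits
+ * until no flush is pending (sd_log_flush_count is zero) and then bumps
+ * sd_log_trans_count; a flush bumps sd_log_flush_count first and then
+ * waits for the in-flight transactions to drain.  Any number of
+ * transactions may run concurrently, but never while a flush is active.
+ */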
+static inline int is_done(struct gfs2_sbd *sdp, atomic_t *a)
+{
+   int done;
+   gfs2_log_lock(sdp);
+   done = atomic_read(a) ? FALSE : TRUE;
+   gfs2_log_unlock(sdp);
+   return done;
+}
+
+static void do_lock_wait(struct gfs2_sbd *sdp, wait_queue_head_t *wq,
+atomic_t *a)
+{
+   gfs2_log_unlock(sdp);
+   wait_event(*wq, is_done(sdp, a));
+   gfs2_log_lock(sdp);
+}
+
+static void lock_for_trans(struct gfs2_sbd *sdp)
+{
+   gfs2_log_lock(sdp);
+   do_lock_wait(sdp, &sdp->sd_log_trans_wq, &sdp->sd_log_flush_count);
+   atomic_inc(&sdp->sd_log_trans_count);
+   gfs2_log_unlock(sdp);
+}
+
+static void unlock_from_trans(struct gfs2_sbd *sdp)
+{
+   gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_trans_count));
+   if (atomic_dec_and_test(&sdp->sd_log_trans_count))
+   wake_up(&sdp->sd_log_flush_wq);
+}
+
+void gfs2_lock_for_flush(struct gfs2_sbd *sdp)
+{
+   gfs2_log_lock(sdp);
+   atomic_inc(&sdp->sd_log_flush_count);
+   do_lock_wait(sdp, &sdp->sd_log_flush_wq, &sdp->sd_log_trans_count);
+   gfs2_log_unlock(sdp);
+}
+
+void gfs2_unlock_from_flush(struct gfs2_sbd *sdp)
+{
+   gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_flush_count));
+   if (atomic_dec_and_test(&sdp->sd_log_flush_count))
+   wake_up(&sdp->sd_log_trans_wq);
+}
+
+/**
+ * gfs2_struct2blk - compute the number of log descriptor blocks needed
+ * @sdp: the filesystem
+ * @nstruct: the number of structures
+ * @ssize: the size of the structures
+ *
+ * Compute the number of log descriptor blocks needed to hold a certain number
+ * of structures of a certain size.
+ *
+ * Returns: the number of blocks needed (minimum is always 1)
+ */
+
+unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
+unsigned int ssize)
+{
+   unsigned int blks;
+   unsigned int first, second;
+
+   blks = 1;
+   first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
+
+   if (nstruct > first) {
+   second = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / ssize;
+   blks += DIV_RU(nstruct - first, second);
+   }
+
+   return blks;
+}
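+/*
+ * Worked example for gfs2_struct2blk() above (the sizes are illustrative
+ * only, not the real on-disk structure sizes): with a 4096-byte block,
+ * a 64-byte log descriptor and 8-byte entries, the first block holds
+ * (4096 - 64) / 8 = 504 entries, and with a 24-byte continuation header
+ * each further block holds (4096 - 24) / 8 = 509.  So 1000 entries need
+ * 1 + DIV_RU(1000 - 504, 509) = 2 blocks.
+ */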
+
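+/**
+ * gfs2_ail1_start - start writeback of buffers on the AIL1 list
+ * @sdp: the filesystem
+ * @flags: DIO_ALL to push every entry, not just the oldest
+ *
+ * Tag entries with a new sync generation and kick off their I/O, starting
+ * with the oldest entry (the tail of sd_ail1_list).  Unless DIO_ALL is
+ * set, stop once the oldest entry has been emptied or is no longer at the
+ * tail of the list.
+ */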
+void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
+{
+   struct list_head *head = &sdp->sd_ail1_list;
+   uint64_t sync_gen;
+   struct list_head *first, *tmp;
+   struct gfs2_ail *first_ai, *ai;
+
+   gfs2_log_lock(sdp);
+   if (list_empty(head)) {
+   gfs2_log_unlock(sdp);
+   return;
+   }
+   sync_gen = sdp->sd_ail_sync_gen++;
+
+   first = head->prev;
+   first_ai = list_entry(first, struct gfs2_ail, ai_list);
+   first_ai->ai_sync_gen = sync_gen;
+   gfs2_ail1_start_one(sdp, first_ai);
+
+   if (flags & DIO_ALL)
+   first = NULL;
+
+   for (;;) {
+   if (first &&
+   (head->prev != first ||
+gfs2_ail1_empty_one(sdp, first_ai, 0)))
+   break;
+
+   for (tmp = head->prev; tmp != head; tmp = tmp->prev) {
+   ai = list_entry(tmp, struct gfs2_ail, ai_list);
+   if (ai->ai_sync_gen >= sync_gen)
+   continue;
+   ai->ai_sync_gen = sync_gen;
+   gfs2_ail1_start_one(sdp, ai);
+   break;
+   }
+
+   if (tmp == head)
+   break;
+   }
+
+   gfs2_log_unlock(sdp);
+}
+
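+/**
+ * gfs2_ail1_empty - check whether the AIL1 entries have been written back
+ * @sdp: the filesystem
+ * @flags: DIO flags
+ *
+ * Returns: non-zero if the AIL1 list is now empty
+ */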
+int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
+{
+   struct list_head *head, *tmp, *prev;
+   struct gfs2_ail *ai;
+   int ret;
+
+   gfs2_log_lock(sdp);
+
+   for (head = &sdp->sd_ail1_list, tmp = head->prev, prev = tmp->prev;
+tmp != head;
+tmp = prev, prev = tmp->prev) {
+   ai = list_entry(tmp, struct gfs2_ail, ai_list);
+   
