diff --git a/src/bin/pg_basebackup/Makefile b/src/bin/pg_basebackup/Makefile
index 4f27492..4b95afa 100644
--- a/src/bin/pg_basebackup/Makefile
+++ b/src/bin/pg_basebackup/Makefile
@@ -25,7 +25,8 @@ OBJS = \
 	$(WIN32RES) \
 	receivelog.o \
 	streamutil.o \
-	walmethods.o
+	walmethods.o \
+	pgrhash.o
 
 all: pg_basebackup pg_receivewal pg_recvlogical
 
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index b8f9ee6..0ba3a8a 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -30,7 +30,9 @@
 #include "common/file_perm.h"
 #include "common/file_utils.h"
 #include "common/logging.h"
+#include "common/sha2.h"
 #include "common/string.h"
+#include "fe_utils/simple_list.h"
 #include "fe_utils/recovery_gen.h"
 #include "fe_utils/string_utils.h"
 #include "getopt_long.h"
@@ -38,12 +40,19 @@
 #include "pgtar.h"
 #include "pgtime.h"
 #include "pqexpbuffer.h"
+#include "pgrhash.h"
 #include "receivelog.h"
 #include "replication/basebackup.h"
 #include "streamutil.h"
 
 #define ERRCODE_DATA_CORRUPTED	"XX001"
 
+#define CHUNK_SIZE 1024
+#define MAXCHUNKS 16
+
+static const char hextbl[] = "0123456789abcdef";
+static unsigned hex_encode(const char *src, unsigned len, char *dst);
+
 typedef struct TablespaceListCell
 {
 	struct TablespaceListCell *next;
@@ -94,6 +103,14 @@ typedef struct WriteManifestState
 	FILE	   *file;
 }			WriteManifestState;
 
+typedef struct DataDirectoryAllFiles
+{
+	int maxchunks;
+	int nchunks;
+	int nfiles;
+	DataDirectoryFileInfo **files;
+}	DataDirectoryAllFiles;
+
 typedef void (*WriteDataCallback) (size_t nbytes, char *buf,
 								   void *callback_data);
 
@@ -142,6 +159,7 @@ static bool create_slot = false;
 static bool no_slot = false;
 static bool verify_checksums = true;
 static bool manifest_with_checksums = false;
+static bool	verify_backup = false;
 
 static bool success = false;
 static bool made_new_pgdata = false;
@@ -201,6 +219,12 @@ static bool reached_end_position(XLogRecPtr segendpos, uint32 timeline,
 static const char *get_tablespace_mapping(const char *dir);
 static void tablespace_list_append(const char *arg);
 
+static void VerifyBackup(void);
+static pgrhash *create_manifest_hash(char manifest_path[MAXPGPATH]);
+static void scan_data_directory(char *basedir, const char *subdirpath, pgrhash *ht);
+static void verify_file(struct dirent *de, char fn[MAXPGPATH],
+						struct stat st, char relative_path[MAXPGPATH],
+						pgrhash	*ht);
 
 static void
 cleanup_directories_atexit(void)
@@ -2167,6 +2191,7 @@ main(int argc, char **argv)
 		{"no-slot", no_argument, NULL, 2},
 		{"no-verify-checksums", no_argument, NULL, 3},
 		{"manifest-with-checksums", no_argument, NULL, 4},
+		{"verify-backup", no_argument, NULL, 5},
 		{NULL, 0, NULL, 0}
 	};
 	int			c;
@@ -2338,6 +2363,9 @@ main(int argc, char **argv)
 			case 4:
 				manifest_with_checksums = true;
 				break;
+			case 5:
+				verify_backup = true;
+				break;
 			default:
 
 				/*
@@ -2460,6 +2488,12 @@ main(int argc, char **argv)
 	}
 #endif
 
+	if(verify_backup)
+	{
+		VerifyBackup();
+		return 0;
+	}
+
 	/* connection in replication mode to server */
 	conn = GetConnection();
 	if (!conn)
@@ -2524,3 +2558,328 @@ main(int argc, char **argv)
 	success = true;
 	return 0;
 }
+
+/*
+ * Read the backup_manifest file and build a hash table of its entries, then
+ * scan the data directory and verify each file.  Finally, scan the hash
+ * table sequentially to report manifest entries missing from the directory.
+ */
+static void
+VerifyBackup(void)
+{
+	char		manifest_path[MAXPGPATH];
+	SimpleStringList *filenames;
+	SimpleStringListCell *cell;
+	pgrhash    *ht;
+
+	snprintf(manifest_path, sizeof(manifest_path), "%s/%s", basedir,
+			 "backup_manifest");
+
+	/* Build the hash table from the manifest contents. */
+	ht = create_manifest_hash(manifest_path);
+
+	scan_data_directory(basedir, NULL, ht);
+
+	/* Any entry never matched during the scan is a missing file. */
+	filenames = pgrhash_seq_search(ht);
+
+	for (cell = filenames->head; cell; cell = cell->next)
+	{
+		pg_log_info("missing file: %s", cell->val);
+	}
+}
+
+/*
+ * Read the backup manifest at the given path and build a hash table of the
+ * per-file records it contains.  While reading, recompute the SHA-256
+ * checksum of the manifest body and compare it with the checksum stored on
+ * the manifest's last line; on any mismatch, report an error and abort.
+ */
+static pgrhash *
+create_manifest_hash(char manifest_path[MAXPGPATH])
+{
+	FILE	   *file;
+	DataDirectoryAllFiles *allfiles;
+	int			fileno;
+	DataDirectoryFileInfo *record;
+	pgrhash    *ht;
+	PQExpBuffer	manifest;
+	pg_sha256_ctx sha256_ctx;
+	uint8		shabuf[PG_SHA256_DIGEST_LENGTH];
+	int			shastringlen;
+	char 		shatextbuf[PG_SHA256_DIGEST_LENGTH * 2 + 1];
+	char		file_checksum[CHECKSUM_LENGTH];
+	char		header[1024];
+
+	manifest = createPQExpBuffer();
+	if (!manifest)
+	{
+		pg_log_error("out of memory");
+		exit(1);
+	}
+
+	file = fopen(manifest_path, "r");
+
+	if (!file)
+	{
+		pg_log_error("could not open file \"%s\": %m", manifest_path);
+		exit(1);
+	}
+
+	if (fscanf(file, "%1023[^\n]\n", header) != 1)
+	{
+		pg_log_error("could not read header from backup_manifest");
+		exit(1);
+	}
+
+	appendPQExpBufferStr(manifest, header);
+	appendPQExpBufferStr(manifest, "\n");
+
+	allfiles = (DataDirectoryAllFiles *) palloc(sizeof(DataDirectoryAllFiles));
+
+	allfiles->maxchunks = MAXCHUNKS;
+	allfiles->nchunks = 0;
+	allfiles->nfiles = 0;
+	allfiles->files = (DataDirectoryFileInfo **)
+		palloc(sizeof(DataDirectoryFileInfo *) * MAXCHUNKS);
+
+	while (!feof(file))
+	{
+		if (allfiles->nfiles % CHUNK_SIZE == 0)
+		{
+			/* Grow the chunk-pointer array once every slot is in use. */
+			if (allfiles->nchunks >= allfiles->maxchunks)
+			{
+				allfiles->maxchunks *= 2;
+				allfiles->files = (DataDirectoryFileInfo **)
+					repalloc(allfiles->files, allfiles->maxchunks *
+							 sizeof(DataDirectoryFileInfo *));
+			}
+			allfiles->files[allfiles->nchunks] = (DataDirectoryFileInfo *)
+				palloc(CHUNK_SIZE * sizeof(DataDirectoryFileInfo));
+
+			allfiles->nchunks++;
+		}
+
+		record = &allfiles->files[allfiles->nfiles / CHUNK_SIZE][allfiles->nfiles % CHUNK_SIZE];
+
+		/* Field widths must stay in sync with DataDirectoryFileInfo. */
+		if (fscanf(file, "%9s %1023s %d %23[^\t] %64s\n", record->filetype,
+					   record->filename, &record->filesize,
+					   record->mdate, record->checksum) != 5)
+		{
+			/*
+			 * On failure, assume we have reached the manifest's final
+			 * line, which carries only the checksum of the manifest
+			 * itself.  Re-read that string and stop scanning for records.
+			 */
+			if (fseek(file, -(CHECKSUM_LENGTH), SEEK_CUR) == -1)
+			{
+				pg_log_error("error while reading the backup_manifest file");
+				exit(1);
+			}
+
+			if (fscanf(file, "%64s\n", file_checksum) != 1)
+			{
+				pg_log_error("error while reading the backup_manifest file");
+				exit(1);
+			}
+
+			break;
+		}
+
+		record->touch = false;
+
+		appendPQExpBuffer(manifest, "File\t%s\t%d\t%s\t%s\n", record->filename,
+						  record->filesize, record->mdate, record->checksum);
+
+		allfiles->nfiles++;
+	}
+
+	fclose(file);
+	/* Checksum the manifest. */
+	pg_sha256_init(&sha256_ctx);
+	pg_sha256_update(&sha256_ctx, (uint8 *) manifest->data, manifest->len);
+	pg_sha256_final(&sha256_ctx, shabuf);
+
+	shastringlen = hex_encode((char *) shabuf, PG_SHA256_DIGEST_LENGTH, shatextbuf);
+	shatextbuf[shastringlen] = '\0';
+
+	/*
+	 * Compare the two checksums; if they differ, the backup_manifest file
+	 * was modified after the backup was taken.  Throw an error and abort.
+	 */
+	if (strcmp(shatextbuf, file_checksum) != 0)
+	{
+		pg_log_error("backup manifest checksum difference. Aborting");
+		exit(1);
+	}
+
+	ht = pgrhash_create(allfiles->nfiles);
+
+	for (fileno = 0; fileno < allfiles->nfiles; fileno++)
+	{
+		record = &allfiles->files[fileno / CHUNK_SIZE][fileno % CHUNK_SIZE];
+		if (pgrhash_insert(ht, record) != -1)
+				pg_log_info("duplicate file present: %s", record->filename);
+	}
+
+	return ht;
+}
+
+/*
+ * Recursively scan the data directory rooted at basedir and, for each
+ * regular file found, verify it against the manifest hash table.
+ */
+static void
+scan_data_directory(char *basedir, const char *subdirpath, pgrhash *ht)
+{
+	char		path[MAXPGPATH];
+	char		relative_path[MAXPGPATH] = "";
+	DIR		   *dir;
+	struct dirent *de;
+
+	if (subdirpath)
+	{
+		snprintf(path, sizeof(path), "%s/%s", basedir,
+				 subdirpath);
+		snprintf(relative_path, sizeof(relative_path), "%s/", subdirpath);	/* keep trailing '/' */
+
+	}
+	else
+		snprintf(path, sizeof(path), "%s", basedir);
+
+	dir = opendir(path);
+	if (!dir)
+	{
+		pg_log_error("could not open directory \"%s\": %m", path);
+		exit(1);
+	}
+
+	while ((de = readdir(dir)) != NULL)
+	{
+		char		fn[MAXPGPATH];
+		struct stat st;
+
+		if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0 ||
+			strcmp(de->d_name, "pg_wal") == 0)	/* pg_wal isn't in the manifest */
+			continue;
+
+		snprintf(fn, sizeof(fn), "%s/%s", path, de->d_name);
+		if (stat(fn, &st) < 0)
+		{
+			pg_log_error("could not stat file \"%s\": %m", fn);
+			exit(1);
+		}
+		if (S_ISREG(st.st_mode))
+		{
+			verify_file(de, fn, st, relative_path, ht);
+		}
+		else if (S_ISDIR(st.st_mode))
+		{
+			char		newsubdirpath[MAXPGPATH];
+
+			if (subdirpath)
+				snprintf(newsubdirpath, MAXPGPATH, "%s/%s", subdirpath,
+						 de->d_name);
+			else
+				snprintf(newsubdirpath, MAXPGPATH, "%s", de->d_name);
+
+			scan_data_directory(basedir, newsubdirpath, ht);
+		}
+	}
+	closedir(dir);
+}
+
+/*
+ * Check whether the given regular file has a matching entry in the hash
+ * table and, if so, verify its size and (when present) its checksum.
+ */
+static void
+verify_file(struct dirent *de, char fn[MAXPGPATH],
+					   struct stat st, char relative_path[MAXPGPATH],
+					   pgrhash *ht)
+{
+	PQExpBuffer	filename = NULL;
+	DataDirectoryFileInfo *record;
+
+	/* Skip backup manifest file; it does not list itself. */
+	if (strcmp(de->d_name, "backup_manifest") == 0)
+		return;
+
+	filename = createPQExpBuffer();
+	if (!filename)
+	{
+		pg_log_error("out of memory");
+		exit(1);
+	}
+
+	appendPQExpBuffer(filename, "%s%s", relative_path, de->d_name);
+
+	/* Look up the manifest entry for this file. */
+	record = pgrhash_get(ht, filename->data);
+	if (record)
+	{
+		record->touch = true;
+		if (record->filesize != st.st_size)
+			pg_log_info("size changed for file: %s, original size: %d, current size: %zu",
+						filename->data, record->filesize, st.st_size);
+
+		if (strcmp(record->checksum, "-") != 0)
+		{
+			FILE	   *fp;
+			pg_sha256_ctx sha256_ctx;
+			uint8		shabuf[PG_SHA256_DIGEST_LENGTH];
+			char 		shatextbuf[PG_SHA256_DIGEST_LENGTH * 2 + 1];
+			int			shatextlen;
+			static char	buf[1000000];	/* 1 MB chunk; static to spare the stack */
+			pgoff_t		len = 0;
+			off_t		cnt;
+
+			pg_sha256_init(&sha256_ctx);
+
+			fp = fopen(fn, "rb");
+			if (!fp)
+			{
+				pg_log_error("could not open file \"%s\": %m", fn);
+				exit(1);
+			}
+
+			/* Feed the file to the checksum in 1 MB chunks. */
+			while ((cnt = fread(buf, 1, Min(sizeof(buf), st.st_size - len), fp)) > 0)
+			{
+				pg_sha256_update(&sha256_ctx, (uint8 *) buf, cnt);
+				len += cnt;
+			}
+
+			pg_sha256_final(&sha256_ctx, shabuf);
+
+			/* Convert checksum to hexadecimal. */
+			shatextlen = hex_encode((char *) shabuf, PG_SHA256_DIGEST_LENGTH, shatextbuf);
+			Assert(shatextlen + 1 == sizeof(shatextbuf));
+			shatextbuf[shatextlen] = '\0';
+
+			fclose(fp);
+
+			if (strcmp(record->checksum, shatextbuf) != 0)
+				pg_log_info("checksum difference for file: %s", filename->data);
+		}
+	}
+	else
+		pg_log_info("extra file found: %s", filename->data);
+	destroyPQExpBuffer(filename);
+}
+
+static unsigned
+hex_encode(const char *src, unsigned len, char *dst)
+{
+	const char *end = src + len;
+
+	while (src < end)
+	{
+		*dst++ = hextbl[((unsigned char) *src >> 4) & 0xF];
+		*dst++ = hextbl[(unsigned char) *src & 0xF];
+		src++;
+	}
+	return len * 2;
+}
diff --git a/src/bin/pg_basebackup/pgrhash.c b/src/bin/pg_basebackup/pgrhash.c
new file mode 100644
index 0000000..17fa1fd
--- /dev/null
+++ b/src/bin/pg_basebackup/pgrhash.c
@@ -0,0 +1,165 @@
+#include "postgres_fe.h"
+
+#include "common/logging.h"
+#include "libpq-fe.h"
+#include "pgrhash.h"
+
+
+typedef struct pgrhash_entry
+{
+	struct pgrhash_entry *next; /* link to next entry in same bucket */
+	DataDirectoryFileInfo *record;	/* manifest record stored in this entry */
+} pgrhash_entry;
+
+/* The typedef for this struct tag already comes from pgrhash.h. */
+struct pgrhash
+{
+	unsigned	nbuckets;		/* number of buckets, always a power of two */
+	pgrhash_entry **bucket;		/* array of nbuckets bucket chains */
+};
+
+static uint32 string_hash_sdbm(const char *key);
+static bool pgrhash_compare(char *bt_filename, char *filename);
+
+/* Create a new hash table sized for the given number of records. */
+pgrhash *
+pgrhash_create(int count)
+{
+	unsigned	bucket_shift;
+	pgrhash    *ht;
+
+	bucket_shift = fls(count);
+	if (bucket_shift >= sizeof(unsigned) * BITS_PER_BYTE)
+	{
+		pg_log_error("too many tuples");
+		exit(1);
+	}
+	ht = (pgrhash *) pg_malloc(sizeof(pgrhash));
+	ht->nbuckets = ((unsigned) 1) << bucket_shift;
+	ht->bucket = (pgrhash_entry **)
+		pg_malloc0(ht->nbuckets * sizeof(pgrhash_entry *));
+	return ht;
+}
+
+/*
+ * Look up the hash table entry matching the given filename; NULL if absent.
+ */
+DataDirectoryFileInfo *
+pgrhash_get(pgrhash *ht, char *filename)
+{
+	uint32		hashvalue = string_hash_sdbm(filename);
+	pgrhash_entry *entry;
+
+	/* Walk the collision chain of the bucket this filename hashes into. */
+	for (entry = ht->bucket[hashvalue & (ht->nbuckets - 1)];
+		 entry != NULL; entry = entry->next)
+	{
+		if (pgrhash_compare(entry->record->filename, filename))
+			return entry->record;
+	}
+	return NULL;
+}
+
+/*
+ * Insert a manifest record into the hash table unless an entry with the
+ * same filename is already present.
+ *
+ * Returns -1 on successful insertion, or 0 when a duplicate filename was
+ * found and nothing was inserted.
+ */
+int
+pgrhash_insert(pgrhash *ht, DataDirectoryFileInfo *record)
+{
+	unsigned	bucket_number;
+	unsigned	hashvalue;
+	pgrhash_entry *scan;
+	pgrhash_entry *entry;
+
+	hashvalue = string_hash_sdbm(record->filename);
+	bucket_number = hashvalue & (ht->nbuckets - 1);
+	/* Refuse the insertion if this filename is already in the table. */
+	for (scan = ht->bucket[bucket_number];
+		 scan != NULL; scan = scan->next)
+	{
+		if (pgrhash_compare(scan->record->filename, record->filename))
+			return 0;
+	}
+
+	/* Link a new entry at the head of the bucket's chain. */
+	entry = pg_malloc(sizeof(pgrhash_entry));
+	entry->next = ht->bucket[bucket_number];
+	entry->record = record;
+	ht->bucket[bucket_number] = entry;
+	return -1;
+}
+
+/*
+ * Simple string hash function from http://www.cse.yorku.ca/~oz/hash.html
+ *
+ * The backend uses a more sophisticated function for hashing strings,
+ * but we don't really need that complexity here.  The keys hashed here
+ * are data-directory-relative file paths, which are short strings that
+ * differ in their trailing characters, so there shouldn't be much room
+ * for pathological input.
+ */
+static uint32
+string_hash_sdbm(const char *key)
+{
+	uint32		hash = 0;
+	int			c;
+
+	while ((c = *key++))
+		hash = c + (hash << 6) + (hash << 16) - hash;
+
+	return hash;
+}
+
+/*
+ * Return true when the two filenames are identical.
+ *
+ * This is a thin wrapper around strcmp(); it is kept as a named helper so
+ * that the hash probe sites read as a single key-comparison primitive.
+ * (The original TODO suggested removing it; either choice is fine.)
+ */
+static bool
+pgrhash_compare(char *bt_filename, char *filename)
+{
+	return strcmp(bt_filename, filename) == 0;
+}
+
+/*
+ * Sequentially scan the hash table and collect the filenames of records
+ * that were never marked as touched while the data directory was being
+ * walked; these are the files listed in the backup manifest but missing
+ * from the data directory.
+ *
+ * The returned list (and its cells) is allocated with pg_malloc and is
+ * never freed; callers only consume it immediately before process exit.
+ */
+SimpleStringList *
+pgrhash_seq_search(pgrhash *ht)
+{
+	unsigned	i;
+	SimpleStringList *fileList;
+
+	fileList = (SimpleStringList *) pg_malloc(sizeof(SimpleStringList));
+	fileList->head = NULL;
+	fileList->tail = NULL;
+
+	/*
+	 * Visit every bucket; within a bucket, follow the whole collision
+	 * chain.  The result order therefore follows hash order, not the
+	 * order of the manifest.
+	 */
+	for (i = 0; i < ht->nbuckets; i++)
+	{
+		pgrhash_entry *entry;
+
+		for (entry = ht->bucket[i]; entry != NULL; entry = entry->next)
+		{
+			if (!entry->record->touch)
+				simple_string_list_append(fileList,
+										  entry->record->filename);
+		}
+	}
+
+	return fileList;
+}
diff --git a/src/bin/pg_basebackup/pgrhash.h b/src/bin/pg_basebackup/pgrhash.h
new file mode 100644
index 0000000..ecbac09
--- /dev/null
+++ b/src/bin/pg_basebackup/pgrhash.h
@@ -0,0 +1,29 @@
+#ifndef PGRHASH_H
+#define PGRHASH_H
+
+#include "common/sha2.h"
+#include "fe_utils/simple_list.h"
+
+/* Length of a hex-encoded SHA-256 checksum, including the trailing NUL. */
+#define CHECKSUM_LENGTH (PG_SHA256_DIGEST_LENGTH * 2 + 1)
+
+struct pgrhash;
+typedef struct pgrhash pgrhash;
+
+/* One parsed file record from the backup manifest. */
+typedef	struct DataDirectoryFileInfo
+{
+	char 		filetype[10];	/* record type tag, e.g. "File" */
+	char		filename[MAXPGPATH];	/* path relative to the data directory */
+	int			filesize;		/* size recorded in the manifest */
+	char		mdate[24];		/* modification timestamp string */
+	char		checksum[CHECKSUM_LENGTH];	/* hex SHA-256, or "-" if none */
+	bool		touch;			/* set once the on-disk file is seen */
+} DataDirectoryFileInfo;
+
+extern pgrhash *pgrhash_create(int count);
+extern DataDirectoryFileInfo *pgrhash_get(pgrhash *ht, char *filename);
+extern int pgrhash_insert(pgrhash *ht, DataDirectoryFileInfo *record);
+extern SimpleStringList *pgrhash_seq_search(pgrhash *ht);
+
+#endif							/* PGRHASH_H */
