Hi,


 After many tests I have created the configuration needed to run jobs in parallel —
 both backup (copy) jobs and restore jobs.

 Files of a configuration look below.

 Pay attention to:
 0. See "Running Concurrent Jobs" chapter of original documentation.
 1. All priorities are identical (in example, Priority = 10)
 2. As described in the documentation, simultaneous writing to and reading from the same Storage is impossible.

 Test this configuration:
 - first run Job :
       2: file1-to-pool1
 - after this complete, run Job :
       3: file2-to-pool2
 - then immediately run Job :
       1: restore.files
 - status dir

Running Jobs:
 JobId Level   Name                       Status
======================================================================
     2 Full    file2-to-pool2.2006-07-04_14.47.36 is running
     3         restore.files.2006-07-04_14.48.40 is running



 The same applies to tapes as well.

 My environment: FC5, MySQL 5, bacula 1.38.11-3 (build from src.rpm).




----- bacula-dir.conf ----------------------------------------


Director {
  Name = "main.dir"
  WorkingDirectory = /arc/temp
  Maximum Concurrent Jobs = 100
  ...
}

Storage {
  Name = "StorageFile1"
  Device = "devFileStorage1"
  Media Type = File1
  Maximum Concurrent Jobs = 100
  ...
}

Storage {
  Name = "StorageFile2"
  Device = "devFileStorage2"
  Media Type = File2
  Maximum Concurrent Jobs = 100
  ...
}

Job {
  Name = "restore.files"
  Type = Restore
  Client = "local.fd"
  FileSet = "Full Set"
  Storage = StorageFile1
  Pool = "pool.test1"
  Where = /arc/restore
  Maximum Concurrent Jobs = 100
  Priority = 10
  Write Bootstrap = "/var/bacula/test11.bsr"
  RunBeforeJob = "/etc/bacula/scripts/restore_before.sh"
  ...
}

FileSet {
  Name = "Full Set"
  Include {
    Options {
      signature = MD5
    }
    File = /tmp
  }
}

Client {
  Name = "local.fd"
  Catalog = MyCatalog
  Maximum Concurrent Jobs = 100
  ...
}


Catalog {
  Name = MyCatalog
  dbname = bacula; user = bacula; password = ""
}

Messages {
  Name = Standard
  ...
}


Messages {
  Name = Daemon
  ...
}

Pool {
  Name = "pool.test1"
  Pool Type = Backup
  LabelFormat = "pool.test1."
  ...
}

Pool {
  Name = "pool.test2"
  Pool Type = Backup
  LabelFormat = "pool.test2."
  ...
}

FileSet {
  Name = "file.set.test1"
  Include {
    Options {
      signature = MD5
      compression = GZIP
    }
    File = "/arc/test/1"
  }
}

FileSet {
  Name = "file.set.test2"
  Include {
    Options {
      signature = MD5
      compression = GZIP
    }
    File = "/arc/test/2"
  }
}

Job {
    Name = "file1-to-pool1"
    Type = Backup
    Level = Full
    Client = "local.fd"
    FileSet = "file.set.test1"
    Storage = StorageFile1
    Messages = Standard
    Pool = "pool.test1"
    Write Bootstrap = "/var/bacula/test11.bsr"
    RunBeforeJob = "/etc/bacula/scripts/test1_before.sh"
    SpoolData = yes
    Maximum Concurrent Jobs = 100
    Priority = 10
}

Job {
    Name = "file2-to-pool2"
    Type = Backup
    Level = Full
    Client = "local.fd"
    FileSet = "file.set.test2"
    Storage = StorageFile2
    Messages = Standard
    Pool = "pool.test2"
    Write Bootstrap = "/var/bacula/test22.bsr"
    RunBeforeJob = "/etc/bacula/scripts/test2_before.sh"
    SpoolData = yes
    Maximum Concurrent Jobs = 100
    Priority = 10
}

Job {
    Name = "file1-to-pool2"
    Type = Backup
    Level = Full
    Client = "local.fd"
    FileSet = "file.set.test1"
    Storage = StorageFile2
    Messages = Standard
    Pool = "pool.test2"
    Write Bootstrap = "/var/bacula/test12.bsr"
    RunBeforeJob = "/etc/bacula/scripts/test1_before.sh"
    SpoolData = yes
    Maximum Concurrent Jobs = 100
    Priority = 10
}

Job {
    Name = "file2-to-pool1"
    Type = Backup
    Level = Full
    Client = "local.fd"
    FileSet = "file.set.test2"
    Storage = StorageFile1
    Messages = Standard
    Pool = "pool.test1"
    Write Bootstrap = "/var/bacula/test21.bsr"
    RunBeforeJob = "/etc/bacula/scripts/test2_before.sh"
    SpoolData = yes
    Maximum Concurrent Jobs = 100
    Priority = 10
}







----- bacula-sd.conf ----------------------------------------

Storage {
  Name = "Storage1"
  WorkingDirectory = /arc/temp
  Pid Directory = "/var/run"
  Maximum Concurrent Jobs = 100
  ...
}

Director {
  Name = "main.dir"
  ...
}

Device {
  Name = "devFileStorage1"
  Media Type = File1
  Archive Device = /arc/dev
  LabelMedia = yes;
  Random Access = Yes;
  AutomaticMount = yes;
  RemovableMedia = no;
  AlwaysOpen = no;
  Spool Directory = /arc/spool
}

Device {
  Name = "devFileStorage2"
  Media Type = File2
  Archive Device = /arc/dev
  LabelMedia = yes;
  Random Access = Yes;
  AutomaticMount = yes;
  RemovableMedia = no;
  AlwaysOpen = no;
  Spool Directory = /arc/spool
}

Messages {
  Name = Standard
  director = "main.dir" = all
}





----- bacula-fd.conf ----------------------------------------

Director {
  Name = "main.dir"
  ...
}

FileDaemon {
  Name = "local.fd"
  WorkingDirectory = /arc/temp
  Pid Directory = /var/run
  Maximum Concurrent Jobs = 100
  ...
}

Messages {
  Name = Standard
  director = "main.dir" = all, !skipped
}



----- restore_before.sh ----------------------------------------
#!/bin/bash
# RunBeforeJob hook for the "restore.files" job: pause for three minutes
# so the job stays in "running" state long enough for concurrent-job
# scheduling to be observed via `status dir`.
sleep 180
exit 0



----- test1_before.sh ----------------------------------------
#!/bin/bash
# RunBeforeJob hook for the "file1-to-pool1" job.
# If the flag file exists, let the backup start immediately; otherwise
# wait INTERVAL seconds (keeping the job "running" for the concurrency
# test) before proceeding.  Always exits 0 so the backup runs either way.
#
# Fix: all variable expansions are now double-quoted — the original
# `[ -f $FLAG_FILE ]` and bare `echo "..."$FLAG_FILE` would word-split
# if the path ever contained whitespace or glob characters.

FLAG_FILE="/arc/test/1/flag1"
INTERVAL=180

if [ -f "$FLAG_FILE" ]; then
  echo "Ok. File found: $FLAG_FILE"
else
  echo "Error. File not found: $FLAG_FILE"
  echo "Sleep: $INTERVAL"
  sleep "$INTERVAL"
fi
exit 0




----- test2_before.sh ----------------------------------------

#!/bin/bash
# RunBeforeJob hook for the "file2-to-pool2" job: delay for three minutes
# so the job stays visible in `status dir` during the concurrency test.
#
# NOTE(review): as pasted, a blank line preceded the shebang — the shebang
# must be the very first bytes of the file or the kernel will not honor the
# interpreter directive; make sure nothing appears before `#!/bin/bash`.
# An explicit `exit 0` is added for consistency with restore_before.sh.
sleep 180
exit 0


<EOF>


Using Tomcat but need to do more? Need to support web services, security?
Get stuff done quickly with pre-integrated technology to make your job easier
Download IBM WebSphere Application Server v.1.0.1 based on Apache Geronimo
http://sel.as-us.falkag.net/sel?cmd=lnk&kid=120709&bid=263057&dat=121642
_______________________________________________
Bacula-users mailing list
Bacula-users@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/bacula-users

Reply via email to