hi all,

When I add an instance with the "plain" disk template, I encounter
the following errors.

root@node1:~# gnt-instance add -n node1.harrycluster.com -o
debootstrap+default -t plain -s 5G instance1
Sat Jul 13 13:09:29 2013  - INFO: Resolved given name 'instance1' to
'instance1.harrycluster.com'
Sat Jul 13 13:09:33 2013 * disk 0, size 5.0G
Sat Jul 13 13:09:33 2013 * creating instance disks...
Sat Jul 13 13:09:34 2013 adding instance instance1.harrycluster.com to
cluster config
Sat Jul 13 13:09:35 2013  - INFO: Waiting for instance
instance1.harrycluster.com to sync disks
Sat Jul 13 13:09:35 2013  - INFO: Instance
instance1.harrycluster.com's disks are in sync
Sat Jul 13 13:09:36 2013 * running the instance OS create scripts...
Sat Jul 13 13:09:47 2013 * starting instance...

root@node1:~/essential-config-files# gnt-instance list
Instance                   Hypervisor OS                  Primary_node
          Status     Memory
instance1.harrycluster.com xen-pvm    debootstrap+default
node1.harrycluster.com ERROR_down      -

root@node1:~/essential-config-files# gnt-instance info instance1
- Instance name: instance1.harrycluster.com
  UUID: 0a60db9a-7dfc-48e4-8a04-fc0fdf74b462
  Serial number: 2
  Creation time: 2013-07-13 09:20:03
  Modification time: 2013-07-13 09:20:16
  State: configured to be up, actual state is down
  Nodes:
    - primary: node1.harrycluster.com
      group: default (UUID f29b7302-247f-4b0a-af26-98f02a2e24d6)
    - secondaries:
  Operating system: debootstrap+default
  Operating system parameters:
  Allocated network port: None
  Hypervisor: xen-pvm
  Hypervisor parameters:
    blockdev_prefix: default (sd)
    bootloader_args: default ()
    bootloader_path: default ()
    cpu_cap: default (0)
    cpu_mask: default (all)
    cpu_weight: default (256)
    initrd_path: default ()
    kernel_args: default (ro)
    kernel_path: default (/boot/vmlinuz-3-xenU)
    reboot_behavior: default (reboot)
    root_path: default (/dev/xvda1)
    use_bootloader: default (False)
    vif_script: default ()
  Back-end parameters:
    always_failover: default (False)
    auto_balance: default (True)
    maxmem: default (128)
    memory: default (128)
    minmem: default (128)
    spindle_use: default (1)
    vcpus: default (1)
  NICs:
    - nic/0:
      MAC: aa:00:00:92:5b:7b
      IP: None
      mode: bridged
      link: xen-br0
      network: None
      UUID: e0cc50fc-d8c3-425e-952f-5cad5c569af0
      name: None
  Disk template: plain
  Disks:
    - disk/0: lvm, size 5.0G
      access mode: rw
      logical_id: xenvg/fae2c09e-3aa6-4695-8035-5074577743bc.disk0
      on primary: /dev/xenvg/fae2c09e-3aa6-4695-8035-5074577743bc.disk0 (254:0)
      name: None
      UUID: f5d07efd-3b92-43b8-92d2-35fe5378ac1a


root@node1:~/essential-config-files# gnt-cluster --version
gnt-cluster (ganeti v2.8.0beta1-344-g20029b6) 2.9.0~alpha1


The logs
========
root@node1:/var/log/ganeti/os# cat
add-debootstrap+default-instance1.harrycluster.com-2013-07-13_09_20_05.log
BLKRRPART: Invalid argument

Disk /dev/xenvg/fae3c09e-3aa6-4695-8035-5074577743bc.disk0: 5120
cylinders, 64 heads, 32 sectors/track

sfdisk: ERROR: sector 0 does not have an msdos signature
 /dev/xenvg/fae2c09e-3aa6-4695-8035-5074577743bc.disk0: unrecognized
partition table type
Old situation:
No partitions found
New situation:
Units = sectors of 512 bytes, counting from 0

   Device Boot    Start       End   #sectors  Id  System
/dev/xenvg/fae2c09e-3aa6-4695-8035-5074577743bc.disk0p1   *      2048
10485759   10483712  83  Linux
/dev/xenvg/fae2c09e-3aa6-4695-8035-5074577743bc.disk0p2             0
       -          0   0  Empty
/dev/xenvg/fae2c09e-3aa6-4695-8035-5074577743bc.disk0p3             0
       -          0   0  Empty
/dev/xenvg/fae2c09e-3aa6-4695-8035-5074577743bc.disk0p4             0
       -          0   0  Empty
Successfully wrote the new partition table

Re-reading the partition table ...
BLKRRPART: Invalid argument

passwd: password expiry information changed.


Would anyone please give me some suggestions? Thanks very much.

--
Thanks
Weiwei Jia (Harry Wei)

Reply via email to