root@yoda:~# zpool status -v
  pool: backup
 state: ONLINE
status: One or more devices has experienced an error resulting in data
        corruption.  Applications may be affected.
action: Restore the file in question if possible.  Otherwise restore the
        entire pool from backup.
   see: http://www.sun.com/msg/ZFS-8000-8A
 scrub: none requested
config:

        NAME         STATE     READ WRITE CKSUM
        backup       ONLINE       0     0     9
          raidz1     ONLINE       0     0    18
            c4t2d0   ONLINE       0     0     0
            c4t3d0   ONLINE       0     0     0
            c4t4d0   ONLINE       0     0     0
            c4t5d0   ONLINE       0     0     0
            c4t6d0   ONLINE       0     0     0
            c4t7d0   ONLINE       0     0     0
            c4t8d0   ONLINE       0     0     0
          raidz1     ONLINE       0     0    22
            c4t9d0   ONLINE       0     0     0
            c4t10d0  ONLINE       0     0     0
            c4t11d0  ONLINE       0     0     0
            c4t12d0  ONLINE       0     0     0
            c4t13d0  ONLINE       0     0     0
            c4t14d0  ONLINE       0     0     0
            c4t15d0  ONLINE       0     0     0

errors: Permanent errors have been detected in the following files:

        <metadata>:<0x3f>
        backup:<0x0>

  pool: rpool
 state: ONLINE
 scrub: none requested
config:

        NAME          STATE     READ WRITE CKSUM
        rpool         ONLINE       0     0     0
          mirror      ONLINE       0     0     0
            c4t0d0s0  ONLINE       0     0     0
            c4t1d0s0  ONLINE       0     0     0

errors: No known data errors
root@yoda:~# zfs destroy -f backup
cannot open 'backup': I/O error
root@yoda:~# zpool destroy -f backup
cannot open 'backup': I/O error
I even ran the script below to zero out the EFI partition.
I just can't get rid of this pool!
Any suggestions?
root@yoda:~# cat /root/bin/dd-disks.sh
#!/bin/bash
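# Wipe the label areas on every disk that belonged to the 'backup' pool
# so ZFS no longer recognizes them as pool members.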
for d in c4t2d0 c4t3d0 c4t4d0 c4t5d0 c4t6d0 c4t7d0 c4t8d0 c4t9d0 c4t10d0 \
         c4t11d0 c4t12d0 c4t13d0 c4t14d0 c4t15d0
do
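    # Earlier, commented-out attempts: wipe only the first 512-byte sector,
    # and only the single sector at the computed end of the disk.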
# dd if=/dev/zero of=/dev/rdsk/$d count=1 bs=512
# Total disk sectors available: 976756750 + 16384 (reserved sectors) - 1 = 976773133
# dd if=/dev/zero of=/dev/rdsk/$d count=1 bs=512 skip=976773133
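    # Current approach: zero the first 1 MiB of the disk (front label area),
    # then zero from sector 976773133 through the end of the disk (backup EFI
    # label area); the second dd runs until it hits end-of-device.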
dd if=/dev/zero of=/dev/rdsk/$d count=1 bs=1M
dd if=/dev/zero of=/dev/rdsk/$d bs=512 seek=976773133
done