Hi,

Since I have the luxury of being able to take down one of the servers, here’s 
what I found on the MDT:

# mount -t ldiskfs
/dev/mapper/vgsrv6-lvsrv6_mdt on /mnt/mdt_fssrv6 type ldiskfs (rw)

# cd /mnt/mdt_fssrv6
# du -hs *

4.0K    CATALOGS
0       changelog_catalog
0       changelog_users
48K     CONFIGS
8.0K    fld
0       hsm_actions
16K     last_rcvd
4.0K    lfsck_bookmark
8.0K    lfsck_namespace
16K     lost+found
4.0K    lov_objid
4.0K    lov_objseq
29G     O
7.6M    oi.16.0
7.7M    oi.16.1
7.4M    oi.16.10
7.3M    oi.16.11
7.1M    oi.16.12
6.9M    oi.16.13
6.8M    oi.16.14
6.9M    oi.16.15
6.8M    oi.16.16
6.8M    oi.16.17
6.8M    oi.16.18
7.5M    oi.16.19
7.7M    oi.16.2
7.9M    oi.16.20
7.8M    oi.16.21
7.9M    oi.16.22
7.9M    oi.16.23
8.0M    oi.16.24
8.4M    oi.16.25
8.3M    oi.16.26
8.2M    oi.16.27
8.1M    oi.16.28
8.1M    oi.16.29
8.0M    oi.16.3
8.1M    oi.16.30
8.0M    oi.16.31
8.1M    oi.16.32
7.9M    oi.16.33
7.6M    oi.16.34
7.6M    oi.16.35
7.6M    oi.16.36
7.7M    oi.16.37
7.6M    oi.16.38
7.5M    oi.16.39
8.0M    oi.16.4
7.5M    oi.16.40
7.4M    oi.16.41
7.5M    oi.16.42
7.4M    oi.16.43
7.5M    oi.16.44
8.1M    oi.16.45
7.8M    oi.16.46
7.9M    oi.16.47
7.9M    oi.16.48
7.9M    oi.16.49
8.0M    oi.16.5
8.0M    oi.16.50
7.9M    oi.16.51
7.9M    oi.16.52
7.5M    oi.16.53
7.8M    oi.16.54
7.8M    oi.16.55
7.4M    oi.16.56
7.5M    oi.16.57
7.2M    oi.16.58
7.2M    oi.16.59
7.9M    oi.16.6
7.2M    oi.16.60
7.8M    oi.16.61
7.6M    oi.16.62
7.6M    oi.16.63
7.6M    oi.16.7
7.6M    oi.16.8
7.4M    oi.16.9
4.0K    OI_scrub
48K     PENDING
44K     quota_master
36K     quota_slave
4.0K    REMOTE_PARENT_DIR
...

# cd O
# du -hs *

29G     1
136K    10
136K    200000003

# cd 1
# du -hs *

928M    d0
920M    d1
928M    d10
932M    d11
928M    d12
932M    d13
932M    d14
932M    d15
924M    d16
932M    d17
920M    d18
928M    d19
929M    d2
928M    d20
928M    d21
924M    d22
932M    d23
924M    d24
924M    d25
932M    d26
932M    d27
930M    d28
920M    d29
928M    d3
924M    d30
928M    d31
928M    d4
916M    d5
932M    d6
928M    d7
928M    d8
932M    d9
4.0K    LAST_ID

# cd d0
# du -hs *

4.0M    1024
4.0M    1056
4.0M    1088
4.0M    1120
4.0M    1152
4.0M    1184
4.0M    1216
4.0M    1248
4.0M    128
4.0M    1280
4.0M    1312
4.0M    1344
4.0M    1376
4.0M    1408
4.0M    1440
4.0M    1472
4.0M    1504
4.0M    1536
4.0M    1568
4.0M    160
4.0M    1600
4.0M    1632
4.0M    1664
4.0M    1696
4.0M    1728
4.0M    1760
4.0M    1792
4.0M    1824
4.0M    1856
4.0M    1888
4.0M    192
4.0M    1920
4.0M    1952
4.0M    1984
4.0M    2016
4.0M    2048
4.0M    2080
4.0M    2112
4.0M    2144
4.0M    2176
4.0M    2208
4.0M    224
4.0M    2240
4.0M    2272
4.0M    2304
4.0M    2336
4.0M    2368
4.0M    2400
4.0M    2432
4.0M    2464
4.0M    2496
4.0M    2528
4.0M    256
4.0M    2560
4.0M    2592
4.0M    2624
4.0M    2656
4.0M    2688
4.0M    2720
4.0M    2752
4.0M    2784
4.0M    2816
4.0M    2848
4.0M    288
4.0M    2880
4.0M    2912
4.0M    2944
4.0M    2976
4.0M    3008
4.0M    3040
4.0M    3072
4.0M    3104
4.0M    3136
4.0M    3168
4.0M    320
4.0M    3200
4.0M    3232
4.0M    3264
4.0M    3296
4.0M    3328
4.0M    3360
4.0M    3392
4.0M    3424
4.0M    3456
4.0M    3488
4.0M    352
4.0M    3520
4.0M    3552
4.0M    3584
4.0M    3616
4.0M    3648
4.0M    3680
4.0M    3712
4.0M    3744
4.0M    3776
4.0M    3808
4.0M    384
4.0M    3840
4.0M    3872
4.0M    3904
4.0M    3936
4.0M    3968
4.0M    4000
4.0M    4032
4.0M    4064
4.0M    4096
4.0M    4128
4.0M    416
4.0M    4160
4.0M    4192
4.0M    4224
4.0M    4256
4.0M    4288
4.0M    4320
4.0M    4352
4.0M    4384
4.0M    4416
4.0M    4448
4.0M    448
4.0M    4480
4.0M    4512
4.0M    4544
4.0M    4576
4.0M    4608
4.0M    4640
4.0M    4672
4.0M    4704
4.0M    4736
4.0M    4768
4.0M    480
4.0M    4800
4.0M    4832
4.0M    4864
4.0M    4896
4.0M    4928
4.0M    4960
4.0M    4992
4.0M    5024
4.0M    5056
4.0M    5088
4.0M    512
4.0M    5120
4.0M    5152
4.0M    5184
4.0M    5216
4.0M    5248
4.0M    5280
4.0M    5312
4.0M    5344
4.0M    5376
4.0M    5408
4.0M    544
4.0M    5440
4.0M    5472
4.0M    5504
4.0M    5536
4.0M    5568
4.0M    5600
4.0M    5632
4.0M    5664
4.0M    5696
4.0M    5728
4.0M    576
4.0M    5760
4.0M    5792
4.0M    5824
4.0M    5856
4.0M    5888
4.0M    5920
4.0M    5952
4.0M    5984
4.0M    6016
4.0M    6048
4.0M    608
4.0M    6080
4.0M    6112
4.0M    6144
4.0M    6176
4.0M    6208
4.0M    6240
4.0M    6272
4.0M    6304
4.0M    6336
4.0M    6368
4.0M    64
4.0M    640
4.0M    6400
4.0M    6432
4.0M    6464
4.0M    6496
4.0M    6528
4.0M    6560
4.0M    6592
4.0M    6624
4.0M    6656
4.0M    6688
4.0M    672
4.0M    6720
4.0M    6752
4.0M    6784
4.0M    6816
4.0M    6848
4.0M    6880
4.0M    6912
4.0M    6944
4.0M    6976
4.0M    7008
4.0M    704
4.0M    7040
4.0M    7072
4.0M    7104
4.0M    7136
4.0M    7168
4.0M    7200
4.0M    7232
4.0M    7264
4.0M    7296
4.0M    7328
4.0M    736
4.0M    7360
4.0M    7392
4.0M    7424
4.0M    7456
4.0M    7488
4.0M    7520
4.0M    768
4.0M    800
4.0M    832
4.0M    864
4.0M    896
4.0M    928
4.0M    96
4.0M    960
4.0M    992

So the bulk of the storage goes to the “O/1” directory. Does anyone know 
what’s supposed to be in that directory, and why it is so huge?
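
For the record, the objects themselves can be inspected in place while the MDT 
is still mounted as ldiskfs, e.g. like this (just a sketch, everything here is 
read-only; 1024 is simply one of the objects listed above):

# cd /mnt/mdt_fssrv6/O/1/d0
# ls -l 1024
# od -c 1024 | head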

Thanks,
Radu

> On 23 Apr 2015, at 09:40, Radu Popescu <[email protected]> wrote:
> 
> From the server:
> 
> mount -t lustre
> /dev/mapper/vgsrv6-lvsrv6_mgs on /mnt/mgs6 type lustre (rw)
> /dev/mapper/vgsrv6-lvsrv6_mdt on /mnt/mdt_fssrv6 type lustre (rw)
> /dev/mapper/vgsrv6-lvsrv6_ost on /mnt/ost_fssrv6 type lustre (rw)
> 
> From a client:
> 
> lfs df -hi | grep -i fssrv6
> fssrv6-MDT0000_UUID        39.1M        1.2M       37.9M   3% /mnt/volumes/5[MDT:0]
> fssrv6-OST0001_UUID         4.6M        1.2M        3.4M  26% /mnt/volumes/5[OST:1]
> 
> lfs df -h | grep -i fssrv6
> fssrv6-MDT0000_UUID        58.6G       30.6G       24.1G  56% /mnt/volumes/5[MDT:0]
> fssrv6-OST0001_UUID        77.0G       12.3G       60.7G  17% /mnt/volumes/5[OST:1]
> 
> 
>> On 23 Apr 2015, at 00:32, Alexander I Kulyavtsev <[email protected]> wrote:
>> 
>> Before you remounted as ldiskfs, what is the output of
>>   mount -t lustre
>>   lfs df -hi
>>   lfs df -h
>> 
>> The first command is to verify the fs is actually mounted as Lustre.
>> Alex.
>> 
>> On Apr 22, 2015, at 4:23 PM, Colin Faber <[email protected]> wrote:
>> 
>>> You could look at your MDT partition directly: either unmount it and 
>>> remount it as ldiskfs to examine where your space is going, or use debugfs 
>>> to do the same while it is still mounted.
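>>> 
>>> For example, something along these lines (a sketch; <mdt_device> is a 
>>> placeholder for your MDT block device, and -c opens it read-only, so it 
>>> should be safe on a mounted device):
>>> 
>>> debugfs -c -R 'ls -l /' /dev/<mdt_device>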
>>> 
>>> 
>>> On Wed, Apr 22, 2015 at 11:57 AM, Radu Popescu <[email protected]> wrote:
>>> Hi,
>>> 
>>> Changelogs are not enabled. I’ve checked 
>>> /proc/fs/lustre/mdd/NAMEOFMDT/changelog_users and got:
>>> 
>>> current index: 0
>>> ID    index
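>>> 
>>> For what it’s worth, the same information can also be read with lctl, 
>>> which should print the same current index and user list:
>>> 
>>> lctl get_param mdd.*.changelog_users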
>>> 
>>> Thanks,
>>> Radu
>>> 
>>> 
>>>> On 22 Apr 2015, at 19:52, Colin Faber <[email protected]> wrote:
>>>> 
>>>> Do you have changelogs enabled?
>>>> 
>>>> On Wed, Apr 22, 2015 at 2:14 AM, Radu Popescu <[email protected]> wrote:
>>>> Hi,
>>>> 
>>>> I have the following Lustre setup:
>>>> 
>>>> - servers
>>>>   - number: 9
>>>>   - Lustre version: 2.5.3
>>>>   - OS: CentOS 6.6
>>>>   - RPM URL: https://downloads.hpdd.intel.com/public/lustre/lustre-2.5.3/el6/server/RPMS/
>>>> 
>>>> - clients
>>>>   - number: 90
>>>>   - Lustre version: 2.5.56
>>>>   - OS: Debian Wheezy
>>>>   - packages were manually built from source
>>>>   - all clients have all 9 Lustre mountpoints
>>>> 
>>>> Lustre setup:
>>>> 
>>>> MGS + MDT + OST all sit on a single LUN per server, which holds a 160GB 
>>>> VG with three LVs, one per target, all mounted on that server (creation 
>>>> sketched after the size list below):
>>>> 
>>>> MGS - 4GB
>>>> MDT - 78.12GB
>>>> OST - 78.14GB
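>>>> 
>>>> Per server, this was created roughly as follows (a sketch; /dev/<lun> is 
>>>> a placeholder, and the VG/LV names are the srv6 ones from the mounts 
>>>> shown earlier in the thread):
>>>> 
>>>> vgcreate vgsrv6 /dev/<lun>
>>>> lvcreate -L 4G -n lvsrv6_mgs vgsrv6
>>>> lvcreate -L 78.12G -n lvsrv6_mdt vgsrv6
>>>> lvcreate -L 78.14G -n lvsrv6_ost vgsrv6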
>>>> 
>>>> (I chose comparable sizes for the MDT and OST because of the small file 
>>>> sizes.) The total number of files is around 16 million, with sizes 
>>>> between <1K and 1.7MB. They are not spread equally across the 
>>>> mountpoints, so assume a maximum of about 2 million files on any single 
>>>> Lustre volume.
>>>> 
>>>> My problem is that the MDT partitions are getting full. Inodes are fine, 
>>>> only 3% used, which is OK, but more than 50% of the space is used and the 
>>>> free space is constantly dropping, so I think that within a week I’ll be 
>>>> out of space on all MDT partitions. I didn’t specify any special options 
>>>> when creating the MDT partitions, so bytes-per-inode should be the 16K 
>>>> default (a way to double-check this is sketched below).
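>>>> 
>>>> One way to double-check the effective bytes-per-inode is to read the 
>>>> block and inode counts from the superblock; bytes-per-inode is then 
>>>> (block count * block size) / inode count. A sketch, using the srv6 MDT 
>>>> device named earlier in the thread:
>>>> 
>>>> dumpe2fs -h /dev/mapper/vgsrv6-lvsrv6_mdt | grep -Ei 'block count|inode count|block size'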
>>>> 
>>>> Does anyone have any ideas?
>>>> 
>>>> Thanks,
>>>> Radu
>>>> 
>>>> 
>>> 
>>> 
>> 
> 

_______________________________________________
lustre-discuss mailing list
[email protected]
http://lists.lustre.org/listinfo.cgi/lustre-discuss-lustre.org
