Hi all,

My Ceph cluster reports HEALTH_OK, but I cannot write to CephFS.
OS: Ubuntu 20.04, Ceph 15.2.5 (Octopus), deployed with cephadm.


root@RK01-OSD-A001:~# ceph -s
  cluster:
    id:     9091b472-1bdb-11eb-b217-abff3468259e
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum RK01-OSD-A001,RK02-OSD-A002,RK03-OSD-A003 (age 18s)
    mgr: RK01-OSD-A001.jwrjgj(active, since 51m), standbys: RK03-OSD-A003.tulrii
    mds: cephfs:1 {0=cephfs.RK02-OSD-A002.lwpgaw=up:active} 1 up:standby
    osd: 6 osds: 6 up (since 44m), 6 in (since 44m)
 
  task status:
    scrub status:
        mds.cephfs.RK02-OSD-A002.lwpgaw: idle
 
  data:
    pools:   3 pools, 65 pgs
    objects: 24 objects, 67 KiB
    usage:   6.0 GiB used, 44 TiB / 44 TiB avail
    pgs:     65 active+clean


root@RK01-OSD-A001:~# ceph fs status
cephfs - 1 clients
======
RANK  STATE              MDS                ACTIVITY     DNS    INOS
 0    active  cephfs.RK02-OSD-A002.lwpgaw  Reqs:    0 /s    13     15
       POOL           TYPE     USED  AVAIL
cephfs.cephfs.meta  metadata  1152k  20.7T
cephfs.cephfs.data    data       0   20.7T
        STANDBY MDS
cephfs.RK03-OSD-A003.xchwqj
MDS version: ceph version 15.2.5 (2c93eff00150f0cc5f106a559557a58d3d7b6f1f) octopus (stable)
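
Note the data pool shows 0 bytes used, which matches the failed writes below. One thing I was not sure how to verify: whether the pools are tagged for cephfs at all. I assume this is the right check (pool name taken from the status output above):

root@RK01-OSD-A001:~# ceph osd pool application get cephfs.cephfs.data

which, as far as I understand, should report the "cephfs" application if the pool is associated correctly.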



root@RK05-FRP-A001:~# df -h|grep "ceph-test"
172.16.65.1,172.16.65.2,172.16.65.3:6789:/   21T     0   21T   0% /ceph-test
root@RK05-FRP-A001:~# echo 123 > /ceph-test/1.txt
-bash: echo: write error: Operation not permitted
root@RK05-FRP-A001:~# ls -l /ceph-test/1.txt
-rw-r--r-- 1 root root 0 Nov  1 09:40 /ceph-test/1.txt
root@RK05-FRP-A001:~# ls -ld /ceph-test/
drwxr-xr-x 2 root root 1 Nov  1 09:40 /ceph-test/
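
Interestingly, creating the empty file succeeds (the 0-byte 1.txt above); only writing data into it fails. As far as I understand, that pattern fits a client key that can talk to the MDS but lacks write caps on the data pool. Is this the right way to inspect the key the mount uses (assuming the mount uses client.admin; I would substitute the real client name otherwise)?

root@RK05-FRP-A001:~# ceph auth get client.admin

From what I have read, a CephFS client needs mds "allow rw" plus osd caps covering the data pool, e.g. what "ceph fs authorize cephfs client.foo / rw" would generate.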


root@RK01-OSD-A001:~# cd /var/log/ceph/`ceph fsid`
root@RK01-OSD-A001:/var/log/ceph/9091b472-1bdb-11eb-b217-abff3468259e# cat 
ceph-volume.log | grep err | grep sdx
[2020-11-01 08:53:51,384][ceph_volume.process][INFO  ] stderr Failed to 
find physical volume "/dev/sdx".
[2020-11-01 08:53:51,417][ceph_volume.process][INFO  ] stderr unable to 
read label for /dev/sdx: (2) No such file or directory
[2020-11-01 08:53:51,445][ceph_volume.process][INFO  ] stderr unable to 
read label for /dev/sdx: (2) No such file or directory
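
To rule out an OSD problem behind these ceph-volume messages, I assume the standard cross-checks would be (generic commands, not output from my session):

root@RK01-OSD-A001:~# ceph osd tree
root@RK01-OSD-A001:~# ceph osd df

But since "ceph -s" above already shows all 6 OSDs up and in, I am not sure these errors are related.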


root@RK01-OSD-A001:~# pvs|grep sdx
  /dev/sdx   ceph-41b09a52-e44b-43c5-ad86-0eada11b48b6 lvm2 a--  <7.28t    0
root@RK01-OSD-A001:~# lsblk|grep sdx
sdx                            65:112   0   7.3T  0 disk
root@RK01-OSD-A001:~# parted -s /dev/sdx print
Error: /dev/sdx: unrecognised disk label
Model: LSI MR9261-8i (scsi)
Disk /dev/sdx: 8001GB
Sector size (logical/physical): 512B/4096B
Partition Table: unknown
Disk Flags:
root@RK01-OSD-A001:~#
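
One more thing I am unsure about: is parted's "unrecognised disk label" even a problem? My understanding is that a bluestore OSD created by ceph-volume takes the whole device as an LVM PV, so there is no partition table for parted to find. To confirm that sdx really backs an OSD, I assume the check on a cephadm host would be:

root@RK01-OSD-A001:~# cephadm ceph-volume lvm list /dev/sdx

Any hints on why the writes fail would be much appreciated. Thanks!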