We have a cluster with very low load; however, "ceph osd perf" shows high
commit_latency and apply_latency on a number of OSDs.


root@stor-mgt01:~# ceph -s
  cluster:
    id:     3d1ec789-829d-4e0f-b707-9363356a68f1
    health: HEALTH_WARN
            application not enabled on 3 pool(s)
 
  services:
    mon: 3 daemons, quorum a,b,c
    mgr: a(active)
    mds: rook-ceph-filesystem-1/1/1 up  {0=rook-ceph-filesystem-a=up:active}, 1 up:standby-replay
    osd: 55 osds: 55 up, 55 in
    rgw: 3 daemons active
 
  data:
    pools:   18 pools, 3328 pgs
    objects: 1.06M objects, 1.37TiB
    usage:   4.18TiB used, 141TiB / 145TiB avail
    pgs:     3328 active+clean
 
  io:
    client:   46.2KiB/s rd, 2.58MiB/s wr, 197op/s rd, 273op/s wr


----------------------------------------------------------------------------------------

osd commit_latency(ms) apply_latency(ms)
 54                  0                 0
 53                  0                 0
 52                  0                 0
 51                  0                 0
 50                  0                 0
 49                 11                11
 48                  9                 9
 47                  3                 3
 46                 39                39
 21                 28                28
 20                 26                26
 19                 28                28
 18                  1                 1
 17                 17                17
 16                  6                 6
 15                 11                11
 14                  9                 9
 13                  8                 8
 12                 12                12
 11                 14                14
 10                 21                21
  0                 10                10
  1                 12                12
  2                 30                30
  3                  3                 3
  4                  3                 3
  5                 23                23
  6                 56                56
  7                  2                 2
  8                 36                36
  9                  5                 5
 22                 26                26
 23                  3                 3
 24                  2                 2
 25                  3                 3
 26                 19                19
 27                  3                 3
 28                  4                 4
 29                  8                 8
 30                 16                16
 31                 21                21
 32                 13                13
 33                  7                 7
 34                 10                10
 35                 12                12
 36                  5                 5
 37                  5                 5
 38                  4                 4
 39                 29                29
 40                 26                26
 41                 35                35
 42                  5                 5
 43                  4                 4
 44                  8                 8
 45                  6                 6
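
In case it is useful to anyone reproducing this, below is a minimal sketch for
filtering the output above: it runs "ceph osd perf", parses the plain
three-column text format shown in this post, and prints the OSDs whose commit
latency exceeds a threshold. The 20 ms threshold is an arbitrary example, and
the text parsing assumes the column layout shown here (it may differ slightly
between Ceph releases).

#!/usr/bin/env python3
# Minimal sketch: run "ceph osd perf", parse its plain-text columns
# (osd, commit_latency(ms), apply_latency(ms)) and print the OSDs whose
# commit latency exceeds a threshold, slowest first.
import subprocess

THRESHOLD_MS = 20.0  # arbitrary example cut-off, not an official value

out = subprocess.run(["ceph", "osd", "perf"],
                     capture_output=True, text=True, check=True).stdout

rows = []
for line in out.splitlines():
    parts = line.split()
    # data rows look like: "<osd id> <commit ms> <apply ms>"
    if len(parts) == 3 and parts[0].isdigit():
        osd_id = int(parts[0])
        commit_ms, apply_ms = float(parts[1]), float(parts[2])
        rows.append((commit_ms, apply_ms, osd_id))

for commit_ms, apply_ms, osd_id in sorted(rows, reverse=True):
    if commit_ms >= THRESHOLD_MS:
        print(f"osd.{osd_id}: commit={commit_ms:.0f}ms apply={apply_ms:.0f}ms")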