Hi Sage,

I am calling rados_stat() from a FUSE module, and the call blocks forever
every time. The full source is below; the blocking call is marked in
read_ceph_getattr(). Thank you for any help.

#define FUSE_USE_VERSION 26

#include <fuse.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <rados/librados.h>
#include <openssl/sha.h>
#include <json/json.h>
#include <time.h>
#include <syslog.h>

char *poold = "data";
char *poolm = "metad";
rados_t cluster = NULL;
rados_ioctx_t dio = NULL;
rados_ioctx_t mio = NULL;

static int read_ceph_getattr(const char *path, struct stat *stbuf)
{
  int err = 0;
  uint64_t size = 0;
  time_t mtime = 0;
  int res = 0;
  char *buf = NULL;
  const char *str_size = NULL;
  char fileid[50] = {0};

  struct json_object *new_obj = NULL;

  memset(stbuf, 0, sizeof(struct stat));

  if (strcmp(path, "/") == 0) {
    stbuf->st_mode = S_IFDIR | 0755;
    stbuf->st_nlink = 2;
    stbuf->st_mtime = time(NULL);
    stbuf->st_size = 4096;
    res = 0;
    goto Endhandle;
  }
  if (strlen(path + 1) >= sizeof(fileid)) { /* guard the fixed-size buffer */
    res = -ENOENT;
    goto Endhandle;
  }
  memcpy(fileid, path + 1, strlen(path) - 1);

  syslog(LOG_INFO, "metadata fileid : %s\n", fileid);

  if (strlen(fileid) < 40) {
    res = -ENOENT;
    goto Endhandle;
  }

  err = rados_stat(mio, fileid, &size, &mtime); /* this is the call that blocks */
  if (err < 0) {
    syslog(LOG_INFO, "mio rados_stat: %s\n", strerror(-err));
    res = -ENOENT;
    goto Endhandle;
  }

  buf = malloc(size + 1);
  if (NULL == buf) {
    res = -ENOMEM;
    goto Endhandle;
  }
  memset(buf, 0, size + 1);

  err = rados_read(mio, fileid, buf, size, 0);
  if (err < 0) {
    syslog(LOG_INFO, "mio rados_read: %s\n", strerror(-err));
    res = -ENOENT;
    goto Endhandle;
  }

  syslog(LOG_INFO, "json : %s\n", buf);

  new_obj = json_tokener_parse(buf);
  str_size = new_obj ?
    json_object_get_string(json_object_object_get(new_obj, "filesize")) : NULL;
  if (NULL == str_size) {
    res = -ENOENT;
    goto Endhandle;
  }

  stbuf->st_nlink = 1;
  stbuf->st_mode = S_IFREG | 0666;
  /* hextoll() is a local helper (not shown in this listing) that parses
     the hex "filesize" string into a 64-bit value */
  stbuf->st_size = hextoll(str_size, strlen(str_size));

  size = 0;
  mtime = 0;
  memset(fileid, 0, sizeof(fileid));
  snprintf(fileid, sizeof(fileid), "%s.00000000", path + 1);

  syslog(LOG_INFO, "data fileid : %s\n", fileid);

  err = rados_stat(dio, fileid, &size, &mtime);
  if (err < 0) {
    syslog(LOG_INFO, "dio rados_stat: %s\n", strerror(-err));
    res = -ENOENT;
    goto Endhandle;
  }
  stbuf->st_mtime = mtime;

 Endhandle:
  if (NULL != buf) {
    free(buf);
    buf = NULL;
  }
  if (NULL != new_obj) {
    json_object_put(new_obj);
    new_obj = NULL;
  }
  return res;
}
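
For what it's worth, one way to tell whether the stat ever completes (as
opposed to returning an error) is the asynchronous variant with a crude
polling timeout, called from getattr in place of rados_stat() while
debugging. This is only a sketch: it assumes your librados version has
rados_aio_stat(), and the helper name stat_with_timeout(), the timeout,
and the one-second poll are made up for illustration.

static int stat_with_timeout(rados_ioctx_t io, const char *oid,
                             uint64_t *psize, time_t *pmtime, int timeout_s)
{
  rados_completion_t c = NULL;
  int err = 0;
  int waited = 0;

  err = rados_aio_create_completion(NULL, NULL, NULL, &c);
  if (err < 0)
    return err;

  err = rados_aio_stat(io, oid, c, psize, pmtime);
  if (err < 0) {
    rados_aio_release(c);
    return err;
  }

  while (!rados_aio_is_complete(c) && waited < timeout_s) {
    sleep(1); /* coarse poll; good enough for a debugging aid */
    waited++;
  }

  if (!rados_aio_is_complete(c)) {
    rados_aio_release(c); /* drops our reference; the op stays in flight */
    return -ETIMEDOUT;
  }

  err = rados_aio_get_return_value(c);
  rados_aio_release(c);
  return err;
}

If even this times out for every object, it suggests the client's service
threads are no longer running (see the note before fuse_main() in main()
below) rather than a slow OSD.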

static struct fuse_operations read_ceph_oper = {
  .getattr = read_ceph_getattr
};

int main(int argc, char *argv[])
{
  int err = 0;
  int iRet = 0;

  err = rados_create(&cluster, NULL);
  if (err < 0) {
    fprintf(stderr, "%s: cannot create a cluster handle: %s\n", argv[0], strerror(-err));
    iRet = -1;
    goto Endhandle;
  }

  err = rados_conf_read_file(cluster, "/etc/ceph/ceph.conf");
  if (err < 0) {
    fprintf(stderr, "%s: cannot read config file: %s\n", argv[0], strerror(-err));
    iRet = -1;
    goto Endhandle;
  }

  err = rados_connect(cluster);
  if (err < 0) {
    fprintf(stderr, "%s: cannot connect to cluster: %s\n", argv[0], strerror(-err));
    iRet = -1;
    goto Endhandle;
  }

  err = rados_ioctx_create(cluster, poold, &dio);
  if (err < 0) {
    fprintf(stderr, "%s: cannot open rados pool %s: %s\n", argv[0], poold, strerror(-err));
    iRet = -1;
    goto Endhandle;
  }

  err = rados_ioctx_create(cluster, poolm, &mio);
  if (err < 0) {
    fprintf(stderr, "%s: cannot open rados pool %s: %s\n", argv[0], poolm, strerror(-err));
    iRet = -1;
    goto Endhandle;
  }
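
  /* Note: librados starts its service threads inside rados_connect()
   * above. fuse_main() below daemonizes via fork() unless -f or -d is
   * given, and those threads do not survive the fork. If that is what
   * is happening here, every librados call made after the fork,
   * including the rados_stat() in getattr, can block forever.
   * Connecting to the cluster after daemonizing, or mounting with -f,
   * avoids it. */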

  iRet = fuse_main(argc, argv, &read_ceph_oper, NULL);

 Endhandle:

  if (NULL != dio) {
    rados_ioctx_destroy(dio);
    dio = NULL;
  }
  if (NULL != mio) {
    rados_ioctx_destroy(mio);
    mio = NULL;
  }
  if (NULL != cluster) {
    rados_shutdown(cluster);
    cluster = NULL;
  }

  return iRet;
}
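
If it turns out to be the daemonize/fork issue, running the filesystem in
the foreground while debugging keeps librados' threads intact (the binary
name and mountpoint here are just examples):

  ./read_ceph -f /mnt/readceph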