Author: avg
Date: Thu Jan 16 18:13:18 2014
New Revision: 260785
URL: http://svnweb.freebsd.org/changeset/base/260785

Log:
  MFC r258744-258746: zfs: add zfs_freebsd_putpages

Added:
  stable/9/sys/cddl/compat/opensolaris/sys/vm.h   (contents, props changed)
Modified:
  stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
  stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
  stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
Directory Properties:
  stable/9/sys/   (props changed)
  stable/9/sys/cddl/contrib/opensolaris/   (props changed)

Added: stable/9/sys/cddl/compat/opensolaris/sys/vm.h
==============================================================================
--- /dev/null   00:00:00 1970   (empty, because file is newly added)
+++ stable/9/sys/cddl/compat/opensolaris/sys/vm.h       Thu Jan 16 18:13:18 2014        (r260785)
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2013 EMC Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _OPENSOLARIS_SYS_VM_H_
+#define        _OPENSOLARIS_SYS_VM_H_
+
+#ifdef _KERNEL
+
+#include <sys/sf_buf.h>
+
+/*
+ * Map the page 'pp' into kernel virtual address space through an sf_buf
+ * and return the mapping's KVA.  The sf_buf is handed back via *sfp so
+ * the caller can release the mapping with zfs_unmap_page().
+ */
+static inline caddr_t
+zfs_map_page(vm_page_t pp, struct sf_buf **sfp)
+{
+       *sfp = sf_buf_alloc(pp, 0);
+       return ((caddr_t)sf_buf_kva(*sfp));
+}
+
+/*
+ * Release a transient page mapping created by zfs_map_page().
+ */
+static inline void
+zfs_unmap_page(struct sf_buf *sf)
+{
+       sf_buf_free(sf);
+}
+
+#endif /* _KERNEL */
+
+#endif /* _OPENSOLARIS_SYS_VM_H_ */

Modified: stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c
==============================================================================
--- stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c       Thu Jan 16 18:01:57 2014        (r260784)
+++ stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c       Thu Jan 16 18:13:18 2014        (r260785)
@@ -45,6 +45,7 @@
 #include <sys/zio_compress.h>
 #include <sys/sa.h>
 #ifdef _KERNEL
+#include <sys/vm.h>
 #include <sys/zfs_znode.h>
 #endif
 
@@ -1184,6 +1185,64 @@ dmu_write_pages(objset_t *os, uint64_t o
        dmu_buf_rele_array(dbp, numbufs, FTAG);
        return (err);
 }
+
+#else
+
+/*
+ * FreeBSD version of dmu_write_pages(): copy the contents of the VM pages
+ * in 'ma' into the dbufs of 'object' covering the byte range
+ * [offset, offset + size), within the caller-assigned transaction 'tx'.
+ * Each page is mapped one at a time through a transient sf_buf.
+ * Returns 0 on success or the error from dmu_buf_hold_array().
+ */
+int
+dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
+    vm_page_t *ma, dmu_tx_t *tx)
+{
+       dmu_buf_t **dbp;
+       struct sf_buf *sf;
+       int numbufs, i;
+       int err;
+
+       if (size == 0)
+               return (0);
+
+       /* Hold all dbufs backing [offset, offset + size) for this write. */
+       err = dmu_buf_hold_array(os, object, offset, size,
+           FALSE, FTAG, &numbufs, &dbp);
+       if (err)
+               return (err);
+
+       for (i = 0; i < numbufs; i++) {
+               int tocpy, copied, thiscpy;
+               int bufoff;
+               dmu_buf_t *db = dbp[i];
+               caddr_t va;
+
+               ASSERT(size > 0);
+               ASSERT3U(db->db_size, >=, PAGESIZE);
+
+               bufoff = offset - db->db_offset;
+               tocpy = (int)MIN(db->db_size - bufoff, size);
+
+               /* Only the first and last dbufs may be written partially. */
+               ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
+
+               /*
+                * A whole-dbuf overwrite is announced with will_fill;
+                * a partial overwrite must dirty the existing buffer.
+                */
+               if (tocpy == db->db_size)
+                       dmu_buf_will_fill(db, tx);
+               else
+                       dmu_buf_will_dirty(db, tx);
+
+               /* Copy one page per iteration via a temporary sf_buf mapping. */
+               for (copied = 0; copied < tocpy; copied += PAGESIZE) {
+                       ASSERT3U(ptoa((*ma)->pindex), ==, db->db_offset + bufoff);
+                       thiscpy = MIN(PAGESIZE, tocpy - copied);
+                       va = zfs_map_page(*ma, &sf);
+                       bcopy(va, (char *)db->db_data + bufoff, thiscpy);
+                       zfs_unmap_page(sf);
+                       ma += 1;
+                       bufoff += PAGESIZE;
+               }
+
+               if (tocpy == db->db_size)
+                       dmu_buf_fill_done(db, tx);
+
+               offset += tocpy;
+               size -= tocpy;
+       }
+       dmu_buf_rele_array(dbp, numbufs, FTAG);
+       return (err);
+}
 #endif /* sun */
 #endif
 

Modified: stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h
==============================================================================
--- stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h   Thu Jan 16 18:01:57 2014        (r260784)
+++ stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h   Thu Jan 16 18:13:18 2014        (r260785)
@@ -606,8 +606,15 @@ int dmu_write_uio(objset_t *os, uint64_t
     dmu_tx_t *tx);
 int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
     dmu_tx_t *tx);
+#ifdef _KERNEL
+#ifdef sun
 int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
     uint64_t size, struct page *pp, dmu_tx_t *tx);
+#else
+int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
+    uint64_t size, struct vm_page **ppa, dmu_tx_t *tx);
+#endif
+#endif
 struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
 void dmu_return_arcbuf(struct arc_buf *buf);
 void dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf,

Modified: stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
==============================================================================
--- stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Thu Jan 16 18:01:57 2014        (r260784)
+++ stable/9/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c Thu Jan 16 18:13:18 2014        (r260785)
@@ -33,6 +33,7 @@
 #include <sys/sysmacros.h>
 #include <sys/resource.h>
 #include <sys/vfs.h>
+#include <sys/vm.h>
 #include <sys/vnode.h>
 #include <sys/file.h>
 #include <sys/stat.h>
@@ -69,7 +70,6 @@
 #include <sys/kidmap.h>
 #include <sys/bio.h>
 #include <sys/buf.h>
-#include <sys/sf_buf.h>
 #include <sys/sched.h>
 #include <sys/acl.h>
 #include <vm/vm_pageout.h>
@@ -443,21 +443,6 @@ page_unhold(vm_page_t pp)
        vm_page_unlock(pp);
 }
 
-static caddr_t
-zfs_map_page(vm_page_t pp, struct sf_buf **sfp)
-{
-
-       *sfp = sf_buf_alloc(pp, 0);
-       return ((caddr_t)sf_buf_kva(*sfp));
-}
-
-static void
-zfs_unmap_page(struct sf_buf *sf)
-{
-
-       sf_buf_free(sf);
-}
-
 /*
  * When a file is memory mapped, we must keep the IO data synchronized
  * between the DMU cache and the memory mapped pages.  What this means:
@@ -474,6 +459,7 @@ update_pages(vnode_t *vp, int64_t start,
        caddr_t va;
        int off;
 
+       ASSERT(segflg != UIO_NOCOPY);
        ASSERT(vp->v_mount != NULL);
        obj = vp->v_object;
        ASSERT(obj != NULL);
@@ -484,27 +470,7 @@ update_pages(vnode_t *vp, int64_t start,
                vm_page_t pp;
                int nbytes = imin(PAGESIZE - off, len);
 
-               if (segflg == UIO_NOCOPY) {
-                       pp = vm_page_lookup(obj, OFF_TO_IDX(start));
-                       KASSERT(pp != NULL,
-                           ("zfs update_pages: NULL page in putpages case"));
-                       KASSERT(off == 0,
-                           ("zfs update_pages: unaligned data in putpages case"));
-                       KASSERT(pp->valid == VM_PAGE_BITS_ALL,
-                           ("zfs update_pages: invalid page in putpages case"));
-                       KASSERT(pp->busy > 0,
-                           ("zfs update_pages: unbusy page in putpages case"));
-                       KASSERT(!pmap_page_is_write_mapped(pp),
-                           ("zfs update_pages: writable page in putpages case"));
-                       VM_OBJECT_UNLOCK(obj);
-
-                       va = zfs_map_page(pp, &sf);
-                       (void) dmu_write(os, oid, start, nbytes, va, tx);
-                       zfs_unmap_page(sf);
-
-                       VM_OBJECT_LOCK(obj);
-                       vm_page_undirty(pp);
-               } else if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
+               if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
                        VM_OBJECT_UNLOCK(obj);
 
                        va = zfs_map_page(pp, &sf);
@@ -518,8 +484,7 @@ update_pages(vnode_t *vp, int64_t start,
                len -= nbytes;
                off = 0;
        }
-       if (segflg != UIO_NOCOPY)
-               vm_object_pip_wakeupn(obj, 0);
+       vm_object_pip_wakeupn(obj, 0);
        VM_OBJECT_UNLOCK(obj);
 }
 
@@ -5797,6 +5762,169 @@ zfs_freebsd_getpages(ap)
 }
 
+/*
+ * Write out the busied, dirty pages in 'ma' ('len' bytes, page-aligned)
+ * that back vnode 'vp'.  This is the guts of VOP_PUTPAGES for ZFS.
+ * Per-page pager status codes are returned through 'rtvals'; the overall
+ * result is rtvals[0].  'flags' carries the pager's VM_PAGER_PUT_* flags.
+ */
static int
+zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
+    int *rtvals)
+{
+       znode_t         *zp = VTOZ(vp);
+       zfsvfs_t        *zfsvfs = zp->z_zfsvfs;
+       rl_t            *rl;
+       dmu_tx_t        *tx;
+       struct sf_buf   *sf;
+       vm_object_t     object;
+       vm_page_t       m;
+       caddr_t         va;
+       size_t          tocopy;
+       size_t          lo_len;
+       vm_ooffset_t    lo_off;
+       vm_ooffset_t    off;
+       uint_t          blksz;
+       int             ncount;
+       int             pcount;
+       int             err;
+       int             i;
+
+       ZFS_ENTER(zfsvfs);
+       ZFS_VERIFY_ZP(zp);
+
+       object = vp->v_object;
+       pcount = btoc(len);
+       ncount = pcount;
+
+       KASSERT(ma[0]->object == object, ("mismatching object"));
+       KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
+
+       /* Assume failure until each page is actually written out. */
+       for (i = 0; i < pcount; i++)
+               rtvals[i] = VM_PAGER_ERROR;
+
+       /*
+        * Round the locked range out to the file's block size so every
+        * block touched by this write is covered by the range lock.
+        */
+       off = IDX_TO_OFF(ma[0]->pindex);
+       blksz = zp->z_blksz;
+       lo_off = rounddown(off, blksz);
+       lo_len = roundup(len + (off - lo_off), blksz);
+       rl = zfs_range_lock(zp, lo_off, lo_len, RL_WRITER);
+
+       /*
+        * Clamp the write to the current file size.  Pages wholly beyond
+        * EOF are reported back as VM_PAGER_BAD, and the dirty bits past
+        * EOF in a partially-covered final page are cleared.
+        */
+       VM_OBJECT_LOCK(object);
+       if (len + off > object->un_pager.vnp.vnp_size) {
+               if (object->un_pager.vnp.vnp_size > off) {
+                       int pgoff;
+
+                       len = object->un_pager.vnp.vnp_size - off;
+                       ncount = btoc(len);
+                       if ((pgoff = (int)len & PAGE_MASK) != 0) {
+                               /*
+                                * If the object is locked and the following
+                                * conditions hold, then the page's dirty
+                                * field cannot be concurrently changed by a
+                                * pmap operation.
+                                */
+                               m = ma[ncount - 1];
+                               KASSERT(m->busy > 0,
+                                   ("zfs_putpages: page %p is not busy", m));
+                               KASSERT(!pmap_page_is_write_mapped(m),
+                                   ("zfs_putpages: page %p is not read-only", m));
+                               vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
+                                   pgoff);
+                       }
+               } else {
+                       len = 0;
+                       ncount = 0;
+               }
+               if (ncount < pcount) {
+                       for (i = ncount; i < pcount; i++) {
+                               rtvals[i] = VM_PAGER_BAD;
+                       }
+               }
+       }
+       VM_OBJECT_UNLOCK(object);
+
+       if (ncount == 0)
+               goto out;
+
+       /* Refuse the write when the owner is over the user or group quota. */
+       if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
+           zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
+               goto out;
+       }
+
+top:
+       tx = dmu_tx_create(zfsvfs->z_os);
+       dmu_tx_hold_write(tx, zp->z_id, off, len);
+
+       dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
+       zfs_sa_upgrade_txholds(tx, zp);
+       /* Non-blocking assign; on ERESTART wait for the txg and retry. */
+       err = dmu_tx_assign(tx, TXG_NOWAIT);
+       if (err != 0) {
+               if (err == ERESTART) {
+                       dmu_tx_wait(tx);
+                       dmu_tx_abort(tx);
+                       goto top;
+               }
+               dmu_tx_abort(tx);
+               goto out;
+       }
+
+       /*
+        * With a block size smaller than a page, copy each page with
+        * dmu_write() instead of using dmu_write_pages().
+        */
+       if (zp->z_blksz < PAGE_SIZE) {
+               /* NOTE(review): this i = 0 is redundant with the for-init. */
+               i = 0;
+               for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
+                       tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
+                       va = zfs_map_page(ma[i], &sf);
+                       dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
+                       zfs_unmap_page(sf);
+               }
+       } else {
+               err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
+       }
+
+       if (err == 0) {
+               uint64_t mtime[2], ctime[2];
+               sa_bulk_attr_t bulk[3];
+               int count = 0;
+
+               /* Update times/flags and log the write for ZIL replay. */
+               SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
+                   &mtime, 16);
+               SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
+                   &ctime, 16);
+               SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
+                   &zp->z_pflags, 8);
+               zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
+                   B_TRUE);
+               zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
+
+               /* Mark the written pages clean and report per-page success. */
+               VM_OBJECT_LOCK(object);
+               for (i = 0; i < ncount; i++) {
+                       rtvals[i] = VM_PAGER_OK;
+                       vm_page_undirty(ma[i]);
+               }
+               VM_OBJECT_UNLOCK(object);
+               PCPU_INC(cnt.v_vnodeout);
+               PCPU_ADD(cnt.v_vnodepgsout, ncount);
+       }
+       dmu_tx_commit(tx);
+
+out:
+       zfs_range_unlock(rl);
+       /* Honor synchronous pageout requests and sync=always datasets. */
+       if ((flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0 ||
+           zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
+               zil_commit(zfsvfs->z_log, zp->z_id);
+       ZFS_EXIT(zfsvfs);
+       return (rtvals[0]);
+}
+
+/*
+ * VOP_PUTPAGES() entry point: thin wrapper forwarding the vop arguments
+ * to zfs_putpages().
+ */
+int
+zfs_freebsd_putpages(ap)
+       struct vop_putpages_args /* {
+               struct vnode *a_vp;
+               vm_page_t *a_m;
+               int a_count;
+               int a_sync;
+               int *a_rtvals;
+               vm_ooffset_t a_offset;
+       } */ *ap;
+{
+
+       return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
+           ap->a_rtvals));
+}
+
+static int
 zfs_freebsd_bmap(ap)
        struct vop_bmap_args /* {
                struct vnode *a_vp;
@@ -6885,6 +7013,7 @@ struct vop_vector zfs_vnodeops = {
        .vop_setacl =           zfs_freebsd_setacl,
        .vop_aclcheck =         zfs_freebsd_aclcheck,
        .vop_getpages =         zfs_freebsd_getpages,
+       .vop_putpages =         zfs_freebsd_putpages,
 };
 
 struct vop_vector zfs_fifoops = {
_______________________________________________
svn-src-stable-9@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-stable-9
To unsubscribe, send any mail to "svn-src-stable-9-unsubscr...@freebsd.org"

Reply via email to