> > > @@ -1467,6 +1503,56 @@ int > mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) > > } > > break; > > > > + case XENMEM_sharing_op_bulk_dedup: > > + { > > + unsigned long max_sgfn, max_cgfn; > > + struct domain *cd; > > + > > + rc = -EINVAL; > > + if ( !mem_sharing_enabled(d) ) > > + goto out; > > + > > + rc = > rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain, > > + &cd); > > + if ( rc ) > > + goto out; > > + > > + rc = xsm_mem_sharing_op(XSM_DM_PRIV, d, cd, mso.op); > > + if ( rc ) > > + { > > + rcu_unlock_domain(cd); > > + goto out; > > + } > > + > > + if ( !mem_sharing_enabled(cd) ) > > + { > > + rcu_unlock_domain(cd); > > + rc = -EINVAL; > > + goto out; > > + } > > + > > + max_sgfn = domain_get_maximum_gpfn(d); > > + max_cgfn = domain_get_maximum_gpfn(cd); > > + > > + if ( max_sgfn != max_cgfn || max_sgfn < start_iter ) > > + { > > + rcu_unlock_domain(cd); > > + rc = -EINVAL; > > + goto out; > > + } > > + > > + rc = bulk_share(d, cd, max_sgfn, start_iter, > MEMOP_CMD_MASK); > > + if ( rc > 0 ) > > + { > > + ASSERT(!(rc & MEMOP_CMD_MASK)); > > The way other continuations like this work is to shift the remaining > work left by MEMOP_EXTENT_SHIFT. > > This avoids bulk_share() needing to know MEMOP_CMD_MASK, but does chop 6 > bits off the available max_sgfn. > > However, a better alternative would be to extend xen_mem_sharing_op and > stash the continue information in a new union. That would avoid the > mask games, and also avoid limiting the maximum potential gfn. > > ~Andrew
I agree. I was thinking of extending it anyway to return the number of pages that were shared, so the continuation information could be folded into that extension as well. Thanks, Tamas
_______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel