Add a dedicated inode structure (kvm_gmem_inode_info) and a slab-allocated inode cache for guest memory backing, similar to how shmem handles its inodes.
This adds the necessary allocation/destruction functions and prepares
for upcoming guest_memfd NUMA policy support changes.

Signed-off-by: Shivank Garg <shiva...@amd.com>
---
 virt/kvm/guest_memfd.c | 52 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 002328569c9e..0ccbb152483a 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -18,6 +18,15 @@ struct kvm_gmem {
 	struct list_head entry;
 };
 
+struct kvm_gmem_inode_info {
+	struct inode vfs_inode;
+};
+
+static inline struct kvm_gmem_inode_info *KVM_GMEM_I(struct inode *inode)
+{
+	return container_of(inode, struct kvm_gmem_inode_info, vfs_inode);
+}
+
 /**
  * folio_file_pfn - like folio_file_page, but return a pfn.
  * @folio: The folio which contains this index.
@@ -317,8 +326,34 @@ static pgoff_t kvm_gmem_get_index(struct kvm_memory_slot *slot, gfn_t gfn)
 	return gfn - slot->base_gfn + slot->gmem.pgoff;
 }
 
+static struct kmem_cache *kvm_gmem_inode_cachep;
+
+static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
+{
+	struct kvm_gmem_inode_info *info;
+
+	info = alloc_inode_sb(sb, kvm_gmem_inode_cachep, GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	return &info->vfs_inode;
+}
+
+static void kvm_gmem_destroy_inode(struct inode *inode)
+{
+
+}
+
+static void kvm_gmem_free_inode(struct inode *inode)
+{
+	kmem_cache_free(kvm_gmem_inode_cachep, KVM_GMEM_I(inode));
+}
+
 static const struct super_operations kvm_gmem_super_operations = {
 	.statfs		= simple_statfs,
+	.alloc_inode	= kvm_gmem_alloc_inode,
+	.destroy_inode	= kvm_gmem_destroy_inode,
+	.free_inode	= kvm_gmem_free_inode,
 };
 
 static int kvm_gmem_init_fs_context(struct fs_context *fc)
@@ -355,16 +390,33 @@ static struct file_operations kvm_gmem_fops = {
 	.fallocate = kvm_gmem_fallocate,
 };
 
+static void kvm_gmem_init_inode(void *foo)
+{
+	struct kvm_gmem_inode_info *info = foo;
+
+	inode_init_once(&info->vfs_inode);
+}
+
+static void kvm_gmem_init_inodecache(void)
+{
+	kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
+						  sizeof(struct kvm_gmem_inode_info),
+						  0, SLAB_ACCOUNT,
+						  kvm_gmem_init_inode);
+}
+
 void kvm_gmem_init(struct module *module)
 {
 	kvm_gmem_fops.owner = module;
 
+	kvm_gmem_init_inodecache();
 	kvm_gmem_init_mount();
 }
 
 void kvm_gmem_exit(void)
 {
 	kern_unmount(kvm_gmem_mnt);
+	kmem_cache_destroy(kvm_gmem_inode_cachep);
 }
 
 static int kvm_gmem_migrate_folio(struct address_space *mapping,
-- 
2.34.1
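
For readers unfamiliar with the idiom: the patch follows the same embedded-inode pattern shmem uses with SHMEM_I(). The generic struct inode lives inside a larger filesystem-private structure, and container_of() recovers the outer structure from a pointer to the embedded inode. Below is a minimal userspace sketch of that pattern under stated assumptions; the struct names and the policy field are illustrative placeholders, not part of this patch:

	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's struct inode; illustrative only. */
	struct inode {
		unsigned long i_ino;
	};

	/* Private per-inode state embeds the generic inode, as in the patch. */
	struct gmem_inode_info {
		struct inode vfs_inode;
		int policy;	/* hypothetical placeholder for future per-inode state */
	};

	/* Userspace version of the kernel's container_of() helper. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Mirror of KVM_GMEM_I(): recover the outer struct from the inner inode. */
	static struct gmem_inode_info *GMEM_I(struct inode *inode)
	{
		return container_of(inode, struct gmem_inode_info, vfs_inode);
	}

	int main(void)
	{
		struct gmem_inode_info info = {
			.vfs_inode = { .i_ino = 42 },
			.policy = 1,
		};
		/* The VFS only ever hands back a pointer to the embedded inode. */
		struct inode *inode = &info.vfs_inode;

		printf("ino=%lu policy=%d\n", inode->i_ino, GMEM_I(inode)->policy);
		return 0;
	}

This recovery trick is what makes .free_inode correct in the patch: the VFS passes the embedded struct inode, and KVM_GMEM_I() maps it back to the slab object that was actually allocated, so kmem_cache_free() releases the right pointer. The empty kvm_gmem_destroy_inode() reads as a placeholder hook, presumably to be filled in by the upcoming NUMA policy changes the commit message mentions.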