A 32-bit Linux virtual address space is classically split at 0xc0000000: the
top 1 GB belongs to the kernel, the lower 3 GB to user space:

    +--------+ 0xffffffff
    | Kernel |
    +--------+ 0xc0000000
    |        |
    |  User  |
    |        |
    +--------+ 0x00000000

Filesystem-level encryption (fscrypt) describes a directory's encryption
policy with the following UAPI structure; master_key_descriptor identifies
the master key that user space has added to the kernel keyring (a hedged
usage sketch appears at the end of these notes):

    #define FS_KEY_DESCRIPTOR_SIZE 8

    struct fscrypt_policy {
        __u8 version;
        __u8 contents_encryption_mode;
        __u8 filenames_encryption_mode;
        __u8 flags;
        __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
    };

Address space mirroring implementation and API

A device driver that wants to mirror a process address space must start by
registering an hmm_mirror struct:

    int hmm_mirror_register(struct hmm_mirror *mirror,
                            struct mm_struct *mm);
    int hmm_mirror_register_locked(struct hmm_mirror *mirror,
                                   struct mm_struct *mm);

The locked variant is for callers that already hold the mmap_sem of the mm in
write mode. The mirror struct has one callback through which HMM propagates
CPU page table updates (a registration sketch appears below):

    struct hmm_mirror_ops {
        /* update() - synchronize device page tables with the CPU
         *
         * @mirror: pointer to struct hmm_mirror
         * @action: type of update that occurred to the CPU page table
         * @start: virtual start address of the range to update
         * @end: virtual end address of the range to update
         *
         * This callback ultimately originates from mmu_notifiers when the
         * CPU page table is updated. The device driver must update its page
         * table in response to this callback. The action argument tells it
         * what to do.
         *
         * The device driver must not return from this callback until the
         * device page tables are completely updated (TLBs flushed, etc.);
         * this is a synchronous call.
         */
        void (*update)(struct hmm_mirror *mirror,
                       enum hmm_update action,
                       unsigned long start,
                       unsigned long end);
    };

When the device driver wants to populate a range of virtual addresses, it can
use either of:

    int hmm_vma_get_pfns(struct vm_area_struct *vma,
                         struct hmm_range *range,
                         unsigned long start,
                         unsigned long end,
                         hmm_pfn_t *pfns);
    int hmm_vma_fault(struct vm_area_struct *vma,
                      struct hmm_range *range,
                      unsigned long start,
                      unsigned long end,
                      hmm_pfn_t *pfns,
                      bool write,
                      bool block);

hmm_vma_get_pfns() only fetches present CPU page table entries and never
triggers a page fault; hmm_vma_fault() also faults in missing entries (and
write-protected ones, if write access is requested).

Because the CPU page table can change at any time, the snapshot is only
guaranteed valid while the driver holds the same lock its update() callback
takes. The driver checks this with hmm_vma_range_done() and retries if the
range was invalidated in the meantime:

    int driver_populate_range(...)
    {
        struct hmm_range range;
        ...

    again:
        ret = hmm_vma_get_pfns(vma, &range, start, end, pfns);
        if (ret)
            return ret;
        take_lock(driver->update);
        if (!hmm_vma_range_done(vma, &range)) {
            release_lock(driver->update);
            goto again;
        }

        /* Use the pfns array content to update the device page table. */

        release_lock(driver->update);
        return 0;
    }

Device memory itself is managed through a pair of callbacks: free() is called
when the last reference on a device page is dropped, so the driver can
reclaim the backing memory; fault() is called when the CPU tries to access a
device page it cannot reach, and must migrate that page back to system memory
(sketches of both appear below):

    struct hmm_devmem_ops {
        void (*free)(struct hmm_devmem *devmem, struct page *page);
        int (*fault)(struct hmm_devmem *devmem,
                     struct vm_area_struct *vma,
                     unsigned long addr,
                     struct page *page,
                     unsigned flags,
                     pmd_t *pmdp);
    };

Migration to and from device memory goes through migrate_vma() together with
two driver callbacks:

    int migrate_vma(const struct migrate_vma_ops *ops,
                    struct vm_area_struct *vma,
                    unsigned long start,
                    unsigned long end,
                    unsigned long *src,
                    unsigned long *dst,
                    void *private);

    struct migrate_vma_ops {
        void (*alloc_and_copy)(struct vm_area_struct *vma,
                               const unsigned long *src,
                               unsigned long *dst,
                               unsigned long start,
                               unsigned long end,
                               void *private);
        void (*finalize_and_map)(struct vm_area_struct *vma,
                                 const unsigned long *src,
                                 const unsigned long *dst,
                                 unsigned long start,
                                 unsigned long end,
                                 void *private);
    };

alloc_and_copy() allocates the destination pages and copies the source pages
into them; an entry for which no destination is provided is simply not
migrated. finalize_and_map() runs after the CPU page table has been updated,
so the driver can check which entries actually migrated and update its own
page table accordingly.
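To make the migration callbacks concrete, here is a hedged sketch of a
migrate_vma() user, assuming the migrate_pfn()/migrate_pfn_to_page() helpers
and MIGRATE_PFN_* flags from <linux/migrate.h>. Every my_* name (struct
my_device, my_alloc_device_page(), my_copy_to_device(), my_map_on_device())
is a hypothetical stand-in for driver code, and my_alloc_device_page() is
assumed to return a locked device-private page (hence MIGRATE_PFN_LOCKED):

    static void my_alloc_and_copy(struct vm_area_struct *vma,
                                  const unsigned long *src,
                                  unsigned long *dst,
                                  unsigned long start,
                                  unsigned long end,
                                  void *private)
    {
        struct my_device *dev = private;
        unsigned long addr, i;

        for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
            struct page *spage = migrate_pfn_to_page(src[i]);
            struct page *dpage;

            /* Skip entries that HMM decided cannot migrate. */
            if (!(src[i] & MIGRATE_PFN_MIGRATE))
                continue;

            dpage = my_alloc_device_page(dev);        /* hypothetical */
            if (!dpage) {
                dst[i] = 0;     /* no destination: entry is not migrated */
                continue;
            }
            if (spage)
                my_copy_to_device(dev, spage, dpage); /* hypothetical */

            dst[i] = migrate_pfn(page_to_pfn(dpage)) |
                     MIGRATE_PFN_DEVICE | MIGRATE_PFN_LOCKED;
        }
    }

    static void my_finalize_and_map(struct vm_area_struct *vma,
                                    const unsigned long *src,
                                    const unsigned long *dst,
                                    unsigned long start,
                                    unsigned long end,
                                    void *private)
    {
        unsigned long i, npages = (end - start) >> PAGE_SHIFT;

        /* The CPU page table now points at the destination pages; map
         * every entry that actually migrated into the device page table. */
        for (i = 0; i < npages; i++)
            if (src[i] & MIGRATE_PFN_MIGRATE)
                my_map_on_device(private, start + (i << PAGE_SHIFT),
                                 dst[i]);             /* hypothetical */
    }

    static const struct migrate_vma_ops my_migrate_ops = {
        .alloc_and_copy   = my_alloc_and_copy,
        .finalize_and_map = my_finalize_and_map,
    };

A driver would then call migrate_vma(&my_migrate_ops, vma, start, end, src,
dst, dev), with src and dst sized to (end - start) / PAGE_SIZE entries.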
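Tying the mirroring pieces together, a similarly hedged sketch of an update()
callback and mirror registration. A real mutex stands in for the
take_lock()/release_lock() pseudo-primitives used in driver_populate_range()
above; my_invalidate_range() is hypothetical:

    struct my_device {
        struct hmm_mirror mirror;
        struct mutex update;    /* the "driver->update" lock from above */
    };

    static void my_update(struct hmm_mirror *mirror,
                          enum hmm_update action,
                          unsigned long start,
                          unsigned long end)
    {
        struct my_device *dev = container_of(mirror, struct my_device,
                                             mirror);

        /* Serialize against driver_populate_range() with the same lock. */
        mutex_lock(&dev->update);
        /* Drop the device page table entries for [start, end) and flush
         * device TLBs before returning: this callback is synchronous. */
        my_invalidate_range(dev, start, end);    /* hypothetical */
        mutex_unlock(&dev->update);
    }

    static const struct hmm_mirror_ops my_mirror_ops = {
        .update = my_update,
    };

    int my_mirror_init(struct my_device *dev, struct mm_struct *mm)
    {
        mutex_init(&dev->update);
        dev->mirror.ops = &my_mirror_ops;
        return hmm_mirror_register(&dev->mirror, mm);
    }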
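For the device memory callbacks, a minimal sketch under the same caveats:
hmm_devmem_add() is the registration entry point that takes these ops along
with a struct device and a size, and the my_* helpers are hypothetical. The
return convention assumed for fault() is 0 when the page was successfully
migrated back and a VM_FAULT_ code otherwise:

    static void my_devmem_free(struct hmm_devmem *devmem, struct page *page)
    {
        /* Last reference on the device page was dropped: hand the backing
         * device memory back to the driver's allocator. */
        my_free_device_page(devmem, page);       /* hypothetical */
    }

    static int my_devmem_fault(struct hmm_devmem *devmem,
                               struct vm_area_struct *vma,
                               unsigned long addr,
                               struct page *page,
                               unsigned flags,
                               pmd_t *pmdp)
    {
        /* The CPU cannot access device memory directly: migrate this page
         * back to system memory (typically with migrate_vma()), returning
         * 0 on success or VM_FAULT_SIGBUS if migration is impossible. */
        return my_migrate_back_to_ram(devmem, vma, addr, page);
    }

    static const struct hmm_devmem_ops my_devmem_ops = {
        .free  = my_devmem_free,
        .fault = my_devmem_fault,
    };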
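Finally, returning to the fscrypt_policy structure at the top of these notes:
a minimal user-space sketch that applies a v1 encryption policy to an empty
directory. FS_IOC_SET_ENCRYPTION_POLICY and the mode constants come from
<linux/fs.h>; the AES-256-XTS/AES-256-CTS mode pair, zeroed flags, and the
caller-supplied key descriptor are illustrative choices:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int set_dir_policy(const char *dir,
                       const __u8 key_desc[FS_KEY_DESCRIPTOR_SIZE])
    {
        struct fscrypt_policy policy;
        int fd, ret;

        memset(&policy, 0, sizeof(policy));
        policy.version = 0;    /* v1 policies carry version number 0 */
        policy.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
        policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
        memcpy(policy.master_key_descriptor, key_desc,
               FS_KEY_DESCRIPTOR_SIZE);

        fd = open(dir, O_RDONLY | O_DIRECTORY);
        if (fd < 0) {
            perror("open");
            return -1;
        }
        ret = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
        if (ret)
            perror("FS_IOC_SET_ENCRYPTION_POLICY");
        close(fd);
        return ret;
    }

The matching master key must already be present in the kernel keyring for
files created under the directory to be usable.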