linux-glibc内存管理小结2(内存相关系统调用的实现)

在上一节ptmalloc源码分析中我们提到dlmalloc向系统申请内存的方式有两种, 对应Linux系统下分别是sbrk()与mmap()系统调用. 本节我们就来看下brk()/sbrk()与mmap()/munmap()的实现, 作为切入点来一窥内核内存管理的特点.
在正文开始之前我们先大致描述一下内核内存管理的模型. 以32bit系统为例Linux将4G地址空间划分为两块, 低地址为用户态地址空间(一般为0~3G), 高地址为内核态地址空间, 两者相互独立不能直接访问. 每个进程都有自己的用户态地址空间, 进程不能直接访问其它进程的地址空间, 当然也不能直接访问内核态地址空间. 内核态地址空间为所有内核线程共有, 同样内核线程也不能直接访问进程的地址空间.
另外需要提及的一点是通常情况下我们调用brk()/mmap()等接口时实际是调用的glibc的函数, 而非系统调用本身, 对于brk()等系统调用, glibc封装后的返回值与内核实际的返回值并不一致. 举例而言man brk可以看到NOTES段提到: The return value described above for brk() is the behavior provided by the glibc wrapper function for the Linux brk() system call. However, the actual Linux system call returns the new program break on success. On failure, the system call returns the current break. The glibc wrapper function does some work to provide the 0 and  -1 return values described above. 因此如果有发现内核实现与函数声明不一致的情况可以参见glibc的封装.

让我们开始本节的叙述.

由于内核代码规模较glibc而言庞大很多, 在分析数据结构时不会列举全部成员, 同时重点通过分析接口实现来理解部分数据结构的意义.
首先来看下内核中描述一块虚拟地址空间的数据结构vm_area_struct.

 1 struct vm_area_struct { 
 2         //第一个cache line(32字节)包含了查找vma必要的信息 
 3         //该vma的起始地址 
 4         unsigned long vm_start; 
 5         //该vma的结束地址 
 6         unsigned long vm_end; 
 7         /** 
 8          * 用于管理一个task的所有vma的双向链表的前后节点 
 9          * 与通常的双向链表不同的是该链表是非闭合的 
10          * 第一个节点的prev与最后一个节点的next为NULL 
11          * 该链表是以地址排序的, 因此在计算vma gap, merge vma时比较高效 
12          * 
13         **/ 
14         struct vm_area_struct *vm_next, *vm_prev; 
15         //用于管理一个task的所有vma的红黑树, 使用红黑树在索引vma gap时比较高效 
16         struct rb_node vm_rb; 
17         /** 
18          * gap值是指一个vma与其之前的vma的空余空间, 即vma->vm_start - vma->vm_prev->vm_end 
19          * rb_subtree_gap是指当前vma及其左右子树的gap值中的最大值 
20          * 利用该值可以在分配新vma时快速定位满足大小且最小的空余空间(见unmapped_area()) 
21          * 
22         **/ 
23         unsigned long rb_subtree_gap; 
24         //第二个cache line(32字节)起始 
25         //task内存管理结构指针 
26         struct mm_struct *vm_mm; 
27         //vma的访问权限 
28         pgprot_t vm_page_prot; 
29         //该vma的标记位, 见mm.h 
30         unsigned long vm_flags; 
31         union { 
32                 struct { 
33                         struct rb_node rb; 
34                         unsigned long rb_subtree_last; 
35                 } linear; 
36                 struct list_head nonlinear; 
37                 const char __user *anon_name; 
38         } shared; 
39         /** 
40          * 匿名vma链表, 记录每个vma包含的所有匿名vma对应的anon_vma_chain 
41          * 文件带私有映射标记(MAP_PRIVATE)的vma可能同时存在于i_mmap树与anon_vma链表中 
42          * 带共享映射标记(MAP_SHARED)的vma只可能存在于i_mmap树中 
43          * 匿名带私有映射标记, 栈或者堆的vma只存在于anon_vma链表中 
44          * 
45         **/ 
46         struct list_head anon_vma_chain; 
47         //匿名vma指针 
48         struct anon_vma *anon_vma; 
49         const struct vm_operations_struct *vm_ops; 
50         //映射区间起始地址的页偏移 
51         unsigned long vm_pgoff; 
52         //映射的文件 
53         struct file * vm_file; 
54         void * vm_private_data; 
55 #ifndef CONFIG_MMU 
56         struct vm_region *vm_region; 
57 #endif 
58 #ifdef CONFIG_NUMA 
59         struct mempolicy *vm_policy; 
60 #endif 
61 }; 

 

然后是进程的内存管理结构mm_struct.

 1 struct mm_struct { 
 2         /** 
 3          * vma链表, 根据地址顺序链接 
 4          * 通过next->vm_start - prev->vm_end相减可以快速计算vma之间的gap值 
 5          * 
 6         **/ 
 7         struct vm_area_struct * mmap; 
 8         /** 
 9          * 用于查找vma的红黑树, 其索引是vma的地址区间 
10          * 所有小于vma地址空间的vma落在该vma左子树, 反之落在右子树 
11          * 借此可以通过给定地址快速索引该地址所在的vma 
12          * 
13         **/ 
14         struct rb_root mm_rb; 
15         //用于缓存最近查询到的vma(通过find_vma返回的结果), 在该vma被unlink时也会置空 
16         struct vm_area_struct * mmap_cache; 
17 #ifdef CONFIG_MMU 
18         unsigned long (*get_unmapped_area) (struct file *filp, 
19                 unsigned long addr, unsigned long len, 
20                 unsigned long pgoff, unsigned long flags); 
21         void (*unmap_area) (struct mm_struct *mm, unsigned long addr); 
22 #endif 
23         unsigned long mmap_base;                /* base of mmap area */ 
24         unsigned long task_size;                /* size of task vm space */ 
25         unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */ 
26         unsigned long free_area_cache;                /* first hole of size cached_hole_size or larger */ 
27         //该mm_struct包含的vma中地址最大的vma的结束地址 
28         unsigned long highest_vm_end; 
29         pgd_t * pgd; 
30         //对该mm的用户态的引用计数 
31         atomic_t mm_users; 
32         //对该mm的引用计数 
33         atomic_t mm_count; 
34         //归属该mm管理的vma个数 
35         int map_count; 
36         spinlock_t page_table_lock; 
37         //mmap用到的信号量 
38         struct rw_semaphore mmap_sem; 
39         struct list_head mmlist; 
40         unsigned long hiwater_rss; 
41         unsigned long hiwater_vm; 
42         unsigned long total_vm; 
43         unsigned long locked_vm; 
44         unsigned long pinned_vm; 
45         unsigned long shared_vm; 
46         unsigned long exec_vm; 
47         unsigned long stack_vm; 
48         unsigned long def_flags; 
49         unsigned long nr_ptes; 
50         //start_data为数据段起始地址, end_data为数据段结束地址 
51         //当未开启随机堆时end_data即当前主堆的结束地址 
52         unsigned long start_code, end_code, start_data, end_data; 
53         //当开启随机化地址空间后, start_brk与start_stack分别为随机堆与栈基址 
54         //brk为主堆的结束地址(不论是否开启随机化地址) 
55         unsigned long start_brk, brk, start_stack; 
56         unsigned long arg_start, arg_end, env_start, env_end; 
57         ...... 
58 }; 

 

以及匿名映射相关的结构anon_vma与anon_vma_chain.

 1 /** 
 2  * 匿名vma: 指进程私有的内存区域, 即堆, 栈与私有映射(MAP_PRIVATE)的区域 
 3  * 每个匿名vma对应一系列连续的page结构, page可以通过其mapping成员与index成员反推匿名vma 
 4  * 
 5 **/ 
 6 struct anon_vma { 
 7         struct anon_vma *root; 
 8         struct rw_semaphore rwsem; 
 9         //引用计数 
10         atomic_t refcount; 
11         //由所有指向同一anon_vma的anon_vma_chain组成的红黑树的根节点 
12         struct rb_root rb_root; 
13 }; 
14 /** 
15  * 当fork()产生新进程时, 内核需要拷贝所有父进程的内存信息 
16  * 其中包括MAP_PRIVATE的mmap()或brk()或栈的vma(这些内存被称为anon_vma) 
17  * 内核会保留旧的页表与anon_vma的关系直到子进程的COW(copy on write)操作才分配新的页表 
18  * 这样可以减少物理页面的使用, 但增加了reverse mapping的开销 
19  * 每个物理页面要销毁时都需要遍历所有进程的所有vma 
20  * 因此内核引入anon_vma与anon_vma_chain加速rmap 
21  * 三者的关系: 
22  * 一个vma对应一个或多个anon_vma/anon_vma_chain(如果该vma实际映射了多个不连续的物理页面) 
23  * 一个anon_vma对应一个或多个vma/anon_vma_chain(如果fork()了多个进程) 
24  * 一个anon_vma_chain对应一个vma/anon_vma 
25  * 
26 **/ 
27 struct anon_vma_chain { 
28         //该anon_vma_chain所属的vma, 一个vma对应一个或多个anon_vma_chain 
29         struct vm_area_struct *vma; 
30         //该anon_vma_chain对应的anon_vma, 一个anon_vma对应一个或多个anon_vma_chain 
31         struct anon_vma *anon_vma; 
32         //由同一vma包含的所有anon_vma组成的双向链表 
33         struct list_head same_vma; 
34         //由所有指向同一anon_vma的anon_vma_chain组成的红黑树的节点 
35         struct rb_node rb; 
36         unsigned long rb_subtree_last; 
37 #ifdef CONFIG_DEBUG_VM_RB 
38         unsigned long cached_vma_start, cached_vma_last; 
39 #endif 
40 }; 

 

每个进程都对应一个mm_struct用于管理进程的虚拟地址空间, 属于该进程的每一块虚拟地址区间(堆, 栈, 映射的文件都会有对应的地址区间)都表现为一个vm_area_struct, 被mm_struct管理. 内核管理这些vma有两种方式, 一种是根据地址排序的双向链表, 通过该链表可以快速获取地址在被操作vma之前/之后的vma, 另一种是根据地址排序的红黑树. 该树有两个特性, 一是树生成按地址排序, 左子树地址小于根节点, 根节点地址小于右子树, 给定一个地址可以快速定位该地址所属的区间的vma. 二是树上每个节点的gap值(gap值指该vma与地址在其之前的vma之间的空闲地址区间)是该节点及其左右子树的gap值中的最大值, 给定一个长度可以快速索引一个大小最合适的区间用于分配.
文字描述到此结束, 接下来通过代码分析. 首先来看下brk()(defined in mm/mmap.c)系统调用, brk()只接受一个入参brk, 即堆地址, 该地址可以大于或小于当前堆地址(扩展或缩减堆).

 1 SYSCALL_DEFINE1(brk, unsigned long, brk) 
 2 { 
 3         unsigned long rlim, retval; 
 4         unsigned long newbrk, oldbrk; 
 5         struct mm_struct *mm = current->mm; 
 6         unsigned long min_brk; 
 7         bool populate; 
 8         down_write(&mm->mmap_sem); 
 9         /** 
10          * 动态堆地址用于防止堆溢出攻击 
11          * 如果编译时设置CONFIG_COMPAT_BRK为N(在init/Kconfig中) 
12          * 或运行时设置/proc/sys/kernel/randomize_va_space为2 
13          * 则使用动态堆地址mm->start_brk 
14          * 
15         **/ 
16 #ifdef CONFIG_COMPAT_BRK 
17         if (current->brk_randomized) 
18                 min_brk = mm->start_brk; 
19         else 
20                 min_brk = mm->end_data; 
21 #else 
22         min_brk = mm->start_brk; 
23 #endif 
24         //边界检查 
25         rlim = rlimit(RLIMIT_DATA); 
26         if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + 
27                 (mm->end_data - mm->start_data) > rlim) 
28                 goto out; 
29         //对齐 
30         newbrk = PAGE_ALIGN(brk); 
31         oldbrk = PAGE_ALIGN(mm->brk); 
32         if (oldbrk == newbrk) 
33                 goto set_brk; 
34         //如果小于之前堆地址则缩小堆 
35         if (brk <= mm->brk) { 
36                 if (!do_munmap(mm, newbrk, oldbrk-newbrk)) 
37                         goto set_brk; 
38                 goto out; 
39         } 
40         //判断[oldbrk, newbrk+PAGE_SIZE]区间是否已存在vma 
41         if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) 
42                 goto out; 
43         //扩展堆 
44         if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) 
45                 goto out; 
46 set_brk: 
47         mm->brk = brk; 
48         populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; 
49         up_write(&mm->mmap_sem); 
50         if (populate) 
51                 mm_populate(oldbrk, newbrk - oldbrk); 
52         return brk; 
53 out: 
54         retval = mm->brk; 
55         up_write(&mm->mmap_sem); 
56         return retval; 
57 } 

 

mm是进程内存管理结构体mm_struct, 对进程内存空间操作必然需要获取锁(mm->mmap_sem).
current(defined in include/asm-generic/current.h)是一个用来指向当前进程的宏:

1 #define get_current() (current_thread_info()->task) 
2 #define current get_current() 
3 而current_thread_info()(defined in arch/arm/include/asm/thread_info.h)是内联函数: 
4 static inline struct thread_info *current_thread_info(void) 
5 { 
6         register unsigned long sp asm ("sp"); 
7         return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); 
8 } 

 

current_thread_info()通过获取sp寄存器得知当前栈位置, 进而获取线程信息(thread_info分配在线程栈顶的8K字节). thread_info的task成员指向该进程的进程描述符(task_struct).
获取mm后判断传入的brk是否超越上下限, 对于开启随机堆设置mm会记录其随机堆的基址(mm->start_brk), 否则记录在mm->end_data中, 对于给定的brk不能小于以上值(堆不能为负数). 同样还要计算brk是否超越给定上限, 如果启用随机化地址则堆计算为brk - mm->start_brk, 否则为mm->end_data - mm->start_data. 最后将通过合法性判断且对齐后的地址与之前的brk地址比较, 根据比较结果选择三条处理分支.
1. 先看brk大于mm->brk的情况, 这种情况下首先要判断旧的brk地址到新的brk地址间是否已经存在其他映射的vma. 注意此处结束地址为newbrk+PAGE_SIZE, 该PAGE_SIZE是否是用于防溢出的? find_vma_intersection()(defined in include/linux/mm.h)是内联函数, 实质是调用find_vma()(defined in mm/mmap.c)查询给定地址区间内是否存在vma.

 1 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) 
 2 { 
 3         struct vm_area_struct *vma = NULL; 
 4         //优先判断最近使用的vma, 注释里指出有35%几率命中 
 5         vma = ACCESS_ONCE(mm->mmap_cache); 
 6         if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { 
 7                 struct rb_node *rb_node; 
 8                 rb_node = mm->mm_rb.rb_node; 
 9                 vma = NULL; 
10                 while (rb_node) { 
11                         struct vm_area_struct *vma_tmp; 
12                         vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); 
13                         if (vma_tmp->vm_end > addr) { 
14                                 vma = vma_tmp; 
15                                 if (vma_tmp->vm_start <= addr) 
16                                         break; 
17                                 rb_node = rb_node->rb_left; 
18                         } else 
19                                 rb_node = rb_node->rb_right; 
20                 } 
21                 if (vma) 
22                         mm->mmap_cache = vma; 
23         } 
24         return vma; 
25 } 

 

find_vma()中首先会查找最近访问过的mmap_cache, 判断它是否与给定地址存在交集(由于传入的是地址区间的起始地址, 此处只能判断mmap_cache地址区间小于给定区间的情况). 若mmap_cache不存在或存在但无交集则通过mm->mm_rb红黑树查找. mm->mm_rb是根据vma所在地址区间建立的树, 因此查找方式是寻找红黑树中vma->vm_end大于给定地址的vma子树, 如果vma->vm_end小于给定地址则该vma及其左子树地址空间均小于该地址, 此时索引右子树, 否则索引左子树. 遍历整个树直到节点为NULL或找到一个给定地址在vma地址区间内的vma. 注意如果是查询到NULL节点才退出情况下返回的vma不一定是与给定地址存在交集的, 所以在find_vma_intersection()中还会再判断一次vma->vm_start >= end_addr.
如果判断brk()增长的地址区间内不存在vma则可以放心的增长堆空间了. do_brk()(defined in mm/mmap.c)是实际扩展堆空间的函数.

 1 static unsigned long do_brk(unsigned long addr, unsigned long len) 
 2 { 
 3         struct mm_struct * mm = current->mm; 
 4         struct vm_area_struct * vma, * prev; 
 5         unsigned long flags; 
 6         struct rb_node ** rb_link, * rb_parent; 
 7         pgoff_t pgoff = addr >> PAGE_SHIFT; 
 8         int error; 
 9         len = PAGE_ALIGN(len); 
10         if (!len) 
11                 return addr; 
12         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 
13         //获取满足大小的最小的未映射区间的起始地址 
14         error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); 
15         if (error & ~PAGE_MASK) 
16                 return error; 
17         if (mm->def_flags & VM_LOCKED) { 
18                 unsigned long locked, lock_limit; 
19                 locked = len >> PAGE_SHIFT; 
20                 locked += mm->locked_vm; 
21                 lock_limit = rlimit(RLIMIT_MEMLOCK); 
22                 lock_limit >>= PAGE_SHIFT; 
23                 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) 
24                         return -EAGAIN; 
25         } 
26         verify_mm_writelocked(mm); 
27 munmap_back: 
28         //查找是否存在vma占用[addr, addr + len], 如存在则先unmap 
29         if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { 
30                 if (do_munmap(mm, addr, len)) 
31                         return -ENOMEM; 
32                 goto munmap_back; 
33         } 
34         if (!may_expand_vm(mm, len >> PAGE_SHIFT)) 
35                 return -ENOMEM; 
36         if (mm->map_count > sysctl_max_map_count) 
37                 return -ENOMEM; 
38         if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) 
39                 return -ENOMEM; 
40         //检测是否需合并vma 
41         vma = vma_merge(mm, prev, addr, addr + len, flags, 
42                 NULL, NULL, pgoff, NULL, NULL); 
43         if (vma) 
44                 goto out; 
45         //申请新的vma, 初始化并加入树中 
46         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 
47         if (!vma) { 
48                 vm_unacct_memory(len >> PAGE_SHIFT); 
49                 return -ENOMEM; 
50         } 
51         INIT_LIST_HEAD(&vma->anon_vma_chain); 
52         vma->vm_mm = mm; 
53         vma->vm_start = addr; 
54         vma->vm_end = addr + len; 
55         vma->vm_pgoff = pgoff; 
56         vma->vm_flags = flags; 
57         vma->vm_page_prot = vm_get_page_prot(flags); 
58         vma_link(mm, vma, prev, rb_link, rb_parent); 
59 out: 
60         perf_event_mmap(vma); 
61         mm->total_vm += len >> PAGE_SHIFT; 
62         if (flags & VM_LOCKED) 
63                 mm->locked_vm += (len >> PAGE_SHIFT); 
64         return addr; 
65 } 

 

do_brk()中第一个需要分析的是get_unmapped_area()(defined in mm/mmap.c), 这个函数实现获取一个满足长度需求且又是最小的空闲(即未映射的)区间.

 1 unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, 
 2         unsigned long pgoff, unsigned long flags) 
 3 { 
 4         unsigned long (*get_area)(struct file *, unsigned long, 
 5                 unsigned long, unsigned long, unsigned long); 
 6         //合法性检查 
 7         unsigned long error = arch_mmap_check(addr, len, flags); 
 8         if (error) 
 9                 return error; 
10         //溢出检查 
11         if (len > TASK_SIZE) 
12                 return -ENOMEM; 
13         //如果传入文件描述符非空使用file->f_op->get_unmapped_area, 否则使用mm->get_unmapped_area 
14         get_area = current->mm->get_unmapped_area; 
15         if (file && file->f_op && file->f_op->get_unmapped_area) 
16                 get_area = file->f_op->get_unmapped_area; 
17         //实际获取内存区间的接口 
18         addr = get_area(file, addr, len, pgoff, flags); 
19         if (IS_ERR_VALUE(addr)) 
20                 return addr; 
21         if (addr > TASK_SIZE - len) 
22                 return -ENOMEM; 
23         if (addr & ~PAGE_MASK) 
24                 return -EINVAL; 
25         addr = arch_rebalance_pgtables(addr, len); 
26         error = security_mmap_addr(addr); 
27         return error ? error : addr; 
28 } 

 

在获取内存区间前首先要做架构相关检查arch_mmap_check()(defined in arch/arm/include/uapi/asm/mman.h), 在ARM平台下要求是如果指定该区间为MAP_FIXED则起始地址不得为[0, 4K]之间, 另外对区间长度要求是不得超过TASK_SIZE(defined in arch/arm/include/asm/memory.h), 即用户空间大小(CONFIG_PAGE_OFFSET)减去16M. 然后调用回调获取地址区间, 对于文件与非文件的回调是不同的, 在分析brk()时走mm->get_unmapped_area回调. 该回调在arch_pick_mmap_layout()(defined in arch/arm/mm/mmap.c)中初始化(具体后文分析), 根据地址空间增长方向分为arch_get_unmapped_area与arch_get_unmapped_area_topdown(分别为向高地址扩展与向低地址扩展), 此处我们仅分析arch_get_unmapped_area. arch_get_unmapped_area同样也有两种实现, 公共的实现定义在mm/mmap.c, 而ARM平台实现见arch/arm/mm/mmap.c, 两者区别在于后者增加对cache相关的操作, 此处暂时不对cache做分析.

 1 //源码注释: 返回值必须以页对齐, 如果低位有值说明是错误码 
 2 unsigned long arch_get_unmapped_area(struct file *filp, \ 
 3         unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) 
 4 { 
 5         struct mm_struct *mm = current->mm; 
 6         struct vm_area_struct *vma; 
 7         struct vm_unmapped_area_info info; 
 8         if (len > TASK_SIZE) 
 9                 return -ENOMEM; 
10         //固定地址映射, 不论区间内是否已存在映射都直接返回给定地址 
11         if (flags & MAP_FIXED) 
12                 return addr; 
13         //判断给定地址起始的区间是否存在足够空闲区间, 如果是也返回给定地址 
14         if (addr) { 
15                 addr = PAGE_ALIGN(addr); 
16                 vma = find_vma(mm, addr); 
17                 if (TASK_SIZE - len >= addr && (!vma || addr + len <= vma->vm_start)) 
18                         return addr; 
19         } 
20         //映射非固定且给定地址区间不符合条件, 另选一个区间映射 
21         info.flags = 0; 
22         info.length = len; 
23         info.low_limit = TASK_UNMAPPED_BASE; 
24         info.high_limit = TASK_SIZE; 
25         info.align_mask = 0; 
26         return vm_unmapped_area(&info); 
27 } 

 

arch_get_unmapped_area()中又分三种情况(事情真多), 对于MAP_FIXED标记(指使用给定起始地址进行映射)的地址区间直接返回给定的地址. 否则先尝试(给定合法地址前提下)在给定地址分配, 如果未给定地址或给定地址区间已存在映射, 则需选择另一地址区间进行映射. vm_unmapped_area()(defined in include/linux/mm.h)会调用unmapped_area()或unmapped_area_topdown()来获取一个未映射的地址区间. 通过vm_unmapped_area()获取的地址区间必须满足以下要求: 不得与任何vma存在交集, 在给定的地址下限与地址上限之间, 满足希望的长度大小以及满足(begin_addr & align_mask) == (align_offset & align_mask). 对于brk()而言是固定地址映射(MAP_FIXED标记置位), 直接返回地址无需调用vm_unmapped_area(), 但考虑到后文分析mmap()时仍要经历该流程, 所以此处我们一并分析. 那让我们来看看符合条件的地址区间是如何得到的.

 1 unsigned long unmapped_area(struct vm_unmapped_area_info *info) 
 2 { 
 3         struct mm_struct *mm = current->mm; 
 4         struct vm_area_struct *vma; 
 5         unsigned long length, low_limit, high_limit, gap_start, gap_end; 
 6         //以最坏打算计算对齐(通常是在length基础上再加一页) 
 7         length = info->length + info->align_mask; 
 8         if (length < info->length) 
 9                 return -ENOMEM; 
10         //调整起始地址可能的上下界使整个地址区间都不超越上下界 
11         if (info->high_limit < length) 
12                 return -ENOMEM; 
13         high_limit = info->high_limit - length; 
14         if (info->low_limit > high_limit) 
15                 return -ENOMEM; 
16         low_limit = info->low_limit + length; 
17         //根节点为空直接略过后面循环 
18         if (RB_EMPTY_ROOT(&mm->mm_rb)) 
19                 goto check_highest; 
20         vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); 
21         //根节点及其子树的vma也没有足够剩余空间 
22         if (vma->rb_subtree_gap < length) 
23                 goto check_highest; 
24         while (true) { 
25                 //先遍历左子树, 查找符合空余内存大于length条件的最小的vma 
26                 //gap_end是符合条件vma的起始地址 
27                 gap_end = vma->vm_start; 
28                 //gap_end必须大于low_limit, 否则申请映射区域的起始地址会小于info->low_limit 
29                 //如果节点还有左子树则查找是否有更小vma, 否则使用该vma 
30                 if (gap_end >= low_limit && vma->vm_rb.rb_left) { 
31                         struct vm_area_struct *left = 
32                                 rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb); 
33                         //节点及其子树的vma有足够剩余空间说明还有更小的vma 
34                         if (left->rb_subtree_gap >= length) { 
35                                 vma = left; 
36                                 continue; 
37                         } 
38                 } 
39                 //gap_start是符合条件vma的前一节点vma的结束地址 
40                 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; 
41 check_current: 
42                 //gap_start必须小于high_limit, 否则申请映射区域的结束地址会大于info->high_limit 
43                 if (gap_start > high_limit) 
44                         return -ENOMEM; 
45                 //如果两个vma间空间足够则可以映射, 走入found分支 
46                 if (gap_end >= low_limit && gap_end - gap_start >= length) 
47                         goto found; 
48                 //左子树没找到符合条件的区域则遍历右子树 
49                 //查找符合空余内存大于length条件的最小的vma 
50                 if (vma->vm_rb.rb_right) { 
51                         struct vm_area_struct *right = 
52                                 rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb); 
53                         //如果右子树及其子树的vma有足够剩余空间则跳过此次循环, 重新从新节点开始搜索 
54                         //如果没有则需要向上找父节点看是否有足够空间 
55                         if (right->rb_subtree_gap >= length) { 
56                                 vma = right; 
57                                 continue; 
58                         } 
59                 } 
60                 while (true) { 
61                         //节点及其子树均已遍历, 向上寻找父节点 
62                         struct rb_node *prev = &vma->vm_rb; 
63                         //走到根节点则跳过循环 
64                         if (!rb_parent(prev)) 
65                                 goto check_highest; 
66                         vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); 
67                         //通过左子树找到父节点则重新走入check_current分支(左子树已遍历无需再循环判断) 
68                         //通过右子树找到的父节点是已遍历过的, 再向上找它的父节点 
69                         if (prev == vma->vm_rb.rb_left) { 
70                                 gap_start = vma->vm_prev->vm_end; 
71                                 gap_end = vma->vm_start; 
72                                 goto check_current; 
73                         } 
74                 } 
75         } 
76 check_highest: 
77         //mm->highest_vm_end是该进程所有vma中地址最大的vma的结束地址 
78         //从gap_start到gap_end之间一定都是未映射的空间 
79         gap_start = mm->highest_vm_end; 
80         gap_end = ULONG_MAX; 
81         if (gap_start > high_limit) 
82                 return -ENOMEM; 
83 found: 
84         //如果gap_start小于给定下限则设置为下限 
85         if (gap_start < info->low_limit) 
86                 gap_start = info->low_limit; 
87         //边界对齐 
88         gap_start += (info->align_offset - gap_start) & info->align_mask; 
89         VM_BUG_ON(gap_start + info->length > info->high_limit); 
90         VM_BUG_ON(gap_start + info->length > gap_end); 
91         return gap_start; 
92 } 

 

unmapped_area()的注释比较详细说明了搜索的原理: 通过查找红黑树节点的gap值来快速索引一个合适的区间. 合适的区间是指地址在目标区间之前的vma的结束地址小于给定地址上限减去目标区间长度, 当前vma的结束地址大于给定地址下限加上目标区间长度, 且前两者之差大于目标区间长度.
这里先解释下vma的数据成员方便我们分析代码: vma->vm_start, vma->vm_end是内存区间的起始地址与结束地址, vma->vm_rb是以前两个地址为索引建立的红黑树, 在该树上所有左子树节点的地址都小于父节点的地址, 父节点的地址又小于右子树节点. gap值是指该vma与其链表前一个vma的空余空间长度, 对树上每个节点vma->rb_subtree_gap是该vma及其左右子树每个vma的gap值中的最大值.
我们按代码来分析下函数流程, 首先根据传入的长度与地址空间的上下限计算出目标区间的地址的上下限. 这里传入的地址空间的上下限分别是TASK_UNMAPPED_BASE与TASK_SIZE(下限不是接近0起始的位置是为了给堆足够空间?). 然后索引红黑树, 如果:
1. 树根为空即第一次分配, 直接跳入check_highest分支. check_highest分支会检查地址空间最高处是否有足够区间, 对于第一次分配的情况即直接从给定下限开始分配.
2. 若根节点非空则判断根节点的gap值是否满足长度要求, 如不满足说明所有vma之间的空闲区间也不满足分配要求, 同样选择check_highest分支.
3. 若根节点非空且根节点的gap值满足长度要求则索引子树查找最合适的gap值. 查找方式是:
3.1. 先索引左子树, 如果当前vma的起始地址大于目标区间的起始地址的下限且当前vma的左子树节点存在则判断该子树节点gap值是否仍满足长度需求, 如满足说明左子树存在大小更合适的gap.
3.2. 否则进入check_current分支判断当前vma是否满足需求. check_current分支首先判断该vma的前一个vma的结束地址(可能为空那么地址就是0)是否超越目标区间的起始地址的上限, 再判断当前vma的起始地址是否超越目标区间的结束地址的下限以及两个vma之间的空闲区间是否满足长度需求. 如果以上判断都通过则选择该vma, 走入found分支.
3.3. 否则说明该vma及其左子树上的vma之间的空闲区间均不满足目标需求, 此时再查找右子树, 如果右子树gap值满足长度需求则从头重新遍历子树, 否则说明右子树上也不存在合适的节点. 此时就要回溯该节点的父节点的右子树, 如果父节点不存在则直接进入check_highest分支, 否则如果该节点是其父节点的左子树则说明父节点与父节点的右子树尚未判断过, 选择父节点走入check_current分支, 如果该节点是其父节点的右子树则说明父节点及其子树均已遍历, 选择继续向上回溯. 如果遍历完整个树后仍未找到合适的节点则同样走入check_highest分支.

回到get_unmapped_area(), 注意此处返回的值可能是错误码也可能是以页边界为起始的地址, 所以需要IS_ERR_VALUE()宏判断是否是错误码.

再向上回到do_brk()中, 因为在get_unmapped_area()中只获取了地址区间, 并未判断区间是否存在vma(MAP_FIXED不允许修改映射地址), 所以需要先判断区间是否存在vma, 有则需去映射. find_vma_links()(defined in mm/mmap.c)作用是遍历红黑树, 判断给定区间是否存在vma, 如存在则返回ENOMEM, 否则返回离给定区间最近的两个vma.

 1 static int find_vma_links(struct mm_struct *mm, \ 
 2         unsigned long addr, unsigned long end, struct vm_area_struct **pprev, \ 
 3         struct rb_node ***rb_link, struct rb_node **rb_parent) 
 4 { 
 5         struct rb_node **__rb_link, *__rb_parent, *rb_prev; 
 6         __rb_link = &mm->mm_rb.rb_node; 
 7         rb_prev = __rb_parent = NULL; 
 8         while (*__rb_link) { 
 9                 struct vm_area_struct *vma_tmp; 
10                 __rb_parent = *__rb_link; 
11                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); 
12                 if (vma_tmp->vm_end > addr) { 
13                         if (vma_tmp->vm_start < end) 
14                                 return -ENOMEM; 
15                         __rb_link = &__rb_parent->rb_left; 
16                 } else { 
17                         rb_prev = __rb_parent; 
18                         __rb_link = &__rb_parent->rb_right; 
19                 } 
20         } 
21         *pprev = NULL; 
22         if (rb_prev) 
23                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); 
24         *rb_link = __rb_link; 
25         *rb_parent = __rb_parent; 
26         return 0; 
27 } 

 

find_vma_links()是值得细细分析的优美代码, 首先它用二维指针__rb_link作为循环变量, 减少对根节点非空的额外判断(linus说的good taste code).
它通过判断节点vma->vm_end是否大于addr选择索引左右子树, 直到当前vma与给定区间有交集或遍历到空的叶子节点, 最后返回: rb_parent为给定区间地址后的第一个vma节点的二维指针, pprev为给定区间地址前的第一个vma节点的二维指针, rb_link为给定区间在加入红黑树管理时的节点位置, 这是一个三维指针.
我们来仔细分析其逻辑, 首先对于根节点为空的情况, *pprev肯定为NULL, *rb_parent也为NULL, 给定区间的vma为进程第一个vma, 所以*rb_link为&mm->mm_rb.rb_node. 对于根节点非空情况, 首先判断当前节点地址区间是否在给定区间之前, 如在之前则选择右子树判断, 否则选择左子树判断. 如果选择左子树说明给定区间地址小于当前节点vma, 无需记录当前vma节点, 如果选择右子树即当前节点可能为给定区间地址前的第一个vma, 因此记录在rb_prev中. 重复循环直到找到与给定区间有交集的vma或叶子节点为空.
如果find_vma_links()返回ENOMEM则尝试解除该vma的映射, do_munmap()(defined in mm/mmap.c)负责将该vma去映射, 因do_munmap()也是munmap()的实现, 该函数放到后面分析.
如果成功解除映射则重新回到munmap_back分支继续判断, 否则返回ENOMEM, 直到给定区间不存在内存映射.

再次回到do_brk(), 经过一系列系统限制判断后, 终于可以调用vma_merge()(defined in mm/mmap.c)进行内存合并.

 1 /** 
 2  * 对给定的映射请求我们需要判断是否需要将它与前驱或后继映射合并(或两者同时合并) 
 3  * 在大多数情况下, 通过调用mmap, brk或mremap后得到的区间在调用vma_merge时仍未真正映射 
 4  * 但对mprotect, 往往调用vma_merge时已经完成映射, 且区域的flags往往将要变成vm_flags 
 5  * 我们以AAA代表目标区间, PPP代表prev vma, NNN代表next vma, ___代表空缺 
 6  * 
 7  *    AAA 
 8  * PPP___NNN 目标区间存在空缺中, 只能为mmap, brk或mremap, 可能出现 
 9  * 1. PPPPPPPPP 可能prev curr next三块vma合并 
10  * 2. PPPPPPNNN 可能prev curr两块vma合并 
11  * 3. PPPNNNNNN 可能curr next两块vma合并 
12  * 
13  * 对于mprotect, 有以下情况 
14  * 
15  * PPPPPPNNNX 
16  *   AA       修改prev中间区间属性, 不可merge 
17  *     AA     修改prev尾部区间属性, 4. 可能变为PPPPNNNNNX 
18  *       AA   修改next首部区间属性, 5. 可能变为PPPPPPPPNX 
19  *       AAA  修改next全部区间属性, 6. 可能变为PPPPPPPPPP 或 
20  *                                  7. 可能变为PPPPPPPPPX 或 
21  *                                  8. 可能变为PPPPPPNNNN 
22  * 
23 **/ 
24 struct vm_area_struct *vma_merge(struct mm_struct *mm, \ 
25         struct vm_area_struct *prev, unsigned long addr, unsigned long end, \ 
26         unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, \ 
27         pgoff_t pgoff, struct mempolicy *policy, const char __user *anon_name) 
28 { 
29         pgoff_t pglen = (end - addr) >> PAGE_SHIFT; 
30         struct vm_area_struct *area, *next; 
31         int err; 
32         /** 
33          * 对不可merge的vma直接返回 
34          * 后面会要求vma->vm_flags == vm_flags 
35          * 所以此处等于也判断了vma->vm_flags & VM_SPECIAL 
36          * 
37         **/ 
38         if (vm_flags & VM_SPECIAL) 
39                 return NULL; 
40         if (prev) 
41                 next = prev->vm_next; 
42         else 
43                 next = mm->mmap; 
44         area = next; 
45         //next即目标区间, mprotect情况6, 7, 8 
46         if (next && next->vm_end == end) 
47                 next = next->vm_next; 
48         //判断是否可以与前驱合并 
49         if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && \ 
50                 can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, anon_name)) { 
51                 //判断是否可以与后继合并 
52                 if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && \ 
53                         can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, anon_name) && \ 
54                         is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { 
55                         //情况1, 6 
56                         err = vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL); 
57                 } else 
58                         //情况2, 5, 7 
59                         err = vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL); 
60                 if (err) 
61                         return NULL; 
62                 khugepaged_enter_vma_merge(prev); 
63                 return prev; 
64         } 
65         //判断是否可以与后继合并 
66         if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && \ 
67                 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, anon_name)) { 
68                 if (prev && addr < prev->vm_end) 
69                         //情况4 
70                         err = vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL); 
71                 else 
72                         //情况3, 8 
73                         err = vma_adjust(area, addr, next->vm_end, next->vm_pgoff - pglen, NULL); 
74                 if (err) 
75                         return NULL; 
76                 khugepaged_enter_vma_merge(area); 
77                 return area; 
78         } 
79         return NULL; 
80 } 

 

vma_merge()又是一个复杂的接口函数, 好在注释清楚. 判断merge的策略源码注释已经写的很明白了, 一共8种可能发生merge的情况, 这里分析下代码实现中值得注意的地方. 一是首先判断该vma是否为不可merge的vma, 如果是则直接返回NULL.
再来看下判断是否可合并的函数can_vma_merge_before()(defined in mm/mmap.c)是如何判断的(can_vma_merge_after()类似):

 1 static int can_vma_merge_before(struct vm_area_struct *vma, \ 
 2         unsigned long vm_flags, struct anon_vma *anon_vma, \ 
 3         struct file *file, pgoff_t vm_pgoff, const char __user *anon_name) 
 4 { 
 5         if (is_mergeable_vma(vma, file, vm_flags, anon_name) && 
 6                 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { 
 7                 if (vma->vm_pgoff == vm_pgoff) 
 8                         return 1; 
 9         } 
10         return 0; 
11 } 

 

is_mergeable_vma()(defined in mm/mmap.c)判断的是两个vma是否可合并. 即要求两者的vma->vm_flags一致, vma->vm_file为同一文件, 有同一匿名映射. 如果vma->vm_ops->close非空则驱动可能还要释放对每个vma的资源, 那也不能合并vma. is_mergeable_anon_vma()(defined in mm/mmap.c)判断的是两个匿名映射是否可合并. 最后才判断前者结束地址的页边界是否与后者起始地址的页边界相同.
对于两块vma合并需要判断两者vma是否可合并与两者匿名映射是否可合并, 对于三块vma合并还需要额外判断prev和next的匿名映射是否一致. 如果一致则调用vma_adjust()(defined in mm/mmap.c)调整vma.

  1 int vma_adjust(struct vm_area_struct *vma, unsigned long start, 
  2         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) 
  3 { 
  4         struct mm_struct *mm = vma->vm_mm; 
  5         struct vm_area_struct *next = vma->vm_next; 
  6         struct vm_area_struct *exporter = NULL, *importer = NULL; 
  7         struct address_space *mapping = NULL; 
  8         struct rb_root *root = NULL; 
  9         struct anon_vma *anon_vma = NULL; 
 10         struct file *file = vma->vm_file; 
 11         bool start_changed = false, end_changed = false; 
 12         long adjust_next = 0; 
 13         int remove_next = 0; 
 14         //insert为NULL即不是分割vma, 仅仅是调整vma大小, next存在则可能需同时调整两个vma 
 15         if (next && !insert) { 
 16                 if (end >= next->vm_end) { 
 17 again: 
 18                         //vma扩展超过它的后一节点, 甚至可能超过后一节点的后一节点, 情况6 
 19                         remove_next = 1 + (end > next->vm_end); 
 20                         end = next->vm_end; 
 21                         exporter = next; 
 22                         importer = vma; 
 23                 } else if (end > next->vm_start) { 
 24                         //vma扩展到后一节点的一部分, 情况5 
 25                         adjust_next = (end - next->vm_start) >> PAGE_SHIFT; 
 26                         exporter = next; 
 27                         importer = vma; 
 28                 } else if (end < vma->vm_end) { 
 29                         //vma收缩且insert为NULL即不是分割vma, 情况4 
 30                         adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); 
 31                         exporter = vma; 
 32                         importer = next; 
 33                 } 
 34                 //如果收缩的vma存在匿名映射, 而扩展的vma无匿名映射则必须拷贝匿名映射 
 35                 if (exporter && exporter->anon_vma && !importer->anon_vma) { 
 36                         //复制anon_vma来保存物理页面映射信息 
 37                         if (anon_vma_clone(importer, exporter)) 
 38                                 return -ENOMEM; 
 39                         importer->anon_vma = exporter->anon_vma; 
 40                 } 
 41         } 
 42         //文件映射, 略过 
 43         if (file) { 
 44                 mapping = file->f_mapping; 
 45                 if (!(vma->vm_flags & VM_NONLINEAR)) { 
 46                         root = &mapping->i_mmap; 
 47                         uprobe_munmap(vma, vma->vm_start, vma->vm_end); 
 48                         if (adjust_next) 
 49                                 uprobe_munmap(next, next->vm_start, next->vm_end); 
 50                 } 
 51                 mutex_lock(&mapping->i_mmap_mutex); 
 52                 if (insert) { 
 53                         __vma_link_file(insert); 
 54                 } 
 55         } 
 56         vma_adjust_trans_huge(vma, start, end, adjust_next); 
 57         anon_vma = vma->anon_vma; 
 58         if (!anon_vma && adjust_next) 
 59                 anon_vma = next->anon_vma; 
 60         if (anon_vma) { 
 61                 VM_BUG_ON(adjust_next && next->anon_vma && 
 62                           anon_vma != next->anon_vma); 
 63                 anon_vma_lock_write(anon_vma); 
 64                 //需在vma相关成员修改前调用 
 65                 //注意与anon_vma_interval_tree_post_update_vma()对应调用 
 66                 anon_vma_interval_tree_pre_update_vma(vma); 
 67                 if (adjust_next) 
 68                         anon_vma_interval_tree_pre_update_vma(next); 
 69         } 
 70         //文件映射, 略过 
 71         if (root) { 
 72                 flush_dcache_mmap_lock(mapping); 
 73                 vma_interval_tree_remove(vma, root); 
 74                 if (adjust_next) 
 75                         vma_interval_tree_remove(next, root); 
 76         } 
 77         //修改vma的起始/结束地址, 页偏移 
 78         if (start != vma->vm_start) { 
 79                 vma->vm_start = start; 
 80                 start_changed = true; 
 81         } 
 82         if (end != vma->vm_end) { 
 83                 vma->vm_end = end; 
 84                 end_changed = true; 
 85         } 
 86         vma->vm_pgoff = pgoff; 
 87         if (adjust_next) { 
 88                 next->vm_start += adjust_next << PAGE_SHIFT; 
 89                 next->vm_pgoff += adjust_next; 
 90         } 
 91         //文件映射, 略过 
 92         if (root) { 
 93                 if (adjust_next) 
 94                         vma_interval_tree_insert(next, root); 
 95                 vma_interval_tree_insert(vma, root); 
 96                 flush_dcache_mmap_unlock(mapping); 
 97         } 
 98         /** 
 99          * 三种情况: 
100          * 1. remove_next不为零是vma扩展且吞并之后的一个或两个映射区间 
101          * 2. insert非空是插入映射区间 
102          * 3. vma向后扩展, 但未吞并后一个映射区间的全部地址 
103          * 
104         **/ 
105         if (remove_next) { 
106                 //vma_merge已经将next合入vma, 此处仅unlink该节点 
107                 __vma_unlink(mm, next, vma); 
108                 if (file) 
109                         __remove_shared_vm_struct(next, file, mapping); 
110         } else if (insert) { 
 111                 //split_vma已经创建了insert节点, 此处将insert链入链表与红黑树 
112                 __insert_vm_struct(mm, insert); 
113         } else { 
114                 //vma起始地址修改, 更新vma及其向上节点的gap值 
115                 if (start_changed) 
116                         vma_gap_update(vma); 
117                 //vma结束地址修改, 更新next及其向上节点的gap值 
118                 //如果next不存在别忘了修改mm->highest_vm_end 
119                 if (end_changed) { 
120                         if (!next) 
121                                 mm->highest_vm_end = end; 
122                         else if (!adjust_next) 
123                                 vma_gap_update(next); 
124                 } 
125         } 
126         if (anon_vma) { 
127                 //注意与anon_vma_interval_tree_pre_update_vma()对应调用 
128                 anon_vma_interval_tree_post_update_vma(vma); 
129                 if (adjust_next) 
130                         anon_vma_interval_tree_post_update_vma(next); 
131                 anon_vma_unlock_write(anon_vma); 
132         } 
133         //文件映射, 略过 
134         if (mapping) 
135                 mutex_unlock(&mapping->i_mmap_mutex); 
136         if (root) { 
137                 uprobe_mmap(vma); 
138                 if (adjust_next) 
139                         uprobe_mmap(next); 
140         } 
141         if (remove_next) { 
142                 if (file) { 
143                         uprobe_munmap(next, next->vm_start, next->vm_end); 
144                         fput(file); 
145                 } 
146                 if (next->anon_vma) 
147                         anon_vma_merge(vma, next); 
148                 mm->map_count--; 
149                 vma_set_policy(vma, vma_policy(next)); 
150                 kmem_cache_free(vm_area_cachep, next); 
151                 next = vma->vm_next; 
152                 //vma连续吞并两个映射区间, 循环两次 
153                 if (remove_next == 2) 
154                         goto again; 
155                 else if (next) 
156                         vma_gap_update(next); 
157                 else 
158                         mm->highest_vm_end = end; 
159         } 
160         if (insert && file) 
161                 uprobe_mmap(insert); 
162         validate_mm(mm); 
163         return 0; 
164 } 

 

vma_adjust()又是一个庞大的函数, 因为它不仅合并vma, 所有调整vma的操作都在此完成. 先解释下几个参数的含义: insert是插入的vma, 对于合并vma的情况insert为空, vma为受到调整的第一块vma, 对vma_merge()而言可能为prev也可能为目标区间, start与end分别为调整后整个vma的区间地址, exporter为缩减区间的vma, importer为扩展区间的vma.
对vma调整主要涉及几点: 修改vma的地址区间及变化区间的匿名映射, 重新计算gap值以及文件相关操作. 这里容易忽视的一点是在判断哪个vma扩展地址哪个vma缩减地址后不要忘记复制对应区间的匿名映射, anon_vma_clone()(defined in mm/rmap.c)负责复制匿名映射.

 1 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 
 2 { 
 3         struct anon_vma_chain *avc, *pavc; 
 4         struct anon_vma *root = NULL; 
 5         //遍历src->anon_vma_chain中每个anon_vma_chain 
 6         list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 
 7                 struct anon_vma *anon_vma; 
 8                 //内存申请, 不等待 
 9                 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); 
10                 if (unlikely(!avc)) { 
11                         unlock_anon_vma_root(root); 
12                         root = NULL; 
13                         //高优先级申请, 再申请不到只能失败了 
14                         avc = anon_vma_chain_alloc(GFP_KERNEL); 
15                         if (!avc) 
16                                 goto enomem_failure; 
17                 } 
18                 anon_vma = pavc->anon_vma; 
19                 //加锁anon_vma->rwsem 
20                 //考虑到多个anon_vma归属于一个树, 因此找到根节点加锁一个节点即可 
21                 root = lock_anon_vma_root(root, anon_vma); 
22                 //初始化avc 
23                 anon_vma_chain_link(dst, avc, anon_vma); 
24         } 
25         unlock_anon_vma_root(root); 
26         return 0; 
27  enomem_failure: 
28         unlink_anon_vmas(dst); 
29         return -ENOMEM; 
30 } 

 

在修改vma->vm_start, vma->vm_end等成员时需注意同时对匿名映射的操作. 在有匿名映射的情况下需要成对调用anon_vma_interval_tree_pre_update_vma() / anon_vma_interval_tree_post_update_vma(), 并在两者之间修改vma相关成员, 注意事项见anon_vma_interval_tree_pre_update_vma()(defined in mm/mmap.c).

 1 /** 
 2  * 有声明anon_vma的vma已经在anon_vma树中 
 3  * 在更新vma的vm_start, vm_end, vm_pgoff成员前vma必须使用anon_vma_interval_tree_pre_update_vma移除相关anon_vma 
 4  * 在更新完vma后需要使用anon_vma_interval_tree_post_update_vma重新添加至树中 
 5  * 整个操作必须使用mmap_sem与anon_vma树的根节点的mutex 
 6  * 
 7 **/ 
 8 static inline void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) 
 9 { 
10         struct anon_vma_chain *avc; 
11         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 
12                 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); 
13 } 

 

匿名映射相关的内容本文暂时不展开, 后续分析逆向映射时再做分析. 额外提一句, anon_vma_interval_tree_remove() / anon_vma_interval_tree_insert()中调用的子接口是由宏定义INTERVAL_TREE_DEFINE(defined in include/linux/interval_tree_generic.h)拼接的.

1 INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.linear.rb, \ 
2         unsigned long, shared.linear.rb_subtree_last, \ 
3         vma_start_pgoff, vma_last_pgoff,, vma_interval_tree) 

 

还是回到vma_adjust(), 在对vma地址区间进行修改并重新建立匿名映射后, 需要对增删的vma进行处理. 如存在需要移除的vma(remove_next != 0)则调用__vma_unlink(defined in mm/mmap.c).

 1 static inline void __vma_unlink(struct mm_struct *mm, \ 
 2         struct vm_area_struct *vma, struct vm_area_struct *prev) 
 3 { 
 4         struct vm_area_struct *next; 
 5         vma_rb_erase(vma, &mm->mm_rb); 
 6         prev->vm_next = next = vma->vm_next; 
 7         if (next) 
 8                 next->vm_prev = prev; 
 9         if (mm->mmap_cache == vma) 
10                 mm->mmap_cache = prev; 
11 } 

 

__vma_unlink()还是很人性化的接口(相比前面两个函数), vma_rb_erase()实际上调用rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks), 其中又会调用vma_gap_callbacks回调.

1 RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, \ 
2         vm_rb, unsigned long, rb_subtree_gap, vma_compute_subtree_gap) 

 

vma_gap_callbacks又是宏定义的rb_augment_callbacks结构, 直接搜索是找不到的. RB_DECLARE_CALLBACKS(defined in include/linux/rbtree_augmented.h)定义了vma_gap_callbacks_*(*为propagate, copy, rotate), 具体就不展开分析了.

再次回到vma_adjust(), 如果insert非空则需插入vma, __insert_vm_struct()(defined in mm/mmap.c)完成两件事, 找到insert在树中的位置, 调用__vma_link()(defined in mm/mmap.c)将insert加入树中(实际调用__vma_link_list()与__vma_link_rb()).

 1 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, 
 2         struct rb_node **rb_link, struct rb_node *rb_parent) 
 3 { 
 4         //修改vma->vm_next的gap值, 如不存在则修改mm->highest_vm_end 
 5         if (vma->vm_next) 
 6                 vma_gap_update(vma->vm_next); 
 7         else 
 8                 mm->highest_vm_end = vma->vm_end; 
 9         /** 
10          * 源码注释说通过红黑树找到的vma插入位置不知道vma->vm_prev的位置 
11          * 因此不能计算vma的gap值, 需要先插入vma后再进行计算 
12          * 但我不明白为什么不知道vma->vm_prev, find_vma_links()不是已经找到了吗? 
13          * 
14         **/ 
15         rb_link_node(&vma->vm_rb, rb_parent, rb_link); 
16         vma->rb_subtree_gap = 0; 
17         vma_gap_update(vma); 
18         vma_rb_insert(vma, &mm->mm_rb); 
19 } 

 

__vma_link_list()即将其链表链接起来, 就不赘述了. __vma_link_rb()倒需要稍稍注释一下. 这个接口做了两件事: 重新计算gap值和将节点加入树. 为什么移除节点时不需重新计算gap值? 因为没有区间发生地址上的变化, 但插入节点可能改变gap值. vma_gap_update()又调用了vma_gap_callbacks_propagate(), 不想分析偏偏调用了这么多次(- -!)只好来看下.

 1 #define RB_DECLARE_CALLBACKS(rbstatic, rbname,                      \ 
 2     rbstruct, rbfield, rbtype, rbaugmented, rbcompute)              \ 
 3 static inline void                                                  \ 
 4 rbname ## _propagate(struct rb_node *rb, struct rb_node *stop)      \ 
 5 {                                                                   \ 
 6     while (rb != stop) {                                            \ 
 7         rbstruct *node = rb_entry(rb, rbstruct, rbfield);           \ 
 8         rbtype augmented = rbcompute(node);                         \ 
 9         if (node->rbaugmented == augmented)                         \ 
10             break;                                                  \ 
11         node->rbaugmented = augmented;                              \ 
12         rb = rb_parent(&node->rbfield);                             \ 
13     }                                                               \ 
14 }                                                                   \ 
15 static inline void                                                  \ 
16 rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)     \ 
17 {                                                                   \ 
18     rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \ 
19     rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \ 
20     new->rbaugmented = old->rbaugmented;                            \ 
21 }                                                                   \ 
22 static void                                                         \ 
23 rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)   \ 
24 {                                                                   \ 
25     rbstruct *old = rb_entry(rb_old, rbstruct, rbfield);            \ 
26     rbstruct *new = rb_entry(rb_new, rbstruct, rbfield);            \ 
27     new->rbaugmented = old->rbaugmented;                            \ 
28     old->rbaugmented = rbcompute(old);                              \ 
29 }                                                                   \ 
30 rbstatic const struct rb_augment_callbacks rbname = {               \ 
31     rbname ## _propagate, rbname ## _copy, rbname ## _rotate        \ 
32 }; 

 

结合上面vma_gap_callbacks的定义, 可以得出vma_gap_callbacks_propagate()的作用是给定树节点rb与stop, 从rb开始只要rb不等于stop就向上搜索, 并调用vma_compute_subtree_gap()(defined in mm/mmap.c)计算当前节点的gap值, 如果计算值与原值一致则说明无需再更改父节点的值, 选择break退出.

 1 static long vma_compute_subtree_gap(struct vm_area_struct *vma) 
 2 { 
 3         unsigned long max, subtree_gap; 
 4         max = vma->vm_start; 
 5         if (vma->vm_prev) 
 6                 max -= vma->vm_prev->vm_end; 
 7         if (vma->vm_rb.rb_left) { 
 8                 subtree_gap = rb_entry(vma->vm_rb.rb_left, 
 9                         struct vm_area_struct, vm_rb)->rb_subtree_gap; 
10                 if (subtree_gap > max) 
11                         max = subtree_gap; 
12         } 
13         if (vma->vm_rb.rb_right) { 
14                 subtree_gap = rb_entry(vma->vm_rb.rb_right, 
15                         struct vm_area_struct, vm_rb)->rb_subtree_gap; 
16                 if (subtree_gap > max) 
17                         max = subtree_gap; 
18         } 
19         return max; 
20 } 

 

回到__vma_link_rb(), 这里调用vma_gap_update()时传入的参数是vma->vm_next, 这是因为vma的gap值的计算方式是统计vma左侧区间的gap值的最大值, (除本身插入的vma外)第一个受影响的是vma->vm_next. 之后将vma加入红黑树, 更新gap值.

第三次回到vma_adjust(), 如果既没有移除也未添加节点则根据起始/结束地址是否变化更新vma或vma->vm_next的gap值. 之后完成匿名映射的后处理以及其它收尾工作后一路返回.

回到do_brk(), 在成功调用vma_merge()后判断vma是否为空, 如果非空即成功合并vma, 使用上一次的vma, 否则需要分配一个新的vma结构体来记录该vma. 至此brk()流程完成, 对于缩减堆的情况我们放在munmap()中分析.

分析完brk()我们再来看下mmap(), mmap()在内核中定义是mmap_pgoff()(defined in mm/mmap.c).

 1 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, \ 
 2         unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) 
 3 { 
 4         struct file *file = NULL; 
 5         unsigned long retval = -EBADF; 
 6         if (!(flags & MAP_ANONYMOUS)) { 
 7                 //非匿名映射 
 8                 audit_mmap_fd(fd, flags); 
 9                 if (unlikely(flags & MAP_HUGETLB)) 
10                         return -EINVAL; 
11                 file = fget(fd); 
12                 if (!file) 
13                         goto out; 
14                 if (is_file_hugepages(file)) 
15                         len = ALIGN(len, huge_page_size(hstate_file(file))); 
16         } else if (flags & MAP_HUGETLB) { 
17                 //huge tlb分支 
18                 struct user_struct *user = NULL; 
19                 struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK); 
20                 if (!hs) 
21                         return -EINVAL; 
22                 len = ALIGN(len, huge_page_size(hs)); 
23                 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, \ 
24                         &user, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); 
25                 if (IS_ERR(file)) 
26                         return PTR_ERR(file); 
27         } 
28         flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 
29         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); 
30         if (file) 
31                 fput(file); 
32 out: 
33         return retval; 
34 } 

 

在分析过brk()后分析mmap()简单很多, 我们先略过文件映射与HUGE TLB, 直接看下vm_mmap_pgoff()(defined in mm/util.c).

 1 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, \ 
 2         unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff) 
 3 { 
 4         unsigned long ret; 
 5         struct mm_struct *mm = current->mm; 
 6         unsigned long populate; 
 7         ret = security_mmap_file(file, prot, flag); 
 8         if (!ret) { 
 9                 down_write(&mm->mmap_sem); 
10                 ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, &populate); 
11                 up_write(&mm->mmap_sem); 
12                 if (populate) 
13                         mm_populate(ret, populate); 
14         } 
15         return ret; 
16 } 

 

vm_mmap_pgoff()先调用security_mmap_file()进行安全检查, 再获取mmap操作必须的信号量mm->mmap_sem, 然后调用do_mmap_pgoff(defined in mm/mmap.c). do_mmap_pgoff()主要是对标记位的判断, 实际映射接口在mmap_region()(defined in mm/mmap.c)中. 注意返回的populate是在MAP_POPULATE置位时需要预先分配物理内存.

  1 unsigned long do_mmap_pgoff(struct file *file, \ 
  2         unsigned long addr, unsigned long len, unsigned long prot, \ 
  3         unsigned long flags, unsigned long pgoff, unsigned long *populate) 
  4 { 
  5         struct mm_struct * mm = current->mm; 
  6         struct inode *inode; 
  7         vm_flags_t vm_flags; 
  8         *populate = 0; 
  9         //默认可读即可执行, 例外是不能执行的底层文件系统挂载时不增加可执行标记 
 10         if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) 
 11                 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) 
 12                         prot |= PROT_EXEC; 
 13         if (!len) 
 14                 return -EINVAL; 
 15         //固定地址映射需判断传入的地址是否小于mmap的下界 
 16         if (!(flags & MAP_FIXED)) 
 17                 addr = round_hint_to_min(addr); 
 18         len = PAGE_ALIGN(len); 
 19         if (!len) 
 20                 return -ENOMEM; 
 21         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) 
 22                 return -EOVERFLOW; 
 23         if (mm->map_count > sysctl_max_map_count) 
 24                 return -ENOMEM; 
 25         //获取将要映射的地址 
 26         addr = get_unmapped_area(file, addr, len, pgoff, flags); 
 27         if (addr & ~PAGE_MASK) 
 28                 return addr; 
 29         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | 
 30                 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; 
 31         if (flags & MAP_LOCKED) 
 32                 if (!can_do_mlock()) 
 33                         return -EPERM; 
 34         if (vm_flags & VM_LOCKED) { 
 35                 unsigned long locked, lock_limit; 
 36                 locked = len >> PAGE_SHIFT; 
 37                 locked += mm->locked_vm; 
 38                 lock_limit = rlimit(RLIMIT_MEMLOCK); 
 39                 lock_limit >>= PAGE_SHIFT; 
 40                 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) 
 41                         return -EAGAIN; 
 42         } 
 43         inode = file ? file_inode(file) : NULL; 
 44         if (file) { 
 45                 //文件映射 
 46                 switch (flags & MAP_TYPE) { 
 47                 case MAP_SHARED: 
 48                         if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE)) 
 49                                 return -EACCES; 
 50                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) 
 51                                 return -EACCES; 
 52                         if (locks_verify_locked(inode)) 
 53                                 return -EAGAIN; 
 54                         vm_flags |= VM_SHARED | VM_MAYSHARE; 
 55                         if (!(file->f_mode & FMODE_WRITE)) 
 56                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED); 
 57                 case MAP_PRIVATE: 
 58                         if (!(file->f_mode & FMODE_READ)) 
 59                                 return -EACCES; 
 60                         if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) { 
 61                                 if (vm_flags & VM_EXEC) 
 62                                         return -EPERM; 
 63                                 vm_flags &= ~VM_MAYEXEC; 
 64                         } 
 65                         if (!file->f_op || !file->f_op->mmap) 
 66                                 return -ENODEV; 
 67                         break; 
 68                 default: 
 69                         return -EINVAL; 
 70                 } 
 71         } else { 
 72                 //匿名映射 
 73                 switch (flags & MAP_TYPE) { 
 74                 case MAP_SHARED: 
 75                         pgoff = 0; 
 76                         vm_flags |= VM_SHARED | VM_MAYSHARE; 
 77                         break; 
 78                 case MAP_PRIVATE: 
 79                         pgoff = addr >> PAGE_SHIFT; 
 80                         break; 
 81                 default: 
 82                         return -EINVAL; 
 83                 } 
 84         } 
 85         if (flags & MAP_NORESERVE) { 
 86                 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) 
 87                         vm_flags |= VM_NORESERVE; 
 88                 if (file && is_file_hugepages(file)) 
 89                         vm_flags |= VM_NORESERVE; 
 90         } 
 91         addr = mmap_region(file, addr, len, vm_flags, pgoff); 
 92         if (!IS_ERR_VALUE(addr) && 
 93             ((vm_flags & VM_LOCKED) || 
 94              (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) 
 95                 *populate = len; 
 96         return addr; 
 97 } 
 98 unsigned long mmap_region(struct file *file, unsigned long addr, \ 
 99         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) 
100 { 
101         struct mm_struct *mm = current->mm; 
102         struct vm_area_struct *vma, *prev; 
103         int correct_wcount = 0; 
104         int error; 
105         struct rb_node **rb_link, *rb_parent; 
106         unsigned long charged = 0; 
107         struct inode *inode = file ? file_inode(file) : NULL; 
108         if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { 
109                 unsigned long nr_pages; 
110                 //对于超过内存限制的情况返回ENOMEM 
111                 //但固定映射情况可能存在区间已分配的情况, 计算移除已映射部分后是否足够映射 
112                 if (!(vm_flags & MAP_FIXED)) 
113                         return -ENOMEM; 
114                 nr_pages = count_vma_pages_range(mm, addr, addr + len); 
115                 if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages)) 
116                         return -ENOMEM; 
117         } 
118         error = -ENOMEM; 
119 munmap_back: 
120         if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) { 
121                 if (do_munmap(mm, addr, len)) 
122                         return -ENOMEM; 
123                 goto munmap_back; 
124         } 
125         if (accountable_mapping(file, vm_flags)) { 
126                 charged = len >> PAGE_SHIFT; 
127                 if (security_vm_enough_memory_mm(mm, charged)) 
128                         return -ENOMEM; 
129                 vm_flags |= VM_ACCOUNT; 
130         } 
131         //判断是否可在原有vma上扩展 
132         vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, 
133                         NULL, NULL); 
134         if (vma) 
135                 goto out; 
136         //如不可在原有vma上扩展则新分配vma结构管理 
137         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 
138         if (!vma) { 
139                 error = -ENOMEM; 
140                 goto unacct_error; 
141         } 
142         vma->vm_mm = mm; 
143         vma->vm_start = addr; 
144         vma->vm_end = addr + len; 
145         vma->vm_flags = vm_flags; 
146         vma->vm_page_prot = vm_get_page_prot(vm_flags); 
147         vma->vm_pgoff = pgoff; 
148         INIT_LIST_HEAD(&vma->anon_vma_chain); 
149         error = -EINVAL; 
150         if (file) { 
151                 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) 
152                         goto free_vma; 
153                 if (vm_flags & VM_DENYWRITE) { 
154                         error = deny_write_access(file); 
155                         if (error) 
156                                 goto free_vma; 
157                         correct_wcount = 1; 
158                 } 
159                 vma->vm_file = get_file(file); 
160                 error = file->f_op->mmap(file, vma); 
161                 if (error) 
162                         goto unmap_and_free_vma; 
163                 WARN_ON_ONCE(addr != vma->vm_start); 
164                 addr = vma->vm_start; 
165                 pgoff = vma->vm_pgoff; 
166                 vm_flags = vma->vm_flags; 
167         } else if (vm_flags & VM_SHARED) { 
168                 if (unlikely(vm_flags & (VM_GROWSDOWN | VM_GROWSUP))) 
169                         goto free_vma; 
170                 error = shmem_zero_setup(vma); 
171                 if (error) 
172                         goto free_vma; 
173         } 
174         if (vma_wants_writenotify(vma)) { 
175                 pgprot_t pprot = vma->vm_page_prot; 
176                 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED); 
177                 if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot))) 
178                         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 
179         } 
180         vma_link(mm, vma, prev, rb_link, rb_parent); 
181         file = vma->vm_file; 
182         if (correct_wcount) 
183                 atomic_inc(&inode->i_writecount); 
184 out: 
185         perf_event_mmap(vma); 
186         vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); 
187         if (vm_flags & VM_LOCKED) { 
188                 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || 
189                                         vma == get_gate_vma(current->mm))) 
190                         mm->locked_vm += (len >> PAGE_SHIFT); 
191                 else 
192                         vma->vm_flags &= ~VM_LOCKED; 
193         } 
194         if (file) 
195                 uprobe_mmap(vma); 
196         return addr; 
197 unmap_and_free_vma: 
198         if (correct_wcount) 
199                 atomic_inc(&inode->i_writecount); 
200         vma->vm_file = NULL; 
201         fput(file); 
202         unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); 
203         charged = 0; 
204 free_vma: 
205         kmem_cache_free(vm_area_cachep, vma); 
206 unacct_error: 
207         if (charged) 
208                 vm_unacct_memory(charged); 
209         return error; 
210 } 

 

如果映射标记位中MAP_POPULATE置位且MAP_NONBLOCK置零则还要为新映射的页分配物理页表. 这里先解释一下两个标记位的作用: MAP_POPULATE是2.5.46引入的标记位, 用于为映射的区间预先准备页表, MAP_NONBLOCK也是同时期引入的且必须与前者一起使用, 当它置位时内核仅会为已存在于RAM中的页建立页表项, 不进行预读(即不阻塞在I/O上). 让我们来看下mm_populate()的实现.

 1 static inline void mm_populate(unsigned long addr, unsigned long len) 
 2 { 
 3         (void)__mm_populate(addr, len, 1); 
 4 } 
 5 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) 
 6 { 
 7         struct mm_struct *mm = current->mm; 
 8         unsigned long end, nstart, nend; 
 9         struct vm_area_struct *vma = NULL; 
10         int locked = 0; 
11         long ret = 0; 
12         VM_BUG_ON(start & ~PAGE_MASK); 
13         VM_BUG_ON(len != PAGE_ALIGN(len)); 
14         end = start + len; 
15         for (nstart = start; nstart < end; nstart = nend) { 
16                 if (!locked) { 
17                         locked = 1; 
18                         down_read(&mm->mmap_sem); 
19                         vma = find_vma(mm, nstart); 
20                 } else if (nstart >= vma->vm_end) 
21                         vma = vma->vm_next; 
22                 if (!vma || vma->vm_start >= end) 
23                         break; 
24                 nend = min(end, vma->vm_end); 
25                 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 
26                         continue; 
27                 if (nstart < vma->vm_start) 
28                         nstart = vma->vm_start; 
29                 ret = __mlock_vma_pages_range(vma, nstart, nend, &locked); 
30                 if (ret < 0) { 
31                         if (ignore_errors) { 
32                                 ret = 0; 
33                                 continue; 
34                         } 
35                         ret = __mlock_posix_error_return(ret); 
36                         break; 
37                 } 
38                 nend = nstart + ret * PAGE_SIZE; 
39                 ret = 0; 
40         } 
41         if (locked) 
42                 up_read(&mm->mmap_sem); 
43         return ret; 
44 } 
45 long __mlock_vma_pages_range(struct vm_area_struct *vma, \ 
46         unsigned long start, unsigned long end, int *nonblocking) 
47 { 
48         struct mm_struct *mm = vma->vm_mm; 
49         unsigned long nr_pages = (end - start) / PAGE_SIZE; 
50         int gup_flags; 
51         VM_BUG_ON(start & ~PAGE_MASK); 
52         VM_BUG_ON(end   & ~PAGE_MASK); 
53         VM_BUG_ON(start < vma->vm_start); 
54         VM_BUG_ON(end   > vma->vm_end); 
55         VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); 
56         gup_flags = FOLL_TOUCH | FOLL_MLOCK; 
57         if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) 
58                 gup_flags |= FOLL_WRITE; 
59         if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) 
60                 gup_flags |= FOLL_FORCE; 
61         return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); 
62 } 

 

__mm_populate()有两个作用, 一是实现mlock()系统调用, 另一个是实现mmap()中预分配页表. 其流程是遍历给定地址区间, 循环所有在区间内的vma, 对每个符合条件的vma执行__mlock_vma_pages_range()(defined in mm/mlock.c). 关于物理页表的获取我们放到下一节分析, 在本节我们最后看下munmap()(defined in mm/mmap.c)的实现. 受限于作者自身水平, 仅能从几个接口管中窥豹式分析内核整个虚拟地址管理框架, 关于文件映射及相关操作还需要各位自己分析了.

 1 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) 
 2 { 
 3         //处理回调, 暂时略过 
 4         profile_munmap(addr); 
 5         return vm_munmap(addr, len); 
 6 } 
 7 int vm_munmap(unsigned long start, size_t len) 
 8 { 
 9         int ret; 
10         struct mm_struct *mm = current->mm; 
11         down_write(&mm->mmap_sem); 
12         ret = do_munmap(mm, start, len); 
13         up_write(&mm->mmap_sem); 
14         return ret; 
15 } 
16 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) 
17 { 
18         vma = find_vma(mm, start); 
19         if (!vma) 
20                 return 0; 
21         prev = vma->vm_prev; 
22         //如果vma不包含整个区间则直接返回 
23         end = start + len; 
24         if (vma->vm_start >= end) 
25                 return 0; 
26         //将unmap的空间与其它部分分割开, 根据unmap的区间可能分割为2块或3块 
27         if (start > vma->vm_start) { 
28                 //如果vma要一分三且map_count超过限制, 返回ENOMEM, 如果仅一分二无需该检查 
29                 //因为unmap的vma在之后会销毁, 即函数返回时保证map_count不超过限制即可 
30                 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) 
31                         return -ENOMEM; 
32                 //分割前半vma 
33                 error = __split_vma(mm, vma, start, 0); 
34                 if (error) 
35                         return error; 
36                 prev = vma; 
37         } 
38         last = find_vma(mm, end); 
39         if (last && end > last->vm_start) { 
40                 //分割后半vma 
41                 int error = __split_vma(mm, last, end, 1); 
42                 if (error) 
43                         return error; 
44         } 
45         //找到要被unmap的vma 
46         vma = prev? prev->vm_next: mm->mmap; 
47         //解除vma的LOCK 
48         if (mm->locked_vm) { 
49                 while (tmp && tmp->vm_start < end) { 
50                         if (tmp->vm_flags & VM_LOCKED) { 
51                                 mm->locked_vm -= vma_pages(tmp); 
52                                 //解锁, 暂时略过 
53                                 munlock_vma_pages_all(tmp); 
54                         } 
55                         tmp = tmp->vm_next; 
56                 } 
57         } 
58         //解除vma并去映射物理页 
59         detach_vmas_to_be_unmapped(mm, vma, prev, end); 
60         unmap_region(mm, vma, prev, start, end); 
61         //移除vma及调整mm相关统计数据 
62         remove_vma_list(mm, vma); 
63         return 0; 
64 } 

 

posted @ 2018-02-28 00:34  Five100Miles  阅读(1415)  评论(0编辑  收藏  举报