Creating and Deleting a Process Address Space
In this post we look at how the fork() system call gives a child process a complete new address space and, conversely, how the kernel tears that address space down when the process exits. The focus is on how Linux performs these two operations.
1 Creating a Process Address Space
Recall from the earlier post on process creation ("进程的创建 —— do_fork()函数详解"): when a new process is created, the kernel calls copy_mm(). By setting up the new process's memory descriptor and all of its page tables, this function creates the process's address space:
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
    struct mm_struct * mm, *oldmm;
    int retval;

    tsk->min_flt = tsk->maj_flt = 0;
    tsk->nvcsw = tsk->nivcsw = 0;

    tsk->mm = NULL;
    tsk->active_mm = NULL;

    /*
     * Are we cloning a kernel thread?
     *
     * We need to steal a active VM for that..
     */
    oldmm = current->mm;
    if (!oldmm)
        return 0;

    if (clone_flags & CLONE_VM) {
        atomic_inc(&oldmm->mm_users);
        mm = oldmm;
        goto good_mm;
    }

    retval = -ENOMEM;
    mm = dup_mm(tsk);
    if (!mm)
        goto fail_nomem;

good_mm:
    tsk->mm = mm;
    tsk->active_mm = mm;
    return 0;

fail_nomem:
    return retval;
}
Normally every process has its own address space, but lightweight processes can be created by calling clone() with the CLONE_VM flag set. Such lightweight processes share one and the same address space, i.e. they are allowed to address the same set of pages.
Following the copy-on-write scheme described earlier, an ordinary process inherits its parent's address space: as long as the pages are read-only, the two processes keep sharing them; a page is duplicated only when one of the processes tries to write to it. After a while, through these page faults, the child usually ends up with a fully private address space that differs from the parent's.
Lightweight processes, on the other hand, use the parent's address space directly. Linux implements them very simply by not copying the parent's address space at all. Creating a lightweight process with clone() is therefore much faster than creating an ordinary process, and as long as the parent and the child coordinate their accesses carefully, sharing the pages can be regarded as a benefit.
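To make the sharing concrete, here is a minimal user-space sketch (my own illustration, not taken from the kernel code discussed in this post) that creates a lightweight process through the glibc clone() wrapper. Because CLONE_VM is passed, copy_mm() in the kernel takes the goto good_mm shortcut quoted just below, and the child's write to the global variable is immediately visible to the parent:
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#define STACK_SIZE (64 * 1024)

static int shared_counter;                  /* lives in the one shared address space */

static int child_fn(void *arg)
{
    shared_counter++;                       /* no copy-on-write: the parent sees this */
    return 0;
}

int main(void)
{
    char *stack = malloc(STACK_SIZE);       /* the child still needs its own stack */
    if (!stack)
        return 1;

    /* CLONE_VM shares the mm; with plain fork() the parent would still see 0. */
    int pid = clone(child_fn, stack + STACK_SIZE, CLONE_VM | SIGCHLD, NULL);
    if (pid < 0)
        return 1;

    waitpid(pid, NULL, 0);
    printf("shared_counter = %d\n", shared_counter);    /* prints 1 */
    free(stack);
    return 0;
}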
If the new process was created through the clone() system call with the CLONE_VM flag set in the flags argument, copy_mm() simply hands the parent's (current) address space over to the child (tsk):
    if (clone_flags & CLONE_VM) {
        atomic_inc(&oldmm->mm_users);
        mm = oldmm;
        goto good_mm;
    }

good_mm:
    tsk->mm = mm;
    tsk->active_mm = mm;
    return 0;
If CLONE_VM is not set, copy_mm() must create a new address space (even though no memory is actually allocated within that address space until the process requests an address):
mm = dup_mm(tsk);
dup_mm() allocates a new memory descriptor, copies the contents of current->mm into it, and then adjusts a few of its fields; copy_mm() stores the descriptor's address in the mm field of the new process descriptor tsk:
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
    struct mm_struct *mm, *oldmm = current->mm;
    int err;

    if (!oldmm)
        return NULL;

    mm = allocate_mm();
    if (!mm)
        goto fail_nomem;

    memcpy(mm, oldmm, sizeof(*mm));

    if (!mm_init(mm))
        goto fail_nomem;

    if (init_new_context(tsk, mm))
        goto fail_nocontext;

    err = dup_mmap(mm, oldmm);
    if (err)
        goto free_pt;

    mm->hiwater_rss = get_mm_rss(mm);
    mm->hiwater_vm = mm->total_vm;

    return mm;

free_pt:
    mmput(mm);

fail_nomem:
    return NULL;

fail_nocontext:
    free_mm_flags(mm);
    mm_free_pgd(mm);
    free_mm(mm);
    return NULL;
}
#define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
The function first uses allocate_mm(), i.e. kmem_cache_alloc(mm_cachep, SLAB_KERNEL), to allocate an mm_struct from the slab allocator, and then calls mm_init() to initialize it:
static struct mm_struct * mm_init(struct mm_struct * mm)
{
    unsigned long mm_flags;

    atomic_set(&mm->mm_users, 1);
    atomic_set(&mm->mm_count, 1);
    init_rwsem(&mm->mmap_sem);
    INIT_LIST_HEAD(&mm->mmlist);
    mm->core_waiters = 0;
    mm->nr_ptes = 0;
    set_mm_counter(mm, file_rss, 0);
    set_mm_counter(mm, anon_rss, 0);
    spin_lock_init(&mm->page_table_lock);
    rwlock_init(&mm->ioctx_list_lock);
    mm->ioctx_list = NULL;
    mm->free_area_cache = TASK_UNMAPPED_BASE;
    mm->cached_hole_size = ~0UL;

    mm_flags = get_mm_flags(current->mm);
    if (mm_flags != MMF_DUMP_FILTER_DEFAULT) {
        if (unlikely(set_mm_flags(mm, mm_flags, 0) < 0))
            goto fail_nomem;
    }

    if (likely(!mm_alloc_pgd(mm))) {
        mm->def_flags = 0;
        mmu_notifier_mm_init(mm);
        return mm;
    }

    if (mm_flags != MMF_DUMP_FILTER_DEFAULT)
        free_mm_flags(mm);

fail_nomem:
    free_mm(mm);
    return NULL;
}
Recall that mm_alloc_pgd() calls pgd_alloc() to allocate a brand-new Page Global Directory for the new process (arch/i386/mm/pgtable.c):
static inline int mm_alloc_pgd(struct mm_struct * mm)
{
    mm->pgd = pgd_alloc(mm);
    if (unlikely(!mm->pgd))
        return -ENOMEM;
    return 0;
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
    int i;
    pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

    if (PTRS_PER_PMD == 1 || !pgd)
        return pgd;

    for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
        pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
        if (!pmd)
            goto out_oom;
        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
    }
    return pgd;

out_oom:
    for (i--; i >= 0; i--)
        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
    kmem_cache_free(pgd_cache, pgd);
    return NULL;
}

#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
#define PGDIR_SHIFT 22
#define TASK_SIZE   (PAGE_OFFSET)   /* User space process size: 3GB (default). */
Note that after mm_alloc_pgd() has run, the child owns its pgd (and, on PAE-enabled 32-bit i386, the pmd pages allocated by the loop above), but no ptes yet; filling those in is the job of dup_mmap(), which we will get to shortly.
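As a quick sanity check on the macros above (assuming the default PAGE_OFFSET of 0xC0000000, i.e. the usual 3 GB user / 1 GB kernel split):
/* Two-level paging (no PAE), matching the #defines above:
 *   PGDIR_SIZE        = 1UL << 22           = 4 MB covered by each pgd entry
 *   USER_PTRS_PER_PGD = 0xC0000000 / 4 MB   = 768 user entries
 * so 768 of the 1024 pgd slots belong to user space and the top 256 map the
 * kernel; PTRS_PER_PMD == 1 here, so pgd_alloc() returns right after the
 * kmem_cache_alloc() and never enters the pmd-allocating loop.
 *
 * With PAE enabled the constants change (PGDIR_SHIFT = 30, PGDIR_SIZE = 1 GB,
 * USER_PTRS_PER_PGD = 3) and the loop really does allocate one pmd page for
 * each of the three user pgd entries.
 */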
Next comes the architecture-dependent init_new_context(). On 80x86 processors it checks whether the current process owns a customized Local Descriptor Table; if so, init_new_context() makes a copy of current's LDT and plugs it into tsk's address space:
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    struct mm_struct * old_mm;
    int retval = 0;

    init_MUTEX(&mm->context.sem);
    mm->context.size = 0;
    old_mm = current->mm;
    if (old_mm && old_mm->context.size > 0) {
        down(&old_mm->context.sem);
        retval = copy_ldt(&mm->context, &old_mm->context);
        up(&old_mm->context.sem);
    }
    return retval;
}
The next key step is err = dup_mmap(mm, oldmm):
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
    struct vm_area_struct *mpnt, *tmp, **pprev;
    struct rb_node **rb_link, *rb_parent;
    int retval;
    unsigned long charge;
    struct mempolicy *pol;

    down_write(&oldmm->mmap_sem);
    flush_cache_mm(oldmm);
    /*
     * Not linked in yet - no deadlock potential:
     */
    down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

    mm->locked_vm = 0;
    mm->mmap = NULL;
    mm->mmap_cache = NULL;
    mm->free_area_cache = oldmm->mmap_base;
    mm->cached_hole_size = ~0UL;
    mm->map_count = 0;
    cpus_clear(mm->cpu_vm_mask);
    mm->mm_rb = RB_ROOT;
    rb_link = &mm->mm_rb.rb_node;
    rb_parent = NULL;
    pprev = &mm->mmap;

    for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
        struct file *file;

        if (mpnt->vm_flags & VM_DONTCOPY) {
            long pages = vma_pages(mpnt);
            mm->total_vm -= pages;
            vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                            -pages);
            continue;
        }
        charge = 0;
        if (mpnt->vm_flags & VM_ACCOUNT) {
            unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
            if (security_vm_enough_memory(len))
                goto fail_nomem;
            charge = len;
        }
        tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (!tmp)
            goto fail_nomem;
        *tmp = *mpnt;
        pol = mpol_copy(vma_policy(mpnt));
        retval = PTR_ERR(pol);
        if (IS_ERR(pol))
            goto fail_nomem_policy;
        vma_set_policy(tmp, pol);
        tmp->vm_flags &= ~VM_LOCKED;
        tmp->vm_mm = mm;
        tmp->vm_next = NULL;
        anon_vma_link(tmp);
        file = tmp->vm_file;
        if (file) {
            struct inode *inode = file->f_dentry->d_inode;
            get_file(file);
            if (tmp->vm_flags & VM_DENYWRITE)
                atomic_dec(&inode->i_writecount);

            /* insert tmp into the share list, just after mpnt */
            spin_lock(&file->f_mapping->i_mmap_lock);
            tmp->vm_truncate_count = mpnt->vm_truncate_count;
            flush_dcache_mmap_lock(file->f_mapping);
            vma_prio_tree_add(tmp, mpnt);
            flush_dcache_mmap_unlock(file->f_mapping);
            spin_unlock(&file->f_mapping->i_mmap_lock);
        }

        /*
         * Link in the new vma and copy the page table entries.
         */
        *pprev = tmp;
        pprev = &tmp->vm_next;

        __vma_link_rb(mm, tmp, rb_link, rb_parent);
        rb_link = &tmp->vm_rb.rb_right;
        rb_parent = &tmp->vm_rb;

        mm->map_count++;
        retval = copy_page_range(mm, oldmm, mpnt);

        if (tmp->vm_ops && tmp->vm_ops->open)
            tmp->vm_ops->open(tmp);

        if (retval)
            goto out;
    }
#ifdef arch_dup_mmap
    arch_dup_mmap(mm, oldmm);
#endif
    retval = 0;
out:
    up_write(&mm->mmap_sem);
    flush_tlb_mm(oldmm);
    up_write(&oldmm->mmap_sem);
    return retval;

fail_nomem_policy:
    kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
    retval = -ENOMEM;
    vm_unacct_memory(charge);
    goto out;
}
dup_mmap() duplicates both the parent's memory regions and the parent's page tables.
It scans the parent's list of memory regions, starting from the region pointed to by oldmm->mmap (that is, current->mm->mmap):
for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next)
For every vm_area_struct region descriptor it encounters, it makes a copy and inserts that copy into the child's region list and red-black tree:
    tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
    *tmp = *mpnt;   /* full structure copy: the same trick shows up here again */
    ......
    rb_link = &mm->mm_rb.rb_node;
    rb_parent = NULL;
    __vma_link_rb(mm, tmp, rb_link, rb_parent);
    rb_link = &tmp->vm_rb.rb_right;
    rb_parent = &tmp->vm_rb;
After inserting the new region descriptor, dup_mmap() immediately calls copy_page_range(), if needed, to create the page tables required to map the set of pages contained in the region and to initialize the new page-table entries. In particular, every page frame that corresponds to a private, writable page (VM_SHARED cleared and VM_MAYWRITE set) is marked read-only for both the parent and the child, so that such frames can later be handled by the copy-on-write mechanism:
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                    struct vm_area_struct *vma)
{
    pgd_t *src_pgd, *dst_pgd;
    unsigned long next;
    unsigned long addr = vma->vm_start;
    unsigned long end = vma->vm_end;
    int ret;

    /*
     * Don't copy ptes where a page fault will fill them correctly.
     * Fork becomes much lighter when there are big shared or private
     * readonly mappings. The tradeoff is that copy_page_range is more
     * efficient than faulting.
     */
    if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
        if (!vma->anon_vma)
            return 0;
    }

    if (is_vm_hugetlb_page(vma))
        return copy_hugetlb_page_range(dst_mm, src_mm, vma);

    /*
     * We need to invalidate the secondary MMU mappings only when
     * there could be a permission downgrade on the ptes of the
     * parent mm. And a permission downgrade will only happen if
     * is_cow_mapping() returns true.
     */
    if (is_cow_mapping(vma->vm_flags))
        mmu_notifier_invalidate_range_start(src_mm, addr, end);

    ret = 0;
    dst_pgd = pgd_offset(dst_mm, addr);
    src_pgd = pgd_offset(src_mm, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(src_pgd))
            continue;
        if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
                                    vma, addr, next))) {
            ret = -ENOMEM;
            break;
        }
    } while (dst_pgd++, src_pgd++, addr = next, addr != end);

    if (is_cow_mapping(vma->vm_flags))
        mmu_notifier_invalidate_range_end(src_mm,
                                          vma->vm_start, end);
    return ret;
}
static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
        unsigned long addr, unsigned long end)
{
    pud_t *src_pud, *dst_pud;
    unsigned long next;

    dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
    if (!dst_pud)
        return -ENOMEM;
    src_pud = pud_offset(src_pgd, addr);
    do {
        next = pud_addr_end(addr, end);
        if (pud_none_or_clear_bad(src_pud))
            continue;
        if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
                           vma, addr, next))
            return -ENOMEM;
    } while (dst_pud++, src_pud++, addr = next, addr != end);
    return 0;
}
static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
        unsigned long addr, unsigned long end)
{
    pmd_t *src_pmd, *dst_pmd;
    unsigned long next;

    dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
    if (!dst_pmd)
        return -ENOMEM;
    src_pmd = pmd_offset(src_pud, addr);
    do {
        next = pmd_addr_end(addr, end);
        if (pmd_none_or_clear_bad(src_pmd))
            continue;
        if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
                           vma, addr, next))
            return -ENOMEM;
    } while (dst_pmd++, src_pmd++, addr = next, addr != end);
    return 0;
}
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
        unsigned long addr, unsigned long end)
{
    pte_t *src_pte, *dst_pte;
    spinlock_t *src_ptl, *dst_ptl;
    int progress = 0;
    int rss[2];

again:
    rss[1] = rss[0] = 0;
    dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
    if (!dst_pte)
        return -ENOMEM;
    src_pte = pte_offset_map_nested(src_pmd, addr);
    src_ptl = pte_lockptr(src_mm, src_pmd);
    spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

    do {
        /*
         * We are holding two locks at this point - either of them
         * could generate latencies in another task on another CPU.
         */
        if (progress >= 32) {
            progress = 0;
            if (need_resched() ||
                need_lockbreak(src_ptl) ||
                need_lockbreak(dst_ptl))
                break;
        }
        if (pte_none(*src_pte)) {
            progress++;
            continue;
        }
        copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
        progress += 8;
    } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

    spin_unlock(src_ptl);
    pte_unmap_nested(src_pte - 1);
    add_mm_rss(dst_mm, rss[0], rss[1]);
    pte_unmap_unlock(dst_pte - 1, dst_ptl);
    cond_resched();
    if (addr != end)
        goto again;
    return 0;
}
static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
        unsigned long addr, int *rss)
{
    unsigned long vm_flags = vma->vm_flags;
    pte_t pte = *src_pte;
    struct page *page;

    /* pte contains position in swap or file, so copy. */
    if (unlikely(!pte_present(pte))) {
        if (!pte_file(pte)) {
            swp_entry_t entry = pte_to_swp_entry(pte);

            swap_duplicate(entry);
            /* make sure dst_mm is on swapoff's mmlist. */
            if (unlikely(list_empty(&dst_mm->mmlist))) {
                spin_lock(&mmlist_lock);
                if (list_empty(&dst_mm->mmlist))
                    list_add(&dst_mm->mmlist,
                             &src_mm->mmlist);
                spin_unlock(&mmlist_lock);
            }
            if (is_write_migration_entry(entry) &&
                is_cow_mapping(vm_flags)) {
                /*
                 * COW mappings require pages in both parent
                 * and child to be set to read.
                 */
                make_migration_entry_read(&entry);
                pte = swp_entry_to_pte(entry);
                set_pte_at(src_mm, addr, src_pte, pte);
            }
        }
        goto out_set_pte;
    }

    /*
     * If it's a COW mapping, write protect it both
     * in the parent and the child
     */
    if (is_cow_mapping(vm_flags)) {
        ptep_set_wrprotect(src_mm, addr, src_pte);
        pte = *src_pte;
    }

    /*
     * If it's a shared mapping, mark it clean in
     * the child
     */
    if (vm_flags & VM_SHARED)
        pte = pte_mkclean(pte);
    pte = pte_mkold(pte);

    page = vm_normal_page(vma, addr, pte);
    if (page) {
        get_page(page);
        page_dup_rmap(page);
        rss[!!PageAnon(page)]++;
    }

out_set_pte:
    set_pte_at(dst_mm, addr, dst_pte, pte);
}
2 Deleting a Process Address Space
When a process exits, the kernel calls exit_mm() to release its address space:
    mm_release(tsk, tsk->mm);
    if (!(mm = tsk->mm))    /* kernel thread ? */
        return;
    down_read(&mm->mmap_sem);
mm_release() wakes up any process sleeping on the tsk->vfork_done completion (see the discussion of completions in the synchronization post "信号量"). Typically that wait queue is non-empty only when the exiting process was created by the vfork() system call (see the post "fork与vfork系统调用的区别").
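For reference, a simplified sketch of mm_release() in the spirit of 2.6-era kernels (the real function also clears and wakes the clear_child_tid futex, which is omitted here):
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
    struct completion *vfork_done = tsk->vfork_done;

    /* Get rid of any cached register state. */
    deactivate_mm(tsk, mm);

    /* Notify a parent sleeping in vfork(): complete the vfork_done
     * completion exactly once, then forget about it. */
    if (vfork_done) {
        tsk->vfork_done = NULL;
        complete(vfork_done);
    }
}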
If the process being terminated is not a kernel thread, exit_mm() must release the memory descriptor and all the data structures attached to it. It first checks whether the mm->core_waiters flag is set: if it is, the process is dumping the contents of its memory into a core file. To keep the dump file from being corrupted, the function uses the mm->core_done and mm->core_startup_done completions to serialize the lightweight processes that share the memory descriptor mm.
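A simplified sketch of that serialization, modelled on 2.6-era exit_mm() (field names may differ slightly between kernel versions), picks up right after the down_read() shown above:
    if (mm->core_waiters) {
        /* A core dump is in progress: give up mmap_sem, tell the dumping
         * thread that this thread has arrived, then wait for the dump
         * to finish before touching the shared mm any further. */
        up_read(&mm->mmap_sem);
        down_write(&mm->mmap_sem);
        if (!--mm->core_waiters)
            complete(mm->core_startup_done);
        up_write(&mm->mmap_sem);

        wait_for_completion(&mm->core_done);
        down_read(&mm->mmap_sem);
    }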
Next, the function increments the memory descriptor's main usage counter, clears the mm field of the process descriptor, and puts the processor into lazy TLB mode:
    atomic_inc(&mm->mm_count);
    spin_lock(&tsk->alloc_lock);    /* i.e. task_lock(tsk) */
    tsk->mm = NULL;
    up_read(&mm->mmap_sem);
    enter_lazy_tlb(mm, current);
    spin_unlock(&tsk->alloc_lock);  /* i.e. task_unlock(tsk) */
    mmput(mm);
Finally, mmput() is called to release the Local Descriptor Table, the memory region descriptors, and the page tables. The memory descriptor itself is not released, though, because exit_mm() has already incremented its main usage counter; it will be released by finish_task_switch() when the terminated process is finally evicted from the local CPU.
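To make the two-counter scheme explicit, here is a simplified sketch modelled on 2.6-era mmdrop() and mmput() (details such as AIO and swap-token cleanup are trimmed):
/* mmdrop() frees the mm_struct itself once mm_count reaches zero; the extra
 * reference taken by exit_mm() is dropped later, from finish_task_switch(),
 * when the dead task is switched away for the last time. */
static inline void mmdrop(struct mm_struct *mm)
{
    if (unlikely(atomic_dec_and_test(&mm->mm_count)))
        __mmdrop(mm);               /* mm_free_pgd() + destroy_context() + free_mm() */
}

void mmput(struct mm_struct *mm)
{
    /* Last user of the address space (mm_users hits zero): tear down the
     * memory regions and page tables, but keep the descriptor itself
     * alive as long as mm_count is still elevated. */
    if (atomic_dec_and_test(&mm->mm_users)) {
        exit_mmap(mm);              /* free vmas and page tables */
        if (!list_empty(&mm->mmlist)) {
            spin_lock(&mmlist_lock);
            list_del(&mm->mmlist);
            spin_unlock(&mmlist_lock);
        }
        mmdrop(mm);                 /* drop the mm_count reference */
    }
}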