Understanding the Common Memory Allocation Methods in the Linux Kernel

In Linux, memory allocation is a critical task: managing and allocating memory correctly is essential to system performance and stability. Linux uses a flexible and efficient allocation scheme, with multiple mechanisms to satisfy the memory needs of different programs.

1. malloc

malloc is implemented in the C library, which maintains a cache of its own. When enough cached memory is available, malloc allocates directly from the C library cache; only when the cache runs short does it ask the kernel for more via the brk system call, obtaining a vma from the heap area.
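As a quick user-space illustration (a minimal sketch; the exact threshold at which glibc goes to the kernel instead of its cache is an implementation detail), the program break can be observed with sbrk(0):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
  void *before = sbrk(0);   /* current program break */
  char *p = malloc(64);     /* small request: usually served from the C library cache */
  void *after = sbrk(0);    /* compare to see whether brk had to grow */

  printf("break before: %p, after: %p, block at: %p\n", before, after, (void *)p);
  free(p);                  /* returned to the C library cache, not to the kernel */
  return 0;
}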

malloc implementation flow chart (figure not reproduced here).

1.1 The __do_sys_brk function

After passing through the platform-specific layers, malloc eventually reaches the brk system call, defined with the SYSCALL_DEFINE1 macro, which expands to the __do_sys_brk function:

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long retval;
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *next;
	unsigned long min_brk;
	bool populate;
	bool downgraded = false;
	LIST_HEAD(uf);

	if (down_write_killable(&mm->mmap_sem))  ///acquire the mmap_sem read-write semaphore for writing
		return -EINTR;

	origbrk = mm->brk;    ///mm->brk records the current end (break) of the heap area

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit the in case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/*
	 * Always allow shrinking brk.
	 * __do_munmap() may downgrade mmap_sem to read.
	 */
	if (brk <= mm->brk) {  ///the request shrinks the heap, i.e. releases space
		int ret;

		/*
		 * mm->brk must to be protected by write mmap_sem so update it
		 * before downgrading mmap_sem. When __do_munmap() fails,
		 * mm->brk will be restored from origbrk.
		 */
		mm->brk = brk;
		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
		if (ret < 0) {
			mm->brk = origbrk;
			goto out;
		} else if (ret == 1) {
			downgraded = true;
		}
		goto success;
	}

	/* Check against existing mmap mappings. */
	next = find_vma(mm, oldbrk);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))   ///overlaps the next mapping: give up
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)  ///no overlap: allocate a new vma
		goto out;
	mm->brk = brk;   ///update the brk address

success:
	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
	if (downgraded)
		up_read(&mm->mmap_sem);
	else
		up_write(&mm->mmap_sem);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)  ///if mlockall() was called earlier, mm_populate() allocates physical memory immediately
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	retval = origbrk;
	up_write(&mm->mmap_sem);
	return retval;
}

To summarize, __do_sys_brk() does the following:

  1. Takes mm->mmap_sem for writing and validates the requested break against min_brk and the RLIMIT_DATA resource limit.
  2. If the request shrinks the heap, releases the freed range with __do_munmap().
  3. If it grows the heap, checks for overlap with the next VMA and, when there is none, allocates (or merges) an anonymous VMA via do_brk_flags().
  4. If VM_LOCKED is set (e.g. after mlockall()), calls mm_populate() to fault the new range in immediately.

1.2 The do_brk_flags function

Its implementation:

/*
 *  this is really a simplified "do_mmap".  it only handles
 *  anonymous maps.  eventually we may be able to do some
 *  brk-specific accounting here.
 */
static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;
	unsigned long mapped_addr;

	/* Until we need other flags, refuse anything except VM_EXEC. */
	if ((flags & (~VM_EXEC)) != 0)
		return -EINVAL;
	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;  ///default attributes: readable and writable

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); ///returns the start address of an unused, unmapped linear address range
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	error = mlock_future_check(mm, mm->def_flags, len);
	if (error)
		return error;

	/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
	if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf)) ///clear old mappings and find the red-black tree insertion point
		return -ENOMEM;

	/* Check against address space limits *after* clearing old maps... */
	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */  ///check whether addr can be merged into a neighboring vma; if not, a new vma must be created
	vma = vma_merge(mm, prev, addr, addr + len, flags,
			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * create a vma struct for an anonymous mapping
	 */
	vma = vm_area_alloc(mm);
	if (!vma) {
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	vma_set_anonymous(vma);
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);  ///add the new vma to the mmap list and the red-black tree
out:
	perf_event_mmap(vma);
	mm->total_vm += len >> PAGE_SHIFT;
	mm->data_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED)
		mm->locked_vm += (len >> PAGE_SHIFT);
	vma->vm_flags |= VM_SOFTDIRTY;
	return 0;
}

The mm_populate() function

It calls, in turn:

mm_populate()
	->__mm_populate()
	->populate_vma_page_range()
	->__get_user_pages()

When the VM_LOCKED flag is set, physical pages are requested right away and mapped into the vma;

otherwise nothing happens at this point: only when the vma is later accessed and a page fault is triggered does the kernel allocate physical pages and establish the mapping.
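A user-space sketch of the difference (sizes are arbitrary; mlock()/mlockall() are the standard ways to request immediate population):

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
  size_t len = 1 << 20;

  char *lazy = malloc(len);     /* no physical pages yet */
  lazy[0] = 1;                  /* first touch: a page fault allocates one page */

  char *locked = malloc(len);
  mlock(locked, len);           /* like VM_LOCKED: the range is populated immediately */
  memset(locked, 1, len);       /* should take no further major faults */

  munlock(locked, len);
  free(locked);
  free(lazy);
  return 0;
}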

1.3 The __get_user_pages() function

static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr(start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {  ///handle each page in turn
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);  ///check whether the vma can be extended
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;

			if (is_vm_hugetlb_page(vma)) {  ///hugepage support
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, locked);
				if (locked && *locked == 0) {
					/*
					 * We've got a VM_FAULT_RETRY
					 * and we've lost mmap_lock.
					 * We must stop here.
					 */
					BUG_ON(gup_flags & FOLL_NOWAIT);
					BUG_ON(ret != 0);
					goto out;
				}
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {  ///if the current task has received SIGKILL, bail out immediately
			ret = -EINTR;
			goto out;
		}
		cond_resched();  //reschedule if needed; used throughout the kernel to keep latency low

		page = follow_page_mask(vma, start, foll_flags, &ctx);  ///check whether this virtual page already has physical memory behind it; returns the page of an already-mapped address
		if (!page) {
			ret = faultin_page(vma, start, &foll_flags, locked); ///not mapped yet: fault the page in to establish the virtual-to-physical mapping
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);  ///physical page in place: flush the caches
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

follow_page_mask() returns the page of an already-mapped address; it eventually calls follow_page_pte(), implemented as follows:

1.4 The follow_page_pte function

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);
retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  ///get the pte and take the page-table lock
	pte = *ptep;
	if (!pte_present(pte)) {  ///the page is not present in memory: handle as follows
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);   ///wait for page migration to finish, then retry
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte); ///return the physical page for this pte (normal pages only; special pages are not part of memory management)
	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) { ///handle device-mapped files
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {   ///vm_normal_page() returned no valid page
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {   ///the global zero page: not an error
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case).  Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) { ///FOLL_TOUCH: mark the page as accessed
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

a. If memory from malloc() is read directly, the kernel takes a page fault and do_anonymous_page() maps the zero page; the PTE is read-only.

b. If memory from malloc() is read first and then written, the kernel takes two page faults: the first maps the zero page, the second triggers copy-on-write.

c. If memory from malloc() is written directly, the kernel takes the anonymous page-fault path, and alloc_zeroed_user_highpage_movable() allocates a fresh page; this PTE is writable.
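A minimal sketch of the three cases (assuming each allocation lands on fresh, untouched heap pages; the fault handling itself happens inside the kernel, as described above):

#include <stdlib.h>

int main(void)
{
  char *a = malloc(4096);
  char r = a[0];      /* case a: read fault, zero page mapped with a read-only PTE */
  (void)r;
  a[0] = 1;           /* case b: write after read, the second fault does copy-on-write */

  char *b = malloc(4096);
  b[0] = 1;           /* case c: direct write, a fresh zeroed page with a writable PTE */

  free(b);
  free(a);
  return 0;
}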

2. kmalloc

Generally, kernel requests for blocks smaller than a page are served through kmalloc, the interface provided by the slab allocator (although it can allocate anywhere from 32 to 131072 bytes). From the perspective of kernel memory allocation, kmalloc can be seen as an effective complement to get_free_page(s), with a more flexible allocation granularity.

kmalloc() is analogous to the familiar malloc(): the former allocates in kernel space, the latter in user space.

kmalloc() allocates a physically contiguous block of memory and, like malloc(), does not clear the previous contents. With enough free memory it is fast. Its prototype is:

static inline void *kmalloc(size_t size, gfp_t flags);  /* returns a virtual address */

size: the amount of memory to allocate. Because of how Linux manages memory, raw allocations can only be made in page-size units (typically 4KB), so returning a whole page when only a few bytes are needed would be extremely wasteful. Unlike malloc, kmalloc therefore works from a series of pre-created pools of fixed block sizes (32B, 64B, 128B, ..., 128KB); a request is served from the smallest pool whose blocks are at least as large as the requested size. Thus kmalloc allocations have a minimum of 32 bytes and a maximum of 128KB. Beyond 128KB, another allocation function must be used, for example vmalloc().

flag: controls the function's behavior. The most common value is GFP_KERNEL: if there is not enough memory to satisfy the request, the process is put to sleep while the system swaps buffered data out to disk, and it is woken to complete the allocation once enough memory is available.

Because a GFP_KERNEL request can sleep waiting for free pages, it can block; it therefore must not be used in interrupt context or while holding a spinlock. In non-process contexts such as interrupt handlers, tasklets, and kernel timers, blocking is not allowed, and a driver should request memory with the GFP_ATOMIC flag instead: if no free page exists, the allocation does not wait and simply fails.
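A driver-style sketch of both points (function names are illustrative): the bucket rounding can be observed with ksize(), and the two flags differ in whether the allocation may sleep:

#include <linux/printk.h>
#include <linux/slab.h>

static void kmalloc_demo(void)            /* process context: may sleep */
{
  void *p = kmalloc(33, GFP_KERNEL);      /* smallest bucket >= 33 bytes, e.g. 64B */
  if (!p)
    return;
  pr_info("asked for 33 bytes, bucket is %zu\n", ksize(p));
  kfree(p);
}

static void kmalloc_irq_demo(void)        /* interrupt context: must not sleep */
{
  void *p = kmalloc(64, GFP_ATOMIC);      /* fails immediately instead of waiting */
  if (!p)
    return;
  /* ... use p ... */
  kfree(p);
}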

The kmalloc function

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);  ///find which slab cache to use

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(    ///allocate from the slab
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}

The kmem_cache_alloc_trace allocation function

void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	void *ret;

	ret = slab_alloc(cachep, flags, size, _RET_IP_);  ///allocate from the slab cache

	ret = kasan_kmalloc(cachep, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret,
		      size, cachep->size, flags);
	return ret;
}

As this shows, kmalloc() is implemented on top of the slab allocator, so the memory it returns is always physically contiguous.

3. vmalloc

vmalloc() is generally used to allocate memory for large, sequential buffers that exist only in software (with no corresponding hardware meaning). When no sufficiently large physically contiguous region is available, it can provide memory that is virtually contiguous but physically scattered. Because it has to build new page tables, its overhead is far higher than kmalloc and the __get_free_pages() family discussed later. Also, vmalloc() cannot be used in atomic context, because its implementation uses kmalloc() with the GFP_KERNEL flag. The prototypes are:

void *vmalloc(unsigned long size);
void vfree(const void *addr);

One example of a vmalloc user is the create_module() system call, which uses vmalloc() to obtain the memory needed by the module being created.

Memory allocation is a demanding task: the return value should always be checked. In driver code the allocated buffer is then typically filled with copy_from_user(). Below is an example using vmalloc:

static int xxx(...)
{
  struct kvm_cpuid_entry *cpuid_entries;
  int i, r = -ENOMEM;
  ...
  cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
  if (!cpuid_entries)
    goto out;
  if (copy_from_user(cpuid_entries, entries,
                     cpuid->nent * sizeof(struct kvm_cpuid_entry)))
    goto out_free;
  for (i = 0; i < cpuid->nent; i++) {
    vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
    ...
    vcpu->arch.cpuid_entries[i].index = 0;
  }
  ...
out_free:
  vfree(cpuid_entries);
out:
  return r;
}

The core function is __vmalloc_node_range(), whose real work is done in __vmalloc_area_node():

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, unsigned int page_shift,
				 int node)
{
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = get_vm_area_size(area);   ///size of the vm_struct area, i.e. how many pages it spans
	unsigned long array_size;
	unsigned int nr_small_pages = size >> PAGE_SHIFT;
	unsigned int page_order;
	struct page **pages;
	unsigned int i;

	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
	gfp_mask |= __GFP_NOWARN;
	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
		gfp_mask |= __GFP_HIGHMEM;

	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp, node,
					area->caller);
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}

	if (!pages) {
		free_vm_area(area);
		warn_alloc(gfp_mask, NULL,
			   "vmalloc size %lu allocation failure: "
			   "page array size %lu allocation failed",
			   nr_small_pages * PAGE_SIZE, array_size);
		return NULL;
	}

	area->pages = pages;  ///save the pointers to the struct page of each allocated page
	area->nr_pages = nr_small_pages;
	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);

	page_order = vm_area_page_order(area);

	/*
	 * Careful, we allocate and map page_order pages, but tracking is done
	 * per PAGE_SIZE page so as to keep the vm_struct APIs independent of
	 * the physical/mapped size.
	 */
	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
		struct page *page;
		int p;

		/* Compound pages required for remap_vmalloc_page */
		page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order); ///allocate physical pages
		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vfree() */
			area->nr_pages = i;
			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
			warn_alloc(gfp_mask, NULL,
				   "vmalloc size %lu allocation failure: "
				   "page order %u allocation failed",
				   area->nr_pages * PAGE_SIZE, page_order);
			goto fail;
		}

		for (p = 0; p < (1U << page_order); p++)
			area->pages[i + p] = page + p;

		if (gfpflags_allow_blocking(gfp_mask))
			cond_resched();
	}
	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);

	if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) { ///map the physical pages into the vmalloc area
		warn_alloc(gfp_mask, NULL,
			   "vmalloc size %lu allocation failure: "
			   "failed to map pages",
			   area->nr_pages * PAGE_SIZE);
		goto fail;
	}

	return area->addr;

fail:
	__vfree(area->addr);
	return NULL;
}

As this shows, vmalloc allocates a vma from the vmalloc region on demand, allocates physical pages, and maps them into it. Because it hands out whole physical pages (at least one 4K page), vmalloc suits larger allocations, and the backing physical memory need not be contiguous.

4. The mmap function

mmap is commonly used by user programs to allocate memory, read and write large files, link against dynamic libraries, and share memory among processes.

Implementation flow chart (figure not reproduced here).

Based on whether the mapping is file-backed and whether the region is shared, mmap mappings fall into 4 classes (illustrated by the user-space sketch after this list):

  1. Private anonymous mapping

With fd=-1 and flags=MAP_ANONYMOUS|MAP_PRIVATE, the resulting mapping is a private anonymous mapping.

Its main use is large allocations in glibc: when the request exceeds MMAP_THRESHOLD (128KB), glibc uses mmap instead of brk by default.

  2. Shared anonymous mapping

fd=-1 and flags=MAP_ANONYMOUS|MAP_SHARED;

commonly used for parent-child process communication through a shared region of memory.

do_mmap_pgoff()->mmap_region() ends up calling shmem_zero_setup(), which opens the /dev/zero device file;

alternatively, opening /dev/zero directly and creating the mmap with that file descriptor also ends up in the shmem module, creating a shared anonymous mapping.

  3. Private file mapping

flags=MAP_PRIVATE;

The typical scenario is loading dynamic shared libraries.

  4. Shared file mapping

flags=MAP_SHARED; there are two use cases:

(1) Reading and writing files: the kernel's writeback mechanism synchronizes the in-memory data to disk.

(2) Inter-process communication: several independent processes open the same file and can observe each other's updates, which implements multi-process communication.
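A compact user-space sketch of the four variants (error handling omitted; "data.bin" is a placeholder file name):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  size_t len = 4096;
  int fd = open("data.bin", O_RDWR);

  /* 1. private anonymous: what glibc uses for large malloc() requests */
  void *p1 = mmap(NULL, len, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  /* 2. shared anonymous: survives fork(), parent/child communication */
  void *p2 = mmap(NULL, len, PROT_READ | PROT_WRITE,
                  MAP_SHARED | MAP_ANONYMOUS, -1, 0);

  /* 3. private file-backed: how dynamic libraries are loaded */
  void *p3 = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);

  /* 4. shared file-backed: file I/O and multi-process communication */
  void *p4 = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

  munmap(p4, len); munmap(p3, len); munmap(p2, len); munmap(p1, len);
  close(fd);
  return 0;
}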

The core function is as follows:

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev, *merge;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;

	/* Check against address space limit. */
	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
		unsigned long nr_pages;

		/*
		 * MAP_FIXED may remove pages of mappings that intersects with
		 * requested mapping. Account for the pages it would unmap.
		 */
		nr_pages = count_vma_pages_range(mm, addr, addr + len);

		if (!may_expand_vm(mm, vm_flags,
					(len >> PAGE_SHIFT) - nr_pages))
			return -ENOMEM;
	}

	/* Clear old maps, set up prev, rb_link, rb_parent, and uf */
	if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
		return -ENOMEM;
	/*
	 * Private writable mapping: check memory availability
	 */
	if (accountable_mapping(file, vm_flags)) {
		charged = len >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return -ENOMEM;
		vm_flags |= VM_ACCOUNT;
	}

	/*
	 * Can we just expand an old mapping?
	 */
	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,   ///try to merge into an existing vma
			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
	if (vma)
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = vm_area_alloc(mm);   ///allocate a new vma
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {   ///file-backed mapping
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
		}
		if (vm_flags & VM_SHARED) {
			error = mapping_map_writable(file->f_mapping);
			if (error)
				goto allow_write_and_free_vma;
		}

		/* ->mmap() can change vma->vm_file, but must guarantee that
		 * vma_link() below can deny write-access if VM_DENYWRITE is set
		 * and map writably if VM_SHARED is set. This usually means the
		 * new file must not have been exposed to user-space, yet.
		 */
		vma->vm_file = get_file(file);
		error = call_mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;

		/* Can addr have changed??
		 *
		 * Answer: Yes, several device drivers can do it in their
		 *         f_op->mmap method. -DaveM
		 * Bug: If addr is changed, prev, rb_link, rb_parent should
		 *      be updated for vma_link()
		 */
		WARN_ON_ONCE(addr != vma->vm_start);

		addr = vma->vm_start;

		/* If vm_flags changed after call_mmap(), we should try merge vma again
		 * as we may succeed this time.
		 */
		if (unlikely(vm_flags != vma->vm_flags && prev)) {
			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
			if (merge) {
				/* ->mmap() can change vma->vm_file and fput the original file. So
				 * fput the vma->vm_file here or we would add an extra fput for file
				 * and cause general protection fault ultimately.
				 */
				fput(vma->vm_file);
				vm_area_free(vma);
				vma = merge;
				/* Update vm_flags to pick up the change. */
				vm_flags = vma->vm_flags;
				goto unmap_writable;
			}
		}

		vm_flags = vma->vm_flags;
	} else if (vm_flags & VM_SHARED) {   ///shared mapping
		error = shmem_zero_setup(vma);   ///shared anonymous mapping
		if (error)
			goto free_vma;
	} else {
		vma_set_anonymous(vma);  ///private anonymous mapping
	}

	/* Allow architectures to sanity-check the vm_flags */
	if (!arch_validate_flags(vma->vm_flags)) {
		error = -EINVAL;
		if (file)
			goto unmap_and_free_vma;
		else
			goto free_vma;
	}

	vma_link(mm, vma, prev, rb_link, rb_parent);   ///link the vma into the mm
	/* Once vma denies write, undo our temporary denial count */
	if (file) {
unmap_writable:
		if (vm_flags & VM_SHARED)
			mapping_unmap_writable(file->f_mapping);
		if (vm_flags & VM_DENYWRITE)
			allow_write_access(file);
	}
	file = vma->vm_file;
out:
	perf_event_mmap(vma);

	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
					is_vm_hugetlb_page(vma) ||
					vma == get_gate_vma(current->mm))
			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
		else
			mm->locked_vm += (len >> PAGE_SHIFT);
	}

	if (file)
		uprobe_mmap(vma);

	/*
	 * New (or expanded) vma always get soft dirty status.
	 * Otherwise user-space soft-dirty page tracker won't
	 * be able to distinguish situation when vma area unmapped,
	 * then new mapped in-place (which must be aimed as
	 * a completely new data area).
	 */
	vma->vm_flags |= VM_SOFTDIRTY;

	vma_set_page_prot(vma);

	return addr;

unmap_and_free_vma:
	fput(vma->vm_file);
	vma->vm_file = NULL;

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
	if (vm_flags & VM_SHARED)
		mapping_unmap_writable(file->f_mapping);
allow_write_and_free_vma:
	if (vm_flags & VM_DENYWRITE)
		allow_write_access(file);
free_vma:
	vm_area_free(vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}

Unless told otherwise, the malloc and mmap calls above only set up virtual address space by default; they do not yet establish the mapping from virtual addresses to physical memory.

When an unmapped virtual address is accessed, a page fault is raised. The Linux kernel handles the fault, and the page-fault handler allocates a physical page and establishes the virtual-to-physical mapping.
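User space can opt out of this laziness: MAP_POPULATE asks the kernel to pre-fault the mapping at mmap time (a sketch):

#include <sys/mman.h>

int main(void)
{
  size_t len = 1 << 20;
  /* pages are faulted in up front, so later accesses take no page faults */
  void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
  munmap(p, len);
  return 0;
}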

Two supplementary questions:

  1. Why doesn't mmap fail when the same address is requested repeatedly?

find_vma_links() walks all of the process's VMAs; when the region to be mapped overlaps an existing vma, the old mapping is torn down first and the region is remapped, so the repeated request does not fail.
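A sketch of that behavior: with MAP_FIXED the overlap is explicit, and the second call silently replaces the first mapping instead of failing:

#include <sys/mman.h>

int main(void)
{
  size_t len = 4096;
  void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  /* same address again: the old region is unmapped and remapped */
  void *again = mmap(addr, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

  munmap(again, len);
  return 0;
}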

  2. Why does mmap stutter when working through several files, for example when playing video?

mmap only creates the vma; it does not actually allocate physical pages or read the file contents. When the player really reads the file, page faults fire repeatedly and the data has to be pulled from disk into the page cache each time, so effective disk read performance is poor.

madvise(addr, len, MADV_SEQUENTIAL) plus madvise(addr, len, MADV_WILLNEED) request sequential-read treatment and readahead for the file contents;

but the kernel's default readahead already provides this, and madvise is not a good fit for streaming media; it only suits random-read scenarios.

An effective way to improve streaming I/O performance is to enlarge the kernel's default readahead window; the default is 128K, and it can be changed with the "blockdev --setra" command.

5. Page allocation functions

By the number of pages returned, these functions divide into ones that return a single page and ones that return multiple pages.

5.1 The alloc_page() and alloc_pages() functions

These are defined in the header include/linux/gfp.h. They can allocate memory for kernel space or on behalf of user space, and they return the descriptor (struct page) of the first page allocated rather than its address. Prototypes:

#define alloc_page(gfp_mask)  alloc_pages(gfp_mask, 0)
#define alloc_pages(gfp_mask, order) alloc_pages_node(numa_node_id(), gfp_mask, order) //allocate 2^order contiguous pages
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
  if (unlikely(order >= MAX_ORDER))
    return NULL;
  if (nid < 0)
    nid = numa_node_id();
  return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
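A brief usage sketch (order 2, i.e. four pages, is arbitrary):

#include <linux/gfp.h>
#include <linux/mm.h>

static void alloc_pages_demo(void)
{
  struct page *page;
  void *addr;

  page = alloc_pages(GFP_KERNEL, 2);   /* 2^2 = 4 contiguous pages */
  if (!page)
    return;

  addr = page_address(page);           /* kernel virtual address of the first page */
  /* ... use the four pages at addr ... */

  __free_pages(page, 2);               /* free with the same order */
}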

5.2 The __get_free_pages() family

This family underlies the kmalloc implementation and returns the virtual address of one or more pages. It comprises get_zeroed_page(), __get_free_page(), and __get_free_pages(). The allocation flags take exactly the same values and meanings as for kmalloc(); GFP_KERNEL and GFP_ATOMIC are the most common.

/* Allocate multiple pages and return the virtual address of the allocation.
The number of pages is 2^order, and the pages are not zeroed. The maximum
allowed order is 10 (1024 pages) or 11 (2048 pages), depending on the
hardware platform. */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
  struct page *page;
  page = alloc_pages(gfp_mask, order);
  if(!page)
    return 0;
  return (unsigned long)page_address(page);
}

#define __get_free_page(gfp_mask)  __get_free_pages(gfp_mask, 0)

/* returns a pointer to a new page and zeroes the page */
unsigned long get_zeroed_page(unsigned int flags);

Memory requested with the __get_free_pages() family of functions/macros should be released with free_page(addr) or free_pages(addr, order):

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)

void free_pages(unsigned long addr, unsigned int order)
{
  if(addr != 0){
    VM_BUG_ON(!virt_addr_valid((void*)addr));
    __free_pages(virt_to_page((void *)addr), order);
  }
}

void __free_pages(struct page *page, unsigned int order)
{
  if(put_page_testzero(page)){
    if(order == 0)
      free_hot_page(page);
    else
      __free_pages_ok(page, order);
  }
}

free_pages() completes the release by calling __free_pages().

6. The slab cache

When driver code repeatedly allocates and frees memory blocks of the same size (for example inode or task_struct objects), a memory-pool technique is recommended: an object occupies the same block, or the same class of blocks, across consecutive uses, and its basic structure is preserved between them, which improves efficiency considerably. In Linux this role is played by the slab allocator, and the memory areas such a pool manages are called lookaside caches.

The slab-related declarations are in linux/slab.h. Before using a lookaside cache, a kmem_cache structure must be created.

6.1 Creating a slab cache

This function creates a slab cache (a lookaside cache): a cache that can hold an arbitrary number of objects, all of the same size. Its prototype is:

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                   size_t align, unsigned long flags,
                   void (*ctor)(void *, struct kmem_cache *, unsigned long),
                   void (*dtor)(void *, struct kmem_cache *, unsigned long));

where: name is the name of the cache (it shows up in /proc/slabinfo); size is the size of each object; align is the offset alignment of the first object within a slab (usually 0); flags is an optional mask controlling the cache's behavior (for example SLAB_HWCACHE_ALIGN or SLAB_PANIC); ctor and dtor are optional constructor and destructor callbacks run when objects are created and destroyed.

6.2 Allocating from a slab cache

Once the lookaside cache exists, kmem_cache_alloc() allocates one object from it. Its prototype is:

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);

cachep points to the lookaside cache to allocate from; flags has the same meaning as the argument passed to kmalloc(), usually GFP_KERNEL.

6.3 Freeing a slab object

This function releases an object back to the cache:

void kmem_cache_free(struct kmem_cache *cachep, void *objp);

6.4 銷燬 slab 緩存

The counterpart of kmem_cache_create is the destroy function, which releases a lookaside cache:

int kmem_cache_destroy(struct kmem_cache *cachep);

It can only release the lookaside cache once every allocated object has been returned.

6.5 A slab cache example

As an example, consider a lookaside cache for thread structures (struct thread_info). Linux creates and destroys threads frequently, and serving them with __get_free_page() would waste a lot of memory and be inefficient, so the kernel creates a lookaside cache named thread_info during initialization:

/* create the slab cache */
static struct kmem_cache *thread_info_cache;
thread_info_cache = kmem_cache_create("thread_info", sizeof(struct thread_info),
                    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

/* allocate from the slab cache */
struct thread_info *ti;
ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);

/* use the object */
...
/* free the object, then destroy the cache */
kmem_cache_free(thread_info_cache, ti);
kmem_cache_destroy(thread_info_cache);

7. Memory pools

The Linux kernel also includes support for memory pools, another classic lookaside-cache technique for allocating many small objects.

7.1 Creating a memory pool

mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
              mempool_free_t *free_fn, void *pool_data);

mempool_create() creates a memory pool. The min_nr parameter is the number of objects to preallocate, and alloc_fn and free_fn point to the standard object allocation and free functions that the mempool layer expects, with these prototypes:

typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data); 
typedef void (mempool_free_t)(void *element, void *pool_data);

pool_data is a pointer handed through to the allocation and free functions, and gfp_mask is the allocation flag. The allocation function will sleep only if the __GFP_WAIT flag is specified.

7.2 Allocating and freeing objects

Objects are allocated from and returned to the pool with the following functions:

void *mempool_alloc(mempool_t *pool, int gfp_mask); 
void mempool_free(void *element, mempool_t *pool);

mempool_alloc() allocates an object; when the underlying allocator cannot provide memory, the pool's preallocated reserve is used.
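A sketch tying the pieces together with a slab-backed pool: the kernel's stock helpers mempool_alloc_slab/mempool_free_slab serve as alloc_fn/free_fn, the kmem_cache_create call follows the prototype shown earlier, and the object type and reserve of 16 are illustrative:

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_obj { int data; };                 /* illustrative object type */

static struct kmem_cache *my_cache;
static mempool_t *my_pool;

static int my_pool_init(void)
{
  my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
                               0, 0, NULL, NULL);
  if (!my_cache)
    return -ENOMEM;

  /* keep at least 16 preallocated objects in reserve, backed by the slab cache */
  my_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab, my_cache);
  if (!my_pool) {
    kmem_cache_destroy(my_cache);
    return -ENOMEM;
  }
  return 0;
}

static void my_pool_use(void)
{
  struct my_obj *obj = mempool_alloc(my_pool, GFP_KERNEL);
  if (!obj)
    return;
  /* ... use obj; the reserve kicks in under memory pressure ... */
  mempool_free(obj, my_pool);
}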

7.3 銷燬內存池

void mempool_destroy(mempool_t *pool);

A memory pool created with mempool_create() must be reclaimed with mempool_destroy().

Source: https://mp.weixin.qq.com/s/QDEjj0BLmqSn7X5B-sp2iA