Linux memory management includes a mechanism known as the OOM killer (Out-Of-Memory killer). It monitors the memory usage of processes and, when the system's memory is exhausted, selects processes to kill according to a heuristic. This article analyzes that out-of-memory protection mechanism, i.e. the OOM killer.
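Before diving into the kernel code, the mechanism is easy to observe from user space. The following minimal demo (illustrative only; run it inside a throwaway VM or a memory-limited cgroup, not on a production machine) keeps allocating and touching pages until either malloc() fails or the OOM killer sends it SIGKILL, which then shows up in dmesg:

/* oom_demo.c - allocate and touch memory until the OOM killer steps in. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const size_t chunk = 64UL << 20;    /* 64 MiB per iteration */
    size_t total = 0;

    for (;;) {
        char *p = malloc(chunk);
        if (!p) {
            /* with overcommit disabled, malloc() may fail first */
            fprintf(stderr, "malloc failed after %zu MiB\n", total >> 20);
            return 1;
        }
        memset(p, 0xa5, chunk);         /* touch pages so they are actually backed */
        total += chunk;
        fprintf(stderr, "allocated %zu MiB\n", total >> 20);
    }
}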
Returning to __alloc_pages_nodemask(), which we met in the buddy allocator analysis: the __alloc_pages_slowpath() it calls was not examined in depth there, and the out-of-memory protection mechanism lives in that function.
Let's first look at the implementation of __alloc_pages_slowpath():
【file: mm/page_alloc.c】

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
    struct zonelist *zonelist, enum zone_type high_zoneidx,
    nodemask_t *nodemask, struct zone *preferred_zone,
    int migratetype)
{
    const gfp_t wait = gfp_mask & __GFP_WAIT;
    struct page *page = NULL;
    int alloc_flags;
    unsigned long pages_reclaimed = 0;
    unsigned long did_some_progress;
    bool sync_migration = false;
    bool deferred_compaction = false;
    bool contended_compaction = false;

    /*
     * In the slowpath, we sanity check order to avoid ever trying to
     * reclaim >= MAX_ORDER areas which will never succeed. Callers may
     * be using allocators in order of preference for an area that is
     * too large.
     */
    if (order >= MAX_ORDER) {
        WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
        return NULL;
    }

    /*
     * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
     * __GFP_NOWARN set) should not cause reclaim since the subsystem
     * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
     * using a larger set of nodes after it has established that the
     * allowed per node queues are empty and that nodes are
     * over allocated.
     */
    if (IS_ENABLED(CONFIG_NUMA) &&
        (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
        goto nopage;

restart:
    if (!(gfp_mask & __GFP_NO_KSWAPD))
        wake_all_kswapds(order, zonelist, high_zoneidx, preferred_zone);

    /*
     * OK, we're below the kswapd watermark and have kicked background
     * reclaim. Now things get more complex, so set up alloc_flags according
     * to how we want to proceed.
     */
    alloc_flags = gfp_to_alloc_flags(gfp_mask);

    /*
     * Find the true preferred zone if the allocation is unconstrained by
     * cpusets.
     */
    if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
        first_zones_zonelist(zonelist, high_zoneidx, NULL,
                    &preferred_zone);

rebalance:
    /* This is the last chance, in general, before the goto nopage. */
    page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
            high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
            preferred_zone, migratetype);
    if (page)
        goto got_pg;

    /* Allocate without watermarks if the context allows */
    if (alloc_flags & ALLOC_NO_WATERMARKS) {
        /*
         * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
         * the allocation is high priority and these type of
         * allocations are system rather than user orientated
         */
        zonelist = node_zonelist(numa_node_id(), gfp_mask);

        page = __alloc_pages_high_priority(gfp_mask, order,
                zonelist, high_zoneidx, nodemask,
                preferred_zone, migratetype);
        if (page) {
            goto got_pg;
        }
    }

    /* Atomic allocations - we can't balance anything */
    if (!wait) {
        /*
         * All existing users of the deprecated __GFP_NOFAIL are
         * blockable, so warn of any new users that actually allow this
         * type of allocation to fail.
         */
        WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
        goto nopage;
    }

    /* Avoid recursion of direct reclaim */
    if (current->flags & PF_MEMALLOC)
        goto nopage;

    /* Avoid allocations with no watermarks from looping endlessly */
    if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
        goto nopage;

    /*
     * Try direct compaction. The first pass is asynchronous. Subsequent
     * attempts after direct reclaim are synchronous
     */
    page = __alloc_pages_direct_compact(gfp_mask, order,
                    zonelist, high_zoneidx,
                    nodemask,
                    alloc_flags, preferred_zone,
                    migratetype, sync_migration,
                    &contended_compaction,
                    &deferred_compaction,
                    &did_some_progress);
    if (page)
        goto got_pg;
    sync_migration = true;

    /*
     * If compaction is deferred for high-order allocations, it is because
     * sync compaction recently failed. If this is the case and the caller
     * requested a movable allocation that does not heavily disrupt the
     * system then fail the allocation instead of entering direct reclaim.
     */
    if ((deferred_compaction || contended_compaction) &&
        (gfp_mask & __GFP_NO_KSWAPD))
        goto nopage;

    /* Try direct reclaim and then allocating */
    page = __alloc_pages_direct_reclaim(gfp_mask, order,
                    zonelist, high_zoneidx,
                    nodemask,
                    alloc_flags, preferred_zone,
                    migratetype, &did_some_progress);
    if (page)
        goto got_pg;

    /*
     * If we failed to make any progress reclaiming, then we are
     * running out of options and have to consider going OOM
     */
    if (!did_some_progress) {
        if (oom_gfp_allowed(gfp_mask)) {
            if (oom_killer_disabled)
                goto nopage;
            /* Coredumps can quickly deplete all memory reserves */
            if ((current->flags & PF_DUMPCORE) &&
                !(gfp_mask & __GFP_NOFAIL))
                goto nopage;
            page = __alloc_pages_may_oom(gfp_mask, order,
                    zonelist, high_zoneidx,
                    nodemask, preferred_zone,
                    migratetype);
            if (page)
                goto got_pg;

            if (!(gfp_mask & __GFP_NOFAIL)) {
                /*
                 * The oom killer is not called for high-order
                 * allocations that may fail, so if no progress
                 * is being made, there are no other options and
                 * retrying is unlikely to help.
                 */
                if (order > PAGE_ALLOC_COSTLY_ORDER)
                    goto nopage;
                /*
                 * The oom killer is not called for lowmem
                 * allocations to prevent needlessly killing
                 * innocent tasks.
                 */
                if (high_zoneidx < ZONE_NORMAL)
                    goto nopage;
            }

            goto restart;
        }
    }

    /* Check if we should retry the allocation */
    pages_reclaimed += did_some_progress;
    if (should_alloc_retry(gfp_mask, order, did_some_progress,
                        pages_reclaimed)) {
        /* Wait for some write requests to complete then retry */
        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
        goto rebalance;
    } else {
        /*
         * High-order allocations do not necessarily loop after
         * direct reclaim and reclaim/compaction depends on compaction
         * being called after reclaim so call directly if necessary
         */
        page = __alloc_pages_direct_compact(gfp_mask, order,
                    zonelist, high_zoneidx,
                    nodemask,
                    alloc_flags, preferred_zone,
                    migratetype, sync_migration,
                    &contended_compaction,
                    &deferred_compaction,
                    &did_some_progress);
        if (page)
            goto got_pg;
    }

nopage:
    warn_alloc_failed(gfp_mask, order, NULL);
    return page;
got_pg:
    if (kmemcheck_enabled)
        kmemcheck_pagealloc_alloc(page, order, gfp_mask);

    return page;
}
The function first checks whether the caller has forbidden waking the kswapd threads; if not, it wakes them to start background reclaim. It then adjusts the allocation flags with gfp_to_alloc_flags() and calls get_page_from_freelist() once more; if that yields a page, the function returns. Otherwise the allocation attempts continue: if ALLOC_NO_WATERMARKS is set, the watermarks are ignored and __alloc_pages_high_priority() is called to allocate.
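As an aside, the watermarks that kswapd balances against are visible from user space in /proc/zoneinfo (a standard procfs interface); this small sketch prints each zone's min/low/high values:

/* watermarks.c - print each zone's min/low/high watermarks. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[256], key[32];
    FILE *f = fopen("/proc/zoneinfo", "r");

    if (!f)
        return 1;
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "%31s", key) != 1)
            continue;
        if (!strcmp(key, "Node") || !strcmp(key, "min") ||
            !strcmp(key, "low") || !strcmp(key, "high"))
            fputs(line, stdout);
    }
    fclose(f);
    return 0;
}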
The implementation of __alloc_pages_high_priority():
【file: mm/page_alloc.c】

/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
    struct zonelist *zonelist, enum zone_type high_zoneidx,
    nodemask_t *nodemask, struct zone *preferred_zone,
    int migratetype)
{
    struct page *page;

    do {
        page = get_page_from_freelist(gfp_mask, nodemask, order,
            zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
            preferred_zone, migratetype);

        if (!page && gfp_mask & __GFP_NOFAIL)
            wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
    } while (!page && (gfp_mask & __GFP_NOFAIL));

    return page;
}
As can be seen, when __GFP_NOFAIL is set this function keeps calling get_page_from_freelist() in a loop until it obtains memory.
Back in __alloc_pages_slowpath(): after __alloc_pages_high_priority() returns, it checks whether __GFP_WAIT is set. If it is, the allocation is allowed to sleep; if not, the function fails the allocation and returns immediately. It then calls __alloc_pages_direct_compact() and __alloc_pages_direct_reclaim() to try to reclaim memory and allocate again. If all of these attempts still fail, __alloc_pages_may_oom() is called to trigger the OOM killer. After the OOM killer has killed a process, the allocation is retried; finally come the cleanup paths for failure and success.
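To summarize how the caller's gfp_mask steers the slowpath, here is a hedged kernel-context sketch (3.x-era flags matching the code above; illustrative, not a real kernel function):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: how three common gfp_mask choices behave in
 * __alloc_pages_slowpath(). */
static void gfp_slowpath_examples(void)
{
    /* No __GFP_WAIT: bails out at the "Atomic allocations" check,
     * never reclaims or compacts, may return NULL. */
    struct page *atomic_pg = alloc_pages(GFP_ATOMIC, 0);

    /* May sleep: direct compaction, direct reclaim, possibly OOM. */
    struct page *normal_pg = alloc_pages(GFP_KERNEL, 0);

    /* __GFP_NOFAIL: the slowpath keeps retrying (and
     * __alloc_pages_high_priority() loops when watermarks are
     * ignored); NULL is never returned. */
    struct page *nofail_pg = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0);

    if (atomic_pg)
        __free_pages(atomic_pg, 0);
    if (normal_pg)
        __free_pages(normal_pg, 0);
    __free_pages(nofail_pg, 0);     /* nofail_pg cannot be NULL */
}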
We leave __alloc_pages_slowpath() here and turn to the function this article centers on, __alloc_pages_may_oom(), for further analysis.
【file: mm/page_alloc.c】

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
    struct zonelist *zonelist, enum zone_type high_zoneidx,
    nodemask_t *nodemask, struct zone *preferred_zone,
    int migratetype)
{
    struct page *page;

    /* Acquire the OOM killer lock for the zones in zonelist */
    if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
        schedule_timeout_uninterruptible(1);
        return NULL;
    }

    /*
     * Go through the zonelist yet one more time, keep very high watermark
     * here, this is only to catch a parallel oom killing, we must fail if
     * we're still under heavy pressure.
     */
    page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
        order, zonelist, high_zoneidx,
        ALLOC_WMARK_HIGH|ALLOC_CPUSET,
        preferred_zone, migratetype);
    if (page)
        goto out;

    if (!(gfp_mask & __GFP_NOFAIL)) {
        /* The OOM killer will not help higher order allocs */
        if (order > PAGE_ALLOC_COSTLY_ORDER)
            goto out;
        /* The OOM killer does not needlessly kill tasks for lowmem */
        if (high_zoneidx < ZONE_NORMAL)
            goto out;
        /*
         * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
         * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
         * The caller should handle page allocation failure by itself if
         * it specifies __GFP_THISNODE.
         * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
         */
        if (gfp_mask & __GFP_THISNODE)
            goto out;
    }
    /* Exhausted what can be done so it's blamo time */
    out_of_memory(zonelist, gfp_mask, order, nodemask, false);

out:
    clear_zonelist_oom(zonelist, gfp_mask);
    return page;
}
This function first uses try_set_zonelist_oom() to check whether the OOM killer is already running on another CPU; if not, try_set_zonelist_oom() takes a lock internally so that only one CPU performs the killing. It then calls get_page_from_freelist() once more against the high watermark, an attempt that is expected to fail and exists only to catch a parallel OOM kill that has already freed memory. Then the key function out_of_memory() is reached. On exit, clear_zonelist_oom() releases the lock taken in try_set_zonelist_oom().
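The serialization idiom here is a common one: a try-lock gates the expensive global action, and losers back off briefly so their caller can simply retry the allocation. A generic userspace analogue (illustrative only, C11 atomics; not the kernel's zonelist lock):

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static atomic_flag oom_in_progress = ATOMIC_FLAG_INIT;

/* Run 'action' on exactly one contending thread; the others nap and
 * report false so their caller retries instead. */
static bool run_once_or_backoff(void (*action)(void))
{
    if (atomic_flag_test_and_set(&oom_in_progress)) {
        usleep(1000);   /* cf. schedule_timeout_uninterruptible(1) */
        return false;
    }
    action();           /* cf. out_of_memory() */
    atomic_flag_clear(&oom_in_progress);
    return true;
}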
Now let's focus on out_of_memory():
【file: mm/oom_kill.c】

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 * @force_kill: true if a task must be killed, even if others are exiting
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
        int order, nodemask_t *nodemask, bool force_kill)
{
    const nodemask_t *mpol_mask;
    struct task_struct *p;
    unsigned long totalpages;
    unsigned long freed = 0;
    unsigned int uninitialized_var(points);
    enum oom_constraint constraint = CONSTRAINT_NONE;
    int killed = 0;

    blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
    if (freed > 0)
        /* Got some memory back in the last second. */
        return;

    /*
     * If current has a pending SIGKILL or is exiting, then automatically
     * select it. The goal is to allow it to allocate so that it may
     * quickly exit and free its memory.
     */
    if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
        set_thread_flag(TIF_MEMDIE);
        return;
    }

    /*
     * Check if there were limitations on the allocation (only relevant for
     * NUMA) that may require different handling.
     */
    constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                        &totalpages);
    mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
    check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

    if (sysctl_oom_kill_allocating_task && current->mm &&
        !oom_unkillable_task(current, NULL, nodemask) &&
        current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
        get_task_struct(current);
        oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
                 nodemask,
                 "Out of memory (oom_kill_allocating_task)");
        goto out;
    }

    p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
    /* Found nothing?!?! Either we hang forever, or we panic. */
    if (!p) {
        dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
        panic("Out of memory and no killable processes...\n");
    }
    if (p != (void *)-1UL) {
        oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
                 nodemask, "Out of memory");
        killed = 1;
    }
out:
    /*
     * Give the killed threads a good chance of exiting before trying to
     * allocate memory again.
     */
    if (killed)
        schedule_timeout_killable(1);
}
This function first runs the OOM notifier chain via blocking_notifier_call_chain(); if those callbacks managed to free some memory, it returns. The next check, if (fatal_signal_pending(current) || current->flags & PF_EXITING), tests whether the current task already has SIGKILL pending or is exiting; if so, it is implicitly selected (marked TIF_MEMDIE) and the function returns. Then constrained_alloc() determines any NUMA constraints on the allocation, and check_panic_on_oom() decides whether the kernel should panic outright instead of killing. Next, if the sysctl_oom_kill_allocating_task policy is enabled and the current task passes the eligibility checks, the currently allocating task itself is killed. Otherwise, select_bad_process() picks the "best" victim, which is then killed via oom_kill_process().
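The two policies referenced here are tunable from user space through the standard vm sysctls; a small sketch (the procfs paths are the stock ones, values shown as examples):

#include <stdio.h>

/* Write a value to a procfs sysctl file; returns 0 on success. */
static int write_sysctl(const char *path, const char *val)
{
    FILE *f = fopen(path, "w");

    if (!f)
        return -1;
    fputs(val, f);
    fclose(f);
    return 0;
}

int main(void)
{
    /* 0 = kill a task on OOM; 1/2 = panic instead (check_panic_on_oom) */
    write_sysctl("/proc/sys/vm/panic_on_oom", "0");
    /* 1 = kill current instead of scanning (sysctl_oom_kill_allocating_task) */
    write_sysctl("/proc/sys/vm/oom_kill_allocating_task", "1");
    return 0;
}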
Finally, let's analyze select_bad_process() and oom_kill_process(), starting with the implementation of select_bad_process():
【file: mm/oom_kill.c】

/*
 * Simple selection loop. We chose the process with the highest
 * number of 'points'. Returns -1 on scan abort.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
        unsigned long totalpages, const nodemask_t *nodemask,
        bool force_kill)
{
    struct task_struct *g, *p;
    struct task_struct *chosen = NULL;
    unsigned long chosen_points = 0;

    rcu_read_lock();
    for_each_process_thread(g, p) {
        unsigned int points;

        switch (oom_scan_process_thread(p, totalpages, nodemask,
                        force_kill)) {
        case OOM_SCAN_SELECT:
            chosen = p;
            chosen_points = ULONG_MAX;
            /* fall through */
        case OOM_SCAN_CONTINUE:
            continue;
        case OOM_SCAN_ABORT:
            rcu_read_unlock();
            return (struct task_struct *)(-1UL);
        case OOM_SCAN_OK:
            break;
        };
        points = oom_badness(p, NULL, nodemask, totalpages);
        if (!points || points < chosen_points)
            continue;
        /* Prefer thread group leaders for display purposes */
        if (points == chosen_points && thread_group_leader(chosen))
            continue;

        chosen = p;
        chosen_points = points;
    }
    if (chosen)
        get_task_struct(chosen);
    rcu_read_unlock();

    *ppoints = chosen_points * 1000 / totalpages;
    return chosen;
}
This function walks every thread with the for_each_process_thread() macro, classifies each via oom_scan_process_thread(), and handles the special cases in the switch: the scan is aborted if some task is already dying, and a task explicitly marked as the preferred kill target is selected outright. In the normal case, oom_badness() computes a score for each candidate and the task_struct with the highest score is returned.
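The kernel exports each task's current badness through /proc/<pid>/oom_score, so the effect of this selection loop can be approximated from user space; a hedged sketch:

#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* Scan /proc/<pid>/oom_score and report the task the OOM killer would
 * currently favor (an outside view of select_bad_process()). */
int main(void)
{
    DIR *proc = opendir("/proc");
    struct dirent *de;
    long best_score = -1, best_pid = -1;

    if (!proc)
        return 1;
    while ((de = readdir(proc))) {
        char path[64];
        long score;
        FILE *f;

        if (!isdigit((unsigned char)de->d_name[0]))
            continue;               /* not a PID directory */
        snprintf(path, sizeof(path), "/proc/%s/oom_score", de->d_name);
        f = fopen(path, "r");
        if (!f)
            continue;               /* task may have exited */
        if (fscanf(f, "%ld", &score) == 1 && score > best_score) {
            best_score = score;
            best_pid = atol(de->d_name);
        }
        fclose(f);
    }
    closedir(proc);
    printf("likely victim: pid %ld (oom_score %ld)\n", best_pid, best_score);
    return 0;
}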
While we are at it, let's look at the implementation of oom_badness():
【file: mm/oom_kill.c】

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible. The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
              const nodemask_t *nodemask, unsigned long totalpages)
{
    long points;
    long adj;

    if (oom_unkillable_task(p, memcg, nodemask))
        return 0;

    p = find_lock_task_mm(p);
    if (!p)
        return 0;

    adj = (long)p->signal->oom_score_adj;
    if (adj == OOM_SCORE_ADJ_MIN) {
        task_unlock(p);
        return 0;
    }

    /*
     * The baseline for the badness score is the proportion of RAM that each
     * task's rss, pagetable and swap space use.
     */
    points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
         get_mm_counter(p->mm, MM_SWAPENTS);
    task_unlock(p);

    /*
     * Root processes get 3% bonus, just like the __vm_enough_memory()
     * implementation used by LSMs.
     */
    if (has_capability_noaudit(p, CAP_SYS_ADMIN))
        points -= (points * 3) / 100;

    /* Normalize to oom_score_adj units */
    adj *= totalpages / 1000;
    points += adj;

    /*
     * Never return 0 for an eligible task regardless of the root bonus and
     * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
     */
    return points > 0 ? points : 1;
}
This scoring function first excludes tasks that may not be OOM-killed, as well as tasks whose oom_score_adj is OOM_SCORE_ADJ_MIN (i.e. -1000; oom_score_adj ranges from -1000 to 1000), returning 0 for them. The baseline score is then the task's RSS, page-table and swap usage, counted in pages; if the task has root privileges (CAP_SYS_ADMIN), 3% of the score is discounted. Finally oom_score_adj is normalized (one unit is worth totalpages/1000 pages) and added in; any result of zero or below is returned as 1, everything else as-is. Lower-scoring tasks are therefore less likely to be killed, and the score can be tuned through oom_score_adj.
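To make the arithmetic concrete, here is a userspace mock of the heuristic (not the kernel function; the constants follow the code above):

/* Mock of oom_badness(): memory footprint in pages, shifted by
 * oom_score_adj, where one adj unit is worth totalpages/1000 pages. */
static long mock_badness(long rss, long nr_ptes, long swapents,
                         long oom_score_adj, long totalpages, int is_root)
{
    long points;

    if (oom_score_adj == -1000)         /* OOM_SCORE_ADJ_MIN: unkillable */
        return 0;

    points = rss + nr_ptes + swapents;  /* baseline: real memory usage */
    if (is_root)
        points -= points * 3 / 100;     /* 3% bonus for CAP_SYS_ADMIN */

    points += oom_score_adj * (totalpages / 1000);
    return points > 0 ? points : 1;     /* eligible tasks never score 0 */
}

/* e.g. 1 GiB of RSS on a 4 GiB machine (totalpages = 1048576) with
 * adj = 0 yields roughly 262144 points. */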
Finally, once the "baddest" process has been found, let's see the treatment it receives in oom_kill_process():
【file: mm/oom_kill.c】

/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
              unsigned int points, unsigned long totalpages,
              struct mem_cgroup *memcg, nodemask_t *nodemask,
              const char *message)
{
    struct task_struct *victim = p;
    struct task_struct *child;
    struct task_struct *t;
    struct mm_struct *mm;
    unsigned int victim_points = 0;
    static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
                          DEFAULT_RATELIMIT_BURST);

    /*
     * If the task is already exiting, don't alarm the sysadmin or kill
     * its children or threads, just set TIF_MEMDIE so it can die quickly
     */
    if (p->flags & PF_EXITING) {
        set_tsk_thread_flag(p, TIF_MEMDIE);
        put_task_struct(p);
        return;
    }

    if (__ratelimit(&oom_rs))
        dump_header(p, gfp_mask, order, memcg, nodemask);

    task_lock(p);
    pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
        message, task_pid_nr(p), p->comm, points);
    task_unlock(p);

    /*
     * If any of p's children has a different mm and is eligible for kill,
     * the one with the highest oom_badness() score is sacrificed for its
     * parent. This attempts to lose the minimal amount of work done while
     * still freeing memory.
     */
    read_lock(&tasklist_lock);
    for_each_thread(p, t) {
        list_for_each_entry(child, &t->children, sibling) {
            unsigned int child_points;

            if (child->mm == p->mm)
                continue;
            /*
             * oom_badness() returns 0 if the thread is unkillable
             */
            child_points = oom_badness(child, memcg, nodemask,
                                totalpages);
            if (child_points > victim_points) {
                put_task_struct(victim);
                victim = child;
                victim_points = child_points;
                get_task_struct(victim);
            }
        }
    }
    read_unlock(&tasklist_lock);

    p = find_lock_task_mm(victim);
    if (!p) {
        put_task_struct(victim);
        return;
    } else if (victim != p) {
        get_task_struct(p);
        put_task_struct(victim);
        victim = p;
    }

    /* mm cannot safely be dereferenced after task_unlock(victim) */
    mm = victim->mm;
    pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
        task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
        K(get_mm_counter(victim->mm, MM_ANONPAGES)),
        K(get_mm_counter(victim->mm, MM_FILEPAGES)));
    task_unlock(victim);

    /*
     * Kill all user processes sharing victim->mm in other thread groups, if
     * any. They don't get access to memory reserves, though, to avoid
     * depletion of all memory. This prevents mm->mmap_sem livelock when an
     * oom killed thread cannot exit because it requires the semaphore and
     * its contended by another thread trying to allocate memory itself.
     * That thread will now get access to memory reserves since it has a
     * pending fatal signal.
     */
    rcu_read_lock();
    for_each_process(p)
        if (p->mm == mm && !same_thread_group(p, victim) &&
            !(p->flags & PF_KTHREAD)) {
            if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
                continue;

            task_lock(p);   /* Protect ->comm from prctl() */
            pr_err("Kill process %d (%s) sharing same memory\n",
                task_pid_nr(p), p->comm);
            task_unlock(p);
            do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
        }
    rcu_read_unlock();

    set_tsk_thread_flag(victim, TIF_MEMDIE);
    do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
    put_task_struct(victim);
}
This function first checks the state of the process to be killed: if it is already exiting, it merely sets the TIF_MEMDIE flag rather than killing it. It then walks the task's children with list_for_each_entry(); if a child owns a different mm and is eligible to be killed, the highest-scoring child is sacrificed in place of its parent, which loses less completed work while still freeing memory. Next, find_lock_task_mm() locates a thread that still holds the mm and takes the task lock; if none exists the function returns, and if the thread found is not the chosen one, victim is updated to it. After that, for_each_process() looks for processes in other thread groups that share the victim's mm, so they can be killed along with it. Before the kill, the victim is flagged with TIF_MEMDIE; the kill itself is performed by sending SIGKILL to the target process, which acts on the signal when returning from kernel mode to user mode.
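In practice a process can influence its fate under this code by writing /proc/<pid>/oom_score_adj, the standard interface behind p->signal->oom_score_adj; a brief sketch:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Set the calling process's oom_score_adj: "-1000" (OOM_SCORE_ADJ_MIN)
 * exempts it from the OOM killer entirely, "1000" makes it the
 * preferred victim. Lowering the value requires privilege. */
static int set_oom_score_adj(const char *val)
{
    int fd = open("/proc/self/oom_score_adj", O_WRONLY);
    ssize_t n;

    if (fd < 0)
        return -1;
    n = write(fd, val, strlen(val));
    close(fd);
    return n < 0 ? -1 : 0;
}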
This concludes the analysis of the OOM killer.