2.1 進(jìn)程優(yōu)先級(jí)
1. 進(jìn)程分類
1)硬實(shí)時(shí)進(jìn)程: 有嚴(yán)格時(shí)間限制
2)軟實(shí)時(shí)進(jìn)程:也有時(shí)間限制,但限制較寬松,偶爾超過(guò)期限不會(huì)造成嚴(yán)重后果;
3)普通進(jìn)程:大多數(shù)進(jìn)程
2.2 進(jìn)程生命周期
進(jìn)程的生命周期可歸結(jié)為以下三個(gè)狀態(tài):
- 運(yùn)行:進(jìn)程正在執(zhí)行;
- 睡眠:進(jìn)程正在睡眠,不能執(zhí)行,它在等待一個(gè)外部事件;
- 等待:進(jìn)程可以運(yùn)行,但是需要等到下一任務(wù)切換時(shí)執(zhí)行。
更細(xì)致地,可從task_struct的state字段中得到具體狀態(tài):
1)運(yùn)行時(shí)狀態(tài):
TASK_RUNNING:進(jìn)程處于可運(yùn)行狀態(tài),但并不意味著已實(shí)際分配了CPU,而是進(jìn)程可以無(wú)需等待外部條件執(zhí)行;
TASK_INTERRUPTIBLE:進(jìn)程處于睡眠狀態(tài),可由外部信號(hào)喚醒;
TASK_UNINTERRUPTIBLE:睡眠狀態(tài),但不能由外部信號(hào)喚醒,例如IO等待,這種狀態(tài)主要是為了保持強(qiáng)一致性,例如讀取磁盤(pán)時(shí)若外部信號(hào)能中斷,那么磁盤(pán)讀取則會(huì)處于不完整狀態(tài),影響正常使用。
__TASK_STOPPED:進(jìn)程特意停止運(yùn)行,如由調(diào)試器暫停;
__TASK_TRACED:ptrace跟蹤用,在調(diào)試時(shí)區(qū)分常規(guī)進(jìn)程;
2)退出時(shí)狀態(tài):exit_state
EXIT_ZOMBIE: 進(jìn)程處于僵尸狀態(tài):子進(jìn)程已經(jīng)終止,但父進(jìn)程尚未調(diào)用wait系列函數(shù)獲取其退出狀態(tài),此時(shí)子進(jìn)程的進(jìn)程描述符仍保留在系統(tǒng)中,即成為僵尸進(jìn)程。
EXIT_DEAD:指父進(jìn)程已發(fā)出wait調(diào)用,但進(jìn)程還未完全從系統(tǒng)中移除之前的狀態(tài)
2.3 進(jìn)程表示
進(jìn)程的表示主要通過(guò)task_struct結(jié)構(gòu):
/*
 * The kernel's central process descriptor: every task (process or
 * thread) in the system is represented by one task_struct instance.
 * Excerpted from include/linux/sched.h (circa Linux 2.6.3x).
 */
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
int lock_depth; /* BKL lock depth */
#ifdef CONFIG_SMP
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
int oncpu;
#endif
#endif
/* scheduling: priorities, scheduler class, and scheduling entities (see section 2.5) */
int prio, static_prio, normal_prio;
unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
#endif
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
*/
unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
unsigned int policy;
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
#endif
struct list_head tasks;
#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
#endif
struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
#endif
/* task state (exit_state values EXIT_ZOMBIE/EXIT_DEAD are described above) */
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
unsigned int personality;
unsigned did_exec:1;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
* execve */
unsigned in_iowait:1;
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
pid_t pid; /* process ID */
pid_t tgid; /* thread-group ID */
#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value for the -fstack-protector gcc feature */
unsigned long stack_canary;
#endif
/*
* pointers to (original) parent process, youngest child, younger sibling,
* older sibling, respectively. (p->father can be replaced with
* p->real_parent->pid)
*/
struct task_struct *real_parent; /* real parent process */
struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
/*
* children/sibling forms the list of my natural children
*/
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
/*
* ptraced is the list of tasks this task is using ptrace on.
* This includes both natural children and PTRACE_ATTACH targets.
* p->ptrace_entry is p's link on the p->parent->ptraced list.
*/
struct list_head ptraced;
struct list_head ptrace_entry;
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
struct completion *vfork_done; /* for vfork() */
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
/* CPU-time accounting */
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
cputime_t prev_utime, prev_stime;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time; /* monotonic time */
struct timespec real_start_time; /* boot based time */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
* credentials (COW) */
const struct cred __rcu *cred; /* effective (overridable) subjective task
* credentials (COW) */
struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
- initialized normally by setup_new_exec */
/* file system info */
int link_count, total_link_count;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
unsigned long last_switch_count;
#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/* filesystem information */
struct fs_struct *fs;
/* open file information */
struct files_struct *files;
/* namespaces (see section on namespaces: uts/ipc/mnt/user/pid/net) */
struct nsproxy *nsproxy;
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
uid_t loginuid;
unsigned int sessionid;
#endif
seccomp_t seccomp;
/* Thread group tracking */
u32 parent_exec_id;
u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
* mempolicy */
spinlock_t alloc_lock;
#ifdef CONFIG_GENERIC_HARDIRQS
/* IRQ handler threads */
struct irqaction *irqaction;
#endif
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
struct plist_head pi_waiters;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
unsigned int hardirq_disable_event;
int hardirqs_enabled;
int hardirq_context;
unsigned long softirq_disable_ip;
unsigned long softirq_enable_ip;
unsigned int softirq_disable_event;
unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
#endif
/* journalling filesystem info */
void *journal_info;
/* stacked block device info */
struct bio_list *bio_list;
#ifdef CONFIG_BLOCK
/* stack plugging */
struct blk_plug *plug;
#endif
/* VM state */
struct reclaim_state *reclaim_state;
struct backing_dev_info *backing_dev_info;
struct io_context *io_context;
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1; /* accumulated rss usage */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed; /* Protected by alloc_lock */
int mems_allowed_change_disable;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock */
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
short pref_node_fork;
#endif
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
/*
* cache last used pipe for splice
*/
struct pipe_inode_info *splice_pipe;
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
#endif
struct prop_local_single dirties;
#ifdef CONFIG_LATENCYTOP
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
#endif
/*
* time slack values; these are used to round up poll() and
* select() etc timeout values. These are in nanoseconds.
*/
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack *ret_stack;
/* time stamp for last schedule */
unsigned long long ftrace_timestamp;
/*
* Number of functions that haven't been traced
* because of depth overrun.
*/
atomic_t trace_overrun;
/* Pause for the tracing */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
/* bitmask of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
struct memcg_batch_info {
int do_batch; /* incremented when batch uncharge started */
struct mem_cgroup *memcg; /* target memcg of uncharge */
unsigned long nr_pages; /* uncharged usage */
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
#endif
};
進(jìn)程管理中需要注意的一些重要成員:
1、state: 見(jiàn)上文;
2、資源管理rlim數(shù)組,見(jiàn)signal_struct中:
struct rlimit rlim[RLIM_NLIMITS];
rlimit定義如下:
/*
 * Resource-limit pair stored in signal_struct's rlim[] array.
 * Note: the kernel struct is spelled "rlimit" (the original text had a
 * "rlmit" typo) and the definition must end with a semicolon.
 */
struct rlimit {
    unsigned long rlim_cur; /* soft limit: the process's current resource limit */
    unsigned long rlim_max; /* hard limit: the maximum permitted value */
};
rlim數(shù)組的索引標(biāo)識(shí)類型,資源限制可通過(guò)查看limits文件得知:
cat /proc/pid/limits
2.3.1 進(jìn)程類型
1、新進(jìn)程是通過(guò)fork、exec系統(tǒng)調(diào)用產(chǎn)生的;
2、clone也可以產(chǎn)生新進(jìn)程,但是clone主要用于實(shí)現(xiàn)線程,它和fork的主要不同點(diǎn)在于父子進(jìn)程共享的資源不同,本質(zhì)上是沒(méi)有任何區(qū)別的。
2.3.2 命名空間
作用
不同于KVM或VMWare,Linux的命名空間只使用一個(gè)內(nèi)核在一臺(tái)物理計(jì)算機(jī)上運(yùn)作,只需要很少的資源,便可虛擬化出多臺(tái)計(jì)算機(jī)。namespace主要做資源的隔離,正如目前很熱門(mén)的Docker技術(shù)也是使用類似的原理。
創(chuàng)建方式
創(chuàng)建方式有三種:
1、clone() – 實(shí)現(xiàn)線程的系統(tǒng)調(diào)用,用來(lái)創(chuàng)建一個(gè)新的進(jìn)程,并可以通過(guò)設(shè)置參數(shù)達(dá)到各類資源的隔離。
2、unshare() – 使某進(jìn)程脫離某個(gè)namespace
3、setns() – 把某進(jìn)程加入到某個(gè)namespace
實(shí)現(xiàn)方式
namespace的實(shí)現(xiàn)主要依靠nsproxy結(jié)構(gòu):
struct nsproxy {
atomic_t count; // reference count
struct uts_namespace *uts_ns; // UTS namespace: kernel name, release/version, etc.
struct ipc_namespace *ipc_ns; // inter-process-communication related info
struct mnt_namespace *mnt_ns; // filesystem mount info
struct user_namespace *user_ns; // per-user resource-limit info
struct pid_namespace *pid_ns; // process-ID related info
struct net *net_ns; // network namespace info
};
具體可參考:DOCKER基礎(chǔ)技術(shù):LINUX NAMESPACE(上)
2.3.3 進(jìn)程ID號(hào)

上圖很好的闡述了進(jìn)程ID的結(jié)構(gòu)信息
首先從task_struct開(kāi)始:
/* Excerpt: the pids[] member hooks a task into the PID hash (other members elided). */
struct task_struct {
...
struct pid_link pids[PIDTYPE_MAX];
...
}; /* the struct definition must end with a semicolon */
其中pids數(shù)組是一個(gè)將task_struct關(guān)聯(lián)到pid的散列表。
/* Links a task_struct into the per-PID hash chains. */
struct pid_link {
    struct hlist_node node; /* hash-chain element (the original lacked comment markers here) */
    struct pid *pid;        /* the pid instance this task is attached to */
};
struct upid {
/* Try to keep pid_chain in the same cacheline as nr for find_vpid */
int nr; // the numeric PID value
struct pid_namespace *ns; // pointer to the pid_namespace this value belongs to
struct hlist_node pid_chain; // hash-chain element (see cacheline note above)
};
struct pid
{
atomic_t count; // reference count
unsigned int level; // hierarchy level: a child namespace's level is its parent's level + 1
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX]; // one list head per PID type: PID list, process-group list, session list
struct rcu_head rcu;
struct upid numbers[1]; // upid array, one entry per namespace level this PID is visible in (indexed from 0); sized by the PID's level
};
enum pid_type
{
PIDTYPE_PID, // process PID
PIDTYPE_PGID, // process-group ID
PIDTYPE_SID, // session ID
PIDTYPE_MAX
};
這里沒(méi)加上TGID的原因是線程組也是一種PID:線程組長(zhǎng)PID,再單獨(dú)定義一個(gè)id沒(méi)有必要。
upid中關(guān)聯(lián)到pid_namespace,再看看pid_namespace定義:
struct pid_namespace {
struct kref kref; // reference count
struct pidmap pidmap[PIDMAP_ENTRIES]; // bitmap used to allocate unique PIDs (see "生成唯一PID" below)
int last_pid;
struct task_struct *child_reaper; // adopts children whose parent exits before them (see note below)
struct kmem_cache *pid_cachep;
unsigned int level; // depth of this namespace in the namespace hierarchy
struct pid_namespace *parent; // parent pid namespace
#ifdef CONFIG_PROC_FS
struct vfsmount *proc_mnt;
#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct bsd_acct_struct *bacct;
#endif
};
在這里只關(guān)注child_reaper指針、level和parent指針。
其中child_reaper作用類似于fork函數(shù)中父進(jìn)程調(diào)用wait系列函數(shù),用于托管進(jìn)程:就是當(dāng)父進(jìn)程先于子進(jìn)程結(jié)束的時(shí)候,就把子進(jìn)程的父進(jìn)程更新為child_reaper。
level即為命名空間的層級(jí)關(guān)系;
parent:父pid命名空間指針。
參考文章:Pid NameSpace淺分析
生成唯一PID
唯一pid的生成其實(shí)是通過(guò)一個(gè)大的bitmap生成,bitmap有高效、節(jié)省空間的作用,本質(zhì)即是尋找bitmap中第一個(gè)為0的比特用于分配新pid。該bitmap可見(jiàn)pid_namespace:
/* Excerpt: the PID allocation bitmap lives inside the pid namespace. */
struct pid_namespace {
...
struct pidmap pidmap[PIDMAP_ENTRIES];
...
}; /* the struct definition must end with a semicolon */
/*
 * Number of pages needed for the PID bitmap.  Note: object-like macros
 * must not end with ';' (the original had stray semicolons) — with a
 * semicolon, using PIDMAP_ENTRIES as an array size would not compile.
 */
#define PIDMAP_ENTRIES ((PID_MAX_LIMIT + 8*PAGE_SIZE - 1)/PAGE_SIZE/8)
/* Highest PID value: one page of bits on small configs, else 4M on 64-bit, else PID_MAX_DEFAULT. */
#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
    (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
alloc_pidmap函數(shù)用于分配一個(gè)PID,而free_pidmap用于釋放一個(gè)PID,具體見(jiàn)kernel/pid.c
2.4 進(jìn)程管理相關(guān)的系統(tǒng)調(diào)用
2.4.1 進(jìn)程復(fù)制
進(jìn)程復(fù)制主要有三個(gè)api:
1、fork(): 重量級(jí)調(diào)用,建立一個(gè)父進(jìn)程的完整副本,然后作為子進(jìn)程執(zhí)行。
2、vfork():類似fork(),不同在于vfork中父子進(jìn)程共享數(shù)據(jù),同時(shí)子進(jìn)程在創(chuàng)建后,父進(jìn)程會(huì)一直等到子進(jìn)程執(zhí)行完成;
3、clone():用于線程實(shí)現(xiàn),通過(guò)設(shè)定不同的flag來(lái)控制父子進(jìn)程的共享項(xiàng)。
1、寫(xiě)時(shí)復(fù)制
寫(xiě)時(shí)復(fù)制主要解決子進(jìn)程生成時(shí)復(fù)制大量數(shù)據(jù)從而耗費(fèi)資源的問(wèn)題,資源耗費(fèi)主要有兩方面:1是使用了大量?jī)?nèi)存,2是復(fù)制操作耗費(fèi)大量時(shí)間。寫(xiě)時(shí)復(fù)制實(shí)現(xiàn)的主要依據(jù)是進(jìn)程通常只需要內(nèi)存頁(yè)的一小部分,同時(shí)大部分子進(jìn)程在創(chuàng)建后會(huì)立即執(zhí)行。寫(xiě)時(shí)復(fù)制實(shí)現(xiàn)中,當(dāng)一個(gè)進(jìn)程試圖向復(fù)制的內(nèi)存頁(yè)寫(xiě)入數(shù)據(jù)時(shí),處理器會(huì)立即報(bào)告缺頁(yè)異常,這時(shí)內(nèi)核會(huì)檢查內(nèi)存頁(yè)面是否只讀,若是只讀,則報(bào)告段錯(cuò)誤,否則執(zhí)行復(fù)制操作。
2、執(zhí)行系統(tǒng)調(diào)用
fork(),vfork(),clone()最終都會(huì)調(diào)用do_fork()函數(shù)。三種實(shí)現(xiàn)只有微小的差別。
3、do_fork()實(shí)現(xiàn)

2.5 調(diào)度器實(shí)現(xiàn)
在2.6之后,調(diào)度器策略進(jìn)行了改變,從O(1)調(diào)度器到完全公平隊(duì)列調(diào)度,有了質(zhì)的改變,完全公平調(diào)度不再依賴時(shí)間片的概念,摒棄了舊版本調(diào)度存在的不足。
調(diào)度操作起點(diǎn)是kernel/sched.c中的schedule函數(shù)。
2.5.2 數(shù)據(jù)結(jié)構(gòu)

整體的調(diào)度框架如上圖所示,可有兩種方法激活調(diào)度:
1、直接方式
例如進(jìn)程打算睡眠或出于其他原因放棄cpu;
2、周期性
固定頻率運(yùn)行,檢查是否需要進(jìn)程切換。
具體的,有以下幾個(gè)調(diào)度時(shí)機(jī):
1)調(diào)用cond_resched()時(shí)
2)顯式調(diào)用schedule()時(shí)
3)從系統(tǒng)調(diào)用或者異常中斷返回用戶空間時(shí)
4)從中斷上下文返回用戶空間時(shí)
當(dāng)開(kāi)啟內(nèi)核搶占(默認(rèn)開(kāi)啟)時(shí),會(huì)多出幾個(gè)調(diào)度時(shí)機(jī),如下
1)在系統(tǒng)調(diào)用或者異常中斷上下文中調(diào)用preempt_enable()時(shí)(多次調(diào)用preempt_enable()時(shí),系統(tǒng)只會(huì)在最后一次調(diào)用時(shí)會(huì)調(diào)度)
2)在中斷上下文中,從中斷處理函數(shù)返回到可搶占的上下文時(shí)(這里是中斷下半部,中斷上半部實(shí)際上會(huì)關(guān)中斷,而新的中斷只會(huì)被登記,由于上半部處理很快,上半部處理完成后才會(huì)執(zhí)行新的中斷信號(hào),這樣就形成了中斷可重入)
task_struct結(jié)構(gòu)中相關(guān)成員:
/* Scheduler-related members of task_struct (annotated excerpt). */
struct task_struct {
    /*
     * prio and normal_prio are dynamic priorities; static_prio is the static
     * priority assigned when the process starts, changeable via nice() or the
     * sched_setscheduler() system call.  normal_prio is computed from the
     * static priority and the scheduling policy.  On fork, the child's prio
     * is taken from the parent's normal priority, and its static_prio from
     * the parent's static priority.
     */
    int prio, static_prio, normal_prio;
    /* Real-time priority, range [0, 99]; a larger value means higher priority. */
    unsigned int rt_priority;
    /* The scheduler class this task belongs to. */
    const struct sched_class *sched_class;
    /* Scheduling entity — the object the scheduler actually operates on. */
    struct sched_entity se;
    /* Scheduling policy: SCHED_NORMAL (ordinary), SCHED_RR / SCHED_FIFO (soft real-time). */
    unsigned int policy;
    /* CPU affinity: which CPUs the task may run on (fixed "cups_allowed" typo). */
    cpumask_t cpus_allowed;
    /*
     * run_list is a list head linking the task into a run list;
     * time_slice is the remaining CPU time the task may use.
     */
    struct list_head run_list;
    unsigned int time_slice;
}; /* the struct definition must end with a semicolon */
調(diào)度器類
struct sched_class {
const struct sched_class *next; // linked-list link to the next scheduler class
// add a new task to the run queue
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
// remove a task from the run queue
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*yield_task) (struct rq *rq); // voluntarily give up the processor
bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
// preempt the current task with a newly woken one
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
// pick the next task that will run
struct task_struct * (*pick_next_task) (struct rq *rq);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
int (*select_task_rq)(struct rq *rq, struct task_struct *p,
int sd_flag, int flags);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_waking) (struct rq *this_rq, struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
#endif
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
void (*task_fork) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task);
void (*switched_to) (struct rq *this_rq, struct task_struct *task);
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio);
unsigned int (*get_rr_interval) (struct rq *rq,
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
在進(jìn)程注冊(cè)到就緒隊(duì)列時(shí),sched_entity實(shí)例的on_rq設(shè)置為1,否則為0
就緒隊(duì)列
struct rq {
/* runqueue lock: */
raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because
* remote CPUs use both these fields when doing load calculation.
*/
unsigned long nr_running; // number of runnable tasks on this queue
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
u64 nohz_stamp;
unsigned char nohz_balance_kick;
#endif
unsigned int skip_clock_update;
/* capture load from *all* tasks on this cpu: */
struct load_weight load; // measure of the run queue's current load
unsigned long nr_load_updates;
u64 nr_switches;
struct cfs_rq cfs; // sub-runqueue for the completely fair scheduler
struct rt_rq rt; // sub-runqueue for the real-time scheduler
// the currently running task and the idle task, respectively
struct task_struct *curr, *idle;
// per-runqueue clock; clock is updated on each invocation of the periodic scheduler
u64 clock;
u64 clock_task;
};
系統(tǒng)的所有就緒隊(duì)列都在runqueues數(shù)組中,該數(shù)組的每個(gè)元素分別對(duì)應(yīng)于系統(tǒng)中的一個(gè)CPU。
調(diào)度實(shí)體
struct sched_entity {
struct load_weight load; // 權(quán)重,決定各個(gè)實(shí)體占隊(duì)列總負(fù)荷的比例
struct rb_node run_node; // 樹(shù)節(jié)點(diǎn),實(shí)體在紅黑樹(shù)上排序
unsigned int on_rq; // 當(dāng)前實(shí)體是否在就緒隊(duì)列上接受調(diào)度
u64 exec_start; // 當(dāng)前時(shí)間
u64 sum_exec_runtime; // 消耗的CPU時(shí)間
u64 vruntime; // 虛擬時(shí)鐘上流逝的時(shí)間數(shù)量
u64 prev_sum_exec_runtime; // 保存sum_exec_runtime
}