Merge pull request #821 from albertxu216/develop
Cpu_watcher: add EWMA
chenamy2017 authored Jun 7, 2024
2 parents 9e6ed68 + b8e1249 commit 42adaef
Showing 5 changed files with 278 additions and 141 deletions.
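The EWMA referred to in the commit title is added in parts of this commit that are not expanded on this page. Purely as orientation, an exponentially weighted moving average over sampled delays can be written as below; the smoothing factor and the helper name are illustrative assumptions, not the repository's implementation.

#include <stdint.h>

/* Hypothetical smoothing factor alpha = 2/10 = 0.2, kept as an integer ratio so
 * the same arithmetic also works inside BPF programs (no floating point there). */
#define EWMA_NUM 2
#define EWMA_DEN 10

/* Fold one new sample (e.g. a scheduling delay in ns) into the running average. */
static uint64_t ewma_update(uint64_t prev_avg, uint64_t sample)
{
    if (prev_avg == 0)      /* first sample seeds the average */
        return sample;
    return (EWMA_NUM * sample + (EWMA_DEN - EWMA_NUM) * prev_avg) / EWMA_DEN;
}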
6 changes: 6 additions & 0 deletions .github/workflows/ebpf_cpu_watcher.yml
@@ -31,3 +31,9 @@ jobs:
cd eBPF_Supermarket/CPU_Subsystem/cpu_watcher/
make
sudo ./cpu_watcher
- name: Run test_cpuwatcher
run: |
cd eBPF_Supermarket/CPU_Subsystem/cpu_watcher/test
make
./test_cpuwatcher
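Compared with the existing steps, which only build and launch cpu_watcher itself, the new step also builds the self-test under cpu_watcher/test and runs ./test_cpuwatcher in the same job.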
61 changes: 33 additions & 28 deletions eBPF_Supermarket/CPU_Subsystem/cpu_watcher/bpf/schedule_delay.bpf.c
@@ -23,14 +23,14 @@
char LICENSE[] SEC("license") = "Dual BSD/GPL";
#define TASK_RUNNING 0x0000

BPF_HASH(has_scheduled,struct proc_id, bool, 10240);
BPF_HASH(enter_schedule,struct proc_id, struct schedule_event, 10240);
BPF_ARRAY(sys_schedule,int,struct sum_schedule,1);

BPF_HASH(has_scheduled,struct proc_id, bool, 10240);//whether this process has been scheduled before
BPF_HASH(enter_schedule,struct proc_id, struct schedule_event, 10240);//time the process entered the run queue
BPF_ARRAY(sys_schedule,int,struct sum_schedule,1);//scheduling-delay statistics for the whole system
BPF_ARRAY(threshold_schedule,int,struct proc_schedule,10240);//scheduling delay of each process
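//The key/value types above come from the project's shared header; judging only by
//the fields accessed in this file, proc_id carries a pid plus a cpu_id,
//schedule_event carries pid/count/enter_time, and sum_schedule keeps the
//system-wide sum/max/min delay statistics (field lists inferred, not verbatim).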

SEC("tp_btf/sched_wakeup")
int BPF_PROG(sched_wakeup, struct task_struct *p) {
pid_t pid = BPF_CORE_READ(p, pid);
pid_t pid = p->pid;
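//in a tp_btf program the task_struct argument is a BTF-typed (trusted) pointer,
//so reading p->pid directly works and BPF_CORE_READ is no longer needed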
int cpu = bpf_get_smp_processor_id();
struct schedule_event *schedule_event;
struct proc_id id= {};
@@ -56,7 +56,7 @@ int BPF_PROG(sched_wakeup, struct task_struct *p) {

SEC("tp_btf/sched_wakeup_new")
int BPF_PROG(sched_wakeup_new, struct task_struct *p) {
pid_t pid = BPF_CORE_READ(p, pid);
pid_t pid = p->pid;
int cpu = bpf_get_smp_processor_id();
struct proc_id id= {};
u64 current_time = bpf_ktime_get_ns();
@@ -86,17 +86,17 @@ int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_s
struct schedule_event *schedule_event;
struct sum_schedule *sum_schedule;
int key = 0;
struct proc_id next_id= {};
struct proc_id next_id = {};
u64 delay;
if (prev_state == TASK_RUNNING) {
struct proc_id prev_pd= {};
struct proc_id prev_pd = {};
prev_pd.pid = prev_pid;
if (prev_pid == 0) {
prev_pd.cpu_id = prev_cpu;
}
}
schedule_event = bpf_map_lookup_elem(&enter_schedule, &prev_pd);
if (!schedule_event) {
struct schedule_event schedule_event2 ;
struct schedule_event schedule_event2;
bool issched = false;
schedule_event2.pid = prev_pid;
schedule_event2.count = 1;
@@ -113,44 +113,49 @@ int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_s
next_id.cpu_id = next_cpu;
}
schedule_event = bpf_map_lookup_elem(&enter_schedule, &next_id);
if (!schedule_event) return 0;
if (!schedule_event) return 0;
issched = bpf_map_lookup_elem(&has_scheduled, &next_id);
if (!issched) return 0;
if (!issched) return 0;
if (*issched) {
schedule_event->count++;
} else {
*issched = true;
}
}
delay = current_time - schedule_event->enter_time;
struct proc_schedule proc_schedule;
proc_schedule.pid = next_pid;
proc_schedule.delay = delay;
bpf_probe_read_kernel_str(&proc_schedule.proc_name, sizeof(proc_schedule.proc_name), next->comm);
bpf_map_update_elem(&threshold_schedule, &key, &proc_schedule, BPF_ANY);
sum_schedule = bpf_map_lookup_elem(&sys_schedule, &key);
if (!sum_schedule) {
struct sum_schedule sum_schedule= {};
struct sum_schedule sum_schedule = {};
sum_schedule.sum_count++;
sum_schedule.sum_delay += delay;
if (delay > sum_schedule.max_delay){
if (delay > sum_schedule.max_delay) {
sum_schedule.max_delay = delay;
if(next->pid!=0){
sum_schedule.pid_max = next->pid;
if (next->pid != 0) {
bpf_probe_read_kernel_str(&sum_schedule.proc_name_max, sizeof(sum_schedule.proc_name_max), next->comm);
}
}else if (sum_schedule.min_delay == 0 || delay < sum_schedule.min_delay)
} else if (sum_schedule.min_delay == 0 || delay < sum_schedule.min_delay) {
sum_schedule.min_delay = delay;
if(next->pid!=0){
sum_schedule.pid_min = next->pid;
if (next->pid != 0) {
bpf_probe_read_kernel_str(&sum_schedule.proc_name_min, sizeof(sum_schedule.proc_name_min), next->comm);
}
}
bpf_map_update_elem(&sys_schedule, &key, &sum_schedule, BPF_ANY);
} else {
sum_schedule->sum_count++;
sum_schedule->sum_delay += delay;
if (delay > sum_schedule->max_delay){
if (delay > sum_schedule->max_delay) {
sum_schedule->max_delay = delay;
if(next->pid!=0){
sum_schedule->pid_max = next->pid;
}
}else if (sum_schedule->min_delay == 0 || delay < sum_schedule->min_delay)
bpf_probe_read_kernel_str(&sum_schedule->proc_name_max, sizeof(sum_schedule->proc_name_max), next->comm);
} else if (sum_schedule->min_delay == 0 || delay < sum_schedule->min_delay) {
sum_schedule->min_delay = delay;
if(next->pid!=0){
sum_schedule->pid_min = next->pid;
if (next->pid != 0) {
bpf_probe_read_kernel_str(&sum_schedule->proc_name_min, sizeof(sum_schedule->proc_name_min), next->comm);
}
}
}
return 0;
}
@@ -175,4 +175,4 @@ int sched_process_exit(void *ctx) {
bpf_map_delete_elem(&has_scheduled, &id);
}
return 0;
}
}
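Not part of this diff: the sys_schedule array map filled in above is read back by the cpu_watcher userspace side. A minimal sketch of such a reader is given below; the function name, the map-fd plumbing and the header path are assumptions, and only the struct sum_schedule fields are taken from the code above.

#include <stdio.h>
#include <bpf/bpf.h>
#include "cpu_watcher.h"    /* assumed header providing struct sum_schedule */

/* Print the system-wide scheduling-delay summary from the single-entry
 * sys_schedule array map; map_fd is assumed to come from a loaded skeleton. */
static void print_sys_schedule(int map_fd)
{
    int key = 0;
    struct sum_schedule sum;

    if (bpf_map_lookup_elem(map_fd, &key, &sum) || sum.sum_count == 0)
        return;
    printf("avg %llu ns | max %llu ns (pid %d) | min %llu ns (pid %d)\n",
           (unsigned long long)(sum.sum_delay / sum.sum_count),
           (unsigned long long)sum.max_delay, sum.pid_max,
           (unsigned long long)sum.min_delay, sum.pid_min);
}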