@@ -174,6 +174,9 @@ extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+extern void sched_update_nr_prod(int cpu, long delta, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+
 extern void calc_global_load(unsigned long ticks);

 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
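The two additions above expose the run-queue occupancy statistics to other subsystems. A minimal sketch of a consumer (say, a core-control or hotplug driver) is below; the percent-of-one-CPU units, the 75% threshold, and the caller itself are assumptions for illustration, not part of this patch:

```c
/* Hypothetical consumer of sched_get_nr_running_avg(). The units
 * (percent of one CPU since the previous call) and the threshold are
 * illustrative assumptions only. */
static void check_core_demand(void)
{
	int avg, iowait_avg, big_avg;

	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	if (big_avg > 75)
		; /* e.g. consider onlining another big-cluster CPU */
}
```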
@@ -315,6 +318,25 @@ extern char ___assert_task_state[1 - 2*!!(
 /* Task command name length */
 #define TASK_COMM_LEN 16

+extern const char *sched_window_reset_reasons[];
+
+enum task_event {
+	PUT_PREV_TASK	= 0,
+	PICK_NEXT_TASK	= 1,
+	TASK_WAKE	= 2,
+	TASK_MIGRATE	= 3,
+	TASK_UPDATE	= 4,
+	IRQ_UPDATE	= 5,
+};
+
+/* Note: this needs to be kept in sync with the migrate_type_names array */
+enum migrate_types {
+	GROUP_TO_RQ,
+	RQ_TO_GROUP,
+	RQ_TO_RQ,
+	GROUP_TO_GROUP,
+};
+
 #include <linux/spinlock.h>

 /*
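The note above requires a name table elsewhere in the scheduler to mirror enum migrate_types. A sketch of what that table could look like is below; the exact strings are assumptions (the real array lives in the scheduler's .c files, not in this header). Designated initializers tie each slot to its enumerator, so reordering the enum cannot silently misalign the names:

```c
/* Sketch of the matching table referenced by the comment above;
 * the string values are assumptions. */
const char *migrate_type_names[] = {
	[GROUP_TO_RQ]		= "GROUP_TO_RQ",
	[RQ_TO_GROUP]		= "RQ_TO_GROUP",
	[RQ_TO_RQ]		= "RQ_TO_RQ",
	[GROUP_TO_GROUP]	= "GROUP_TO_GROUP",
};
```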
@@ -1335,6 +1357,47 @@ struct sched_statistics {
 };
 #endif

+#define RAVG_HIST_SIZE_MAX 5
+#define NUM_BUSY_BUCKETS 10
+
+/* ravg represents the frequency-scaled cpu demand of a task */
+struct ravg {
+	/*
+	 * 'mark_start' marks the beginning of an event (task waking up, task
+	 * starting to execute, task being preempted) within a window
+	 *
+	 * 'sum' represents how runnable a task has been within the current
+	 * window. It incorporates both running time and wait time and is
+	 * frequency scaled.
+	 *
+	 * 'sum_history' keeps track of the history of 'sum' seen over the
+	 * previous sysctl_sched_ravg_hist_size windows (RAVG_HIST_SIZE_MAX at
+	 * most). Windows where the task was entirely sleeping are ignored.
+	 *
+	 * 'demand' represents the maximum sum seen over the previous
+	 * sysctl_sched_ravg_hist_size windows and can drive the frequency
+	 * demand of a task.
+	 *
+	 * 'curr_window' represents the task's contribution to cpu busy time
+	 * statistics (rq->curr_runnable_sum) in the current window
+	 *
+	 * 'prev_window' represents the task's contribution to cpu busy time
+	 * statistics (rq->prev_runnable_sum) in the previous window
+	 *
+	 * 'pred_demand' represents the task's currently predicted cpu busy
+	 * time
+	 *
+	 * 'busy_buckets' groups historical busy time into buckets used for
+	 * prediction
+	 */
+	u64 mark_start;
+	u32 sum, demand;
+	u32 sum_history[RAVG_HIST_SIZE_MAX];
+	u32 curr_window, prev_window;
+	u16 active_windows;
+	u32 pred_demand;
+	u8 busy_buckets[NUM_BUSY_BUCKETS];
+};
+
 struct sched_entity {
 	struct load_weight load;	/* for load-balancing */
 	struct rb_node run_node;
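To make the field comments concrete, here is a sketch of a window rollover under the stated policy (demand = maximum over the retained history). This helper is illustrative only; the patch's actual update path lives in the scheduler's HMP code, and callers are assumed to pass a hist_size no larger than RAVG_HIST_SIZE_MAX:

```c
/* Illustrative only: roll a completed window's 'sum' into sum_history
 * and recompute 'demand' as the maximum over the retained windows, as
 * the field comments above describe. */
static void ravg_rollover_sketch(struct ravg *ra, unsigned int hist_size)
{
	u32 max = 0;
	int i;

	/* Shift history; slot 0 holds the most recent completed window. */
	for (i = hist_size - 1; i > 0; i--)
		ra->sum_history[i] = ra->sum_history[i - 1];
	ra->sum_history[0] = ra->sum;

	for (i = 0; i < hist_size; i++)
		if (ra->sum_history[i] > max)
			max = ra->sum_history[i];

	ra->demand = max;
	ra->sum = 0;
}
```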
@@ -1505,6 +1568,20 @@ struct task_struct {
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_HMP
+	struct ravg ravg;
+	/*
+	 * 'init_load_pct' represents the initial task load assigned to
+	 * children of this task
+	 */
+	u32 init_load_pct;
+	u64 last_wake_ts;
+	u64 last_switch_out_ts;
+	u64 last_cpu_selected_ts;
+	struct related_thread_group *grp;
+	struct list_head grp_list;
+	u64 cpu_cycles;
+#endif
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group *sched_task_group;
 #endif
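The 'init_load_pct' comment implies fork-time seeding of a child's demand. A sketch of what that could look like follows; the helper name, the scaling of the percentage against the window size, and the zeroing of the child's own percentage are all assumptions, not code from this patch:

```c
/* Hypothetical fork-path sketch: seed a child's initial demand from the
 * parent's init_load_pct. div64_u64() is from <linux/math64.h>. */
static void init_new_task_load_sketch(struct task_struct *p, u32 window_size)
{
	u32 pct = current->init_load_pct;

	p->init_load_pct = 0;	/* assumed: the percentage itself is not inherited */
	p->ravg.demand = div64_u64((u64)pct * window_size, 100);
}
```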
@@ -2254,6 +2331,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 /*
  * Per process flags
  */
+#define PF_WAKE_UP_IDLE	0x00000002	/* try to wake up on an idle CPU */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2436,6 +2514,93 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
 }
 #endif

+struct sched_load {
+	unsigned long prev_load;
+	unsigned long new_task_load;
+	unsigned long predicted_load;
+};
+
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+
+struct cpu_cycle_counter_cb {
+	u64 (*get_cpu_cycle_counter)(int cpu);
+};
+
+#ifdef CONFIG_SCHED_HMP
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+				const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_boost(int enable);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
+extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
+extern int sched_update_freq_max_load(const cpumask_t *cpumask);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+					  u32 fmin, u32 fmax);
+extern void sched_set_cpu_cstate(int cpu, int cstate,
+				 int wakeup_energy, int wakeup_latency);
+extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+				     int wakeup_energy, int wakeup_latency);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern u64 sched_ktime_clock(void);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+
+#else /* CONFIG_SCHED_HMP */
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
+
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	return 0;
+}
+
+static inline int sched_set_window(u64 window_start, unsigned int window_size)
+{
+	return -EINVAL;
+}
+static inline unsigned long sched_get_busy(int cpu)
+{
+	return 0;
+}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+				       const struct cpumask *query_cpus) { }
+
+static inline void sched_set_io_is_busy(int val) { }
+
+static inline int sched_set_boost(int enable)
+{
+	return -EINVAL;
+}
+
+static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+	return 0;
+}
+
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+						 u32 fmin, u32 fmax) { }
+
+static inline void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+}
+
+static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
+			int dstate, int wakeup_energy, int wakeup_latency)
+{
+}
+#endif /* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_NO_HZ_COMMON
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
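A sketch of how a platform driver might hook into this interface, first registering a cycle-counter callback and then querying per-CPU busy statistics. my_read_cycles() is a stand-in (a real implementation would read a platform performance counter), and the init function and its names are hypothetical:

```c
#include <linux/cpumask.h>

/* Hypothetical platform hookup for the cycle-counter callback plus a
 * governor-style busy query; illustrates the calling convention only. */
static u64 my_read_cycles(int cpu)
{
	return 0;	/* would read a per-CPU cycle counter here */
}

static struct cpu_cycle_counter_cb my_cycle_cb = {
	.get_cpu_cycle_counter = my_read_cycles,
};

static int __init my_hmp_client_init(void)
{
	struct sched_load busy;
	int ret;

	ret = register_cpu_cycle_counter_cb(&my_cycle_cb);
	if (ret)
		return ret;

	/* One sched_load entry per CPU in the queried mask; here just CPU 0. */
	sched_get_cpus_busy(&busy, cpumask_of(0));
	return 0;
}
```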
@@ -2444,6 +2609,14 @@ static inline void calc_load_enter_idle(void) { }
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */

+static inline void set_wake_up_idle(bool enabled)
+{
+	if (enabled)
+		current->flags |= PF_WAKE_UP_IDLE;
+	else
+		current->flags &= ~PF_WAKE_UP_IDLE;
+}
+
 /*
  * Do not use outside of architecture code which knows its limitations.
  *
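For context, an illustrative caller of the helper above: a kernel thread opting itself in to PF_WAKE_UP_IDLE. The thread and its workload are hypothetical, and the exact placement semantics of the flag live in the scheduler's wakeup path; for setting the flag on another task, the patch also exports sched_set_wake_up_idle():

```c
#include <linux/kthread.h>

/* Hypothetical latency-sensitive kthread toggling PF_WAKE_UP_IDLE on
 * itself via the new inline helper. */
static int my_latency_thread(void *unused)
{
	set_wake_up_idle(true);		/* sets PF_WAKE_UP_IDLE on current */

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		/* handle work after each wakeup */
	}

	set_wake_up_idle(false);
	return 0;
}
```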
@@ -2461,6 +2634,7 @@ extern u64 sched_clock_cpu(int cpu);


 extern void sched_clock_init(void);
+extern int sched_clock_initialized(void);

 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -2537,7 +2711,7 @@ extern unsigned long long
 task_sched_runtime(struct task_struct *task);

 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
 extern void sched_exec(void);
 #else
 #define sched_exec() {}
@@ -2670,6 +2844,7 @@ extern void xtime_update(unsigned long ticks);

 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_no_notif(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
@@ -2678,6 +2853,12 @@ extern void wake_up_new_task(struct task_struct *tsk);
 #endif
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
+#ifdef CONFIG_SCHED_HMP
+extern void sched_exit(struct task_struct *p);
+#else
+static inline void sched_exit(struct task_struct *p) { }
+#endif
+

 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
@@ -3506,6 +3687,8 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)

 #endif /* CONFIG_SMP */

+extern struct atomic_notifier_head load_alert_notifier_head;
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
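The exported notifier head lets other subsystems subscribe to load alerts through the standard atomic notifier chain API. A sketch of a subscriber; the callback body is hypothetical, and the meaning of 'action' and 'data' is defined by wherever the scheduler invokes the chain:

```c
#include <linux/notifier.h>

/* Hypothetical subscriber to the new load-alert chain; demonstrates the
 * registration pattern only. */
static int my_load_alert(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	/* e.g. kick a frequency re-evaluation here */
	return NOTIFY_OK;
}

static struct notifier_block my_load_nb = {
	.notifier_call = my_load_alert,
};

static int __init my_load_alert_init(void)
{
	return atomic_notifier_chain_register(&load_alert_notifier_head,
					      &my_load_nb);
}
```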