StarPU Internal Handbook
sched_ctx.h
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2021 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#ifndef __SCHED_CONTEXT_H__
#define __SCHED_CONTEXT_H__

#include <starpu.h>
#include <starpu_sched_ctx.h>
#include <starpu_sched_ctx_hypervisor.h>
#include <starpu_scheduler.h>
#include <common/config.h>
#include <common/barrier_counter.h>
#include <common/utils.h>
#include <profiling/profiling.h>
#include <semaphore.h>
#include <core/task.h>
#include "sched_ctx_list.h"

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#endif

#define NO_RESIZE -1
#define REQ_RESIZE 0
#define DO_RESIZE 1

#define STARPU_GLOBAL_SCHED_CTX 0
#define STARPU_NMAXSMS 13

struct _starpu_sched_ctx
{
	/* id of the context */
	unsigned id;

	unsigned do_schedule;

	/* name of the context */
	const char *name;

	/* scheduling policy of this context */
	struct starpu_sched_policy *sched_policy;

	/* data private to the scheduling policy */
	void *policy_data;

	/* data attached to the context by the application */
	void *user_data;

	/* collection of workers belonging to this context */
	struct starpu_worker_collection *workers;

	/* set for the initial (global) context */
	unsigned is_initial_sched;

	/* counter used to wait for the tasks submitted to this context */
	struct _starpu_barrier_counter tasks_barrier;

	/* counter used to wait for the ready tasks of this context */
	struct _starpu_barrier_counter ready_tasks_barrier;

	/* total flops of the ready tasks of this context */
	double ready_flops;

	long iterations[2];
	int iteration_level;

	/* ready tasks that couldn't be pushed because the ctx has no workers */
	struct starpu_task_list empty_ctx_tasks;

	/* ready tasks that couldn't be pushed because the window of tasks was already full */
	struct starpu_task_list waiting_tasks;

	/* minimum number of CPUs allowed for this context */
	int min_ncpus;

	/* maximum number of CPUs allowed for this context */
	int max_ncpus;

	/* minimum number of GPUs allowed for this context */
	int min_ngpus;

	/* maximum number of GPUs allowed for this context */
	int max_ngpus;

	/* context that inherits the resources of this one when it is deleted */
	unsigned inheritor;

	/* whether the application declared it finished submitting tasks to this context */
	unsigned finished_submit;

	/* priority bounds of the tasks of this context */
	int min_priority;
	int max_priority;
	int min_priority_is_set;
	int max_priority_is_set;

#ifdef STARPU_HAVE_HWLOC
	/* hwloc set gathering the workers of this context */
	hwloc_bitmap_t hwloc_workers_set;
#endif

#ifdef STARPU_USE_SC_HYPERVISOR
	/* performance counters used by the hypervisor */
	struct starpu_sched_ctx_performance_counters *perf_counters;
#endif //STARPU_USE_SC_HYPERVISOR

	/* callback (and its argument) called when the context is deleted */
	void (*close_callback)(unsigned sched_ctx_id, void* args);
	void *close_args;

	/* level of this context in a hierarchy of contexts */
	unsigned hierarchy_level;

	int main_master;

	unsigned nesting_sched_ctx;

	struct starpu_perfmodel_arch perf_arch;

	unsigned parallel_view;

	unsigned awake_workers;

	/* function called to further initialize the scheduling policy */
	void (*init_sched)(unsigned);

	int sub_ctxs[STARPU_NMAXWORKERS];
	int nsub_ctxs;

	int nsms;
	int sms_start_idx;
	int sms_end_idx;

	int stream_worker;

	/* lock protecting the context, and owner of the current write lock */
	starpu_pthread_rwlock_t rwlock;
	starpu_pthread_t lock_write_owner;
};

/* Record of a context change (e.g. adding or removing workers) that could not
 * be applied immediately and is deferred to be applied later. */
LIST_TYPE(_starpu_ctx_change,
	int sched_ctx_id;
	int op;
	int nworkers_to_notify;
	int *workerids_to_notify;
	int nworkers_to_change;
	int *workerids_to_change;
);

/* Apply the context changes that were deferred. */
void _starpu_worker_apply_deferred_ctx_changes(void);

/* Initialize the sched_ctx structures for all contexts. */
void _starpu_init_all_sched_ctxs(struct _starpu_machine_config *config);

/* Allocate all structures belonging to a context. */
struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *policy, int *workerid, int nworkerids, unsigned is_init_sched, const char *sched_name,
			int min_prio_set, int min_prio,
			int max_prio_set, int max_prio, unsigned awake_workers, void (*sched_policy_init)(unsigned), void *user_data,
			int nsub_ctxs, int *sub_ctxs, int nsms);

/* Delete all the contexts. */
void _starpu_delete_all_sched_ctxs();

/* Wait until all the tasks already submitted to the context have been executed. */
int _starpu_wait_for_all_tasks_of_sched_ctx(unsigned sched_ctx_id);

/* Wait until at most n submitted tasks of the context remain unfinished. */
int _starpu_wait_for_n_submitted_tasks_of_sched_ctx(unsigned sched_ctx_id, unsigned n);

/* Keep track of the number of tasks currently submitted to the context. */
void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_get_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_check_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);

/* Keep track of the number (and flops) of ready tasks of the context. */
void _starpu_decrement_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops);
unsigned _starpu_increment_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops, struct starpu_task *task);
int _starpu_wait_for_no_ready_of_sched_ctx(unsigned sched_ctx_id);

/* Return the index of workerid in the workers table of the context. */
int _starpu_get_index_in_ctx_of_workerid(unsigned sched_ctx, unsigned workerid);

/* Return the scheduling mutex of the given worker. */
starpu_pthread_mutex_t *_starpu_get_sched_mutex(struct _starpu_sched_ctx *sched_ctx, int worker);

/* Return, in pus, the workers of the given architecture belonging to the
 * context; returns their number. */
int _starpu_get_workers_of_sched_ctx(unsigned sched_ctx_id, int *pus, enum starpu_worker_archtype arch);

/* Let the worker know that it no longer belongs to the context and that it
 * should stop popping tasks from it. */
void _starpu_worker_gets_out_of_ctx(unsigned sched_ctx_id, struct _starpu_worker *worker);

/* Check whether the worker belongs to another context than sched_ctx_id. */
unsigned _starpu_worker_belongs_to_a_sched_ctx(int workerid, unsigned sched_ctx_id);

/* Return whether the worker is the last one awake in its context. */
unsigned _starpu_sched_ctx_last_worker_awake(struct _starpu_worker *worker);

/* Return the context currently associated with the calling thread. */
unsigned _starpu_sched_ctx_get_current_context();

int _starpu_workers_able_to_execute_task(struct starpu_task *task, struct _starpu_sched_ctx *sched_ctx);

void _starpu_fetch_tasks_from_empty_ctx_list(struct _starpu_sched_ctx *sched_ctx);

unsigned _starpu_sched_ctx_allow_hypervisor(unsigned sched_ctx_id);

struct starpu_perfmodel_arch * _starpu_sched_ctx_get_perf_archtype(unsigned sched_ctx);
#ifdef STARPU_USE_SC_HYPERVISOR
/* Notify the hypervisor that the worker has executed the task. */
void _starpu_sched_ctx_post_exec_task_cb(int workerid, struct starpu_task *task, size_t data_size, uint32_t footprint);

#endif //STARPU_USE_SC_HYPERVISOR

void starpu_sched_ctx_add_combined_workers(int *combined_workers_to_add, unsigned n_combined_workers_to_add, unsigned sched_ctx_id);

struct _starpu_sched_ctx *__starpu_sched_ctx_get_sched_ctx_for_worker_and_job(struct _starpu_worker *worker, struct _starpu_job *j);

#define _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(w,j) \
	(_starpu_get_nsched_ctxs() <= 1 ? _starpu_get_sched_ctx_struct(0) : __starpu_sched_ctx_get_sched_ctx_for_worker_and_job((w),(j)))

static inline struct _starpu_sched_ctx *_starpu_get_sched_ctx_struct(unsigned id);

/* Return whether the calling thread currently holds the write lock on the context. */
static inline int _starpu_sched_ctx_check_write_locked(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	return starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self());
}
#define STARPU_SCHED_CTX_CHECK_LOCK(sched_ctx_id) STARPU_ASSERT(_starpu_sched_ctx_check_write_locked((sched_ctx_id)))

/* Take the context lock in write mode and record the calling thread as owner;
 * the caller must not already hold the write lock. */
static inline void _starpu_sched_ctx_lock_write(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_WRLOCK(&sched_ctx->rwlock);
	sched_ctx->lock_write_owner = starpu_pthread_self();
}

/* Release the write lock and clear the recorded owner. */
static inline void _starpu_sched_ctx_unlock_write(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	memset(&sched_ctx->lock_write_owner, 0, sizeof(sched_ctx->lock_write_owner));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_UNLOCK(&sched_ctx->rwlock);
}

/* Take the context lock in read mode; the calling thread must not hold the write lock. */
static inline void _starpu_sched_ctx_lock_read(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_RDLOCK(&sched_ctx->rwlock);
}

/* Release the read lock. */
static inline void _starpu_sched_ctx_unlock_read(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_UNLOCK(&sched_ctx->rwlock);
}

/* If the worker is the master of a child context, move the task to that child
 * context and revert the task counters of the original context; return 1 if
 * the task was moved, 0 otherwise. */
static inline unsigned _starpu_sched_ctx_worker_is_master_for_child_ctx(unsigned sched_ctx_id, unsigned workerid, struct starpu_task *task)
{
	unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
	if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
	{
		starpu_sched_ctx_move_task_to_ctx_locked(task, child_sched_ctx, 1);
		starpu_sched_ctx_revert_task_counters_ctx_locked(sched_ctx_id, task->flops);
		return 1;
	}
	return 0;
}

#endif // __SCHED_CONTEXT_H__
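
The inline helpers above define the locking discipline for a scheduling context: a writer takes rwlock in write mode and records itself in lock_write_owner, so that STARPU_SCHED_CTX_CHECK_LOCK can assert that the write lock is held before the context is modified, while read-only accesses take the lock in read mode. The following sketch illustrates the intended calling pattern; it is not part of sched_ctx.h, and the functions _example_resize_ctx and _example_inspect_ctx are hypothetical names used only for illustration.

/* Illustrative sketch only (not part of StarPU): expected usage of the context
 * locking helpers, assuming this file is compiled within the StarPU source
 * tree so that the internal header is available. */
#include <core/sched_ctx.h>

static void _example_resize_ctx(unsigned sched_ctx_id)
{
	/* Take the context lock in write mode before modifying the context. */
	_starpu_sched_ctx_lock_write(sched_ctx_id);

	/* Code that e.g. adds or removes workers would run here; helpers that
	 * require the write lock can assert it with STARPU_SCHED_CTX_CHECK_LOCK. */
	STARPU_SCHED_CTX_CHECK_LOCK(sched_ctx_id);

	_starpu_sched_ctx_unlock_write(sched_ctx_id);
}

static void _example_inspect_ctx(unsigned sched_ctx_id)
{
	/* Read-only accesses take the lock in read mode instead. */
	_starpu_sched_ctx_lock_read(sched_ctx_id);
	/* ... inspect the context ... */
	_starpu_sched_ctx_unlock_read(sched_ctx_id);
}

Note that the lock is not recursive: both _starpu_sched_ctx_lock_write() and _starpu_sched_ctx_lock_read() assert that the calling thread does not already own the write lock, so a thread must release the write lock before taking the lock again in either mode.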