#define TRACE_SYSTEM cobalt_core

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE cobalt-core

#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_COBALT_CORE_H

#include <linux/tracepoint.h>
#include <linux/sched.h>
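/*
 * The DECLARE_EVENT_CLASS() templates below define a record layout and
 * print format once; the DEFINE_EVENT() lines further down instantiate
 * them as individual cobalt_* tracepoints.  Each TRACE_EVENT() or
 * DEFINE_EVENT() expands to a trace_<name>() helper the Cobalt core
 * calls at the instrumentation point, e.g.
 * trace_cobalt_switch_context(prev, next) from the scheduler.
 */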
DECLARE_EVENT_CLASS(thread_event,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(pid_t, pid)
		__field(unsigned long, state)
		__field(unsigned long, info)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->state = thread->state;
		__entry->info = thread->info;
		__entry->pid = xnthread_host_pid(thread);
	),

	TP_printk("thread=%p(%s) pid=%d state=0x%lx info=0x%lx",
		  __entry->thread, __get_str(name), __entry->pid,
		  __entry->state, __entry->info)
);
DECLARE_EVENT_CLASS(synch_wait_event,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->synch = synch;
	),

	TP_printk("synch=%p thread=%p(%s)",
		  __entry->synch, __entry->thread, __get_str(name))
);
DECLARE_EVENT_CLASS(synch_post_event,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch),

	TP_STRUCT__entry(
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->synch = synch;
	),

	TP_printk("synch=%p", __entry->synch)
);
DECLARE_EVENT_CLASS(irq_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq=%u", __entry->irq)
);
DECLARE_EVENT_CLASS(clock_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("clock_irq=%u", __entry->irq)
);
DECLARE_EVENT_CLASS(thread_migrate,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->cpu = cpu;
	),

	TP_printk("thread=%p(%s) cpu=%u",
		  __entry->thread, __get_str(name), __entry->cpu)
);
DECLARE_EVENT_CLASS(timer_event,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
	),

	TP_fast_assign(
		__entry->timer = timer;
	),

	TP_printk("timer=%p", __entry->timer)
);
TRACE_EVENT(cobalt_schedule,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
TRACE_EVENT(cobalt_schedule_remote,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
TRACE_EVENT(cobalt_switch_context,
	TP_PROTO(struct xnthread *prev, struct xnthread *next),
	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__field(struct xnthread *, prev)
		__field(struct xnthread *, next)
		__string(prev_name, prev->name)
		__string(next_name, next->name)
	),

	TP_fast_assign(
		__entry->prev = prev;
		__entry->next = next;
		__assign_str(prev_name, prev->name);
		__assign_str(next_name, next->name);
	),

	TP_printk("prev=%p(%s) next=%p(%s)",
		  __entry->prev, __get_str(prev_name),
		  __entry->next, __get_str(next_name))
);
TRACE_EVENT(cobalt_thread_init,
	TP_PROTO(struct xnthread *thread,
		 const struct xnthread_init_attr *attr,
		 struct xnsched_class *sched_class),
	TP_ARGS(thread, attr, sched_class),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(thread_name, thread->name)
		__string(class_name, sched_class->name)
		__field(unsigned long, flags)
		__field(int, cprio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(thread_name, thread->name);
		__entry->flags = attr->flags;
		__assign_str(class_name, sched_class->name);
		__entry->cprio = thread->cprio;
	),

	TP_printk("thread=%p(%s) flags=0x%lx class=%s prio=%d",
		  __entry->thread, __get_str(thread_name), __entry->flags,
		  __get_str(class_name), __entry->cprio)
);
TRACE_EVENT(cobalt_thread_suspend,
	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
		 xntmode_t timeout_mode, struct xnsynch *wchan),
	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(unsigned long, mask)
		__field(xnticks_t, timeout)
		__field(xntmode_t, timeout_mode)
		__field(struct xnsynch *, wchan)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->mask = mask;
		__entry->timeout = timeout;
		__entry->timeout_mode = timeout_mode;
		__entry->wchan = wchan;
	),

	TP_printk("thread=%p mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
		  __entry->thread, __entry->mask,
		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
);
TRACE_EVENT(cobalt_thread_resume,
	TP_PROTO(struct xnthread *thread, unsigned long mask),
	TP_ARGS(thread, mask),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(unsigned long, mask)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->mask = mask;
	),

	TP_printk("thread=%p mask=0x%lx",
		  __entry->thread, __entry->mask)
);
TRACE_EVENT(cobalt_thread_fault,
	TP_PROTO(struct xnthread *thread, struct ipipe_trap_data *td),
	TP_ARGS(thread, td),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(void *, ip)
		__field(unsigned int, type)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->ip = (void *)xnarch_fault_pc(td);
		__entry->type = xnarch_fault_trap(td);
	),

	TP_printk("thread=%p(%s) ip=%p type=%x",
		  __entry->thread, __get_str(name), __entry->ip,
		  __entry->type)
);
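/*
 * Thread life-cycle and shadow (relax/harden) events, all sharing the
 * thread_event or thread_migrate record layouts declared above.
 */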
DEFINE_EVENT(thread_event, cobalt_thread_start,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_cancel,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_join,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_unblock,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_wait_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_missed_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_set_mode,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu)
);

DEFINE_EVENT(thread_migrate, cobalt_thread_migrate_passive,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu)
);

DEFINE_EVENT(thread_event, cobalt_shadow_gohard,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_watchdog_signal,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_hardened,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
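/*
 * SIGDEBUG relax reasons are decoded into readable strings in the
 * trace output via __print_symbolic().
 */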
#define cobalt_print_relax_reason(reason)		\
	__print_symbolic(reason,			\
		{ SIGDEBUG_UNDEFINED, "undefined" },	\
		{ SIGDEBUG_MIGRATE_SIGNAL, "signal" },	\
		{ SIGDEBUG_MIGRATE_SYSCALL, "syscall" },\
		{ SIGDEBUG_MIGRATE_FAULT, "fault" })
TRACE_EVENT(cobalt_shadow_gorelax,
	TP_PROTO(struct xnthread *thread, int reason),
	TP_ARGS(thread, reason),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(int, reason)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->reason = reason;
	),

	TP_printk("thread=%p reason=%s",
		  __entry->thread, cobalt_print_relax_reason(__entry->reason))
);
DEFINE_EVENT(thread_event, cobalt_shadow_relaxed,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_shadow_entry,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
TRACE_EVENT(cobalt_shadow_map,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(name, thread->name)
		__field(int, prio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(name, thread->name);
		__entry->prio = xnthread_base_priority(thread);
	),

	TP_printk("thread=%p(%s) prio=%d",
		  __entry->thread, __get_str(name), __entry->prio)
);
DEFINE_EVENT(thread_event, cobalt_shadow_unmap,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
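/*
 * Low-stage events: requests relayed to regular Linux context
 * (task wakeup, signal delivery) on behalf of a Cobalt thread.
 */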
TRACE_EVENT(cobalt_lostage_request,
	TP_PROTO(const char *type, struct task_struct *task),
	TP_ARGS(type, task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(const char *, type)
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("request=%s pid=%d comm=%s",
		  __entry->type, __entry->pid, __entry->comm)
);
TRACE_EVENT(cobalt_lostage_wakeup,
	TP_PROTO(struct task_struct *task),
	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s",
		  __entry->pid, __entry->comm)
);
TRACE_EVENT(cobalt_lostage_signal,
	TP_PROTO(struct task_struct *task, int sig),
	TP_ARGS(task, sig),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(int, sig)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		__entry->sig = sig;
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s sig=%d",
		  __entry->pid, __entry->comm, __entry->sig)
);
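/*
 * Interrupt, clock tick and timer events, instantiated from the
 * irq_event, clock_event and timer_event classes.
 */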
DEFINE_EVENT(irq_event, cobalt_irq_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_attach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_detach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_enable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_disable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(timer_event, cobalt_timer_stop,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);

DEFINE_EVENT(timer_event, cobalt_timer_expire,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);
#define cobalt_print_timer_mode(mode)		\
	__print_symbolic(mode,			\
		{ XN_RELATIVE, "rel" },		\
		{ XN_ABSOLUTE, "abs" },		\
		{ XN_REALTIME, "rt" })
TRACE_EVENT(cobalt_timer_start,
	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
		 xntmode_t mode),
	TP_ARGS(timer, value, interval, mode),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
#ifdef CONFIG_XENO_OPT_STATS
		__string(name, timer->name)
#endif
		__field(xnticks_t, value)
		__field(xnticks_t, interval)
		__field(xntmode_t, mode)
	),

	TP_fast_assign(
		__entry->timer = timer;
#ifdef CONFIG_XENO_OPT_STATS
		__assign_str(name, timer->name);
#endif
		__entry->value = value;
		__entry->interval = interval;
		__entry->mode = mode;
	),

	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
		  __entry->timer,
#ifdef CONFIG_XENO_OPT_STATS
		  __get_str(name),
#else
		  "(anon)",
#endif
		  __entry->value, __entry->interval,
		  cobalt_print_timer_mode(__entry->mode))
);
TRACE_EVENT(cobalt_timer_migrate,
	TP_PROTO(struct xntimer *timer, unsigned int cpu),
	TP_ARGS(timer, cpu),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->timer = timer;
		__entry->cpu = cpu;
	),

	TP_printk("timer=%p cpu=%u",
		  __entry->timer, __entry->cpu)
);
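/*
 * Synchronization object events: wait-side events reuse the
 * synch_wait_event class, wakeup-side events the synch_post_event class.
 */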
DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
	TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
	TP_ARGS(synch, thread)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_release,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

#endif /* _TRACE_COBALT_CORE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>