/* Xenomai 3.0.2 — cobalt-core.h: tracepoint definitions for the Cobalt core. */
/*
 * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
 * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
20 #undef TRACE_SYSTEM
21 #define TRACE_SYSTEM cobalt_core
22 #undef TRACE_INCLUDE_FILE
23 #define TRACE_INCLUDE_FILE cobalt-core
24 
25 #if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
26 #define _TRACE_COBALT_CORE_H
27 
28 #include <linux/tracepoint.h>
29 #include <linux/sched.h>
30 
31 DECLARE_EVENT_CLASS(thread_event,
32  TP_PROTO(struct xnthread *thread),
33  TP_ARGS(thread),
34 
35  TP_STRUCT__entry(
36  __field(struct xnthread *, thread)
37  __string(name, thread->name)
38  __field(pid_t, pid)
39  __field(unsigned long, state)
40  __field(unsigned long, info)
41  ),
42 
43  TP_fast_assign(
44  __entry->thread = thread;
45  __assign_str(name, thread->name);
46  __entry->state = thread->state;
47  __entry->info = thread->info;
48  __entry->pid = xnthread_host_pid(thread);
49  ),
50 
51  TP_printk("thread=%p(%s) pid=%d state=0x%lx info=0x%lx",
52  __entry->thread, __get_str(name), __entry->pid,
53  __entry->state, __entry->info)
54 );
55 
56 DECLARE_EVENT_CLASS(synch_wait_event,
57  TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
58  TP_ARGS(synch, thread),
59 
60  TP_STRUCT__entry(
61  __field(struct xnthread *, thread)
62  __string(name, thread->name)
63  __field(struct xnsynch *, synch)
64  ),
65 
66  TP_fast_assign(
67  __entry->thread = thread;
68  __assign_str(name, thread->name);
69  __entry->synch = synch;
70  ),
71 
72  TP_printk("synch=%p thread=%p(%s)",
73  __entry->synch, __entry->thread, __get_str(name))
74 );
75 
76 DECLARE_EVENT_CLASS(synch_post_event,
77  TP_PROTO(struct xnsynch *synch),
78  TP_ARGS(synch),
79 
80  TP_STRUCT__entry(
81  __field(struct xnsynch *, synch)
82  ),
83 
84  TP_fast_assign(
85  __entry->synch = synch;
86  ),
87 
88  TP_printk("synch=%p", __entry->synch)
89 );
90 
91 DECLARE_EVENT_CLASS(irq_event,
92  TP_PROTO(unsigned int irq),
93  TP_ARGS(irq),
94 
95  TP_STRUCT__entry(
96  __field(unsigned int, irq)
97  ),
98 
99  TP_fast_assign(
100  __entry->irq = irq;
101  ),
102 
103  TP_printk("irq=%u", __entry->irq)
104 );
105 
106 DECLARE_EVENT_CLASS(clock_event,
107  TP_PROTO(unsigned int irq),
108  TP_ARGS(irq),
109 
110  TP_STRUCT__entry(
111  __field(unsigned int, irq)
112  ),
113 
114  TP_fast_assign(
115  __entry->irq = irq;
116  ),
117 
118  TP_printk("clock_irq=%u", __entry->irq)
119 );
120 
121 DECLARE_EVENT_CLASS(thread_migrate,
122  TP_PROTO(struct xnthread *thread, unsigned int cpu),
123  TP_ARGS(thread, cpu),
124 
125  TP_STRUCT__entry(
126  __field(struct xnthread *, thread)
127  __string(name, thread->name)
128  __field(unsigned int, cpu)
129  ),
130 
131  TP_fast_assign(
132  __entry->thread = thread;
133  __assign_str(name, thread->name);
134  __entry->cpu = cpu;
135  ),
136 
137  TP_printk("thread=%p(%s) cpu=%u",
138  __entry->thread, __get_str(name), __entry->cpu)
139 );
140 
141 DECLARE_EVENT_CLASS(timer_event,
142  TP_PROTO(struct xntimer *timer),
143  TP_ARGS(timer),
144 
145  TP_STRUCT__entry(
146  __field(struct xntimer *, timer)
147  ),
148 
149  TP_fast_assign(
150  __entry->timer = timer;
151  ),
152 
153  TP_printk("timer=%p", __entry->timer)
154 );
155 
156 TRACE_EVENT(cobalt_schedule,
157  TP_PROTO(struct xnsched *sched),
158  TP_ARGS(sched),
159 
160  TP_STRUCT__entry(
161  __field(unsigned long, status)
162  ),
163 
164  TP_fast_assign(
165  __entry->status = sched->status;
166  ),
167 
168  TP_printk("status=0x%lx", __entry->status)
169 );
170 
171 TRACE_EVENT(cobalt_schedule_remote,
172  TP_PROTO(struct xnsched *sched),
173  TP_ARGS(sched),
174 
175  TP_STRUCT__entry(
176  __field(unsigned long, status)
177  ),
178 
179  TP_fast_assign(
180  __entry->status = sched->status;
181  ),
182 
183  TP_printk("status=0x%lx", __entry->status)
184 );
185 
186 TRACE_EVENT(cobalt_switch_context,
187  TP_PROTO(struct xnthread *prev, struct xnthread *next),
188  TP_ARGS(prev, next),
189 
190  TP_STRUCT__entry(
191  __field(struct xnthread *, prev)
192  __field(struct xnthread *, next)
193  __string(prev_name, prev->name)
194  __string(next_name, next->name)
195  ),
196 
197  TP_fast_assign(
198  __entry->prev = prev;
199  __entry->next = next;
200  __assign_str(prev_name, prev->name);
201  __assign_str(next_name, next->name);
202  ),
203 
204  TP_printk("prev=%p(%s) next=%p(%s)",
205  __entry->prev, __get_str(prev_name),
206  __entry->next, __get_str(next_name))
207 );
208 
209 TRACE_EVENT(cobalt_thread_init,
210  TP_PROTO(struct xnthread *thread,
211  const struct xnthread_init_attr *attr,
212  struct xnsched_class *sched_class),
213  TP_ARGS(thread, attr, sched_class),
214 
215  TP_STRUCT__entry(
216  __field(struct xnthread *, thread)
217  __string(thread_name, thread->name)
218  __string(class_name, sched_class->name)
219  __field(unsigned long, flags)
220  __field(int, cprio)
221  ),
222 
223  TP_fast_assign(
224  __entry->thread = thread;
225  __assign_str(thread_name, thread->name);
226  __entry->flags = attr->flags;
227  __assign_str(class_name, sched_class->name);
228  __entry->cprio = thread->cprio;
229  ),
230 
231  TP_printk("thread=%p(%s) flags=0x%lx class=%s prio=%d",
232  __entry->thread, __get_str(thread_name), __entry->flags,
233  __get_str(class_name), __entry->cprio)
234 );
235 
236 TRACE_EVENT(cobalt_thread_suspend,
237  TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
238  xntmode_t timeout_mode, struct xnsynch *wchan),
239  TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
240 
241  TP_STRUCT__entry(
242  __field(struct xnthread *, thread)
243  __field(unsigned long, mask)
244  __field(xnticks_t, timeout)
245  __field(xntmode_t, timeout_mode)
246  __field(struct xnsynch *, wchan)
247  ),
248 
249  TP_fast_assign(
250  __entry->thread = thread;
251  __entry->mask = mask;
252  __entry->timeout = timeout;
253  __entry->timeout_mode = timeout_mode;
254  __entry->wchan = wchan;
255  ),
256 
257  TP_printk("thread=%p mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
258  __entry->thread, __entry->mask,
259  __entry->timeout, __entry->timeout_mode, __entry->wchan)
260 );
261 
262 TRACE_EVENT(cobalt_thread_resume,
263  TP_PROTO(struct xnthread *thread, unsigned long mask),
264  TP_ARGS(thread, mask),
265 
266  TP_STRUCT__entry(
267  __field(struct xnthread *, thread)
268  __field(unsigned long, mask)
269  ),
270 
271  TP_fast_assign(
272  __entry->thread = thread;
273  __entry->mask = mask;
274  ),
275 
276  TP_printk("thread=%p mask=0x%lx",
277  __entry->thread, __entry->mask)
278 );
279 
280 TRACE_EVENT(cobalt_thread_fault,
281  TP_PROTO(struct xnthread *thread, struct ipipe_trap_data *td),
282  TP_ARGS(thread, td),
283 
284  TP_STRUCT__entry(
285  __field(struct xnthread *, thread)
286  __string(name, thread->name)
287  __field(void *, ip)
288  __field(unsigned int, type)
289  ),
290 
291  TP_fast_assign(
292  __entry->thread = thread;
293  __assign_str(name, thread->name);
294  __entry->ip = (void *)xnarch_fault_pc(td);
295  __entry->type = xnarch_fault_trap(td);
296  ),
297 
298  TP_printk("thread=%p(%s) ip=%p type=%x",
299  __entry->thread, __get_str(name), __entry->ip,
300  __entry->type)
301 );
302 
303 DEFINE_EVENT(thread_event, cobalt_thread_start,
304  TP_PROTO(struct xnthread *thread),
305  TP_ARGS(thread)
306 );
307 
308 DEFINE_EVENT(thread_event, cobalt_thread_cancel,
309  TP_PROTO(struct xnthread *thread),
310  TP_ARGS(thread)
311 );
312 
313 DEFINE_EVENT(thread_event, cobalt_thread_join,
314  TP_PROTO(struct xnthread *thread),
315  TP_ARGS(thread)
316 );
317 
318 DEFINE_EVENT(thread_event, cobalt_thread_unblock,
319  TP_PROTO(struct xnthread *thread),
320  TP_ARGS(thread)
321 );
322 
323 DEFINE_EVENT(thread_event, cobalt_thread_wait_period,
324  TP_PROTO(struct xnthread *thread),
325  TP_ARGS(thread)
326 );
327 
328 DEFINE_EVENT(thread_event, cobalt_thread_missed_period,
329  TP_PROTO(struct xnthread *thread),
330  TP_ARGS(thread)
331 );
332 
333 DEFINE_EVENT(thread_event, cobalt_thread_set_mode,
334  TP_PROTO(struct xnthread *thread),
335  TP_ARGS(thread)
336 );
337 
338 DEFINE_EVENT(thread_migrate, cobalt_thread_migrate,
339  TP_PROTO(struct xnthread *thread, unsigned int cpu),
340  TP_ARGS(thread, cpu)
341 );
342 
343 DEFINE_EVENT(thread_migrate, cobalt_thread_migrate_passive,
344  TP_PROTO(struct xnthread *thread, unsigned int cpu),
345  TP_ARGS(thread, cpu)
346 );
347 
348 DEFINE_EVENT(thread_event, cobalt_shadow_gohard,
349  TP_PROTO(struct xnthread *thread),
350  TP_ARGS(thread)
351 );
352 
353 DEFINE_EVENT(thread_event, cobalt_watchdog_signal,
354  TP_PROTO(struct xnthread *thread),
355  TP_ARGS(thread)
356 );
357 
358 DEFINE_EVENT(thread_event, cobalt_shadow_hardened,
359  TP_PROTO(struct xnthread *thread),
360  TP_ARGS(thread)
361 );
362 
/* Pretty-print the SIGDEBUG_* relax reason as a symbolic name. */
#define cobalt_print_relax_reason(reason)			\
	__print_symbolic(reason,				\
			 { SIGDEBUG_UNDEFINED,		"undefined" },	\
			 { SIGDEBUG_MIGRATE_SIGNAL,	"signal" },	\
			 { SIGDEBUG_MIGRATE_SYSCALL,	"syscall" },	\
			 { SIGDEBUG_MIGRATE_FAULT,	"fault" })

370 TRACE_EVENT(cobalt_shadow_gorelax,
371  TP_PROTO(struct xnthread *thread, int reason),
372  TP_ARGS(thread, reason),
373 
374  TP_STRUCT__entry(
375  __field(struct xnthread *, thread)
376  __field(int, reason)
377  ),
378 
379  TP_fast_assign(
380  __entry->thread = thread;
381  __entry->reason = reason;
382  ),
383 
384  TP_printk("thread=%p reason=%s",
385  __entry->thread, cobalt_print_relax_reason(__entry->reason))
386 );
387 
388 DEFINE_EVENT(thread_event, cobalt_shadow_relaxed,
389  TP_PROTO(struct xnthread *thread),
390  TP_ARGS(thread)
391 );
392 
393 DEFINE_EVENT(thread_event, cobalt_shadow_entry,
394  TP_PROTO(struct xnthread *thread),
395  TP_ARGS(thread)
396 );
397 
398 TRACE_EVENT(cobalt_shadow_map,
399  TP_PROTO(struct xnthread *thread),
400  TP_ARGS(thread),
401 
402  TP_STRUCT__entry(
403  __field(struct xnthread *, thread)
404  __string(name, thread->name)
405  __field(int, prio)
406  ),
407 
408  TP_fast_assign(
409  __entry->thread = thread;
410  __assign_str(name, thread->name);
411  __entry->prio = xnthread_base_priority(thread);
412  ),
413 
414  TP_printk("thread=%p(%s) prio=%d",
415  __entry->thread, __get_str(name), __entry->prio)
416 );
417 
418 DEFINE_EVENT(thread_event, cobalt_shadow_unmap,
419  TP_PROTO(struct xnthread *thread),
420  TP_ARGS(thread)
421 );
422 
423 TRACE_EVENT(cobalt_lostage_request,
424  TP_PROTO(const char *type, struct task_struct *task),
425  TP_ARGS(type, task),
426 
427  TP_STRUCT__entry(
428  __field(pid_t, pid)
429  __array(char, comm, TASK_COMM_LEN)
430  __field(const char *, type)
431  ),
432 
433  TP_fast_assign(
434  __entry->type = type;
435  __entry->pid = task_pid_nr(task);
436  memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
437  ),
438 
439  TP_printk("request=%s pid=%d comm=%s",
440  __entry->type, __entry->pid, __entry->comm)
441 );
442 
443 TRACE_EVENT(cobalt_lostage_wakeup,
444  TP_PROTO(struct task_struct *task),
445  TP_ARGS(task),
446 
447  TP_STRUCT__entry(
448  __field(pid_t, pid)
449  __array(char, comm, TASK_COMM_LEN)
450  ),
451 
452  TP_fast_assign(
453  __entry->pid = task_pid_nr(task);
454  memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
455  ),
456 
457  TP_printk("pid=%d comm=%s",
458  __entry->pid, __entry->comm)
459 );
460 
461 TRACE_EVENT(cobalt_lostage_signal,
462  TP_PROTO(struct task_struct *task, int sig),
463  TP_ARGS(task, sig),
464 
465  TP_STRUCT__entry(
466  __field(pid_t, pid)
467  __array(char, comm, TASK_COMM_LEN)
468  __field(int, sig)
469  ),
470 
471  TP_fast_assign(
472  __entry->pid = task_pid_nr(task);
473  __entry->sig = sig;
474  memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
475  ),
476 
477  TP_printk("pid=%d comm=%s sig=%d",
478  __entry->pid, __entry->comm, __entry->sig)
479 );
480 
481 DEFINE_EVENT(irq_event, cobalt_irq_entry,
482  TP_PROTO(unsigned int irq),
483  TP_ARGS(irq)
484 );
485 
486 DEFINE_EVENT(irq_event, cobalt_irq_exit,
487  TP_PROTO(unsigned int irq),
488  TP_ARGS(irq)
489 );
490 
491 DEFINE_EVENT(irq_event, cobalt_irq_attach,
492  TP_PROTO(unsigned int irq),
493  TP_ARGS(irq)
494 );
495 
496 DEFINE_EVENT(irq_event, cobalt_irq_detach,
497  TP_PROTO(unsigned int irq),
498  TP_ARGS(irq)
499 );
500 
501 DEFINE_EVENT(irq_event, cobalt_irq_enable,
502  TP_PROTO(unsigned int irq),
503  TP_ARGS(irq)
504 );
505 
506 DEFINE_EVENT(irq_event, cobalt_irq_disable,
507  TP_PROTO(unsigned int irq),
508  TP_ARGS(irq)
509 );
510 
511 DEFINE_EVENT(clock_event, cobalt_clock_entry,
512  TP_PROTO(unsigned int irq),
513  TP_ARGS(irq)
514 );
515 
516 DEFINE_EVENT(clock_event, cobalt_clock_exit,
517  TP_PROTO(unsigned int irq),
518  TP_ARGS(irq)
519 );
520 
521 DEFINE_EVENT(timer_event, cobalt_timer_stop,
522  TP_PROTO(struct xntimer *timer),
523  TP_ARGS(timer)
524 );
525 
526 DEFINE_EVENT(timer_event, cobalt_timer_expire,
527  TP_PROTO(struct xntimer *timer),
528  TP_ARGS(timer)
529 );
530 
/* Pretty-print the XN_* timer mode as a symbolic name. */
#define cobalt_print_timer_mode(mode)		\
	__print_symbolic(mode,			\
			 { XN_RELATIVE, "rel" },	\
			 { XN_ABSOLUTE, "abs" },	\
			 { XN_REALTIME, "rt" })

537 TRACE_EVENT(cobalt_timer_start,
538  TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
539  xntmode_t mode),
540  TP_ARGS(timer, value, interval, mode),
541 
542  TP_STRUCT__entry(
543  __field(struct xntimer *, timer)
544 #ifdef CONFIG_XENO_OPT_STATS
545  __string(name, timer->name)
546 #endif
547  __field(xnticks_t, value)
548  __field(xnticks_t, interval)
549  __field(xntmode_t, mode)
550  ),
551 
552  TP_fast_assign(
553  __entry->timer = timer;
554 #ifdef CONFIG_XENO_OPT_STATS
555  __assign_str(name, timer->name);
556 #endif
557  __entry->value = value;
558  __entry->interval = interval;
559  __entry->mode = mode;
560  ),
561 
562  TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
563  __entry->timer,
564 #ifdef CONFIG_XENO_OPT_STATS
565  __get_str(name),
566 #else
567  "(anon)",
568 #endif
569  __entry->value, __entry->interval,
570  cobalt_print_timer_mode(__entry->mode))
571 );
572 
#ifdef CONFIG_SMP

/* Fired when a timer is migrated to another CPU (SMP builds only). */
TRACE_EVENT(cobalt_timer_migrate,
	TP_PROTO(struct xntimer *timer, unsigned int cpu),
	TP_ARGS(timer, cpu),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->timer = timer;
		__entry->cpu = cpu;
	),

	TP_printk("timer=%p cpu=%u",
		  __entry->timer, __entry->cpu)
);

#endif /* CONFIG_SMP */

595 DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
596  TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
597  TP_ARGS(synch, thread)
598 );
599 
600 DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
601  TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
602  TP_ARGS(synch, thread)
603 );
604 
605 DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
606  TP_PROTO(struct xnsynch *synch, struct xnthread *thread),
607  TP_ARGS(synch, thread)
608 );
609 
610 DEFINE_EVENT(synch_post_event, cobalt_synch_release,
611  TP_PROTO(struct xnsynch *synch),
612  TP_ARGS(synch)
613 );
614 
615 DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
616  TP_PROTO(struct xnsynch *synch),
617  TP_ARGS(synch)
618 );
619 
620 DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
621  TP_PROTO(struct xnsynch *synch),
622  TP_ARGS(synch)
623 );
624 
625 DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
626  TP_PROTO(struct xnsynch *synch),
627  TP_ARGS(synch)
628 );
629 
630 DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
631  TP_PROTO(struct xnsynch *synch),
632  TP_ARGS(synch)
633 );
634 
635 #endif /* _TRACE_COBALT_CORE_H */
636 
637 /* This part must be outside protection */
638 #include <trace/define_trace.h>
/*
 * Reference note: struct xnsched is the per-scheduler information
 * structure (defined at sched.h:57); its "unsigned long status"
 * member (sched.h:59) is the field captured by the cobalt_schedule
 * and cobalt_schedule_remote events above.
 */