/usr/include/xenomai/asm-x86/bits/pod.h is in libxenomai-dev 2.6.3-2.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*
* Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
* Copyright (C) 2004 The HYADES Project (http://www.hyades-itea.org).
* Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
*
* x86_64 port:
* Copyright (C) 2001-2007 Philippe Gerum <rpm@xenomai.org>.
* Copyright (C) 2004-2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
*
* Xenomai is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Xenomai is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Xenomai; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#ifndef _XENO_ASM_X86_BITS_POD_H
#define _XENO_ASM_X86_BITS_POD_H

#include <asm-generic/xenomai/bits/pod.h>
#include <asm/xenomai/switch.h>

void xnpod_welcome_thread(struct xnthread *, int);

void xnpod_delete_thread(struct xnthread *);

/*
 * With generic clockevents, the host tick emulation callbacks must be
 * handed over to the timer request; otherwise the HAL only needs the
 * tick handler and target CPU.
 */
#ifdef CONFIG_GENERIC_CLOCKEVENTS
#define xnarch_start_timer(tick_handler, cpu)	\
	rthal_timer_request(tick_handler, xnarch_switch_htick_mode, xnarch_next_htick_shot, cpu)
#else
#define xnarch_start_timer(tick_handler, cpu)	\
	rthal_timer_request(tick_handler, cpu)
#endif

#define xnarch_stop_timer(cpu)	rthal_timer_release(cpu)
/*
 * Prepare the root (Linux) context before switching away from it to a
 * real-time thread: record which task was preempted and capture the
 * current FPU ownership state, so that the later xnarch_save_fpu() /
 * xnarch_restore_fpu() calls operate on the right save area.
 */
static inline void xnarch_leave_root(xnarchtcb_t *rootcb)
{
	rthal_root_preempt_notify();

	/* Remember the preempted Linux task pointer. */
	rootcb->user_task = rootcb->active_task = current;
#ifdef CONFIG_X86_64
	/* Snapshot pointers to the preempted task's saved SP/IP slots. */
	rootcb->spp = &current->thread.x86reg_sp;
	rootcb->ipp = &current->thread.rip;
#endif
	rootcb->ts_usedfpu = !!wrap_test_fpu_used(current);
	/* Bit 3 (mask 8) of CR0 is the TS (task-switched) flag. */
	rootcb->cr0_ts = (read_cr0() & 8) != 0;
	/* So that xnarch_save_fpu() will operate on the right FPU area. */
	if (rootcb->cr0_ts || rootcb->ts_usedfpu)
		rootcb->fpup = x86_fpustate_ptr(&rootcb->user_task->thread);
	else
		/*
		 * The kernel is currently using fpu in kernel-space,
		 * do not clobber the user-space fpu backup area.
		 */
		rootcb->fpup = &rootcb->i387;
}
/* Nothing to undo on x86 when Linux regains control of the CPU. */
static inline void xnarch_enter_root(xnarchtcb_t *rootcb) { }
/*
 * Switch the CPU from the context described by @out_tcb to the one
 * described by @in_tcb: Linux-side bookkeeping (mm switch, lazy TLB,
 * foreign-stack flag, fs/gs handling on x86_32) wrapped around the
 * low-level xnarch_switch_threads() register switch.
 */
static inline void xnarch_switch_to(xnarchtcb_t *out_tcb, xnarchtcb_t *in_tcb)
{
	struct task_struct *prev = out_tcb->active_task;
	struct task_struct *next = in_tcb->user_task;
#ifndef CONFIG_X86_64
	unsigned long fs, gs;
#endif

	if (likely(next != NULL)) {
		/* Incoming context is backed by a Linux task. */
		if (wrap_test_fpu_used(prev))
			/*
			 * __switch_to will try and use __unlazy_fpu,
			 * so we need to clear the ts bit.
			 */
			clts();
		in_tcb->active_task = next;
		rthal_clear_foreign_stack(&rthal_domain);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
		/* Reset the kernel's FPU preload heuristic counter. */
		next->fpu_counter = 0;
#endif
	} else {
		/* Kernel-only RT thread: keep the previous task record
		   and flag that we now run on a non-Linux stack. */
		in_tcb->active_task = prev;
		rthal_set_foreign_stack(&rthal_domain);
	}

	if (next && next != prev) {
		/* Switch memory context for the incoming task. */
		struct mm_struct *oldmm = prev->active_mm;
		wrap_switch_mm(oldmm, next->active_mm, next);
		if (next->mm == NULL)
			/* Incoming task has no mm of its own: lazy TLB. */
			wrap_enter_lazy_tlb(oldmm, next);
	}

	/* Stack-protector canary value forwarded to the switch helper. */
#ifdef CONFIG_CC_STACKPROTECTOR
#define xnarch_switch_canary in_tcb->canary
#else
#define xnarch_switch_canary 0
#endif

#ifndef CONFIG_X86_64
	if (out_tcb->user_task) {
		/* Make sure that __switch_to() will always reload the correct
		   %fs and %gs registers, even if we happen to migrate the task
		   across domains in the meantime. */
		asm volatile ("mov %%fs,%0":"=m" (fs));
		asm volatile ("mov %%gs,%0":"=m" (gs));
	}

	xnarch_switch_threads(out_tcb, in_tcb, prev, next);

	if (xnarch_shadow_p(out_tcb, prev)) {
		/* Restore the selectors captured above, now that the
		   low-level switch is done. */
		loadsegment(fs, fs);
		loadsegment(gs, gs);
		barrier();
		/*
		 * Eagerly reinstate the I/O bitmap of any incoming
		 * shadow thread which has previously requested I/O
		 * permissions. We don't want the unexpected latencies
		 * induced by lazy update from the GPF handler to bite
		 * shadow threads that explicitly told the kernel that
		 * they would need to perform raw I/O ops.
		 */
		wrap_switch_iobitmap(prev, rthal_processor_id());
	}
#else /* CONFIG_X86_64 */
	xnarch_switch_threads(prev, next,
			      out_tcb->spp, in_tcb->spp,
			      out_tcb->ipp, in_tcb->ipp,
			      xnarch_switch_canary);
#endif /* CONFIG_X86_64 */

	/* Raise CR0.TS again after the switch. */
	stts();
}
/*
 * First code run by a newly created kernel-based RT thread: greet the
 * nucleus, invoke the thread entry routine, then self-delete when the
 * entry routine returns.
 */
asmlinkage static void xnarch_thread_trampoline(xnarchtcb_t *tcb)
{
	/* xnpod_welcome_thread() will do clts() if needed. */
	stts();
	xnpod_welcome_thread(tcb->self, tcb->imask);
	tcb->entry(tcb->cookie);
	/* Entry routine returned: tear this thread down. */
	xnpod_delete_thread(tcb->self);
	xnarch_thread_head();
}
/*
 * Build the initial stack and register context of a new kernel-based
 * RT thread, so that the first switch to it ends up in
 * xnarch_thread_trampoline() with @tcb as argument.
 */
static inline void xnarch_init_thread(xnarchtcb_t *tcb,
				      void (*entry)(void *),
				      void *cookie,
				      int imask,
				      struct xnthread *thread, char *name)
{
#ifdef CONFIG_X86_64
	struct xnarch_x8664_initstack *childregs;
	unsigned long *sp, flags;

	/* Prepare the bootstrap stack. */
	rthal_local_irq_flags_hw(flags);
	/* Carve the initstack frame out of the top of the stack; the
	   extra 8 bytes presumably keep ABI stack alignment — verify. */
	sp = (unsigned long *)((unsigned long)tcb->stackbase + tcb->stacksize -
			       sizeof(struct xnarch_x8664_initstack) - 8);
	childregs = (struct xnarch_x8664_initstack *)sp;
	childregs->rbp = 0;
	/* The thread starts with hardware interrupts disabled. */
	childregs->eflags = flags & ~X86_EFLAGS_IF;
	childregs->arg = (unsigned long)tcb;
	childregs->entry = (unsigned long)xnarch_thread_trampoline;
#ifdef CONFIG_CC_STACKPROTECTOR
	/* Seed a per-thread stack-protector canary from the TSC. */
	tcb->canary = (unsigned long)xnarch_get_cpu_tsc() ^ childregs->arg;
	childregs->canary = tcb->canary;
#endif
	tcb->sp = (unsigned long)childregs;
	tcb->ip = (unsigned long)__thread_head; /* Will branch there at startup. */
#else /* CONFIG_X86_32 */
	unsigned long **psp = (unsigned long **)&tcb->sp;

	tcb->ip = (unsigned long)xnarch_thread_trampoline;
	tcb->sp = (unsigned long)tcb->stackbase;
	/* Round the initial stack pointer down to a 16-byte boundary. */
	*psp =
	    (unsigned long *)(((unsigned long)*psp + tcb->stacksize - 0x10) &
			      ~0xf);
	/* Push the trampoline argument, then a zero return address. */
	*--(*psp) = (unsigned long)tcb;
	*--(*psp) = 0;
#endif /* CONFIG_X86_32 */
	tcb->entry = entry;
	tcb->cookie = cookie;
	tcb->self = thread;
	tcb->imask = imask;
	tcb->name = name;
}
#ifdef CONFIG_XENO_HW_FPU

/* Does @task own a valid, initialized FPU context? */
#define xnarch_fpu_init_p(task)   tsk_used_math(task)
/* Mark @task as having an initialized FPU context. */
#define xnarch_set_fpu_init(task) set_stopped_child_used_math(task)

static inline void xnarch_init_fpu(xnarchtcb_t * tcb)
{
	struct task_struct *task = tcb->user_task;
	/* Initialize the FPU for a task. This must be run on behalf of the
	   task. */
	__asm__ __volatile__("clts; fninit");

	if (cpu_has_xmm) {
		unsigned long __mxcsr;
		/* 0x1f80 is the MXCSR power-on default (all SIMD
		   exceptions masked); & 0xffbf clears bit 6 (DAZ). */
		__mxcsr = 0x1f80UL & 0xffbfUL;
		__asm__ __volatile__("ldmxcsr %0"::"m"(__mxcsr));
	}

	if (task) {
		/* Real-time shadow FPU initialization: tell Linux
		   that this thread initialized its FPU hardware. The
		   fpu usage bit is necessary for xnarch_save_fpu to
		   save the FPU state at next switch. */
		xnarch_set_fpu_init(task);
		wrap_set_fpu_used(task);
	}
}
/* REX.W opcode prefix / mnemonic suffix for the 64-bit xsave forms. */
#ifdef CONFIG_X86_64
#define XSAVE_PREFIX	"0x48,"
#define XSAVE_SUFFIX	"q"
#else
#define XSAVE_PREFIX
#define XSAVE_SUFFIX
#endif

/*
 * Dump the current FPU/SSE (and extended, when xsave is available)
 * state into *fpup, picking the widest save instruction the CPU and
 * assembler support.
 */
static inline void __save_i387(x86_fpustate *fpup)
{
#ifdef cpu_has_xsave
	if (cpu_has_xsave) {
#if defined(CONFIG_AS_AVX) || !defined CONFIG_X86_64
		/* edx:eax = -1:-1 requests all state components. */
		asm volatile("xsave" XSAVE_SUFFIX " %0"
			     : "=m" (fpup->xsave) : "a" (-1), "d" (-1)
			     : "memory");
#else /* !CONFIG_AS_AVX */
		/* Assembler lacks the xsave mnemonic: emit raw opcode bytes. */
		asm volatile(".byte " XSAVE_PREFIX "0x0f,0xae,0x27"
			     : : "D" (&fpup->xsave), "m" (fpup->xsave),
			       "a" (-1), "d" (-1)
			     : "memory");
#endif /* !CONFIG_AS_AVX */
		return;
	}
#endif /* cpu_has_xsave */
#ifndef CONFIG_X86_64
	if (cpu_has_fxsr)
		__asm__ __volatile__("fxsave %0; fnclex":"=m"(*fpup));
	else
		__asm__ __volatile__("fnsave %0; fwait":"=m"(*fpup));
#else /* CONFIG_X86_64 */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("fxsaveq %0" : "=m" (fpup->fxsave));
#else /* !CONFIG_AS_FXSAVEQ */
	/* Old assembler: spell fxsaveq as rex64-prefixed fxsave. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpup->fxsave)
		     : [fx] "R" (&fpup->fxsave));
#endif /* !CONFIG_AS_FXSAVEQ */
#endif /* CONFIG_X86_64 */
}
/*
 * Save the FPU state of the outgoing thread described by @tcb into its
 * save area, unless there is nothing to save (FPU unused, or already
 * saved by __switch_to).
 */
static inline void xnarch_save_fpu(xnarchtcb_t *tcb)
{
	struct task_struct *task = tcb->user_task;

	if (!tcb->is_root) {
		if (task) {
			/* fpu not used or already saved by __switch_to. */
			if (wrap_test_fpu_used(task) == 0)
				return;
			/* Tell Linux that we already saved the state
			 * of the FPU hardware of this task. */
			wrap_clear_fpu_used(task);
		}
	} else {
		/* Root context: skip when TS was set at preemption time,
		   or when the in-use state was already flushed. */
		if (tcb->cr0_ts ||
		    (tcb->ts_usedfpu && wrap_test_fpu_used(task) == 0))
			return;

		wrap_clear_fpu_used(task);
	}

	/* Clear CR0.TS so the save instructions do not fault. */
	clts();

	__save_i387(tcb->fpup);
}
/*
 * Reload the FPU/SSE (and extended, when xsave is available) state
 * from *fpup — the exact mirror of __save_i387().
 */
static inline void __restore_i387(x86_fpustate *fpup)
{
#ifdef cpu_has_xsave
	if (cpu_has_xsave) {
#if defined(CONFIG_AS_AVX) || !defined(CONFIG_X86_64)
		/* edx:eax = -1:-1 requests all state components. */
		asm volatile("xrstor" XSAVE_SUFFIX " %0"
			     : : "m" (fpup->xsave), "a" (-1), "d" (-1)
			     : "memory");
#else /* !CONFIG_AS_AVX */
		/* Assembler lacks the xrstor mnemonic: emit raw opcode bytes. */
		asm volatile(".byte " XSAVE_PREFIX "0x0f,0xae,0x2f"
			     : : "D" (&fpup->xsave), "m" (fpup->xsave),
			       "a" (-1), "d" (-1)
			     : "memory");
#endif /* !CONFIG_AS_AVX */
		return;
	}
#endif /* cpu_has_xsave */
#ifndef CONFIG_X86_64
	if (cpu_has_fxsr)
		__asm__ __volatile__("fxrstor %0": /* no output */
				     :"m"(*fpup));
	else
		__asm__ __volatile__("frstor %0": /* no output */
				     :"m"(*fpup));
#else /* CONFIG_X86_64 */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("fxrstorq %0" : : "m" (fpup->fxsave));
#else /* !CONFIG_AS_FXSAVEQ */
	/* Old assembler: spell fxrstorq as rex64-prefixed fxrstor. */
	asm volatile("rex64/fxrstor (%0)"
		     : : "R" (&fpup->fxsave), "m" (fpup->fxsave));
#endif /* !CONFIG_AS_FXSAVEQ */
#endif /* CONFIG_X86_64 */
}
/*
 * Reload the FPU hardware from the state saved in @tcb, or leave the
 * FPU disabled (TS set) when there is nothing valid to restore.
 */
static inline void xnarch_restore_fpu(xnarchtcb_t * tcb)
{
	struct task_struct *task = tcb->user_task;

	if (!tcb->is_root) {
		if (task) {
			if (!xnarch_fpu_init_p(task)) {
				stts();
				return;	/* Uninit fpu area -- do not restore. */
			}

			/* Tell Linux that this task has altered the state of
			 * the FPU hardware. */
			wrap_set_fpu_used(task);
		}
	} else {
		/* Restore state of FPU only if TS bit in cr0 was clear. */
		if (tcb->cr0_ts) {
			wrap_clear_fpu_used(task);
			stts();
			return;
		}

		if (tcb->ts_usedfpu
		    && wrap_test_fpu_used(task) == 0) {
			/* __switch_to saved the fpu context, no need to restore
			   it since we are switching to root, where fpu can be
			   in lazy state. */
			stts();
			return;
		}
	}

	/* Restore the FPU hardware with valid fp registers from a
	   user-space or kernel thread. */
	clts();

	__restore_i387(tcb->fpup);
}
/*
 * Grant the incoming thread access to the FPU (clear CR0.TS) without
 * reloading any register state, when such access is warranted.
 */
static inline void xnarch_enable_fpu(xnarchtcb_t *tcb)
{
	struct task_struct *task = tcb->user_task;

	if (!tcb->is_root) {
		if (task) {
			if (!xnarch_fpu_init_p(task))
				return;
			/*
			 * We used to test here if __switch_to had not
			 * saved current fpu state, but this can not
			 * happen, since xnarch_enable_fpu may only be
			 * called when switching back to a user-space
			 * task after one or several switches to
			 * non-fpu kernel-space real-time tasks, so
			 * xnarch_switch_to never uses __switch_to.
			 */
		}
	} else if (tcb->cr0_ts)
		return;

	/* The comment in the non-root case applies here too. */
	clts();
}
#else /* !CONFIG_XENO_HW_FPU */

/* FPU support disabled: all FPU management hooks become no-ops. */

static inline void xnarch_init_fpu(xnarchtcb_t *tcb)
{
}

static inline void xnarch_save_fpu(xnarchtcb_t *tcb)
{
}

static inline void xnarch_restore_fpu(xnarchtcb_t *tcb)
{
}

static inline void xnarch_enable_fpu(xnarchtcb_t *tcb)
{
}

#endif /* CONFIG_XENO_HW_FPU */
/*
 * If we are currently running in the Linux (root) domain, fire the
 * escalation virtual interrupt so the caller's request is replayed
 * from the primary domain.  Returns 1 when the escalation was
 * triggered, 0 when we were already outside the root domain.
 */
static inline int xnarch_escalate(void)
{
	extern int xnarch_escalation_virq;

	if (rthal_current_domain != rthal_root_domain)
		return 0;

	rthal_trigger_irq(xnarch_escalation_virq);
	return 1;
}
#endif /* !_XENO_ASM_X86_BITS_POD_H */