Jack2  1.9.9
cycles.h
/*
    Copyright (C) 2001 Paul Davis
    Code derived from various headers from the Linux kernel

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    $Id: cycles.h,v 1.4.2.1 2006/06/20 14:44:00 letz Exp $
*/

#ifndef __jack_cycles_h__
#define __jack_cycles_h__

/*
 * Standard way to access the cycle counter on i586+ CPUs.
 * Currently only used on SMP.
 *
 * If you really have a SMP machine with i486 chips or older,
 * compile for that, and this will just always return zero.
 * That's ok, it just means that the nicer scheduling heuristics
 * won't work for you.
 *
 * We only use the low 32 bits, and we'd simply better make sure
 * that we reschedule before that wraps. Scheduling at least every
 * four billion cycles just basically sounds like a good idea,
 * regardless of how fast the machine is.
 */

#ifdef __x86_64__

typedef unsigned long cycles_t;
extern cycles_t cacheflush_time;

static inline unsigned long get_cycles(void)
{
    unsigned int hi, lo;
    /* RDTSC returns the 64-bit time-stamp counter in EDX:EAX. */
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return (((unsigned long)hi) << 32) | ((unsigned long)lo);
}

#endif /* __x86_64__ */

#ifdef __sparc_v9__

typedef unsigned long long cycles_t;

/* rd is V9 only; %tick counts CPU cycles. */
static inline unsigned long long get_cycles(void)
{
    unsigned long long res;
    __asm__ __volatile__("rd %%tick, %0" : "=r"(res));
    return res;
}

#endif /* __sparc_v9__ */

#ifdef __PPC__

/* PowerPC */

#define CPU_FTR_601 0x00000100

typedef unsigned long cycles_t;

/* For the "cycle" counter we use the timebase lower half. */

extern cycles_t cacheflush_time;

static inline cycles_t get_cycles(void)
{
    cycles_t ret = 0;

#ifdef __powerpc64__
# define LONGT ".llong"
#else
# define LONGT ".long"
#endif

    /*
     * Read the timebase with mftb. The __ftr_fixup entry is feature-fixup
     * metadata inherited from the Linux kernel sources: it records the
     * address range 98..99 together with the CPU_FTR_601 mask so the
     * instruction can be patched on 601-class CPUs, which lack a timebase.
     */
    __asm__ __volatile__(
        "98: mftb %0\n"
        "99:\n"
        ".section __ftr_fixup,\"a\"\n"
        "   .long %1\n"
        "   .long 0\n"
        "   " LONGT " 98b\n"
        "   " LONGT " 99b\n"
        ".previous"
        : "=r" (ret) : "i" (CPU_FTR_601));

#undef LONGT
    return ret;
}

#endif /* __PPC__ */

#ifdef __i386__

typedef unsigned long long cycles_t;

extern cycles_t cacheflush_time;

/* The "=A" constraint returns the 64-bit counter in the EDX:EAX pair. */
#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))

static inline cycles_t get_cycles (void)
{
    unsigned long long ret;

    rdtscll(ret);
    return ret;
}

#endif /* __i386__ */

/* everything else but x86, amd64, sparcv9 or ppc */
#if !defined (__PPC__) && !defined (__x86_64__) && !defined (__i386__) && !defined (__sparc_v9__)

#warning No suitable get_cycles() implementation. Returning 0 instead

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
    return 0;
}

#endif /* everything else but x86, amd64, sparcv9 or ppc */


#endif /* __jack_cycles_h__ */
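
A minimal usage sketch, not part of cycles.h itself: the program below times a stretch of code by differencing two get_cycles() readings. work_to_measure() is a hypothetical placeholder for whatever is being timed. Because cycles_t is unsigned, the subtraction still yields the correct delta if the counter's low bits wrap between the two reads (provided the elapsed count fits in cycles_t), which is the wrap-around concern raised in the header comment.

#include <stdio.h>
#include "cycles.h"

/* Hypothetical placeholder for the code being timed. */
static void work_to_measure(void)
{
    volatile unsigned int i;
    for (i = 0; i < 1000000; i++)
        ;
}

int main(void)
{
    cycles_t start = get_cycles();
    work_to_measure();
    cycles_t end = get_cycles();

    /* Unsigned arithmetic keeps the delta valid across a counter wrap. */
    printf("elapsed: %llu cycles\n", (unsigned long long)(end - start));
    return 0;
}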