|
/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */
|
8 |
|
9 #ifndef _LINUX_PROPORTIONS_H |
|
10 #define _LINUX_PROPORTIONS_H |
|
11 |
|
12 #include <linux/percpu_counter.h> |
|
13 #include <linux/spinlock.h> |
|
14 #include <linux/mutex.h> |
|
15 |
|
16 struct prop_global { |
|
17 /* |
|
18 * The period over which we differentiate |
|
19 * |
|
20 * period = 2^shift |
|
21 */ |
|
22 int shift; |
|
23 /* |
|
24 * The total event counter aka 'time'. |
|
25 * |
|
26 * Treated as an unsigned long; the lower 'shift - 1' bits are the |
|
27 * counter bits, the remaining upper bits the period counter. |
|
28 */ |
|
29 struct percpu_counter events; |
|
30 }; |
|
31 |
|
/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
|
37 struct prop_descriptor { |
|
38 int index; |
|
39 struct prop_global pg[2]; |
|
40 struct mutex mutex; /* serialize the prop_global switch */ |
|
41 }; |
|
42 |
|
43 int prop_descriptor_init(struct prop_descriptor *pd, int shift); |
|
44 void prop_change_shift(struct prop_descriptor *pd, int new_shift); |
|
45 |
|
/*
 * ----- PERCPU ------
 */
|
49 |
|
50 struct prop_local_percpu { |
|
51 /* |
|
52 * the local events counter |
|
53 */ |
|
54 struct percpu_counter events; |
|
55 |
|
56 /* |
|
57 * snapshot of the last seen global state |
|
58 */ |
|
59 int shift; |
|
60 unsigned long period; |
|
61 spinlock_t lock; /* protect the snapshot state */ |
|
62 }; |
|
63 |
|
64 int prop_local_init_percpu(struct prop_local_percpu *pl); |
|
65 void prop_local_destroy_percpu(struct prop_local_percpu *pl); |
|
66 void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); |
|
67 void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, |
|
68 long *numerator, long *denominator); |
|
69 |
|
/*
 * Account one local event, with local interrupts disabled around the
 * update so the snapshot state cannot be touched from irq context.
 */
static inline void prop_inc_percpu(struct prop_descriptor *pd,
				   struct prop_local_percpu *pl)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(irq_flags);
}
|
79 |
|
/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
|
84 #define PROP_MAX_SHIFT (3*BITS_PER_LONG/4) |
|
85 |
|
86 #define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1) |
|
87 #define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT) |
|
88 |
|
89 void __prop_inc_percpu_max(struct prop_descriptor *pd, |
|
90 struct prop_local_percpu *pl, long frac); |
|
91 |
|
92 |
|
/*
 * ----- SINGLE ------
 */
|
96 |
|
97 struct prop_local_single { |
|
98 /* |
|
99 * the local events counter |
|
100 */ |
|
101 unsigned long events; |
|
102 |
|
103 /* |
|
104 * snapshot of the last seen global state |
|
105 * and a lock protecting this state |
|
106 */ |
|
107 unsigned long period; |
|
108 int shift; |
|
109 spinlock_t lock; /* protect the snapshot state */ |
|
110 }; |
|
111 |
|
112 #define INIT_PROP_LOCAL_SINGLE(name) \ |
|
113 { .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
|
114 } |
|
115 |
|
116 int prop_local_init_single(struct prop_local_single *pl); |
|
117 void prop_local_destroy_single(struct prop_local_single *pl); |
|
118 void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl); |
|
119 void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl, |
|
120 long *numerator, long *denominator); |
|
121 |
|
/*
 * Account one local event, with local interrupts disabled around the
 * update so the snapshot state cannot be touched from irq context.
 */
static inline void prop_inc_single(struct prop_descriptor *pd,
				   struct prop_local_single *pl)
{
	unsigned long irq_flags;

	local_irq_save(irq_flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(irq_flags);
}
|
131 |
|
132 #endif /* _LINUX_PROPORTIONS_H */ |