/*
 * Idle daemon for PowerPC.  Idle daemon will handle any action
 * that needs to be taken when the system becomes idle.
 *
 * Originally Written by Cort Dougan (cort@cs.nmt.edu)
 *
 * iSeries supported added by Mike Corrigan <mikejc@us.ibm.com>
 *
 * Additional shared processor, SMT, and firmware support
 *    Copyright (c) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
17

18 19 20 21 22 23 24 25 26 27
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
28
#include <linux/interrupt.h>
29
#include <linux/cpu.h>
30 31 32 33 34 35 36 37

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/cache.h>
38
#include <asm/cputable.h>
39 40 41 42 43
#include <asm/time.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/ItLpQueue.h>

44 45
/* Hypervisor yield primitives, implemented outside this file. */
extern long cede_processor(void);
extern long poll_pending(void);

/* Bare-metal POWER4 nap routine, implemented outside this file. */
extern void power4_idle(void);

/* Platform-appropriate idle loop; selected once by idle_setup(). */
int (*idle_loop)(void);

#ifdef CONFIG_PPC_ISERIES
/* Shortest/longest observed duration of a timed hypervisor yield, in
 * timebase ticks.  Updated only from the idle loop below. */
unsigned long maxYieldTime = 0;
unsigned long minYieldTime = 0xffffffffffffffffUL;

/*
 * Give this (shared) processor back to the hypervisor for up to one
 * jiffy.  On return, update the yield-duration stats, fake a
 * decrementer interrupt so lost timer ticks get accounted, and drain
 * any pending iSeries LP events.
 */
static void yield_shared_processor(void)
{
	unsigned long tb;
	unsigned long yieldTime;

	/* Allow any of these event classes to end the yield early. */
	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);

	/* Track how long we were actually gone, for the stats above. */
	yieldTime = get_tb() - tb;
	if (yieldTime > maxYieldTime)
		maxYieldTime = yieldTime;

	if (yieldTime < minYieldTime)
		minYieldTime = yieldTime;
	
	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_paca()->lppaca.xIntDword.xFields.xDecrInt = 1;
	process_iSeries_events();
}

83
/*
 * Idle loop for legacy iSeries partitions.  On a shared processor the
 * CPU is yielded back to the hypervisor whenever there is no work; on
 * a dedicated processor we poll at low SMT priority for runnable tasks
 * and pending hypervisor events.  Never returns in practice.
 */
int iSeries_idle(void)
{
	struct paca_struct *lpaca;
	long oldval;
	unsigned long CTRL;

	/* ensure iSeries run light will be out when idle */
	clear_thread_flag(TIF_RUN_LIGHT);
	CTRL = mfspr(CTRLF);
	CTRL &= ~RUNLATCH;
	mtspr(CTRLT, CTRL);
#if 0
	init_idle();	
#endif

	lpaca = get_paca();

	for (;;) {
		if (lpaca->lppaca.xSharedProc) {
			/* Shared processor: drain pending LP events, then
			 * hand the CPU back to the hypervisor. */
			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
				process_iSeries_events();
			if (!need_resched())
				yield_shared_processor();
		} else {
			oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

			if (!oldval) {
				/* Advertise that we poll need_resched so
				 * other CPUs can skip the resched IPI. */
				set_thread_flag(TIF_POLLING_NRFLAG);

				while (!need_resched()) {
					HMT_medium();
					if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
						process_iSeries_events();
					HMT_low();
				}

				HMT_medium();
				clear_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				/* Resched was already pending; re-arm the
				 * flag we just cleared for schedule(). */
				set_need_resched();
			}
		}

		schedule();
	}
	return 0;
}
130
#endif
131

132
int default_idle(void)
133
{
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153
	long oldval;

	while (1) {
		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			while (!need_resched()) {
				barrier();
				HMT_low();
			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		schedule();
154 155 156
		if (cpu_is_offline(smp_processor_id()) &&
				system_state == SYSTEM_RUNNING)
			cpu_die();
157 158 159
	}

	return 0;
160
}
161

162
#ifdef CONFIG_PPC_PSERIES
163

164
/* Per-cpu tunable: how long to spin-poll (in microseconds) before
 * dropping into low-power snooze; defined elsewhere. */
DECLARE_PER_CPU(unsigned long, smt_snooze_delay);

/*
 * Idle loop for a dedicated-processor SPLPAR partition with SMT.
 * Poll for work for smt_snooze_delay microseconds; after that drop to
 * very low thread priority, and in dynamic SMT mode cede this thread
 * to the hypervisor while the partner thread is busy.
 */
int dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca(), *ppaca;
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	/* paca of the partner SMT thread on this core (cpu id ^ 1) */
	ppaca = &paca[smp_processor_id() ^ 1];

	while (1) {
		/* Indicate to the HV that we are idle.  Now would be
		 * a good time to find other work to dispatch. */
		lpaca->lppaca.xIdle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);
			/* Timebase value at which the spin-poll phase ends. */
			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;
			while (!need_resched()) {
				/* need_resched could be 1 or 0 at this 
				 * point.  If it is 0, set it to 0, so
				 * an IPI/Prod is sent.  If it is 1, keep
				 * it that way & schedule work.
				 */
				if (*smt_snooze_delay == 0 ||
				    __get_tb() < start_snooze) {
					HMT_low(); /* Low thread priority */
					continue;
				}

				HMT_very_low(); /* Low power mode */

				/* If the SMT mode is system controlled & the 
				 * partner thread is doing work, switch into
				 * ST mode.
				 */
				if((naca->smt_state == SMT_DYNAMIC) &&
				   (!(ppaca->lppaca.xIdle))) {
					/* Indicate we are no longer polling for
					 * work, and then clear need_resched.  If
					 * need_resched was 1, set it back to 1
					 * and schedule work
					 */
					clear_thread_flag(TIF_POLLING_NRFLAG);
					oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
					if(oldval == 1) {
						set_need_resched();
						break;
					}

					/* DRENG: Go HMT_medium here ? */
					local_irq_disable(); 

					/* SMT dynamic mode.  Cede will result 
					 * in this thread going dormant, if the
					 * partner thread is still doing work.
					 * Thread wakes up if partner goes idle,
					 * an interrupt is presented, or a prod
					 * occurs.  Returning from the cede
					 * enables external interrupts.
					 */
					cede_processor();
				} else {
					/* Give the HV an opportunity at the
					 * processor, since we are not doing
					 * any work.
					 */
					poll_pending();
				}
			}
		} else {
			set_need_resched();
		}

		/* NOTE(review): when the polling loop above exits because
		 * need_resched() became true, TIF_POLLING_NRFLAG appears to
		 * remain set across schedule() unless the cede path cleared
		 * it -- confirm whether that is intentional. */
		HMT_medium();
		lpaca->lppaca.xIdle = 0;
		schedule();
		if (cpu_is_offline(smp_processor_id()) &&
				system_state == SYSTEM_RUNNING)
			cpu_die();
	}
	return 0;
}

int shared_idle(void)
252
{
253 254 255
	struct paca_struct *lpaca = get_paca();

	while (1) {
256 257 258 259
		if (cpu_is_offline(smp_processor_id()) &&
				system_state == SYSTEM_RUNNING)
			cpu_die();

260 261
		/* Indicate to the HV that we are idle.  Now would be
		 * a good time to find other work to dispatch. */
David Gibson's avatar
David Gibson committed
262
		lpaca->lppaca.xIdle = 1;
263 264 265 266 267 268 269 270 271 272 273 274 275 276 277

		if (!need_resched()) {
			local_irq_disable(); 
			
			/* 
			 * Yield the processor to the hypervisor.  We return if
			 * an external interrupt occurs (which are driven prior
			 * to returning here) or if a prod occurs from another 
			 * processor.  When returning here, external interrupts 
			 * are enabled.
			 */
			cede_processor();
		}

		HMT_medium();
David Gibson's avatar
David Gibson committed
278
		lpaca->lppaca.xIdle = 0;
279 280 281 282 283
		schedule();
	}

	return 0;
}
284
#endif
285 286 287 288 289 290 291

/*
 * Arch entry point for the idle thread: run whichever loop
 * idle_setup() selected for this platform.  The selected loops
 * never return in practice.
 */
int cpu_idle(void)
{
	(void)idle_loop();

	return 0;
}

292 293 294 295 296 297 298 299 300 301 302
/*
 * Idle loop for bare-metal POWER4-class machines: nap the processor
 * via power4_idle() until the scheduler has work for us.  Never
 * returns in practice.
 */
int native_idle(void)
{
	for (;;) {
		if (need_resched())
			schedule();
		else
			power4_idle();
	}

	return 0;
}

303 304 305 306 307 308 309
/*
 * Select the idle loop appropriate for this platform and firmware at
 * boot, storing it in idle_loop for cpu_idle() to run.
 * Always returns 1.
 */
int idle_setup(void)
{
#ifdef CONFIG_PPC_ISERIES
	idle_loop = iSeries_idle;
#else
	if (systemcfg->platform & PLATFORM_PSERIES) {
		if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
			/* SPLPAR firmware: pick the loop matching whether
			 * this partition shares physical processors. */
			if (get_paca()->lppaca.xSharedProc) {
				printk("idle = shared_idle\n");
				idle_loop = shared_idle;
			} else {
				printk("idle = dedicated_idle\n");
				idle_loop = dedicated_idle;
			}
		} else {
			printk("idle = default_idle\n");
			idle_loop = default_idle;
		}
	} else if (systemcfg->platform == PLATFORM_POWERMAC) {
		printk("idle = native_idle\n");
		idle_loop = native_idle;
	} else {
		printk("idle_setup: unknown platform, use default_idle\n");
		idle_loop = default_idle;
	}
#endif

	return 1;
}
332