v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* sched.c - SPU scheduler.
   3 *
   4 * Copyright (C) IBM 2005
   5 * Author: Mark Nutter <mnutter@us.ibm.com>
   6 *
   7 * 2006-03-31	NUMA domains added.
   8 */
   9
  10#undef DEBUG
  11
  12#include <linux/errno.h>
  13#include <linux/sched/signal.h>
  14#include <linux/sched/loadavg.h>
  15#include <linux/sched/rt.h>
  16#include <linux/kernel.h>
  17#include <linux/mm.h>
  18#include <linux/slab.h>
  19#include <linux/completion.h>
  20#include <linux/vmalloc.h>
  21#include <linux/smp.h>
  22#include <linux/stddef.h>
  23#include <linux/unistd.h>
  24#include <linux/numa.h>
  25#include <linux/mutex.h>
  26#include <linux/notifier.h>
  27#include <linux/kthread.h>
  28#include <linux/pid_namespace.h>
  29#include <linux/proc_fs.h>
  30#include <linux/seq_file.h>
  31
  32#include <asm/io.h>
  33#include <asm/mmu_context.h>
  34#include <asm/spu.h>
  35#include <asm/spu_csa.h>
  36#include <asm/spu_priv1.h>
  37#include "spufs.h"
  38#define CREATE_TRACE_POINTS
  39#include "sputrace.h"
  40
  41struct spu_prio_array {
  42	DECLARE_BITMAP(bitmap, MAX_PRIO);
  43	struct list_head runq[MAX_PRIO];
  44	spinlock_t runq_lock;
  45	int nr_waiting;
  46};
  47
  48static unsigned long spu_avenrun[3];
  49static struct spu_prio_array *spu_prio;
  50static struct task_struct *spusched_task;
  51static struct timer_list spusched_timer;
  52static struct timer_list spuloadavg_timer;
  53
  54/*
  55 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
  56 */
  57#define NORMAL_PRIO		120
  58
  59/*
  60 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
  61 * tick for every 10 CPU scheduler ticks.
  62 */
  63#define SPUSCHED_TICK		(10)
  64
  65/*
  66 * These are the 'tuning knobs' of the scheduler:
  67 *
  68 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
  69 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
  70 */
  71#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
  72#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
  73
  74#define SCALE_PRIO(x, prio) \
  75	max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)
  76
  77/*
  78 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
  79 * [800ms ... 100ms ... 5ms]
  80 *
  81 * The higher a thread's priority, the bigger timeslices
  82 * it gets during one round of execution. But even the lowest
  83 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
  84 */
  85void spu_set_timeslice(struct spu_context *ctx)
  86{
  87	if (ctx->prio < NORMAL_PRIO)
  88		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
  89	else
  90		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
  91}
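
As a concrete illustration of the scaling above, here is a small stand-alone user-space sketch (illustrative only, not kernel code) that reproduces the arithmetic. It assumes HZ=1000, MAX_PRIO=140, NICE_WIDTH=40 and NORMAL_PRIO=120, which are the usual kernel values; HZ in particular depends on the kernel configuration.

/* Stand-alone sketch of the SPU timeslice scaling (illustrative only). */
#include <stdio.h>

#define HZ			1000	/* assumed; configuration dependent */
#define MAX_PRIO		140
#define NICE_WIDTH		40
#define NORMAL_PRIO		120
#define SPUSCHED_TICK		10

#define max(a, b)		((a) > (b) ? (a) : (b))
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
#define SCALE_PRIO(x, prio) \
	max((x) * (MAX_PRIO - (prio)) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)

int main(void)
{
	static const int prios[] = { 100, 110, 120, 130, 139 };
	unsigned int i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		int prio = prios[i];
		int slice = prio < NORMAL_PRIO ?
			SCALE_PRIO(DEF_SPU_TIMESLICE * 4, prio) :
			SCALE_PRIO(DEF_SPU_TIMESLICE, prio);

		/* one SPU scheduler tick lasts SPUSCHED_TICK jiffies */
		printf("prio %3d -> %2d ticks = %3d ms\n",
		       prio, slice, slice * SPUSCHED_TICK * 1000 / HZ);
	}
	return 0;
}

With these values a nice -20 context (prio 100) gets 80 ticks (800 ms), a nice-0 context (prio 120) gets 10 ticks (100 ms), and the lowest priority (139) keeps MIN_SPU_TIMESLICE, i.e. one tick (10 ms); the floor comes out as one scheduler tick rather than the 5 ms quoted above because 5 ms rounds down to zero ticks at this HZ.
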
  92
  93/*
  94 * Update scheduling information from the owning thread.
  95 */
  96void __spu_update_sched_info(struct spu_context *ctx)
  97{
  98	/*
  99	 * assert that the context is not on the runqueue, so it is safe
 100	 * to change its scheduling parameters.
 101	 */
 102	BUG_ON(!list_empty(&ctx->rq));
 103
 104	/*
 105	 * 32-Bit assignments are atomic on powerpc, and we don't care about
 106	 * memory ordering here because retrieving the controlling thread is
 107	 * by definition racy.
 108	 */
 109	ctx->tid = current->pid;
 110
 111	/*
 112	 * We do our own priority calculations, so we normally want
 113	 * ->static_prio to start with. Unfortunately this field
 114	 * contains junk for threads with a realtime scheduling
 115	 * policy so we have to look at ->prio in this case.
 116	 */
 117	if (rt_prio(current->prio))
 118		ctx->prio = current->prio;
 119	else
 120		ctx->prio = current->static_prio;
 121	ctx->policy = current->policy;
 122
 123	/*
 124	 * TO DO: the context may be loaded, so we may need to activate
 125	 * it again on a different node. But it shouldn't hurt anything
 126	 * to update its parameters, because we know that the scheduler
 127	 * is not actively looking at this field, since it is not on the
 128	 * runqueue. The context will be rescheduled on the proper node
 129	 * if it is timesliced or preempted.
 130	 */
 131	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
 132
 133	/* Save the current cpu id for spu interrupt routing. */
 134	ctx->last_ran = raw_smp_processor_id();
 135}
 136
 137void spu_update_sched_info(struct spu_context *ctx)
 138{
 139	int node;
 140
 141	if (ctx->state == SPU_STATE_RUNNABLE) {
 142		node = ctx->spu->node;
 143
 144		/*
 145		 * Take list_mutex to sync with find_victim().
 146		 */
 147		mutex_lock(&cbe_spu_info[node].list_mutex);
 148		__spu_update_sched_info(ctx);
 149		mutex_unlock(&cbe_spu_info[node].list_mutex);
 150	} else {
 151		__spu_update_sched_info(ctx);
 152	}
 153}
 154
 155static int __node_allowed(struct spu_context *ctx, int node)
 156{
 157	if (nr_cpus_node(node)) {
 158		const struct cpumask *mask = cpumask_of_node(node);
 159
 160		if (cpumask_intersects(mask, &ctx->cpus_allowed))
 161			return 1;
 162	}
 163
 164	return 0;
 165}
 166
 167static int node_allowed(struct spu_context *ctx, int node)
 168{
 169	int rval;
 170
 171	spin_lock(&spu_prio->runq_lock);
 172	rval = __node_allowed(ctx, node);
 173	spin_unlock(&spu_prio->runq_lock);
 174
 175	return rval;
 176}
 177
 178void do_notify_spus_active(void)
 179{
 180	int node;
 181
 182	/*
 183	 * Wake up the active spu_contexts.
 184	 */
 185	for_each_online_node(node) {
 186		struct spu *spu;
 187
 188		mutex_lock(&cbe_spu_info[node].list_mutex);
 189		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 190			if (spu->alloc_state != SPU_FREE) {
 191				struct spu_context *ctx = spu->ctx;
 192				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
 193					&ctx->sched_flags);
 194				mb();
 195				wake_up_all(&ctx->stop_wq);
 196			}
 197		}
 198		mutex_unlock(&cbe_spu_info[node].list_mutex);
 199	}
 200}
 201
 202/**
 203 * spu_bind_context - bind spu context to physical spu
 204 * @spu:	physical spu to bind to
 205 * @ctx:	context to bind
 206 */
 207static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 208{
 209	spu_context_trace(spu_bind_context__enter, ctx, spu);
 210
 211	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 212
 213	if (ctx->flags & SPU_CREATE_NOSCHED)
 214		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
 215
 216	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 217	ctx->stats.class2_intr_base = spu->stats.class2_intr;
 218
 219	spu_associate_mm(spu, ctx->owner);
 220
 221	spin_lock_irq(&spu->register_lock);
 222	spu->ctx = ctx;
 223	spu->flags = 0;
 224	ctx->spu = spu;
 225	ctx->ops = &spu_hw_ops;
 226	spu->pid = current->pid;
 227	spu->tgid = current->tgid;
 228	spu->ibox_callback = spufs_ibox_callback;
 229	spu->wbox_callback = spufs_wbox_callback;
 230	spu->stop_callback = spufs_stop_callback;
 231	spu->mfc_callback = spufs_mfc_callback;
 232	spin_unlock_irq(&spu->register_lock);
 233
 234	spu_unmap_mappings(ctx);
 235
 236	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 237	spu_restore(&ctx->csa, spu);
 238	spu->timestamp = jiffies;
 239	ctx->state = SPU_STATE_RUNNABLE;
 240
 241	spuctx_switch_state(ctx, SPU_UTIL_USER);
 242}
 243
 244/*
 245 * Must be used with the list_mutex held.
 246 */
 247static inline int sched_spu(struct spu *spu)
 248{
 249	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
 250
 251	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
 252}
 253
 254static void aff_merge_remaining_ctxs(struct spu_gang *gang)
 255{
 256	struct spu_context *ctx;
 257
 258	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
 259		if (list_empty(&ctx->aff_list))
 260			list_add(&ctx->aff_list, &gang->aff_list_head);
 261	}
 262	gang->aff_flags |= AFF_MERGED;
 263}
 264
 265static void aff_set_offsets(struct spu_gang *gang)
 266{
 267	struct spu_context *ctx;
 268	int offset;
 269
 270	offset = -1;
 271	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
 272								aff_list) {
 273		if (&ctx->aff_list == &gang->aff_list_head)
 274			break;
 275		ctx->aff_offset = offset--;
 276	}
 277
 278	offset = 0;
 279	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
 280		if (&ctx->aff_list == &gang->aff_list_head)
 281			break;
 282		ctx->aff_offset = offset++;
 283	}
 284
 285	gang->aff_flags |= AFF_OFFSETS_SET;
 286}
 287
 288static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
 289		 int group_size, int lowest_offset)
 290{
 291	struct spu *spu;
 292	int node, n;
 293
 294	/*
 295	 * TODO: A better algorithm could be used to find a good spu to be
 296	 *       used as reference location for the ctxs chain.
 297	 */
 298	node = cpu_to_node(raw_smp_processor_id());
 299	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 300		/*
 301		 * "available_spus" counts how many spus are not potentially
 302		 * going to be used by other affinity gangs whose reference
 303		 * context is already in place. Although this code seeks to
 304		 * avoid having affinity gangs with a total number of
 305		 * contexts greater than the number of spus in the node,
 306		 * this may happen sporadically. In this case, available_spus
 307		 * becomes negative, which is harmless.
 308		 */
 309		int available_spus;
 310
 311		node = (node < MAX_NUMNODES) ? node : 0;
 312		if (!node_allowed(ctx, node))
 313			continue;
 314
 315		available_spus = 0;
 316		mutex_lock(&cbe_spu_info[node].list_mutex);
 317		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 318			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
 319					&& spu->ctx->gang->aff_ref_spu)
 320				available_spus -= spu->ctx->gang->contexts;
 321			available_spus++;
 322		}
 323		if (available_spus < ctx->gang->contexts) {
 324			mutex_unlock(&cbe_spu_info[node].list_mutex);
 325			continue;
 326		}
 327
 328		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 329			if ((!mem_aff || spu->has_mem_affinity) &&
 330							sched_spu(spu)) {
 331				mutex_unlock(&cbe_spu_info[node].list_mutex);
 332				return spu;
 333			}
 334		}
 335		mutex_unlock(&cbe_spu_info[node].list_mutex);
 336	}
 337	return NULL;
 338}
 339
 340static void aff_set_ref_point_location(struct spu_gang *gang)
 341{
 342	int mem_aff, gs, lowest_offset;
 343	struct spu_context *tmp, *ctx;
 344
 345	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
 346	lowest_offset = 0;
 347	gs = 0;
 348
 349	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
 350		gs++;
 351
 352	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
 353								aff_list) {
 354		if (&ctx->aff_list == &gang->aff_list_head)
 355			break;
 356		lowest_offset = ctx->aff_offset;
 357	}
 358
 359	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
 360							lowest_offset);
 361}
 362
 363static struct spu *ctx_location(struct spu *ref, int offset, int node)
 364{
 365	struct spu *spu;
 366
 367	spu = NULL;
 368	if (offset >= 0) {
 369		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
 370			BUG_ON(spu->node != node);
 371			if (offset == 0)
 372				break;
 373			if (sched_spu(spu))
 374				offset--;
 375		}
 376	} else {
 377		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
 378			BUG_ON(spu->node != node);
 379			if (offset == 0)
 380				break;
 381			if (sched_spu(spu))
 382				offset++;
 383		}
 384	}
 385
 386	return spu;
 387}
 388
 389/*
 390 * has_affinity is called each time a context is going to be scheduled.
 391 * It returns true if the gang has (or can set up) an affinity reference spu.
 392 */
 393static int has_affinity(struct spu_context *ctx)
 394{
 395	struct spu_gang *gang = ctx->gang;
 396
 397	if (list_empty(&ctx->aff_list))
 398		return 0;
 399
 400	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
 401		ctx->gang->aff_ref_spu = NULL;
 402
 403	if (!gang->aff_ref_spu) {
 404		if (!(gang->aff_flags & AFF_MERGED))
 405			aff_merge_remaining_ctxs(gang);
 406		if (!(gang->aff_flags & AFF_OFFSETS_SET))
 407			aff_set_offsets(gang);
 408		aff_set_ref_point_location(gang);
 409	}
 410
 411	return gang->aff_ref_spu != NULL;
 412}
 413
 414/**
 415 * spu_unbind_context - unbind spu context from physical spu
 416 * @spu:	physical spu to unbind from
 417 * @ctx:	context to unbind
 418 */
 419static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 420{
 421	u32 status;
 422
 423	spu_context_trace(spu_unbind_context__enter, ctx, spu);
 424
 425	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 426
 427 	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
 428		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
 429
 430	if (ctx->gang)
 431		/*
 432		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
 433		 * being considered in this gang. Using atomic_dec_if_positive
 434		 * allows us to skip an explicit check for affinity in this gang.
 435		 */
 436		atomic_dec_if_positive(&ctx->gang->aff_sched_count);
 437
 438	spu_unmap_mappings(ctx);
 439	spu_save(&ctx->csa, spu);
 440	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
 441
 442	spin_lock_irq(&spu->register_lock);
 443	spu->timestamp = jiffies;
 444	ctx->state = SPU_STATE_SAVED;
 445	spu->ibox_callback = NULL;
 446	spu->wbox_callback = NULL;
 447	spu->stop_callback = NULL;
 448	spu->mfc_callback = NULL;
 449	spu->pid = 0;
 450	spu->tgid = 0;
 451	ctx->ops = &spu_backing_ops;
 452	spu->flags = 0;
 453	spu->ctx = NULL;
 454	spin_unlock_irq(&spu->register_lock);
 455
 456	spu_associate_mm(spu, NULL);
 457
 458	ctx->stats.slb_flt +=
 459		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
 460	ctx->stats.class2_intr +=
 461		(spu->stats.class2_intr - ctx->stats.class2_intr_base);
 462
 463	/* This maps the underlying spu state to idle */
 464	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 465	ctx->spu = NULL;
 466
 467	if (spu_stopped(ctx, &status))
 468		wake_up_all(&ctx->stop_wq);
 469}
 470
 471/**
 472 * spu_add_to_rq - add a context to the runqueue
 473 * @ctx:       context to add
 474 */
 475static void __spu_add_to_rq(struct spu_context *ctx)
 476{
 477	/*
 478	 * Unfortunately this code path can be called from multiple threads
 479	 * on behalf of a single context due to the way the problem state
 480	 * mmap support works.
 481	 *
 482	 * Fortunately we need to wake up all these threads at the same time
 483	 * and can simply skip the runqueue addition for every but the first
 484	 * thread getting into this codepath.
 485	 *
 486	 * It's still quite hacky, and long-term we should proxy all other
 487	 * threads through the owner thread so that spu_run is in control
 488	 * of all the scheduling activity for a given context.
 489	 */
 490	if (list_empty(&ctx->rq)) {
 491		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 492		set_bit(ctx->prio, spu_prio->bitmap);
 493		if (!spu_prio->nr_waiting++)
 494			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 495	}
 496}
 497
 498static void spu_add_to_rq(struct spu_context *ctx)
 499{
 500	spin_lock(&spu_prio->runq_lock);
 501	__spu_add_to_rq(ctx);
 502	spin_unlock(&spu_prio->runq_lock);
 503}
 504
 505static void __spu_del_from_rq(struct spu_context *ctx)
 506{
 507	int prio = ctx->prio;
 508
 509	if (!list_empty(&ctx->rq)) {
 510		if (!--spu_prio->nr_waiting)
 511			del_timer(&spusched_timer);
 512		list_del_init(&ctx->rq);
 513
 514		if (list_empty(&spu_prio->runq[prio]))
 515			clear_bit(prio, spu_prio->bitmap);
 516	}
 517}
 518
 519void spu_del_from_rq(struct spu_context *ctx)
 520{
 521	spin_lock(&spu_prio->runq_lock);
 522	__spu_del_from_rq(ctx);
 523	spin_unlock(&spu_prio->runq_lock);
 524}
 525
 526static void spu_prio_wait(struct spu_context *ctx)
 527{
 528	DEFINE_WAIT(wait);
 529
 530	/*
 531	 * The caller must explicitly wait for a context to be loaded
 532	 * if the nosched flag is set.  If NOSCHED is not set, the caller
 533	 * queues the context and waits for an spu event or error.
 534	 */
 535	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
 536
 537	spin_lock(&spu_prio->runq_lock);
 538	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 539	if (!signal_pending(current)) {
 540		__spu_add_to_rq(ctx);
 541		spin_unlock(&spu_prio->runq_lock);
 542		mutex_unlock(&ctx->state_mutex);
 543		schedule();
 544		mutex_lock(&ctx->state_mutex);
 545		spin_lock(&spu_prio->runq_lock);
 546		__spu_del_from_rq(ctx);
 547	}
 548	spin_unlock(&spu_prio->runq_lock);
 549	__set_current_state(TASK_RUNNING);
 550	remove_wait_queue(&ctx->stop_wq, &wait);
 551}
 552
 553static struct spu *spu_get_idle(struct spu_context *ctx)
 554{
 555	struct spu *spu, *aff_ref_spu;
 556	int node, n;
 557
 558	spu_context_nospu_trace(spu_get_idle__enter, ctx);
 559
 560	if (ctx->gang) {
 561		mutex_lock(&ctx->gang->aff_mutex);
 562		if (has_affinity(ctx)) {
 563			aff_ref_spu = ctx->gang->aff_ref_spu;
 564			atomic_inc(&ctx->gang->aff_sched_count);
 565			mutex_unlock(&ctx->gang->aff_mutex);
 566			node = aff_ref_spu->node;
 567
 568			mutex_lock(&cbe_spu_info[node].list_mutex);
 569			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
 570			if (spu && spu->alloc_state == SPU_FREE)
 571				goto found;
 572			mutex_unlock(&cbe_spu_info[node].list_mutex);
 573
 574			atomic_dec(&ctx->gang->aff_sched_count);
 575			goto not_found;
 576		}
 577		mutex_unlock(&ctx->gang->aff_mutex);
 578	}
 579	node = cpu_to_node(raw_smp_processor_id());
 580	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 581		node = (node < MAX_NUMNODES) ? node : 0;
 582		if (!node_allowed(ctx, node))
 583			continue;
 584
 585		mutex_lock(&cbe_spu_info[node].list_mutex);
 586		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 587			if (spu->alloc_state == SPU_FREE)
 588				goto found;
 589		}
 590		mutex_unlock(&cbe_spu_info[node].list_mutex);
 591	}
 592
 593 not_found:
 594	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
 595	return NULL;
 596
 597 found:
 598	spu->alloc_state = SPU_USED;
 599	mutex_unlock(&cbe_spu_info[node].list_mutex);
 600	spu_context_trace(spu_get_idle__found, ctx, spu);
 601	spu_init_channels(spu);
 602	return spu;
 603}
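
spu_get_idle() above, like aff_ref_location() and find_victim() elsewhere in this file, uses the same node-walk idiom: start at the NUMA node of the CPU the caller happens to be running on, visit every node exactly once with wrap-around, and skip nodes the context's cpumask does not allow. A minimal sketch of just the visit order, with a hypothetical MAX_NUMNODES of 4 for illustration:

/* Prints the node visit order produced by the wrap-around walk above. */
#include <stdio.h>

#define MAX_NUMNODES	4	/* assumed small value for illustration */

int main(void)
{
	int node = 2;		/* pretend the caller runs on node 2 */
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;	/* wrap */
		printf("%d ", node);	/* prints: 2 3 0 1 */
	}
	printf("\n");
	return 0;
}

Starting at the local node is what gives the scheduler its soft node affinity: an idle or preemptible SPU on the local Cell node is always preferred before remote nodes are considered.
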
 604
 605/**
 606 * find_victim - find a lower priority context to preempt
 607 * @ctx:	candidate context for running
 608 *
 609 * Returns the freed physical spu to run the new context on.
 610 */
 611static struct spu *find_victim(struct spu_context *ctx)
 612{
 613	struct spu_context *victim = NULL;
 614	struct spu *spu;
 615	int node, n;
 616
 617	spu_context_nospu_trace(spu_find_victim__enter, ctx);
 618
 619	/*
 620	 * Look for a possible preemption candidate on the local node first.
 621	 * If there is no candidate look at the other nodes.  This isn't
 622	 * exactly fair, but so far the whole spu scheduler tries to keep
 623	 * a strong node affinity.  We might want to fine-tune this in
 624	 * the future.
 625	 */
 626 restart:
 627	node = cpu_to_node(raw_smp_processor_id());
 628	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 629		node = (node < MAX_NUMNODES) ? node : 0;
 630		if (!node_allowed(ctx, node))
 631			continue;
 632
 633		mutex_lock(&cbe_spu_info[node].list_mutex);
 634		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 635			struct spu_context *tmp = spu->ctx;
 636
 637			if (tmp && tmp->prio > ctx->prio &&
 638			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
 639			    (!victim || tmp->prio > victim->prio)) {
 640				victim = spu->ctx;
 641			}
 642		}
 643		if (victim)
 644			get_spu_context(victim);
 645		mutex_unlock(&cbe_spu_info[node].list_mutex);
 646
 647		if (victim) {
 648			/*
 649			 * This nests ctx->state_mutex, but we always lock
 650			 * higher priority contexts before lower priority
 651			 * ones, so this is safe until we introduce
 652			 * priority inheritance schemes.
 653			 *
 654			 * XXX if the highest priority context is locked,
 655			 * this can loop a long time.  Might be better to
 656			 * look at another context or give up after X retries.
 657			 */
 658			if (!mutex_trylock(&victim->state_mutex)) {
 659				put_spu_context(victim);
 660				victim = NULL;
 661				goto restart;
 662			}
 663
 664			spu = victim->spu;
 665			if (!spu || victim->prio <= ctx->prio) {
 666				/*
 667				 * This race can happen because we've dropped
 668				 * the active list mutex.  Not a problem, just
 669				 * restart the search.
 670				 */
 671				mutex_unlock(&victim->state_mutex);
 672				put_spu_context(victim);
 673				victim = NULL;
 674				goto restart;
 675			}
 676
 677			spu_context_trace(__spu_deactivate__unload, ctx, spu);
 678
 679			mutex_lock(&cbe_spu_info[node].list_mutex);
 680			cbe_spu_info[node].nr_active--;
 681			spu_unbind_context(spu, victim);
 682			mutex_unlock(&cbe_spu_info[node].list_mutex);
 683
 684			victim->stats.invol_ctx_switch++;
 685			spu->stats.invol_ctx_switch++;
 686			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
 687				spu_add_to_rq(victim);
 688
 689			mutex_unlock(&victim->state_mutex);
 690			put_spu_context(victim);
 691
 692			return spu;
 693		}
 694	}
 695
 696	return NULL;
 697}
 698
 699static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
 700{
 701	int node = spu->node;
 702	int success = 0;
 703
 704	spu_set_timeslice(ctx);
 705
 706	mutex_lock(&cbe_spu_info[node].list_mutex);
 707	if (spu->ctx == NULL) {
 708		spu_bind_context(spu, ctx);
 709		cbe_spu_info[node].nr_active++;
 710		spu->alloc_state = SPU_USED;
 711		success = 1;
 712	}
 713	mutex_unlock(&cbe_spu_info[node].list_mutex);
 714
 715	if (success)
 716		wake_up_all(&ctx->run_wq);
 717	else
 718		spu_add_to_rq(ctx);
 719}
 720
 721static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 722{
 723	/* not a candidate for interruptible because it's called either
 724	   from the scheduler thread or from spu_deactivate */
 725	mutex_lock(&ctx->state_mutex);
 726	if (ctx->state == SPU_STATE_SAVED)
 727		__spu_schedule(spu, ctx);
 728	spu_release(ctx);
 729}
 730
 731/**
 732 * spu_unschedule - remove a context from a spu, and possibly release it.
 733 * @spu:	The SPU to unschedule from
 734 * @ctx:	The context currently scheduled on the SPU
 735 * @free_spu:	Whether to free the SPU for other contexts
 736 *
 737 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 738 * SPU is made available for other contexts (ie, may be returned by
 739 * spu_get_idle). If this is zero, the caller is expected to schedule another
 740 * context to this spu.
 741 *
 742 * Should be called with ctx->state_mutex held.
 743 */
 744static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
 745		int free_spu)
 746{
 747	int node = spu->node;
 748
 749	mutex_lock(&cbe_spu_info[node].list_mutex);
 750	cbe_spu_info[node].nr_active--;
 751	if (free_spu)
 752		spu->alloc_state = SPU_FREE;
 753	spu_unbind_context(spu, ctx);
 754	ctx->stats.invol_ctx_switch++;
 755	spu->stats.invol_ctx_switch++;
 756	mutex_unlock(&cbe_spu_info[node].list_mutex);
 757}
 758
 759/**
 760 * spu_activate - find a free spu for a context and execute it
 761 * @ctx:	spu context to schedule
 762 * @flags:	flags (currently ignored)
 763 *
 764 * Tries to find a free spu to run @ctx.  If no free spu is available
 765 * add the context to the runqueue so it gets woken up once an spu
 766 * is available.
 767 */
 768int spu_activate(struct spu_context *ctx, unsigned long flags)
 769{
 770	struct spu *spu;
 771
 772	/*
 773	 * If there are multiple threads waiting for a single context
 774	 * only one actually binds the context while the others will
 775	 * only be able to acquire the state_mutex once the context
 776	 * already is in runnable state.
 777	 */
 778	if (ctx->spu)
 779		return 0;
 780
 781spu_activate_top:
 782	if (signal_pending(current))
 783		return -ERESTARTSYS;
 784
 785	spu = spu_get_idle(ctx);
 786	/*
 787	 * If this is a realtime thread we try to get it running by
 788	 * preempting a lower priority thread.
 789	 */
 790	if (!spu && rt_prio(ctx->prio))
 791		spu = find_victim(ctx);
 792	if (spu) {
 793		unsigned long runcntl;
 794
 795		runcntl = ctx->ops->runcntl_read(ctx);
 796		__spu_schedule(spu, ctx);
 797		if (runcntl & SPU_RUNCNTL_RUNNABLE)
 798			spuctx_switch_state(ctx, SPU_UTIL_USER);
 799
 800		return 0;
 801	}
 802
 803	if (ctx->flags & SPU_CREATE_NOSCHED) {
 804		spu_prio_wait(ctx);
 805		goto spu_activate_top;
 806	}
 807
 808	spu_add_to_rq(ctx);
 809
 810	return 0;
 811}
 812
 813/**
 814 * grab_runnable_context - try to find a runnable context
 815 *
 816 * Remove the highest priority context on the runqueue and return it
 817 * to the caller.  Returns %NULL if no runnable context was found.
 818 */
 819static struct spu_context *grab_runnable_context(int prio, int node)
 820{
 821	struct spu_context *ctx;
 822	int best;
 823
 824	spin_lock(&spu_prio->runq_lock);
 825	best = find_first_bit(spu_prio->bitmap, prio);
 826	while (best < prio) {
 827		struct list_head *rq = &spu_prio->runq[best];
 828
 829		list_for_each_entry(ctx, rq, rq) {
 830			/* XXX(hch): check for affinity here as well */
 831			if (__node_allowed(ctx, node)) {
 832				__spu_del_from_rq(ctx);
 833				goto found;
 834			}
 835		}
 836		best++;
 837	}
 838	ctx = NULL;
 839 found:
 840	spin_unlock(&spu_prio->runq_lock);
 841	return ctx;
 842}
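
grab_runnable_context() relies on the O(1)-scheduler style priority array declared at the top of the file: one list per priority level plus a bitmap of non-empty levels, so find_first_bit() locates the best (numerically lowest) priority with waiting contexts without scanning empty lists. A rough user-space sketch of the same bookkeeping, with illustrative names only:

/* Illustrative bitmap-indexed priority runqueue, not the kernel's types. */
#define NPRIO		140
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

struct toy_prio_rq {
	unsigned long bitmap[(NPRIO + BITS_PER_LONG - 1) / BITS_PER_LONG];
	int nr_queued[NPRIO];	/* stand-in for the per-priority lists */
};

static void toy_rq_add(struct toy_prio_rq *rq, int prio)
{
	if (rq->nr_queued[prio]++ == 0)
		rq->bitmap[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
}

static void toy_rq_del(struct toy_prio_rq *rq, int prio)
{
	if (--rq->nr_queued[prio] == 0)
		rq->bitmap[prio / BITS_PER_LONG] &= ~(1UL << (prio % BITS_PER_LONG));
}

/* Analogue of find_first_bit(bitmap, limit): best priority below 'limit',
 * or 'limit' itself when nothing better is queued. */
static int toy_rq_best(const struct toy_prio_rq *rq, int limit)
{
	int prio;

	for (prio = 0; prio < limit; prio++)
		if (rq->bitmap[prio / BITS_PER_LONG] &
		    (1UL << (prio % BITS_PER_LONG)))
			return prio;
	return limit;
}

Note that grab_runnable_context(prio, node) only looks at priorities strictly below the limit it is given, which is why spusched_tick() later passes ctx->prio + 1: a waiting context of equal priority can then round-robin with the one whose timeslice just expired.
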
 843
 844static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 845{
 846	struct spu *spu = ctx->spu;
 847	struct spu_context *new = NULL;
 848
 849	if (spu) {
 850		new = grab_runnable_context(max_prio, spu->node);
 851		if (new || force) {
 852			spu_unschedule(spu, ctx, new == NULL);
 853			if (new) {
 854				if (new->flags & SPU_CREATE_NOSCHED)
 855					wake_up(&new->stop_wq);
 856				else {
 857					spu_release(ctx);
 858					spu_schedule(spu, new);
 859					/* this one can't easily be made
 860					   interruptible */
 861					mutex_lock(&ctx->state_mutex);
 862				}
 863			}
 864		}
 865	}
 866
 867	return new != NULL;
 868}
 869
 870/**
 871 * spu_deactivate - unbind a context from its physical spu
 872 * @ctx:	spu context to unbind
 873 *
 874 * Unbind @ctx from the physical spu it is running on and schedule
 875 * the highest priority context to run on the freed physical spu.
 876 */
 877void spu_deactivate(struct spu_context *ctx)
 878{
 879	spu_context_nospu_trace(spu_deactivate__enter, ctx);
 880	__spu_deactivate(ctx, 1, MAX_PRIO);
 881}
 882
 883/**
 884 * spu_yield -	yield a physical spu if others are waiting
 885 * @ctx:	spu context to yield
 886 *
 887 * Check if there is a higher priority context waiting and if yes
 888 * unbind @ctx from the physical spu and schedule the highest
 889 * priority context to run on the freed physical spu instead.
 890 */
 891void spu_yield(struct spu_context *ctx)
 892{
 893	spu_context_nospu_trace(spu_yield__enter, ctx);
 894	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
 895		mutex_lock(&ctx->state_mutex);
 896		__spu_deactivate(ctx, 0, MAX_PRIO);
 897		mutex_unlock(&ctx->state_mutex);
 898	}
 899}
 900
 901static noinline void spusched_tick(struct spu_context *ctx)
 902{
 903	struct spu_context *new = NULL;
 904	struct spu *spu = NULL;
 905
 906	if (spu_acquire(ctx))
 907		BUG();	/* a kernel thread never has signals pending */
 908
 909	if (ctx->state != SPU_STATE_RUNNABLE)
 910		goto out;
 911	if (ctx->flags & SPU_CREATE_NOSCHED)
 912		goto out;
 913	if (ctx->policy == SCHED_FIFO)
 914		goto out;
 915
 916	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 917		goto out;
 918
 919	spu = ctx->spu;
 920
 921	spu_context_trace(spusched_tick__preempt, ctx, spu);
 922
 923	new = grab_runnable_context(ctx->prio + 1, spu->node);
 924	if (new) {
 925		spu_unschedule(spu, ctx, 0);
 926		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 927			spu_add_to_rq(ctx);
 928	} else {
 929		spu_context_nospu_trace(spusched_tick__newslice, ctx);
 930		if (!ctx->time_slice)
 931			ctx->time_slice++;
 932	}
 933out:
 934	spu_release(ctx);
 935
 936	if (new)
 937		spu_schedule(spu, new);
 938}
 939
 940/**
 941 * count_active_contexts - count nr of active tasks
 942 *
 943 * Return the number of tasks currently running or waiting to run.
 944 *
 945 * Note that we don't take runq_lock / list_mutex here.  Reading
 946 * a single 32bit value is atomic on powerpc, and we don't care
 947 * about memory ordering issues here.
 948 */
 949static unsigned long count_active_contexts(void)
 950{
 951	int nr_active = 0, node;
 952
 953	for (node = 0; node < MAX_NUMNODES; node++)
 954		nr_active += cbe_spu_info[node].nr_active;
 955	nr_active += spu_prio->nr_waiting;
 956
 957	return nr_active;
 958}
 959
 960/**
 961 * spu_calc_load - update the avenrun load estimates.
 962 *
 963 * No locking against reading these values from userspace, as for
 964 * the CPU loadavg code.
 965 */
 966static void spu_calc_load(void)
 967{
 968	unsigned long active_tasks; /* fixed-point */
 969
 970	active_tasks = count_active_contexts() * FIXED_1;
 971	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
 972	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
 973	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
 974}
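
spu_avenrun[] stores the 1, 5 and 15 minute load averages in the same fixed-point format as the CPU load average: FIXED_1 represents 1.0 and EXP_1/EXP_5/EXP_15 are per-LOAD_FREQ (roughly every 5 seconds) decay factors. A hedged sketch of what calc_load() does with those values, with the constants assumed to match include/linux/sched/loadavg.h:

/* Sketch of the exponential decay behind calc_load() (illustrative only). */
#define FSHIFT	11			/* bits of fractional precision */
#define FIXED_1	(1UL << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* ~= FIXED_1 / exp(5s / 1min) */
#define EXP_5	2014			/* ~= FIXED_1 / exp(5s / 5min) */
#define EXP_15	2037			/* ~= FIXED_1 / exp(5s / 15min) */

static unsigned long toy_calc_load(unsigned long load, unsigned long exp,
				   unsigned long active)
{
	/* load = load * exp + active * (1 - exp), everything in fixed point */
	return (load * exp + active * (FIXED_1 - exp)) >> FSHIFT;
}

The kernel version additionally rounds up while the load is rising. show_spu_loadavg() below then adds FIXED_1/200 (about 0.005) for rounding and splits each value into an integer part and a two-digit fraction with LOAD_INT()/LOAD_FRAC(); for example, a raw spu_avenrun value of 2150 (about 1.05 in this fixed-point format) is printed as "1.05".
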
 975
 976static void spusched_wake(struct timer_list *unused)
 977{
 978	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 979	wake_up_process(spusched_task);
 980}
 981
 982static void spuloadavg_wake(struct timer_list *unused)
 983{
 984	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
 985	spu_calc_load();
 986}
 987
 988static int spusched_thread(void *unused)
 989{
 990	struct spu *spu;
 991	int node;
 992
 993	while (!kthread_should_stop()) {
 994		set_current_state(TASK_INTERRUPTIBLE);
 995		schedule();
 996		for (node = 0; node < MAX_NUMNODES; node++) {
 997			struct mutex *mtx = &cbe_spu_info[node].list_mutex;
 998
 999			mutex_lock(mtx);
1000			list_for_each_entry(spu, &cbe_spu_info[node].spus,
1001					cbe_list) {
1002				struct spu_context *ctx = spu->ctx;
1003
1004				if (ctx) {
1005					get_spu_context(ctx);
1006					mutex_unlock(mtx);
1007					spusched_tick(ctx);
1008					mutex_lock(mtx);
1009					put_spu_context(ctx);
1010				}
1011			}
1012			mutex_unlock(mtx);
1013		}
1014	}
1015
1016	return 0;
1017}
1018
1019void spuctx_switch_state(struct spu_context *ctx,
1020		enum spu_utilization_state new_state)
1021{
1022	unsigned long long curtime;
1023	signed long long delta;
1024	struct spu *spu;
1025	enum spu_utilization_state old_state;
1026	int node;
1027
1028	curtime = ktime_get_ns();
1029	delta = curtime - ctx->stats.tstamp;
1030
1031	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
1032	WARN_ON(delta < 0);
1033
1034	spu = ctx->spu;
1035	old_state = ctx->stats.util_state;
1036	ctx->stats.util_state = new_state;
1037	ctx->stats.tstamp = curtime;
1038
1039	/*
1040	 * Update the physical SPU utilization statistics.
1041	 */
1042	if (spu) {
1043		ctx->stats.times[old_state] += delta;
1044		spu->stats.times[old_state] += delta;
1045		spu->stats.util_state = new_state;
1046		spu->stats.tstamp = curtime;
1047		node = spu->node;
1048		if (old_state == SPU_UTIL_USER)
1049			atomic_dec(&cbe_spu_info[node].busy_spus);
1050		if (new_state == SPU_UTIL_USER)
1051			atomic_inc(&cbe_spu_info[node].busy_spus);
1052	}
1053}
1054
1055#ifdef CONFIG_PROC_FS
1056static int show_spu_loadavg(struct seq_file *s, void *private)
1057{
1058	int a, b, c;
1059
1060	a = spu_avenrun[0] + (FIXED_1/200);
1061	b = spu_avenrun[1] + (FIXED_1/200);
1062	c = spu_avenrun[2] + (FIXED_1/200);
1063
1064	/*
1065	 * Note that last_pid doesn't really make much sense for the
1066	 * SPU loadavg (it even seems very odd on the CPU side...),
1067	 * but we include it here to have a 100% compatible interface.
1068	 */
1069	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
1070		LOAD_INT(a), LOAD_FRAC(a),
1071		LOAD_INT(b), LOAD_FRAC(b),
1072		LOAD_INT(c), LOAD_FRAC(c),
1073		count_active_contexts(),
1074		atomic_read(&nr_spu_contexts),
1075		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
1076	return 0;
1077}
1078#endif
1079
1080int __init spu_sched_init(void)
1081{
1082	struct proc_dir_entry *entry;
1083	int err = -ENOMEM, i;
1084
1085	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
1086	if (!spu_prio)
1087		goto out;
1088
1089	for (i = 0; i < MAX_PRIO; i++) {
1090		INIT_LIST_HEAD(&spu_prio->runq[i]);
1091		__clear_bit(i, spu_prio->bitmap);
1092	}
1093	spin_lock_init(&spu_prio->runq_lock);
1094
1095	timer_setup(&spusched_timer, spusched_wake, 0);
1096	timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);
1097
1098	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
1099	if (IS_ERR(spusched_task)) {
1100		err = PTR_ERR(spusched_task);
1101		goto out_free_spu_prio;
1102	}
1103
1104	mod_timer(&spuloadavg_timer, 0);
1105
1106	entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg);
1107	if (!entry)
1108		goto out_stop_kthread;
1109
1110	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
1111			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
1112	return 0;
1113
1114 out_stop_kthread:
1115	kthread_stop(spusched_task);
1116 out_free_spu_prio:
1117	kfree(spu_prio);
1118 out:
1119	return err;
1120}
1121
1122void spu_sched_exit(void)
1123{
1124	struct spu *spu;
1125	int node;
1126
1127	remove_proc_entry("spu_loadavg", NULL);
1128
1129	del_timer_sync(&spusched_timer);
1130	del_timer_sync(&spuloadavg_timer);
1131	kthread_stop(spusched_task);
1132
1133	for (node = 0; node < MAX_NUMNODES; node++) {
1134		mutex_lock(&cbe_spu_info[node].list_mutex);
1135		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
1136			if (spu->alloc_state != SPU_FREE)
1137				spu->alloc_state = SPU_FREE;
1138		mutex_unlock(&cbe_spu_info[node].list_mutex);
1139	}
1140	kfree(spu_prio);
1141}
v3.15
 
   1/* sched.c - SPU scheduler.
   2 *
   3 * Copyright (C) IBM 2005
   4 * Author: Mark Nutter <mnutter@us.ibm.com>
   5 *
   6 * 2006-03-31	NUMA domains added.
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2, or (at your option)
  11 * any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21 */
  22
  23#undef DEBUG
  24
  25#include <linux/errno.h>
  26#include <linux/sched.h>
 
  27#include <linux/sched/rt.h>
  28#include <linux/kernel.h>
  29#include <linux/mm.h>
  30#include <linux/slab.h>
  31#include <linux/completion.h>
  32#include <linux/vmalloc.h>
  33#include <linux/smp.h>
  34#include <linux/stddef.h>
  35#include <linux/unistd.h>
  36#include <linux/numa.h>
  37#include <linux/mutex.h>
  38#include <linux/notifier.h>
  39#include <linux/kthread.h>
  40#include <linux/pid_namespace.h>
  41#include <linux/proc_fs.h>
  42#include <linux/seq_file.h>
  43
  44#include <asm/io.h>
  45#include <asm/mmu_context.h>
  46#include <asm/spu.h>
  47#include <asm/spu_csa.h>
  48#include <asm/spu_priv1.h>
  49#include "spufs.h"
  50#define CREATE_TRACE_POINTS
  51#include "sputrace.h"
  52
  53struct spu_prio_array {
  54	DECLARE_BITMAP(bitmap, MAX_PRIO);
  55	struct list_head runq[MAX_PRIO];
  56	spinlock_t runq_lock;
  57	int nr_waiting;
  58};
  59
  60static unsigned long spu_avenrun[3];
  61static struct spu_prio_array *spu_prio;
  62static struct task_struct *spusched_task;
  63static struct timer_list spusched_timer;
  64static struct timer_list spuloadavg_timer;
  65
  66/*
  67 * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
  68 */
  69#define NORMAL_PRIO		120
  70
  71/*
  72 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
  73 * tick for every 10 CPU scheduler ticks.
  74 */
  75#define SPUSCHED_TICK		(10)
  76
  77/*
  78 * These are the 'tuning knobs' of the scheduler:
  79 *
  80 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
  81 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
  82 */
  83#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
  84#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))
  85
  86#define SCALE_PRIO(x, prio) \
  87	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
  88
  89/*
  90 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
  91 * [800ms ... 100ms ... 5ms]
  92 *
  93 * The higher a thread's priority, the bigger timeslices
  94 * it gets during one round of execution. But even the lowest
  95 * priority thread gets MIN_TIMESLICE worth of execution time.
  96 */
  97void spu_set_timeslice(struct spu_context *ctx)
  98{
  99	if (ctx->prio < NORMAL_PRIO)
 100		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
 101	else
 102		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
 103}
 104
 105/*
 106 * Update scheduling information from the owning thread.
 107 */
 108void __spu_update_sched_info(struct spu_context *ctx)
 109{
 110	/*
 111	 * assert that the context is not on the runqueue, so it is safe
 112	 * to change its scheduling parameters.
 113	 */
 114	BUG_ON(!list_empty(&ctx->rq));
 115
 116	/*
 117	 * 32-Bit assignments are atomic on powerpc, and we don't care about
 118	 * memory ordering here because retrieving the controlling thread is
 119	 * per definition racy.
 120	 */
 121	ctx->tid = current->pid;
 122
 123	/*
 124	 * We do our own priority calculations, so we normally want
 125	 * ->static_prio to start with. Unfortunately this field
 126	 * contains junk for threads with a realtime scheduling
 127	 * policy so we have to look at ->prio in this case.
 128	 */
 129	if (rt_prio(current->prio))
 130		ctx->prio = current->prio;
 131	else
 132		ctx->prio = current->static_prio;
 133	ctx->policy = current->policy;
 134
 135	/*
 136	 * TO DO: the context may be loaded, so we may need to activate
 137	 * it again on a different node. But it shouldn't hurt anything
 138	 * to update its parameters, because we know that the scheduler
 139	 * is not actively looking at this field, since it is not on the
 140	 * runqueue. The context will be rescheduled on the proper node
 141	 * if it is timesliced or preempted.
 142	 */
 143	cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
 144
 145	/* Save the current cpu id for spu interrupt routing. */
 146	ctx->last_ran = raw_smp_processor_id();
 147}
 148
 149void spu_update_sched_info(struct spu_context *ctx)
 150{
 151	int node;
 152
 153	if (ctx->state == SPU_STATE_RUNNABLE) {
 154		node = ctx->spu->node;
 155
 156		/*
 157		 * Take list_mutex to sync with find_victim().
 158		 */
 159		mutex_lock(&cbe_spu_info[node].list_mutex);
 160		__spu_update_sched_info(ctx);
 161		mutex_unlock(&cbe_spu_info[node].list_mutex);
 162	} else {
 163		__spu_update_sched_info(ctx);
 164	}
 165}
 166
 167static int __node_allowed(struct spu_context *ctx, int node)
 168{
 169	if (nr_cpus_node(node)) {
 170		const struct cpumask *mask = cpumask_of_node(node);
 171
 172		if (cpumask_intersects(mask, &ctx->cpus_allowed))
 173			return 1;
 174	}
 175
 176	return 0;
 177}
 178
 179static int node_allowed(struct spu_context *ctx, int node)
 180{
 181	int rval;
 182
 183	spin_lock(&spu_prio->runq_lock);
 184	rval = __node_allowed(ctx, node);
 185	spin_unlock(&spu_prio->runq_lock);
 186
 187	return rval;
 188}
 189
 190void do_notify_spus_active(void)
 191{
 192	int node;
 193
 194	/*
 195	 * Wake up the active spu_contexts.
 196	 *
 197	 * When the awakened processes see their "notify_active" flag is set,
 198	 * they will call spu_switch_notify().
 199	 */
 200	for_each_online_node(node) {
 201		struct spu *spu;
 202
 203		mutex_lock(&cbe_spu_info[node].list_mutex);
 204		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 205			if (spu->alloc_state != SPU_FREE) {
 206				struct spu_context *ctx = spu->ctx;
 207				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
 208					&ctx->sched_flags);
 209				mb();
 210				wake_up_all(&ctx->stop_wq);
 211			}
 212		}
 213		mutex_unlock(&cbe_spu_info[node].list_mutex);
 214	}
 215}
 216
 217/**
 218 * spu_bind_context - bind spu context to physical spu
 219 * @spu:	physical spu to bind to
 220 * @ctx:	context to bind
 221 */
 222static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 223{
 224	spu_context_trace(spu_bind_context__enter, ctx, spu);
 225
 226	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 227
 228	if (ctx->flags & SPU_CREATE_NOSCHED)
 229		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);
 230
 231	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 232	ctx->stats.class2_intr_base = spu->stats.class2_intr;
 233
 234	spu_associate_mm(spu, ctx->owner);
 235
 236	spin_lock_irq(&spu->register_lock);
 237	spu->ctx = ctx;
 238	spu->flags = 0;
 239	ctx->spu = spu;
 240	ctx->ops = &spu_hw_ops;
 241	spu->pid = current->pid;
 242	spu->tgid = current->tgid;
 243	spu->ibox_callback = spufs_ibox_callback;
 244	spu->wbox_callback = spufs_wbox_callback;
 245	spu->stop_callback = spufs_stop_callback;
 246	spu->mfc_callback = spufs_mfc_callback;
 247	spin_unlock_irq(&spu->register_lock);
 248
 249	spu_unmap_mappings(ctx);
 250
 251	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 252	spu_restore(&ctx->csa, spu);
 253	spu->timestamp = jiffies;
 254	spu_switch_notify(spu, ctx);
 255	ctx->state = SPU_STATE_RUNNABLE;
 256
 257	spuctx_switch_state(ctx, SPU_UTIL_USER);
 258}
 259
 260/*
 261 * Must be used with the list_mutex held.
 262 */
 263static inline int sched_spu(struct spu *spu)
 264{
 265	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));
 266
 267	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
 268}
 269
 270static void aff_merge_remaining_ctxs(struct spu_gang *gang)
 271{
 272	struct spu_context *ctx;
 273
 274	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
 275		if (list_empty(&ctx->aff_list))
 276			list_add(&ctx->aff_list, &gang->aff_list_head);
 277	}
 278	gang->aff_flags |= AFF_MERGED;
 279}
 280
 281static void aff_set_offsets(struct spu_gang *gang)
 282{
 283	struct spu_context *ctx;
 284	int offset;
 285
 286	offset = -1;
 287	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
 288								aff_list) {
 289		if (&ctx->aff_list == &gang->aff_list_head)
 290			break;
 291		ctx->aff_offset = offset--;
 292	}
 293
 294	offset = 0;
 295	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
 296		if (&ctx->aff_list == &gang->aff_list_head)
 297			break;
 298		ctx->aff_offset = offset++;
 299	}
 300
 301	gang->aff_flags |= AFF_OFFSETS_SET;
 302}
 303
 304static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
 305		 int group_size, int lowest_offset)
 306{
 307	struct spu *spu;
 308	int node, n;
 309
 310	/*
 311	 * TODO: A better algorithm could be used to find a good spu to be
 312	 *       used as reference location for the ctxs chain.
 313	 */
 314	node = cpu_to_node(raw_smp_processor_id());
 315	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 316		/*
 317		 * "available_spus" counts how many spus are not potentially
 318		 * going to be used by other affinity gangs whose reference
 319		 * context is already in place. Although this code seeks to
 320		 * avoid having affinity gangs with a summed amount of
 321		 * contexts bigger than the amount of spus in the node,
 322		 * this may happen sporadically. In this case, available_spus
 323		 * becomes negative, which is harmless.
 324		 */
 325		int available_spus;
 326
 327		node = (node < MAX_NUMNODES) ? node : 0;
 328		if (!node_allowed(ctx, node))
 329			continue;
 330
 331		available_spus = 0;
 332		mutex_lock(&cbe_spu_info[node].list_mutex);
 333		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 334			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
 335					&& spu->ctx->gang->aff_ref_spu)
 336				available_spus -= spu->ctx->gang->contexts;
 337			available_spus++;
 338		}
 339		if (available_spus < ctx->gang->contexts) {
 340			mutex_unlock(&cbe_spu_info[node].list_mutex);
 341			continue;
 342		}
 343
 344		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 345			if ((!mem_aff || spu->has_mem_affinity) &&
 346							sched_spu(spu)) {
 347				mutex_unlock(&cbe_spu_info[node].list_mutex);
 348				return spu;
 349			}
 350		}
 351		mutex_unlock(&cbe_spu_info[node].list_mutex);
 352	}
 353	return NULL;
 354}
 355
 356static void aff_set_ref_point_location(struct spu_gang *gang)
 357{
 358	int mem_aff, gs, lowest_offset;
 359	struct spu_context *ctx;
 360	struct spu *tmp;
 361
 362	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
 363	lowest_offset = 0;
 364	gs = 0;
 365
 366	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
 367		gs++;
 368
 369	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
 370								aff_list) {
 371		if (&ctx->aff_list == &gang->aff_list_head)
 372			break;
 373		lowest_offset = ctx->aff_offset;
 374	}
 375
 376	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
 377							lowest_offset);
 378}
 379
 380static struct spu *ctx_location(struct spu *ref, int offset, int node)
 381{
 382	struct spu *spu;
 383
 384	spu = NULL;
 385	if (offset >= 0) {
 386		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
 387			BUG_ON(spu->node != node);
 388			if (offset == 0)
 389				break;
 390			if (sched_spu(spu))
 391				offset--;
 392		}
 393	} else {
 394		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
 395			BUG_ON(spu->node != node);
 396			if (offset == 0)
 397				break;
 398			if (sched_spu(spu))
 399				offset++;
 400		}
 401	}
 402
 403	return spu;
 404}
 405
 406/*
 407 * affinity_check is called each time a context is going to be scheduled.
 408 * It returns the spu ptr on which the context must run.
 409 */
 410static int has_affinity(struct spu_context *ctx)
 411{
 412	struct spu_gang *gang = ctx->gang;
 413
 414	if (list_empty(&ctx->aff_list))
 415		return 0;
 416
 417	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
 418		ctx->gang->aff_ref_spu = NULL;
 419
 420	if (!gang->aff_ref_spu) {
 421		if (!(gang->aff_flags & AFF_MERGED))
 422			aff_merge_remaining_ctxs(gang);
 423		if (!(gang->aff_flags & AFF_OFFSETS_SET))
 424			aff_set_offsets(gang);
 425		aff_set_ref_point_location(gang);
 426	}
 427
 428	return gang->aff_ref_spu != NULL;
 429}
 430
 431/**
 432 * spu_unbind_context - unbind spu context from physical spu
 433 * @spu:	physical spu to unbind from
 434 * @ctx:	context to unbind
 435 */
 436static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 437{
 438	u32 status;
 439
 440	spu_context_trace(spu_unbind_context__enter, ctx, spu);
 441
 442	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 443
 444 	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
 445		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
 446
 447	if (ctx->gang)
 448		/*
 449		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
 450		 * being considered in this gang. Using atomic_dec_if_positive
 451		 * allow us to skip an explicit check for affinity in this gang
 452		 */
 453		atomic_dec_if_positive(&ctx->gang->aff_sched_count);
 454
 455	spu_switch_notify(spu, NULL);
 456	spu_unmap_mappings(ctx);
 457	spu_save(&ctx->csa, spu);
 458	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
 459
 460	spin_lock_irq(&spu->register_lock);
 461	spu->timestamp = jiffies;
 462	ctx->state = SPU_STATE_SAVED;
 463	spu->ibox_callback = NULL;
 464	spu->wbox_callback = NULL;
 465	spu->stop_callback = NULL;
 466	spu->mfc_callback = NULL;
 467	spu->pid = 0;
 468	spu->tgid = 0;
 469	ctx->ops = &spu_backing_ops;
 470	spu->flags = 0;
 471	spu->ctx = NULL;
 472	spin_unlock_irq(&spu->register_lock);
 473
 474	spu_associate_mm(spu, NULL);
 475
 476	ctx->stats.slb_flt +=
 477		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
 478	ctx->stats.class2_intr +=
 479		(spu->stats.class2_intr - ctx->stats.class2_intr_base);
 480
 481	/* This maps the underlying spu state to idle */
 482	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 483	ctx->spu = NULL;
 484
 485	if (spu_stopped(ctx, &status))
 486		wake_up_all(&ctx->stop_wq);
 487}
 488
 489/**
 490 * spu_add_to_rq - add a context to the runqueue
 491 * @ctx:       context to add
 492 */
 493static void __spu_add_to_rq(struct spu_context *ctx)
 494{
 495	/*
 496	 * Unfortunately this code path can be called from multiple threads
 497	 * on behalf of a single context due to the way the problem state
 498	 * mmap support works.
 499	 *
 500	 * Fortunately we need to wake up all these threads at the same time
 501	 * and can simply skip the runqueue addition for every but the first
 502	 * thread getting into this codepath.
 503	 *
 504	 * It's still quite hacky, and long-term we should proxy all other
 505	 * threads through the owner thread so that spu_run is in control
 506	 * of all the scheduling activity for a given context.
 507	 */
 508	if (list_empty(&ctx->rq)) {
 509		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 510		set_bit(ctx->prio, spu_prio->bitmap);
 511		if (!spu_prio->nr_waiting++)
 512			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 513	}
 514}
 515
 516static void spu_add_to_rq(struct spu_context *ctx)
 517{
 518	spin_lock(&spu_prio->runq_lock);
 519	__spu_add_to_rq(ctx);
 520	spin_unlock(&spu_prio->runq_lock);
 521}
 522
 523static void __spu_del_from_rq(struct spu_context *ctx)
 524{
 525	int prio = ctx->prio;
 526
 527	if (!list_empty(&ctx->rq)) {
 528		if (!--spu_prio->nr_waiting)
 529			del_timer(&spusched_timer);
 530		list_del_init(&ctx->rq);
 531
 532		if (list_empty(&spu_prio->runq[prio]))
 533			clear_bit(prio, spu_prio->bitmap);
 534	}
 535}
 536
 537void spu_del_from_rq(struct spu_context *ctx)
 538{
 539	spin_lock(&spu_prio->runq_lock);
 540	__spu_del_from_rq(ctx);
 541	spin_unlock(&spu_prio->runq_lock);
 542}
 543
 544static void spu_prio_wait(struct spu_context *ctx)
 545{
 546	DEFINE_WAIT(wait);
 547
 548	/*
 549	 * The caller must explicitly wait for a context to be loaded
 550	 * if the nosched flag is set.  If NOSCHED is not set, the caller
 551	 * queues the context and waits for an spu event or error.
 552	 */
 553	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
 554
 555	spin_lock(&spu_prio->runq_lock);
 556	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 557	if (!signal_pending(current)) {
 558		__spu_add_to_rq(ctx);
 559		spin_unlock(&spu_prio->runq_lock);
 560		mutex_unlock(&ctx->state_mutex);
 561		schedule();
 562		mutex_lock(&ctx->state_mutex);
 563		spin_lock(&spu_prio->runq_lock);
 564		__spu_del_from_rq(ctx);
 565	}
 566	spin_unlock(&spu_prio->runq_lock);
 567	__set_current_state(TASK_RUNNING);
 568	remove_wait_queue(&ctx->stop_wq, &wait);
 569}
 570
 571static struct spu *spu_get_idle(struct spu_context *ctx)
 572{
 573	struct spu *spu, *aff_ref_spu;
 574	int node, n;
 575
 576	spu_context_nospu_trace(spu_get_idle__enter, ctx);
 577
 578	if (ctx->gang) {
 579		mutex_lock(&ctx->gang->aff_mutex);
 580		if (has_affinity(ctx)) {
 581			aff_ref_spu = ctx->gang->aff_ref_spu;
 582			atomic_inc(&ctx->gang->aff_sched_count);
 583			mutex_unlock(&ctx->gang->aff_mutex);
 584			node = aff_ref_spu->node;
 585
 586			mutex_lock(&cbe_spu_info[node].list_mutex);
 587			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
 588			if (spu && spu->alloc_state == SPU_FREE)
 589				goto found;
 590			mutex_unlock(&cbe_spu_info[node].list_mutex);
 591
 592			atomic_dec(&ctx->gang->aff_sched_count);
 593			goto not_found;
 594		}
 595		mutex_unlock(&ctx->gang->aff_mutex);
 596	}
 597	node = cpu_to_node(raw_smp_processor_id());
 598	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 599		node = (node < MAX_NUMNODES) ? node : 0;
 600		if (!node_allowed(ctx, node))
 601			continue;
 602
 603		mutex_lock(&cbe_spu_info[node].list_mutex);
 604		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 605			if (spu->alloc_state == SPU_FREE)
 606				goto found;
 607		}
 608		mutex_unlock(&cbe_spu_info[node].list_mutex);
 609	}
 610
 611 not_found:
 612	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
 613	return NULL;
 614
 615 found:
 616	spu->alloc_state = SPU_USED;
 617	mutex_unlock(&cbe_spu_info[node].list_mutex);
 618	spu_context_trace(spu_get_idle__found, ctx, spu);
 619	spu_init_channels(spu);
 620	return spu;
 621}
 622
 623/**
 624 * find_victim - find a lower priority context to preempt
 625 * @ctx:	canidate context for running
 626 *
 627 * Returns the freed physical spu to run the new context on.
 628 */
 629static struct spu *find_victim(struct spu_context *ctx)
 630{
 631	struct spu_context *victim = NULL;
 632	struct spu *spu;
 633	int node, n;
 634
 635	spu_context_nospu_trace(spu_find_victim__enter, ctx);
 636
 637	/*
 638	 * Look for a possible preemption candidate on the local node first.
 639	 * If there is no candidate look at the other nodes.  This isn't
 640	 * exactly fair, but so far the whole spu scheduler tries to keep
 641	 * a strong node affinity.  We might want to fine-tune this in
 642	 * the future.
 643	 */
 644 restart:
 645	node = cpu_to_node(raw_smp_processor_id());
 646	for (n = 0; n < MAX_NUMNODES; n++, node++) {
 647		node = (node < MAX_NUMNODES) ? node : 0;
 648		if (!node_allowed(ctx, node))
 649			continue;
 650
 651		mutex_lock(&cbe_spu_info[node].list_mutex);
 652		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
 653			struct spu_context *tmp = spu->ctx;
 654
 655			if (tmp && tmp->prio > ctx->prio &&
 656			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
 657			    (!victim || tmp->prio > victim->prio)) {
 658				victim = spu->ctx;
 659			}
 660		}
 661		if (victim)
 662			get_spu_context(victim);
 663		mutex_unlock(&cbe_spu_info[node].list_mutex);
 664
 665		if (victim) {
 666			/*
 667			 * This nests ctx->state_mutex, but we always lock
 668			 * higher priority contexts before lower priority
 669			 * ones, so this is safe until we introduce
 670			 * priority inheritance schemes.
 671			 *
 672			 * XXX if the highest priority context is locked,
 673			 * this can loop a long time.  Might be better to
 674			 * look at another context or give up after X retries.
 675			 */
 676			if (!mutex_trylock(&victim->state_mutex)) {
 677				put_spu_context(victim);
 678				victim = NULL;
 679				goto restart;
 680			}
 681
 682			spu = victim->spu;
 683			if (!spu || victim->prio <= ctx->prio) {
 684				/*
 685				 * This race can happen because we've dropped
 686				 * the active list mutex.  Not a problem, just
 687				 * restart the search.
 688				 */
 689				mutex_unlock(&victim->state_mutex);
 690				put_spu_context(victim);
 691				victim = NULL;
 692				goto restart;
 693			}
 694
 695			spu_context_trace(__spu_deactivate__unload, ctx, spu);
 696
 697			mutex_lock(&cbe_spu_info[node].list_mutex);
 698			cbe_spu_info[node].nr_active--;
 699			spu_unbind_context(spu, victim);
 700			mutex_unlock(&cbe_spu_info[node].list_mutex);
 701
 702			victim->stats.invol_ctx_switch++;
 703			spu->stats.invol_ctx_switch++;
 704			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
 705				spu_add_to_rq(victim);
 706
 707			mutex_unlock(&victim->state_mutex);
 708			put_spu_context(victim);
 709
 710			return spu;
 711		}
 712	}
 713
 714	return NULL;
 715}
 716
 717static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
 718{
 719	int node = spu->node;
 720	int success = 0;
 721
 722	spu_set_timeslice(ctx);
 723
 724	mutex_lock(&cbe_spu_info[node].list_mutex);
 725	if (spu->ctx == NULL) {
 726		spu_bind_context(spu, ctx);
 727		cbe_spu_info[node].nr_active++;
 728		spu->alloc_state = SPU_USED;
 729		success = 1;
 730	}
 731	mutex_unlock(&cbe_spu_info[node].list_mutex);
 732
 733	if (success)
 734		wake_up_all(&ctx->run_wq);
 735	else
 736		spu_add_to_rq(ctx);
 737}
 738
 739static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 740{
 741	/* not a candidate for interruptible because it's called either
 742	   from the scheduler thread or from spu_deactivate */
 743	mutex_lock(&ctx->state_mutex);
 744	if (ctx->state == SPU_STATE_SAVED)
 745		__spu_schedule(spu, ctx);
 746	spu_release(ctx);
 747}
 748
 749/**
 750 * spu_unschedule - remove a context from a spu, and possibly release it.
 751 * @spu:	The SPU to unschedule from
 752 * @ctx:	The context currently scheduled on the SPU
 753 * @free_spu:	Whether to free the SPU for other contexts
 754 *
 755 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 756 * SPU is made available for other contexts (i.e., it may be returned by
 757 * spu_get_idle). If this is zero, the caller is expected to schedule another
 758 * context onto this spu.
 759 *
 760 * Should be called with ctx->state_mutex held.
 761 */
 762static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
 763		int free_spu)
 764{
 765	int node = spu->node;
 766
 767	mutex_lock(&cbe_spu_info[node].list_mutex);
 768	cbe_spu_info[node].nr_active--;
 769	if (free_spu)
 770		spu->alloc_state = SPU_FREE;
 771	spu_unbind_context(spu, ctx);
 772	ctx->stats.invol_ctx_switch++;
 773	spu->stats.invol_ctx_switch++;
 774	mutex_unlock(&cbe_spu_info[node].list_mutex);
 775}
 776
 777/**
 778 * spu_activate - find a free spu for a context and execute it
 779 * @ctx:	spu context to schedule
 780 * @flags:	flags (currently ignored)
 781 *
 782 * Tries to find a free spu to run @ctx.  If no free spu is available,
 783 * add the context to the runqueue so it gets woken up once an spu
 784 * is available.
 785 */
 786int spu_activate(struct spu_context *ctx, unsigned long flags)
 787{
 788	struct spu *spu;
 789
 790	/*
 791	 * If there are multiple threads waiting for a single context
 792	 * only one actually binds the context while the others will
 793	 * only be able to acquire the state_mutex once the context
 794	 * already is in runnable state.
 795	 */
 796	if (ctx->spu)
 797		return 0;
 798
 799spu_activate_top:
 800	if (signal_pending(current))
 801		return -ERESTARTSYS;
 802
 803	spu = spu_get_idle(ctx);
 804	/*
 805	 * If this is a realtime thread we try to get it running by
 806	 * preempting a lower priority thread.
 807	 */
 808	if (!spu && rt_prio(ctx->prio))
 809		spu = find_victim(ctx);
 810	if (spu) {
 811		unsigned long runcntl;
 812
 813		runcntl = ctx->ops->runcntl_read(ctx);
 814		__spu_schedule(spu, ctx);
 815		if (runcntl & SPU_RUNCNTL_RUNNABLE)
 816			spuctx_switch_state(ctx, SPU_UTIL_USER);
 817
 818		return 0;
 819	}
 820
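    	/*
    	 * NOSCHED contexts must never sit on the runqueue; keep waiting
    	 * for a free spu and redo the search instead.
    	 */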
 821	if (ctx->flags & SPU_CREATE_NOSCHED) {
 822		spu_prio_wait(ctx);
 823		goto spu_activate_top;
 824	}
 825
 826	spu_add_to_rq(ctx);
 827
 828	return 0;
 829}
 830
 831/**
 832 * grab_runnable_context - try to find a runnable context
     * @prio:	only contexts with a priority number below @prio are considered
     * @node:	node whose spu became free; a context must allow this node
 833 *
 834 * Remove the highest priority context on the runqueue and return it
 835 * to the caller.  Returns %NULL if no runnable context was found.
 836 */
 837static struct spu_context *grab_runnable_context(int prio, int node)
 838{
 839	struct spu_context *ctx;
 840	int best;
 841
 842	spin_lock(&spu_prio->runq_lock);
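    	/*
    	 * Lower priority numbers mean higher priority, so the first set
    	 * bit below @prio marks the best non-empty run queue.
    	 */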
 843	best = find_first_bit(spu_prio->bitmap, prio);
 844	while (best < prio) {
 845		struct list_head *rq = &spu_prio->runq[best];
 846
 847		list_for_each_entry(ctx, rq, rq) {
 848			/* XXX(hch): check for affinity here as well */
 849			if (__node_allowed(ctx, node)) {
 850				__spu_del_from_rq(ctx);
 851				goto found;
 852			}
 853		}
 854		best++;
 855	}
 856	ctx = NULL;
 857 found:
 858	spin_unlock(&spu_prio->runq_lock);
 859	return ctx;
 860}
 861
 862static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 863{
 864	struct spu *spu = ctx->spu;
 865	struct spu_context *new = NULL;
 866
 867	if (spu) {
 868		new = grab_runnable_context(max_prio, spu->node);
 869		if (new || force) {
 870			spu_unschedule(spu, ctx, new == NULL);
 871			if (new) {
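    				/*
    				 * A SPU_CREATE_NOSCHED context sleeps in
    				 * spu_prio_wait() and retries the idle
    				 * search itself, so waking its stop_wq is
    				 * enough; everything else gets the freed
    				 * spu handed over directly.
    				 */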
 872				if (new->flags & SPU_CREATE_NOSCHED)
 873					wake_up(&new->stop_wq);
 874				else {
 875					spu_release(ctx);
 876					spu_schedule(spu, new);
 877					/* this one can't easily be made
 878					   interruptible */
 879					mutex_lock(&ctx->state_mutex);
 880				}
 881			}
 882		}
 883	}
 884
 885	return new != NULL;
 886}
 887
 888/**
 889 * spu_deactivate - unbind a context from its physical spu
 890 * @ctx:	spu context to unbind
 891 *
 892 * Unbind @ctx from the physical spu it is running on and schedule
 893 * the highest priority context to run on the freed physical spu.
 894 */
 895void spu_deactivate(struct spu_context *ctx)
 896{
 897	spu_context_nospu_trace(spu_deactivate__enter, ctx);
 898	__spu_deactivate(ctx, 1, MAX_PRIO);
 899}
 900
 901/**
 902 * spu_yield -	yield a physical spu if others are waiting
 903 * @ctx:	spu context to yield
 904 *
 905 * Check if there is a higher priority context waiting and, if so,
 906 * unbind @ctx from the physical spu and schedule the highest
 907 * priority context to run on the freed physical spu instead.
 908 */
 909void spu_yield(struct spu_context *ctx)
 910{
 911	spu_context_nospu_trace(spu_yield__enter, ctx);
 912	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
 913		mutex_lock(&ctx->state_mutex);
 914		__spu_deactivate(ctx, 0, MAX_PRIO);
 915		mutex_unlock(&ctx->state_mutex);
 916	}
 917}
 918
 919static noinline void spusched_tick(struct spu_context *ctx)
 920{
 921	struct spu_context *new = NULL;
 922	struct spu *spu = NULL;
 923
 924	if (spu_acquire(ctx))
 925		BUG();	/* a kernel thread never has signals pending */
 926
 927	if (ctx->state != SPU_STATE_RUNNABLE)
 928		goto out;
 929	if (ctx->flags & SPU_CREATE_NOSCHED)
 930		goto out;
 931	if (ctx->policy == SCHED_FIFO)
 932		goto out;
 933
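    	/*
    	 * Keep the context on its spu as long as it has time slice left
    	 * and its controlling thread is still inside spu_run().
    	 */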
 934	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 935		goto out;
 936
 937	spu = ctx->spu;
 938
 939	spu_context_trace(spusched_tick__preempt, ctx, spu);
 940
 941	new = grab_runnable_context(ctx->prio + 1, spu->node);
 942	if (new) {
 943		spu_unschedule(spu, ctx, 0);
 944		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 945			spu_add_to_rq(ctx);
 946	} else {
 947		spu_context_nospu_trace(spusched_tick__newslice, ctx);
 948		if (!ctx->time_slice)
 949			ctx->time_slice++;
 950	}
 951out:
 952	spu_release(ctx);
 953
 954	if (new)
 955		spu_schedule(spu, new);
 956}
 957
 958/**
 959 * count_active_contexts - count the number of active contexts
 960 *
 961 * Return the number of contexts currently running or waiting to run.
 962 *
 963 * Note that we don't take runq_lock / list_mutex here.  Reading
 964 * a single 32-bit value is atomic on powerpc, and we don't care
 965 * about memory ordering issues here.
 966 */
 967static unsigned long count_active_contexts(void)
 968{
 969	int nr_active = 0, node;
 970
 971	for (node = 0; node < MAX_NUMNODES; node++)
 972		nr_active += cbe_spu_info[node].nr_active;
 973	nr_active += spu_prio->nr_waiting;
 974
 975	return nr_active;
 976}
 977
 978/**
 979 * spu_calc_load - update the avenrun load estimates.
 980 *
 981 * No locking against reading these values from userspace, as for
 982 * the CPU loadavg code.
 983 */
 984static void spu_calc_load(void)
 985{
 986	unsigned long active_tasks; /* fixed-point */
 987
 988	active_tasks = count_active_contexts() * FIXED_1;
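    	/* Exponentially decayed averages over 1, 5 and 15 minutes. */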
 989	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
 990	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
 991	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
 992}
 993
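    /*
     * Timer callbacks: each one rearms itself, so both keep ticking for
     * the lifetime of the scheduler.  spusched_wake only kicks the
     * scheduler thread because the tick has to sleep on mutexes, while
     * the load average update is cheap enough to run in timer context.
     */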
 994static void spusched_wake(unsigned long data)
 995{
 996	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 997	wake_up_process(spusched_task);
 998}
 999
1000static void spuloadavg_wake(unsigned long data)
1001{
1002	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
1003	spu_calc_load();
1004}
1005
1006static int spusched_thread(void *unused)
1007{
1008	struct spu *spu;
1009	int node;
1010
1011	while (!kthread_should_stop()) {
1012		set_current_state(TASK_INTERRUPTIBLE);
1013		schedule();
1014		for (node = 0; node < MAX_NUMNODES; node++) {
1015			struct mutex *mtx = &cbe_spu_info[node].list_mutex;
1016
1017			mutex_lock(mtx);
1018			list_for_each_entry(spu, &cbe_spu_info[node].spus,
1019					cbe_list) {
1020				struct spu_context *ctx = spu->ctx;
1021
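    				/*
    				 * Take a reference and drop list_mutex
    				 * before ticking: spusched_tick() acquires
    				 * ctx->state_mutex, which nests outside
    				 * list_mutex everywhere else.
    				 */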
1022				if (ctx) {
1023					get_spu_context(ctx);
1024					mutex_unlock(mtx);
1025					spusched_tick(ctx);
1026					mutex_lock(mtx);
1027					put_spu_context(ctx);
1028				}
1029			}
1030			mutex_unlock(mtx);
1031		}
1032	}
1033
1034	return 0;
1035}
1036
1037void spuctx_switch_state(struct spu_context *ctx,
1038		enum spu_utilization_state new_state)
1039{
1040	unsigned long long curtime;
1041	signed long long delta;
1042	struct timespec ts;
1043	struct spu *spu;
1044	enum spu_utilization_state old_state;
1045	int node;
1046
1047	ktime_get_ts(&ts);
1048	curtime = timespec_to_ns(&ts);
1049	delta = curtime - ctx->stats.tstamp;
1050
1051	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
1052	WARN_ON(delta < 0);
1053
1054	spu = ctx->spu;
1055	old_state = ctx->stats.util_state;
1056	ctx->stats.util_state = new_state;
1057	ctx->stats.tstamp = curtime;
1058
1059	/*
1060	 * Update the physical SPU utilization statistics.
1061	 */
1062	if (spu) {
1063		ctx->stats.times[old_state] += delta;
1064		spu->stats.times[old_state] += delta;
1065		spu->stats.util_state = new_state;
1066		spu->stats.tstamp = curtime;
1067		node = spu->node;
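    		/*
    		 * busy_spus counts the SPUs running user code on each node;
    		 * only transitions into or out of SPU_UTIL_USER change it.
    		 */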
1068		if (old_state == SPU_UTIL_USER)
1069			atomic_dec(&cbe_spu_info[node].busy_spus);
1070		if (new_state == SPU_UTIL_USER)
1071			atomic_inc(&cbe_spu_info[node].busy_spus);
1072	}
1073}
1074
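    /*
     * Split a fixed-point (1 << FSHIFT scale) load average into its
     * integer part and a two-digit fraction for printing.
     */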
1075#define LOAD_INT(x) ((x) >> FSHIFT)
1076#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
1077
1078static int show_spu_loadavg(struct seq_file *s, void *private)
1079{
1080	int a, b, c;
1081
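    	/* FIXED_1/200 rounds the value to the nearest displayed hundredth. */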
1082	a = spu_avenrun[0] + (FIXED_1/200);
1083	b = spu_avenrun[1] + (FIXED_1/200);
1084	c = spu_avenrun[2] + (FIXED_1/200);
1085
1086	/*
1087	 * Note that last_pid doesn't really make much sense for the
1088	 * SPU loadavg (it even seems very odd on the CPU side...),
1089	 * but we include it here to have a 100% compatible interface.
1090	 */
1091	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
1092		LOAD_INT(a), LOAD_FRAC(a),
1093		LOAD_INT(b), LOAD_FRAC(b),
1094		LOAD_INT(c), LOAD_FRAC(c),
1095		count_active_contexts(),
1096		atomic_read(&nr_spu_contexts),
1097		task_active_pid_ns(current)->last_pid);
1098	return 0;
1099}
1100
1101static int spu_loadavg_open(struct inode *inode, struct file *file)
1102{
1103	return single_open(file, show_spu_loadavg, NULL);
1104}
1105
1106static const struct file_operations spu_loadavg_fops = {
1107	.open		= spu_loadavg_open,
1108	.read		= seq_read,
1109	.llseek		= seq_lseek,
1110	.release	= single_release,
1111};
1112
1113int __init spu_sched_init(void)
1114{
1115	struct proc_dir_entry *entry;
1116	int err = -ENOMEM, i;
1117
1118	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
1119	if (!spu_prio)
1120		goto out;
1121
1122	for (i = 0; i < MAX_PRIO; i++) {
1123		INIT_LIST_HEAD(&spu_prio->runq[i]);
1124		__clear_bit(i, spu_prio->bitmap);
1125	}
1126	spin_lock_init(&spu_prio->runq_lock);
1127
1128	setup_timer(&spusched_timer, spusched_wake, 0);
1129	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
1130
1131	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
1132	if (IS_ERR(spusched_task)) {
1133		err = PTR_ERR(spusched_task);
1134		goto out_free_spu_prio;
1135	}
1136
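    	/* An expiry of 0 is already in the past: start load accounting now. */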
1137	mod_timer(&spuloadavg_timer, 0);
1138
1139	entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
1140	if (!entry)
1141		goto out_stop_kthread;
1142
1143	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
1144			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
1145	return 0;
1146
1147 out_stop_kthread:
1148	kthread_stop(spusched_task);
1149 out_free_spu_prio:
1150	kfree(spu_prio);
1151 out:
1152	return err;
1153}
1154
1155void spu_sched_exit(void)
1156{
1157	struct spu *spu;
1158	int node;
1159
1160	remove_proc_entry("spu_loadavg", NULL);
1161
1162	del_timer_sync(&spusched_timer);
1163	del_timer_sync(&spuloadavg_timer);
1164	kthread_stop(spusched_task);
1165
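    	/* Mark every spu free again; no new context can be scheduled now. */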
1166	for (node = 0; node < MAX_NUMNODES; node++) {
1167		mutex_lock(&cbe_spu_info[node].list_mutex);
1168		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
1169			if (spu->alloc_state != SPU_FREE)
1170				spu->alloc_state = SPU_FREE;
1171		mutex_unlock(&cbe_spu_info[node].list_mutex);
1172	}
1173	kfree(spu_prio);
1174}