1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
 26 * Xiang Hai hao <haihao.xiang@intel.com>
27 *
28 */
29
30#include "drmP.h"
31#include "drm.h"
32#include "i915_drv.h"
33#include "i915_drm.h"
34#include "i915_trace.h"
35#include "intel_drv.h"
36
37static inline int ring_space(struct intel_ring_buffer *ring)
38{
39 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
40 if (space < 0)
41 space += ring->size;
42 return space;
43}
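
/*
 * A minimal, self-contained sketch of the arithmetic above (illustrative
 * only, not part of the driver): head and tail are byte offsets into the
 * ring, and 8 bytes are held back, presumably so that a completely full
 * ring never reports the same head and tail as an empty one.
 */
static inline int example_ring_space(unsigned int head, unsigned int tail,
				     unsigned int size)
{
	int space = head - (tail + 8);	/* gap between tail and head */
	if (space < 0)			/* free region wraps past the end */
		space += size;
	return space;
}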
44
45static u32 i915_gem_get_seqno(struct drm_device *dev)
46{
47 drm_i915_private_t *dev_priv = dev->dev_private;
48 u32 seqno;
49
50 seqno = dev_priv->next_seqno;
51
52 /* reserve 0 for non-seqno */
53 if (++dev_priv->next_seqno == 0)
54 dev_priv->next_seqno = 1;
55
56 return seqno;
57}
58
59static int
60render_ring_flush(struct intel_ring_buffer *ring,
61 u32 invalidate_domains,
62 u32 flush_domains)
63{
64 struct drm_device *dev = ring->dev;
65 u32 cmd;
66 int ret;
67
68 /*
69 * read/write caches:
70 *
71 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
72 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
73 * also flushed at 2d versus 3d pipeline switches.
74 *
75 * read-only caches:
76 *
77 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
78 * MI_READ_FLUSH is set, and is always flushed on 965.
79 *
80 * I915_GEM_DOMAIN_COMMAND may not exist?
81 *
82 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
83 * invalidated when MI_EXE_FLUSH is set.
84 *
85 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
86 * invalidated with every MI_FLUSH.
87 *
88 * TLBs:
89 *
90 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 91 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
92 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
93 * are flushed at any MI_FLUSH.
94 */
95
96 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
97 if ((invalidate_domains|flush_domains) &
98 I915_GEM_DOMAIN_RENDER)
99 cmd &= ~MI_NO_WRITE_FLUSH;
100 if (INTEL_INFO(dev)->gen < 4) {
101 /*
102 * On the 965, the sampler cache always gets flushed
103 * and this bit is reserved.
104 */
105 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
106 cmd |= MI_READ_FLUSH;
107 }
108 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
109 cmd |= MI_EXE_FLUSH;
110
111 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
112 (IS_G4X(dev) || IS_GEN5(dev)))
113 cmd |= MI_INVALIDATE_ISP;
114
115 ret = intel_ring_begin(ring, 2);
116 if (ret)
117 return ret;
118
119 intel_ring_emit(ring, cmd);
120 intel_ring_emit(ring, MI_NOOP);
121 intel_ring_advance(ring);
122
123 return 0;
124}
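
/*
 * Worked example (illustrative): on a gen3 part, flushing the render
 * cache while invalidating the sampler (invalidate_domains =
 * I915_GEM_DOMAIN_SAMPLER, flush_domains = I915_GEM_DOMAIN_RENDER)
 * starts from MI_FLUSH | MI_NO_WRITE_FLUSH, clears MI_NO_WRITE_FLUSH
 * because the render domain is involved, adds MI_READ_FLUSH for the
 * sampler, and so emits MI_FLUSH | MI_READ_FLUSH followed by an MI_NOOP.
 */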
125
126static void ring_write_tail(struct intel_ring_buffer *ring,
127 u32 value)
128{
129 drm_i915_private_t *dev_priv = ring->dev->dev_private;
130 I915_WRITE_TAIL(ring, value);
131}
132
133u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
134{
135 drm_i915_private_t *dev_priv = ring->dev->dev_private;
136 u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
137 RING_ACTHD(ring->mmio_base) : ACTHD;
138
139 return I915_READ(acthd_reg);
140}
141
142static int init_ring_common(struct intel_ring_buffer *ring)
143{
144 drm_i915_private_t *dev_priv = ring->dev->dev_private;
145 struct drm_i915_gem_object *obj = ring->obj;
146 u32 head;
147
148 /* Stop the ring if it's running. */
149 I915_WRITE_CTL(ring, 0);
150 I915_WRITE_HEAD(ring, 0);
151 ring->write_tail(ring, 0);
152
153 /* Initialize the ring. */
154 I915_WRITE_START(ring, obj->gtt_offset);
155 head = I915_READ_HEAD(ring) & HEAD_ADDR;
156
157 /* G45 ring initialization fails to reset head to zero */
158 if (head != 0) {
159 DRM_DEBUG_KMS("%s head not reset to zero "
160 "ctl %08x head %08x tail %08x start %08x\n",
161 ring->name,
162 I915_READ_CTL(ring),
163 I915_READ_HEAD(ring),
164 I915_READ_TAIL(ring),
165 I915_READ_START(ring));
166
167 I915_WRITE_HEAD(ring, 0);
168
169 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
170 DRM_ERROR("failed to set %s head to zero "
171 "ctl %08x head %08x tail %08x start %08x\n",
172 ring->name,
173 I915_READ_CTL(ring),
174 I915_READ_HEAD(ring),
175 I915_READ_TAIL(ring),
176 I915_READ_START(ring));
177 }
178 }
179
180 I915_WRITE_CTL(ring,
181 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
182 | RING_REPORT_64K | RING_VALID);
183
184 /* If the head is still not zero, the ring is dead */
185 if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
186 I915_READ_START(ring) != obj->gtt_offset ||
187 (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
188 DRM_ERROR("%s initialization failed "
189 "ctl %08x head %08x tail %08x start %08x\n",
190 ring->name,
191 I915_READ_CTL(ring),
192 I915_READ_HEAD(ring),
193 I915_READ_TAIL(ring),
194 I915_READ_START(ring));
195 return -EIO;
196 }
197
198 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
199 i915_kernel_lost_context(ring->dev);
200 else {
201 ring->head = I915_READ_HEAD(ring);
202 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
203 ring->space = ring_space(ring);
204 }
205
206 return 0;
207}
208
209/*
210 * 965+ support PIPE_CONTROL commands, which provide finer grained control
211 * over cache flushing.
212 */
213struct pipe_control {
214 struct drm_i915_gem_object *obj;
215 volatile u32 *cpu_page;
216 u32 gtt_offset;
217};
218
219static int
220init_pipe_control(struct intel_ring_buffer *ring)
221{
222 struct pipe_control *pc;
223 struct drm_i915_gem_object *obj;
224 int ret;
225
226 if (ring->private)
227 return 0;
228
229 pc = kmalloc(sizeof(*pc), GFP_KERNEL);
230 if (!pc)
231 return -ENOMEM;
232
233 obj = i915_gem_alloc_object(ring->dev, 4096);
234 if (obj == NULL) {
235 DRM_ERROR("Failed to allocate seqno page\n");
236 ret = -ENOMEM;
237 goto err;
238 }
239
240 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
241
242 ret = i915_gem_object_pin(obj, 4096, true);
243 if (ret)
244 goto err_unref;
245
246 pc->gtt_offset = obj->gtt_offset;
247 pc->cpu_page = kmap(obj->pages[0]);
248 if (pc->cpu_page == NULL)
249 goto err_unpin;
250
251 pc->obj = obj;
252 ring->private = pc;
253 return 0;
254
255err_unpin:
256 i915_gem_object_unpin(obj);
257err_unref:
258 drm_gem_object_unreference(&obj->base);
259err:
260 kfree(pc);
261 return ret;
262}
263
264static void
265cleanup_pipe_control(struct intel_ring_buffer *ring)
266{
267 struct pipe_control *pc = ring->private;
268 struct drm_i915_gem_object *obj;
269
270 if (!ring->private)
271 return;
272
273 obj = pc->obj;
274 kunmap(obj->pages[0]);
275 i915_gem_object_unpin(obj);
276 drm_gem_object_unreference(&obj->base);
277
278 kfree(pc);
279 ring->private = NULL;
280}
281
282static int init_render_ring(struct intel_ring_buffer *ring)
283{
284 struct drm_device *dev = ring->dev;
285 struct drm_i915_private *dev_priv = dev->dev_private;
286 int ret = init_ring_common(ring);
287
288 if (INTEL_INFO(dev)->gen > 3) {
289 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
290 if (IS_GEN6(dev) || IS_GEN7(dev))
291 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
292 I915_WRITE(MI_MODE, mode);
293 if (IS_GEN7(dev))
294 I915_WRITE(GFX_MODE_GEN7,
295 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
296 GFX_MODE_ENABLE(GFX_REPLAY_MODE));
297 }
298
299 if (INTEL_INFO(dev)->gen >= 6) {
300 } else if (IS_GEN5(dev)) {
301 ret = init_pipe_control(ring);
302 if (ret)
303 return ret;
304 }
305
306 return ret;
307}
308
309static void render_ring_cleanup(struct intel_ring_buffer *ring)
310{
311 if (!ring->private)
312 return;
313
314 cleanup_pipe_control(ring);
315}
316
317static void
318update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
319{
320 struct drm_device *dev = ring->dev;
321 struct drm_i915_private *dev_priv = dev->dev_private;
322 int id;
323
324 /*
325 * cs -> 1 = vcs, 0 = bcs
326 * vcs -> 1 = bcs, 0 = cs,
327 * bcs -> 1 = cs, 0 = vcs.
328 */
329 id = ring - dev_priv->ring;
330 id += 2 - i;
331 id %= 3;
332
333 intel_ring_emit(ring,
334 MI_SEMAPHORE_MBOX |
335 MI_SEMAPHORE_REGISTER |
336 MI_SEMAPHORE_UPDATE);
337 intel_ring_emit(ring, seqno);
338 intel_ring_emit(ring,
339 RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
340}
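
/*
 * Illustrative check (not driver code) of the mailbox index arithmetic
 * above, with ring 0 = cs/render, 1 = vcs/bsd and 2 = bcs/blt: for each
 * ring, mailboxes i = 1 and i = 0 address the other two rings exactly as
 * the comment lists, e.g. cs with i = 1 gives (0 + 2 - 1) % 3, the vcs.
 */
static inline int example_semaphore_target(int ring_index, int i)
{
	return (ring_index + 2 - i) % 3;	/* ring whose sync register is written */
}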
341
342static int
343gen6_add_request(struct intel_ring_buffer *ring,
344 u32 *result)
345{
346 u32 seqno;
347 int ret;
348
349 ret = intel_ring_begin(ring, 10);
350 if (ret)
351 return ret;
352
353 seqno = i915_gem_get_seqno(ring->dev);
354 update_semaphore(ring, 0, seqno);
355 update_semaphore(ring, 1, seqno);
356
357 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
358 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
359 intel_ring_emit(ring, seqno);
360 intel_ring_emit(ring, MI_USER_INTERRUPT);
361 intel_ring_advance(ring);
362
363 *result = seqno;
364 return 0;
365}
366
367int
368intel_ring_sync(struct intel_ring_buffer *ring,
369 struct intel_ring_buffer *to,
370 u32 seqno)
371{
372 int ret;
373
374 ret = intel_ring_begin(ring, 4);
375 if (ret)
376 return ret;
377
378 intel_ring_emit(ring,
379 MI_SEMAPHORE_MBOX |
380 MI_SEMAPHORE_REGISTER |
381 intel_ring_sync_index(ring, to) << 17 |
382 MI_SEMAPHORE_COMPARE);
383 intel_ring_emit(ring, seqno);
384 intel_ring_emit(ring, 0);
385 intel_ring_emit(ring, MI_NOOP);
386 intel_ring_advance(ring);
387
388 return 0;
389}
390
391#define PIPE_CONTROL_FLUSH(ring__, addr__) \
392do { \
393 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
394 PIPE_CONTROL_DEPTH_STALL | 2); \
395 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
396 intel_ring_emit(ring__, 0); \
397 intel_ring_emit(ring__, 0); \
398} while (0)
399
400static int
401pc_render_add_request(struct intel_ring_buffer *ring,
402 u32 *result)
403{
404 struct drm_device *dev = ring->dev;
405 u32 seqno = i915_gem_get_seqno(dev);
406 struct pipe_control *pc = ring->private;
407 u32 scratch_addr = pc->gtt_offset + 128;
408 int ret;
409
410 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
411 * incoherent with writes to memory, i.e. completely fubar,
412 * so we need to use PIPE_NOTIFY instead.
413 *
414 * However, we also need to workaround the qword write
415 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
416 * memory before requesting an interrupt.
417 */
418 ret = intel_ring_begin(ring, 32);
419 if (ret)
420 return ret;
421
422 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
423 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
424 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
425 intel_ring_emit(ring, seqno);
426 intel_ring_emit(ring, 0);
427 PIPE_CONTROL_FLUSH(ring, scratch_addr);
428 scratch_addr += 128; /* write to separate cachelines */
429 PIPE_CONTROL_FLUSH(ring, scratch_addr);
430 scratch_addr += 128;
431 PIPE_CONTROL_FLUSH(ring, scratch_addr);
432 scratch_addr += 128;
433 PIPE_CONTROL_FLUSH(ring, scratch_addr);
434 scratch_addr += 128;
435 PIPE_CONTROL_FLUSH(ring, scratch_addr);
436 scratch_addr += 128;
437 PIPE_CONTROL_FLUSH(ring, scratch_addr);
438 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
439 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
440 PIPE_CONTROL_NOTIFY);
441 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
442 intel_ring_emit(ring, seqno);
443 intel_ring_emit(ring, 0);
444 intel_ring_advance(ring);
445
446 *result = seqno;
447 return 0;
448}
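
/*
 * The six unrolled PIPE_CONTROL_FLUSH calls above step the scratch
 * address by 128 bytes each time so that every qword write lands in its
 * own cacheline; a loop-based equivalent would look roughly like this
 * (illustrative sketch only, not part of the driver):
 */
static inline void example_flush_scratch_cachelines(struct intel_ring_buffer *ring,
						    u32 scratch_addr)
{
	int i;

	for (i = 0; i < 6; i++) {
		PIPE_CONTROL_FLUSH(ring, scratch_addr);
		scratch_addr += 128;	/* next flush targets a fresh cacheline */
	}
}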
449
450static int
451render_ring_add_request(struct intel_ring_buffer *ring,
452 u32 *result)
453{
454 struct drm_device *dev = ring->dev;
455 u32 seqno = i915_gem_get_seqno(dev);
456 int ret;
457
458 ret = intel_ring_begin(ring, 4);
459 if (ret)
460 return ret;
461
462 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
463 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
464 intel_ring_emit(ring, seqno);
465 intel_ring_emit(ring, MI_USER_INTERRUPT);
466 intel_ring_advance(ring);
467
468 *result = seqno;
469 return 0;
470}
471
472static u32
473ring_get_seqno(struct intel_ring_buffer *ring)
474{
475 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
476}
477
478static u32
479pc_render_get_seqno(struct intel_ring_buffer *ring)
480{
481 struct pipe_control *pc = ring->private;
482 return pc->cpu_page[0];
483}
484
485static void
486ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
487{
488 dev_priv->gt_irq_mask &= ~mask;
489 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
490 POSTING_READ(GTIMR);
491}
492
493static void
494ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
495{
496 dev_priv->gt_irq_mask |= mask;
497 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
498 POSTING_READ(GTIMR);
499}
500
501static void
502i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
503{
504 dev_priv->irq_mask &= ~mask;
505 I915_WRITE(IMR, dev_priv->irq_mask);
506 POSTING_READ(IMR);
507}
508
509static void
510i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
511{
512 dev_priv->irq_mask |= mask;
513 I915_WRITE(IMR, dev_priv->irq_mask);
514 POSTING_READ(IMR);
515}
516
517static bool
518render_ring_get_irq(struct intel_ring_buffer *ring)
519{
520 struct drm_device *dev = ring->dev;
521 drm_i915_private_t *dev_priv = dev->dev_private;
522
523 if (!dev->irq_enabled)
524 return false;
525
526 spin_lock(&ring->irq_lock);
527 if (ring->irq_refcount++ == 0) {
528 if (HAS_PCH_SPLIT(dev))
529 ironlake_enable_irq(dev_priv,
530 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
531 else
532 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
533 }
534 spin_unlock(&ring->irq_lock);
535
536 return true;
537}
538
539static void
540render_ring_put_irq(struct intel_ring_buffer *ring)
541{
542 struct drm_device *dev = ring->dev;
543 drm_i915_private_t *dev_priv = dev->dev_private;
544
545 spin_lock(&ring->irq_lock);
546 if (--ring->irq_refcount == 0) {
547 if (HAS_PCH_SPLIT(dev))
548 ironlake_disable_irq(dev_priv,
549 GT_USER_INTERRUPT |
550 GT_PIPE_NOTIFY);
551 else
552 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
553 }
554 spin_unlock(&ring->irq_lock);
555}
556
557void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
558{
559 struct drm_device *dev = ring->dev;
560 drm_i915_private_t *dev_priv = ring->dev->dev_private;
561 u32 mmio = 0;
562
563 /* The ring status page addresses are no longer next to the rest of
564 * the ring registers as of gen7.
565 */
566 if (IS_GEN7(dev)) {
567 switch (ring->id) {
568 case RING_RENDER:
569 mmio = RENDER_HWS_PGA_GEN7;
570 break;
571 case RING_BLT:
572 mmio = BLT_HWS_PGA_GEN7;
573 break;
574 case RING_BSD:
575 mmio = BSD_HWS_PGA_GEN7;
576 break;
577 }
578 } else if (IS_GEN6(ring->dev)) {
579 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
580 } else {
581 mmio = RING_HWS_PGA(ring->mmio_base);
582 }
583
584 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
585 POSTING_READ(mmio);
586}
587
588static int
589bsd_ring_flush(struct intel_ring_buffer *ring,
590 u32 invalidate_domains,
591 u32 flush_domains)
592{
593 int ret;
594
595 ret = intel_ring_begin(ring, 2);
596 if (ret)
597 return ret;
598
599 intel_ring_emit(ring, MI_FLUSH);
600 intel_ring_emit(ring, MI_NOOP);
601 intel_ring_advance(ring);
602 return 0;
603}
604
605static int
606ring_add_request(struct intel_ring_buffer *ring,
607 u32 *result)
608{
609 u32 seqno;
610 int ret;
611
612 ret = intel_ring_begin(ring, 4);
613 if (ret)
614 return ret;
615
616 seqno = i915_gem_get_seqno(ring->dev);
617
618 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
619 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
620 intel_ring_emit(ring, seqno);
621 intel_ring_emit(ring, MI_USER_INTERRUPT);
622 intel_ring_advance(ring);
623
624 *result = seqno;
625 return 0;
626}
627
628static bool
629gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
630{
631 struct drm_device *dev = ring->dev;
632 drm_i915_private_t *dev_priv = dev->dev_private;
633
634 if (!dev->irq_enabled)
635 return false;
636
637 spin_lock(&ring->irq_lock);
638 if (ring->irq_refcount++ == 0) {
639 ring->irq_mask &= ~rflag;
640 I915_WRITE_IMR(ring, ring->irq_mask);
641 ironlake_enable_irq(dev_priv, gflag);
642 }
643 spin_unlock(&ring->irq_lock);
644
645 return true;
646}
647
648static void
649gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
650{
651 struct drm_device *dev = ring->dev;
652 drm_i915_private_t *dev_priv = dev->dev_private;
653
654 spin_lock(&ring->irq_lock);
655 if (--ring->irq_refcount == 0) {
656 ring->irq_mask |= rflag;
657 I915_WRITE_IMR(ring, ring->irq_mask);
658 ironlake_disable_irq(dev_priv, gflag);
659 }
660 spin_unlock(&ring->irq_lock);
661}
662
663static bool
664bsd_ring_get_irq(struct intel_ring_buffer *ring)
665{
666 struct drm_device *dev = ring->dev;
667 drm_i915_private_t *dev_priv = dev->dev_private;
668
669 if (!dev->irq_enabled)
670 return false;
671
672 spin_lock(&ring->irq_lock);
673 if (ring->irq_refcount++ == 0) {
674 if (IS_G4X(dev))
675 i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
676 else
677 ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
678 }
679 spin_unlock(&ring->irq_lock);
680
681 return true;
682}
683static void
684bsd_ring_put_irq(struct intel_ring_buffer *ring)
685{
686 struct drm_device *dev = ring->dev;
687 drm_i915_private_t *dev_priv = dev->dev_private;
688
689 spin_lock(&ring->irq_lock);
690 if (--ring->irq_refcount == 0) {
691 if (IS_G4X(dev))
692 i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
693 else
694 ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
695 }
696 spin_unlock(&ring->irq_lock);
697}
698
699static int
700ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
701{
702 int ret;
703
704 ret = intel_ring_begin(ring, 2);
705 if (ret)
706 return ret;
707
708 intel_ring_emit(ring,
709 MI_BATCH_BUFFER_START | (2 << 6) |
710 MI_BATCH_NON_SECURE_I965);
711 intel_ring_emit(ring, offset);
712 intel_ring_advance(ring);
713
714 return 0;
715}
716
717static int
718render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
719 u32 offset, u32 len)
720{
721 struct drm_device *dev = ring->dev;
722 int ret;
723
724 if (IS_I830(dev) || IS_845G(dev)) {
725 ret = intel_ring_begin(ring, 4);
726 if (ret)
727 return ret;
728
729 intel_ring_emit(ring, MI_BATCH_BUFFER);
730 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
731 intel_ring_emit(ring, offset + len - 8);
732 intel_ring_emit(ring, 0);
733 } else {
734 ret = intel_ring_begin(ring, 2);
735 if (ret)
736 return ret;
737
738 if (INTEL_INFO(dev)->gen >= 4) {
739 intel_ring_emit(ring,
740 MI_BATCH_BUFFER_START | (2 << 6) |
741 MI_BATCH_NON_SECURE_I965);
742 intel_ring_emit(ring, offset);
743 } else {
744 intel_ring_emit(ring,
745 MI_BATCH_BUFFER_START | (2 << 6));
746 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
747 }
748 }
749 intel_ring_advance(ring);
750
751 return 0;
752}
753
754static void cleanup_status_page(struct intel_ring_buffer *ring)
755{
756 drm_i915_private_t *dev_priv = ring->dev->dev_private;
757 struct drm_i915_gem_object *obj;
758
759 obj = ring->status_page.obj;
760 if (obj == NULL)
761 return;
762
763 kunmap(obj->pages[0]);
764 i915_gem_object_unpin(obj);
765 drm_gem_object_unreference(&obj->base);
766 ring->status_page.obj = NULL;
767
768 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
769}
770
771static int init_status_page(struct intel_ring_buffer *ring)
772{
773 struct drm_device *dev = ring->dev;
774 drm_i915_private_t *dev_priv = dev->dev_private;
775 struct drm_i915_gem_object *obj;
776 int ret;
777
778 obj = i915_gem_alloc_object(dev, 4096);
779 if (obj == NULL) {
780 DRM_ERROR("Failed to allocate status page\n");
781 ret = -ENOMEM;
782 goto err;
783 }
784
785 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
786
787 ret = i915_gem_object_pin(obj, 4096, true);
788 if (ret != 0) {
789 goto err_unref;
790 }
791
792 ring->status_page.gfx_addr = obj->gtt_offset;
793 ring->status_page.page_addr = kmap(obj->pages[0]);
794 if (ring->status_page.page_addr == NULL) {
795 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
796 goto err_unpin;
797 }
798 ring->status_page.obj = obj;
799 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
800
801 intel_ring_setup_status_page(ring);
802 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
803 ring->name, ring->status_page.gfx_addr);
804
805 return 0;
806
807err_unpin:
808 i915_gem_object_unpin(obj);
809err_unref:
810 drm_gem_object_unreference(&obj->base);
811err:
812 return ret;
813}
814
815int intel_init_ring_buffer(struct drm_device *dev,
816 struct intel_ring_buffer *ring)
817{
818 struct drm_i915_gem_object *obj;
819 int ret;
820
821 ring->dev = dev;
822 INIT_LIST_HEAD(&ring->active_list);
823 INIT_LIST_HEAD(&ring->request_list);
824 INIT_LIST_HEAD(&ring->gpu_write_list);
825
826 init_waitqueue_head(&ring->irq_queue);
827 spin_lock_init(&ring->irq_lock);
828 ring->irq_mask = ~0;
829
830 if (I915_NEED_GFX_HWS(dev)) {
831 ret = init_status_page(ring);
832 if (ret)
833 return ret;
834 }
835
836 obj = i915_gem_alloc_object(dev, ring->size);
837 if (obj == NULL) {
838 DRM_ERROR("Failed to allocate ringbuffer\n");
839 ret = -ENOMEM;
840 goto err_hws;
841 }
842
843 ring->obj = obj;
844
845 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
846 if (ret)
847 goto err_unref;
848
849 ring->map.size = ring->size;
850 ring->map.offset = dev->agp->base + obj->gtt_offset;
851 ring->map.type = 0;
852 ring->map.flags = 0;
853 ring->map.mtrr = 0;
854
855 drm_core_ioremap_wc(&ring->map, dev);
856 if (ring->map.handle == NULL) {
857 DRM_ERROR("Failed to map ringbuffer.\n");
858 ret = -EINVAL;
859 goto err_unpin;
860 }
861
862 ring->virtual_start = ring->map.handle;
863 ret = ring->init(ring);
864 if (ret)
865 goto err_unmap;
866
867 /* Workaround an erratum on the i830 which causes a hang if
868 * the TAIL pointer points to within the last 2 cachelines
869 * of the buffer.
870 */
871 ring->effective_size = ring->size;
872 if (IS_I830(ring->dev))
873 ring->effective_size -= 128;
874
875 return 0;
876
877err_unmap:
878 drm_core_ioremapfree(&ring->map, dev);
879err_unpin:
880 i915_gem_object_unpin(obj);
881err_unref:
882 drm_gem_object_unreference(&obj->base);
883 ring->obj = NULL;
884err_hws:
885 cleanup_status_page(ring);
886 return ret;
887}
888
889void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
890{
891 struct drm_i915_private *dev_priv;
892 int ret;
893
894 if (ring->obj == NULL)
895 return;
896
897 /* Disable the ring buffer. The ring must be idle at this point */
898 dev_priv = ring->dev->dev_private;
899 ret = intel_wait_ring_idle(ring);
900 if (ret)
901 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
902 ring->name, ret);
903
904 I915_WRITE_CTL(ring, 0);
905
906 drm_core_ioremapfree(&ring->map, ring->dev);
907
908 i915_gem_object_unpin(ring->obj);
909 drm_gem_object_unreference(&ring->obj->base);
910 ring->obj = NULL;
911
912 if (ring->cleanup)
913 ring->cleanup(ring);
914
915 cleanup_status_page(ring);
916}
917
918static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
919{
920 unsigned int *virt;
921 int rem = ring->size - ring->tail;
922
923 if (ring->space < rem) {
924 int ret = intel_wait_ring_buffer(ring, rem);
925 if (ret)
926 return ret;
927 }
928
929 virt = (unsigned int *)(ring->virtual_start + ring->tail);
930 rem /= 8;
931 while (rem--) {
932 *virt++ = MI_NOOP;
933 *virt++ = MI_NOOP;
934 }
935
936 ring->tail = 0;
937 ring->space = ring_space(ring);
938
939 return 0;
940}
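
/*
 * Worked example (illustrative): with a 4096-byte ring and the tail at
 * 4032, rem is 64 bytes; once at least that much space is free, the
 * remaining 64 bytes are filled with sixteen MI_NOOP dwords (emitted in
 * pairs above) and the tail restarts at offset 0.
 */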
941
942int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
943{
944 struct drm_device *dev = ring->dev;
945 struct drm_i915_private *dev_priv = dev->dev_private;
946 unsigned long end;
947 u32 head;
948
949 /* If the reported head position has wrapped or hasn't advanced,
 950 * fall back to the slow and accurate path.
951 */
952 head = intel_read_status_page(ring, 4);
953 if (head > ring->head) {
954 ring->head = head;
955 ring->space = ring_space(ring);
956 if (ring->space >= n)
957 return 0;
958 }
959
960 trace_i915_ring_wait_begin(ring);
961 end = jiffies + 3 * HZ;
962 do {
963 ring->head = I915_READ_HEAD(ring);
964 ring->space = ring_space(ring);
965 if (ring->space >= n) {
966 trace_i915_ring_wait_end(ring);
967 return 0;
968 }
969
970 if (dev->primary->master) {
971 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
972 if (master_priv->sarea_priv)
973 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
974 }
975
976 msleep(1);
977 if (atomic_read(&dev_priv->mm.wedged))
978 return -EAGAIN;
979 } while (!time_after(jiffies, end));
980 trace_i915_ring_wait_end(ring);
981 return -EBUSY;
982}
983
984int intel_ring_begin(struct intel_ring_buffer *ring,
985 int num_dwords)
986{
987 struct drm_i915_private *dev_priv = ring->dev->dev_private;
988 int n = 4*num_dwords;
989 int ret;
990
991 if (unlikely(atomic_read(&dev_priv->mm.wedged)))
992 return -EIO;
993
994 if (unlikely(ring->tail + n > ring->effective_size)) {
995 ret = intel_wrap_ring_buffer(ring);
996 if (unlikely(ret))
997 return ret;
998 }
999
1000 if (unlikely(ring->space < n)) {
1001 ret = intel_wait_ring_buffer(ring, n);
1002 if (unlikely(ret))
1003 return ret;
1004 }
1005
1006 ring->space -= n;
1007 return 0;
1008}
1009
1010void intel_ring_advance(struct intel_ring_buffer *ring)
1011{
1012 ring->tail &= ring->size - 1;
1013 ring->write_tail(ring, ring->tail);
1014}
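
/*
 * Typical usage of intel_ring_begin()/intel_ring_emit()/
 * intel_ring_advance() (sketch only, mirroring e.g. bsd_ring_flush()
 * above): reserve the exact number of dwords up front, emit that many,
 * then advance to publish the new tail to the hardware.
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */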
1015
1016static const struct intel_ring_buffer render_ring = {
1017 .name = "render ring",
1018 .id = RING_RENDER,
1019 .mmio_base = RENDER_RING_BASE,
1020 .size = 32 * PAGE_SIZE,
1021 .init = init_render_ring,
1022 .write_tail = ring_write_tail,
1023 .flush = render_ring_flush,
1024 .add_request = render_ring_add_request,
1025 .get_seqno = ring_get_seqno,
1026 .irq_get = render_ring_get_irq,
1027 .irq_put = render_ring_put_irq,
1028 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
1029 .cleanup = render_ring_cleanup,
1030};
1031
1032/* ring buffer for bit-stream decoder */
1033
1034static const struct intel_ring_buffer bsd_ring = {
1035 .name = "bsd ring",
1036 .id = RING_BSD,
1037 .mmio_base = BSD_RING_BASE,
1038 .size = 32 * PAGE_SIZE,
1039 .init = init_ring_common,
1040 .write_tail = ring_write_tail,
1041 .flush = bsd_ring_flush,
1042 .add_request = ring_add_request,
1043 .get_seqno = ring_get_seqno,
1044 .irq_get = bsd_ring_get_irq,
1045 .irq_put = bsd_ring_put_irq,
1046 .dispatch_execbuffer = ring_dispatch_execbuffer,
1047};
1048
1049
1050static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1051 u32 value)
1052{
1053 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1054
1055 /* Every tail move must follow the sequence below */
1056 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1057 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1058 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1059 I915_WRITE(GEN6_BSD_RNCID, 0x0);
1060
1061 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1062 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
1063 50))
1064 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1065
1066 I915_WRITE_TAIL(ring, value);
1067 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1068 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1069 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1070}
1071
1072static int gen6_ring_flush(struct intel_ring_buffer *ring,
1073 u32 invalidate, u32 flush)
1074{
1075 uint32_t cmd;
1076 int ret;
1077
1078 ret = intel_ring_begin(ring, 4);
1079 if (ret)
1080 return ret;
1081
1082 cmd = MI_FLUSH_DW;
1083 if (invalidate & I915_GEM_GPU_DOMAINS)
1084 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1085 intel_ring_emit(ring, cmd);
1086 intel_ring_emit(ring, 0);
1087 intel_ring_emit(ring, 0);
1088 intel_ring_emit(ring, MI_NOOP);
1089 intel_ring_advance(ring);
1090 return 0;
1091}
1092
1093static int
1094gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1095 u32 offset, u32 len)
1096{
1097 int ret;
1098
1099 ret = intel_ring_begin(ring, 2);
1100 if (ret)
1101 return ret;
1102
1103 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
 1104 /* bits 0-7 are the length on GEN6+ */
1105 intel_ring_emit(ring, offset);
1106 intel_ring_advance(ring);
1107
1108 return 0;
1109}
1110
1111static bool
1112gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1113{
1114 return gen6_ring_get_irq(ring,
1115 GT_USER_INTERRUPT,
1116 GEN6_RENDER_USER_INTERRUPT);
1117}
1118
1119static void
1120gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1121{
1122 return gen6_ring_put_irq(ring,
1123 GT_USER_INTERRUPT,
1124 GEN6_RENDER_USER_INTERRUPT);
1125}
1126
1127static bool
1128gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1129{
1130 return gen6_ring_get_irq(ring,
1131 GT_GEN6_BSD_USER_INTERRUPT,
1132 GEN6_BSD_USER_INTERRUPT);
1133}
1134
1135static void
1136gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1137{
1138 return gen6_ring_put_irq(ring,
1139 GT_GEN6_BSD_USER_INTERRUPT,
1140 GEN6_BSD_USER_INTERRUPT);
1141}
1142
1143/* ring buffer for Video Codec for Gen6+ */
1144static const struct intel_ring_buffer gen6_bsd_ring = {
1145 .name = "gen6 bsd ring",
1146 .id = RING_BSD,
1147 .mmio_base = GEN6_BSD_RING_BASE,
1148 .size = 32 * PAGE_SIZE,
1149 .init = init_ring_common,
1150 .write_tail = gen6_bsd_ring_write_tail,
1151 .flush = gen6_ring_flush,
1152 .add_request = gen6_add_request,
1153 .get_seqno = ring_get_seqno,
1154 .irq_get = gen6_bsd_ring_get_irq,
1155 .irq_put = gen6_bsd_ring_put_irq,
1156 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1157};
1158
1159/* Blitter support (SandyBridge+) */
1160
1161static bool
1162blt_ring_get_irq(struct intel_ring_buffer *ring)
1163{
1164 return gen6_ring_get_irq(ring,
1165 GT_BLT_USER_INTERRUPT,
1166 GEN6_BLITTER_USER_INTERRUPT);
1167}
1168
1169static void
1170blt_ring_put_irq(struct intel_ring_buffer *ring)
1171{
1172 gen6_ring_put_irq(ring,
1173 GT_BLT_USER_INTERRUPT,
1174 GEN6_BLITTER_USER_INTERRUPT);
1175}
1176
1177
1178/* Workaround for some steppings of SNB:
 1179 * each time the BLT engine's ring tail is moved,
 1180 * the first command in the ring to be parsed
 1181 * must be MI_BATCH_BUFFER_START.
 1182 */
1183#define NEED_BLT_WORKAROUND(dev) \
1184 (IS_GEN6(dev) && (dev->pdev->revision < 8))
1185
1186static inline struct drm_i915_gem_object *
1187to_blt_workaround(struct intel_ring_buffer *ring)
1188{
1189 return ring->private;
1190}
1191
1192static int blt_ring_init(struct intel_ring_buffer *ring)
1193{
1194 if (NEED_BLT_WORKAROUND(ring->dev)) {
1195 struct drm_i915_gem_object *obj;
1196 u32 *ptr;
1197 int ret;
1198
1199 obj = i915_gem_alloc_object(ring->dev, 4096);
1200 if (obj == NULL)
1201 return -ENOMEM;
1202
1203 ret = i915_gem_object_pin(obj, 4096, true);
1204 if (ret) {
1205 drm_gem_object_unreference(&obj->base);
1206 return ret;
1207 }
1208
1209 ptr = kmap(obj->pages[0]);
1210 *ptr++ = MI_BATCH_BUFFER_END;
1211 *ptr++ = MI_NOOP;
1212 kunmap(obj->pages[0]);
1213
1214 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1215 if (ret) {
1216 i915_gem_object_unpin(obj);
1217 drm_gem_object_unreference(&obj->base);
1218 return ret;
1219 }
1220
1221 ring->private = obj;
1222 }
1223
1224 return init_ring_common(ring);
1225}
1226
1227static int blt_ring_begin(struct intel_ring_buffer *ring,
1228 int num_dwords)
1229{
1230 if (ring->private) {
1231 int ret = intel_ring_begin(ring, num_dwords+2);
1232 if (ret)
1233 return ret;
1234
1235 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1236 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1237
1238 return 0;
1239 } else
1240 return intel_ring_begin(ring, 4);
1241}
1242
1243static int blt_ring_flush(struct intel_ring_buffer *ring,
1244 u32 invalidate, u32 flush)
1245{
1246 uint32_t cmd;
1247 int ret;
1248
1249 ret = blt_ring_begin(ring, 4);
1250 if (ret)
1251 return ret;
1252
1253 cmd = MI_FLUSH_DW;
1254 if (invalidate & I915_GEM_DOMAIN_RENDER)
1255 cmd |= MI_INVALIDATE_TLB;
1256 intel_ring_emit(ring, cmd);
1257 intel_ring_emit(ring, 0);
1258 intel_ring_emit(ring, 0);
1259 intel_ring_emit(ring, MI_NOOP);
1260 intel_ring_advance(ring);
1261 return 0;
1262}
1263
1264static void blt_ring_cleanup(struct intel_ring_buffer *ring)
1265{
1266 if (!ring->private)
1267 return;
1268
1269 i915_gem_object_unpin(ring->private);
1270 drm_gem_object_unreference(ring->private);
1271 ring->private = NULL;
1272}
1273
1274static const struct intel_ring_buffer gen6_blt_ring = {
1275 .name = "blt ring",
1276 .id = RING_BLT,
1277 .mmio_base = BLT_RING_BASE,
1278 .size = 32 * PAGE_SIZE,
1279 .init = blt_ring_init,
1280 .write_tail = ring_write_tail,
1281 .flush = blt_ring_flush,
1282 .add_request = gen6_add_request,
1283 .get_seqno = ring_get_seqno,
1284 .irq_get = blt_ring_get_irq,
1285 .irq_put = blt_ring_put_irq,
1286 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1287 .cleanup = blt_ring_cleanup,
1288};
1289
1290int intel_init_render_ring_buffer(struct drm_device *dev)
1291{
1292 drm_i915_private_t *dev_priv = dev->dev_private;
1293 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1294
1295 *ring = render_ring;
1296 if (INTEL_INFO(dev)->gen >= 6) {
1297 ring->add_request = gen6_add_request;
1298 ring->irq_get = gen6_render_ring_get_irq;
1299 ring->irq_put = gen6_render_ring_put_irq;
1300 } else if (IS_GEN5(dev)) {
1301 ring->add_request = pc_render_add_request;
1302 ring->get_seqno = pc_render_get_seqno;
1303 }
1304
1305 if (!I915_NEED_GFX_HWS(dev)) {
1306 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1307 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1308 }
1309
1310 return intel_init_ring_buffer(dev, ring);
1311}
1312
1313int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1314{
1315 drm_i915_private_t *dev_priv = dev->dev_private;
1316 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1317
1318 *ring = render_ring;
1319 if (INTEL_INFO(dev)->gen >= 6) {
1320 ring->add_request = gen6_add_request;
1321 ring->irq_get = gen6_render_ring_get_irq;
1322 ring->irq_put = gen6_render_ring_put_irq;
1323 } else if (IS_GEN5(dev)) {
1324 ring->add_request = pc_render_add_request;
1325 ring->get_seqno = pc_render_get_seqno;
1326 }
1327
1328 if (!I915_NEED_GFX_HWS(dev))
1329 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1330
1331 ring->dev = dev;
1332 INIT_LIST_HEAD(&ring->active_list);
1333 INIT_LIST_HEAD(&ring->request_list);
1334 INIT_LIST_HEAD(&ring->gpu_write_list);
1335
1336 ring->size = size;
1337 ring->effective_size = ring->size;
1338 if (IS_I830(ring->dev))
1339 ring->effective_size -= 128;
1340
1341 ring->map.offset = start;
1342 ring->map.size = size;
1343 ring->map.type = 0;
1344 ring->map.flags = 0;
1345 ring->map.mtrr = 0;
1346
1347 drm_core_ioremap_wc(&ring->map, dev);
1348 if (ring->map.handle == NULL) {
1349 DRM_ERROR("can not ioremap virtual address for"
1350 " ring buffer\n");
1351 return -ENOMEM;
1352 }
1353
1354 ring->virtual_start = (void __force __iomem *)ring->map.handle;
1355 return 0;
1356}
1357
1358int intel_init_bsd_ring_buffer(struct drm_device *dev)
1359{
1360 drm_i915_private_t *dev_priv = dev->dev_private;
1361 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1362
1363 if (IS_GEN6(dev) || IS_GEN7(dev))
1364 *ring = gen6_bsd_ring;
1365 else
1366 *ring = bsd_ring;
1367
1368 return intel_init_ring_buffer(dev, ring);
1369}
1370
1371int intel_init_blt_ring_buffer(struct drm_device *dev)
1372{
1373 drm_i915_private_t *dev_priv = dev->dev_private;
1374 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1375
1376 *ring = gen6_blt_ring;
1377
1378 return intel_init_ring_buffer(dev, ring);
1379}
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
 26 * Xiang Hai hao <haihao.xiang@intel.com>
27 *
28 */
29
30#include <linux/log2.h>
31#include <drm/drmP.h>
32#include "i915_drv.h"
33#include <drm/i915_drm.h>
34#include "i915_trace.h"
35#include "intel_drv.h"
36
37/* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40#define LEGACY_REQUEST_SIZE 200
41
42int __intel_ring_space(int head, int tail, int size)
43{
44 int space = head - tail;
45 if (space <= 0)
46 space += size;
47 return space - I915_RING_FREE_SPACE;
48}
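
/*
 * Worked example (illustrative): with size = 4096, head = 256 and
 * tail = 1024, head - tail is negative, so the ring size is added back,
 * giving a raw gap of 3328 bytes; I915_RING_FREE_SPACE is then
 * subtracted to keep some headroom between the tail and the head.
 */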
49
50void intel_ring_update_space(struct intel_ring *ring)
51{
52 if (ring->last_retired_head != -1) {
53 ring->head = ring->last_retired_head;
54 ring->last_retired_head = -1;
55 }
56
57 ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
58 ring->tail, ring->size);
59}
60
61static int
62gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
63{
64 struct intel_ring *ring = req->ring;
65 u32 cmd;
66 int ret;
67
68 cmd = MI_FLUSH;
69
70 if (mode & EMIT_INVALIDATE)
71 cmd |= MI_READ_FLUSH;
72
73 ret = intel_ring_begin(req, 2);
74 if (ret)
75 return ret;
76
77 intel_ring_emit(ring, cmd);
78 intel_ring_emit(ring, MI_NOOP);
79 intel_ring_advance(ring);
80
81 return 0;
82}
83
84static int
85gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
86{
87 struct intel_ring *ring = req->ring;
88 u32 cmd;
89 int ret;
90
91 /*
92 * read/write caches:
93 *
94 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
95 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
96 * also flushed at 2d versus 3d pipeline switches.
97 *
98 * read-only caches:
99 *
100 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
101 * MI_READ_FLUSH is set, and is always flushed on 965.
102 *
103 * I915_GEM_DOMAIN_COMMAND may not exist?
104 *
105 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
106 * invalidated when MI_EXE_FLUSH is set.
107 *
108 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
109 * invalidated with every MI_FLUSH.
110 *
111 * TLBs:
112 *
113 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 114 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
115 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
116 * are flushed at any MI_FLUSH.
117 */
118
119 cmd = MI_FLUSH;
120 if (mode & EMIT_INVALIDATE) {
121 cmd |= MI_EXE_FLUSH;
122 if (IS_G4X(req->i915) || IS_GEN5(req->i915))
123 cmd |= MI_INVALIDATE_ISP;
124 }
125
126 ret = intel_ring_begin(req, 2);
127 if (ret)
128 return ret;
129
130 intel_ring_emit(ring, cmd);
131 intel_ring_emit(ring, MI_NOOP);
132 intel_ring_advance(ring);
133
134 return 0;
135}
136
137/**
138 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
139 * implementing two workarounds on gen6. From section 1.4.7.1
140 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
141 *
142 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
143 * produced by non-pipelined state commands), software needs to first
144 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
145 * 0.
146 *
147 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
148 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
149 *
150 * And the workaround for these two requires this workaround first:
151 *
152 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
153 * BEFORE the pipe-control with a post-sync op and no write-cache
154 * flushes.
155 *
156 * And this last workaround is tricky because of the requirements on
157 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
158 * volume 2 part 1:
159 *
160 * "1 of the following must also be set:
161 * - Render Target Cache Flush Enable ([12] of DW1)
162 * - Depth Cache Flush Enable ([0] of DW1)
163 * - Stall at Pixel Scoreboard ([1] of DW1)
164 * - Depth Stall ([13] of DW1)
165 * - Post-Sync Operation ([13] of DW1)
166 * - Notify Enable ([8] of DW1)"
167 *
168 * The cache flushes require the workaround flush that triggered this
169 * one, so we can't use it. Depth stall would trigger the same.
170 * Post-sync nonzero is what triggered this second workaround, so we
171 * can't use that one either. Notify enable is IRQs, which aren't
172 * really our business. That leaves only stall at scoreboard.
173 */
174static int
175intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
176{
177 struct intel_ring *ring = req->ring;
178 u32 scratch_addr =
179 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
180 int ret;
181
182 ret = intel_ring_begin(req, 6);
183 if (ret)
184 return ret;
185
186 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
187 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
188 PIPE_CONTROL_STALL_AT_SCOREBOARD);
189 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
190 intel_ring_emit(ring, 0); /* low dword */
191 intel_ring_emit(ring, 0); /* high dword */
192 intel_ring_emit(ring, MI_NOOP);
193 intel_ring_advance(ring);
194
195 ret = intel_ring_begin(req, 6);
196 if (ret)
197 return ret;
198
199 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
200 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
201 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
202 intel_ring_emit(ring, 0);
203 intel_ring_emit(ring, 0);
204 intel_ring_emit(ring, MI_NOOP);
205 intel_ring_advance(ring);
206
207 return 0;
208}
209
210static int
211gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
212{
213 struct intel_ring *ring = req->ring;
214 u32 scratch_addr =
215 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
216 u32 flags = 0;
217 int ret;
218
219 /* Force SNB workarounds for PIPE_CONTROL flushes */
220 ret = intel_emit_post_sync_nonzero_flush(req);
221 if (ret)
222 return ret;
223
224 /* Just flush everything. Experiments have shown that reducing the
225 * number of bits based on the write domains has little performance
226 * impact.
227 */
228 if (mode & EMIT_FLUSH) {
229 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
230 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
231 /*
232 * Ensure that any following seqno writes only happen
233 * when the render cache is indeed flushed.
234 */
235 flags |= PIPE_CONTROL_CS_STALL;
236 }
237 if (mode & EMIT_INVALIDATE) {
238 flags |= PIPE_CONTROL_TLB_INVALIDATE;
239 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
240 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
241 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
242 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
243 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
244 /*
245 * TLB invalidate requires a post-sync write.
246 */
247 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
248 }
249
250 ret = intel_ring_begin(req, 4);
251 if (ret)
252 return ret;
253
254 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
255 intel_ring_emit(ring, flags);
256 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
257 intel_ring_emit(ring, 0);
258 intel_ring_advance(ring);
259
260 return 0;
261}
262
263static int
264gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
265{
266 struct intel_ring *ring = req->ring;
267 int ret;
268
269 ret = intel_ring_begin(req, 4);
270 if (ret)
271 return ret;
272
273 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
274 intel_ring_emit(ring,
275 PIPE_CONTROL_CS_STALL |
276 PIPE_CONTROL_STALL_AT_SCOREBOARD);
277 intel_ring_emit(ring, 0);
278 intel_ring_emit(ring, 0);
279 intel_ring_advance(ring);
280
281 return 0;
282}
283
284static int
285gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
286{
287 struct intel_ring *ring = req->ring;
288 u32 scratch_addr =
289 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
290 u32 flags = 0;
291 int ret;
292
293 /*
294 * Ensure that any following seqno writes only happen when the render
295 * cache is indeed flushed.
296 *
297 * Workaround: 4th PIPE_CONTROL command (except the ones with only
298 * read-cache invalidate bits set) must have the CS_STALL bit set. We
299 * don't try to be clever and just set it unconditionally.
300 */
301 flags |= PIPE_CONTROL_CS_STALL;
302
303 /* Just flush everything. Experiments have shown that reducing the
304 * number of bits based on the write domains has little performance
305 * impact.
306 */
307 if (mode & EMIT_FLUSH) {
308 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
309 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
310 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
311 flags |= PIPE_CONTROL_FLUSH_ENABLE;
312 }
313 if (mode & EMIT_INVALIDATE) {
314 flags |= PIPE_CONTROL_TLB_INVALIDATE;
315 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
316 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
317 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
318 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
319 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
320 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
321 /*
322 * TLB invalidate requires a post-sync write.
323 */
324 flags |= PIPE_CONTROL_QW_WRITE;
325 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
326
327 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
328
329 /* Workaround: we must issue a pipe_control with CS-stall bit
330 * set before a pipe_control command that has the state cache
331 * invalidate bit set. */
332 gen7_render_ring_cs_stall_wa(req);
333 }
334
335 ret = intel_ring_begin(req, 4);
336 if (ret)
337 return ret;
338
339 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
340 intel_ring_emit(ring, flags);
341 intel_ring_emit(ring, scratch_addr);
342 intel_ring_emit(ring, 0);
343 intel_ring_advance(ring);
344
345 return 0;
346}
347
348static int
349gen8_emit_pipe_control(struct drm_i915_gem_request *req,
350 u32 flags, u32 scratch_addr)
351{
352 struct intel_ring *ring = req->ring;
353 int ret;
354
355 ret = intel_ring_begin(req, 6);
356 if (ret)
357 return ret;
358
359 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
360 intel_ring_emit(ring, flags);
361 intel_ring_emit(ring, scratch_addr);
362 intel_ring_emit(ring, 0);
363 intel_ring_emit(ring, 0);
364 intel_ring_emit(ring, 0);
365 intel_ring_advance(ring);
366
367 return 0;
368}
369
370static int
371gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
372{
373 u32 scratch_addr =
374 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
375 u32 flags = 0;
376 int ret;
377
378 flags |= PIPE_CONTROL_CS_STALL;
379
380 if (mode & EMIT_FLUSH) {
381 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
382 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
383 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
384 flags |= PIPE_CONTROL_FLUSH_ENABLE;
385 }
386 if (mode & EMIT_INVALIDATE) {
387 flags |= PIPE_CONTROL_TLB_INVALIDATE;
388 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
389 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
390 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
391 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
392 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
393 flags |= PIPE_CONTROL_QW_WRITE;
394 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
395
396 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
397 ret = gen8_emit_pipe_control(req,
398 PIPE_CONTROL_CS_STALL |
399 PIPE_CONTROL_STALL_AT_SCOREBOARD,
400 0);
401 if (ret)
402 return ret;
403 }
404
405 return gen8_emit_pipe_control(req, flags, scratch_addr);
406}
407
408static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
409{
410 struct drm_i915_private *dev_priv = engine->i915;
411 u32 addr;
412
413 addr = dev_priv->status_page_dmah->busaddr;
414 if (INTEL_GEN(dev_priv) >= 4)
415 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
416 I915_WRITE(HWS_PGA, addr);
417}
418
419static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
420{
421 struct drm_i915_private *dev_priv = engine->i915;
422 i915_reg_t mmio;
423
424 /* The ring status page addresses are no longer next to the rest of
425 * the ring registers as of gen7.
426 */
427 if (IS_GEN7(dev_priv)) {
428 switch (engine->id) {
429 case RCS:
430 mmio = RENDER_HWS_PGA_GEN7;
431 break;
432 case BCS:
433 mmio = BLT_HWS_PGA_GEN7;
434 break;
435 /*
 436 * VCS2 doesn't actually exist on Gen7; it is listed here only
 437 * to silence gcc's switch check warning.
438 */
439 case VCS2:
440 case VCS:
441 mmio = BSD_HWS_PGA_GEN7;
442 break;
443 case VECS:
444 mmio = VEBOX_HWS_PGA_GEN7;
445 break;
446 }
447 } else if (IS_GEN6(dev_priv)) {
448 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
449 } else {
450 /* XXX: gen8 returns to sanity */
451 mmio = RING_HWS_PGA(engine->mmio_base);
452 }
453
454 I915_WRITE(mmio, engine->status_page.ggtt_offset);
455 POSTING_READ(mmio);
456
457 /*
458 * Flush the TLB for this page
459 *
460 * FIXME: These two bits have disappeared on gen8, so a question
461 * arises: do we still need this and if so how should we go about
462 * invalidating the TLB?
463 */
464 if (IS_GEN(dev_priv, 6, 7)) {
465 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
466
 467 /* ring should be idle before issuing a sync flush */
468 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
469
470 I915_WRITE(reg,
471 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
472 INSTPM_SYNC_FLUSH));
473 if (intel_wait_for_register(dev_priv,
474 reg, INSTPM_SYNC_FLUSH, 0,
475 1000))
476 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
477 engine->name);
478 }
479}
480
481static bool stop_ring(struct intel_engine_cs *engine)
482{
483 struct drm_i915_private *dev_priv = engine->i915;
484
485 if (INTEL_GEN(dev_priv) > 2) {
486 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
487 if (intel_wait_for_register(dev_priv,
488 RING_MI_MODE(engine->mmio_base),
489 MODE_IDLE,
490 MODE_IDLE,
491 1000)) {
492 DRM_ERROR("%s : timed out trying to stop ring\n",
493 engine->name);
494 /* Sometimes we observe that the idle flag is not
495 * set even though the ring is empty. So double
496 * check before giving up.
497 */
498 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
499 return false;
500 }
501 }
502
503 I915_WRITE_CTL(engine, 0);
504 I915_WRITE_HEAD(engine, 0);
505 I915_WRITE_TAIL(engine, 0);
506
507 if (INTEL_GEN(dev_priv) > 2) {
508 (void)I915_READ_CTL(engine);
509 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
510 }
511
512 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
513}
514
515static int init_ring_common(struct intel_engine_cs *engine)
516{
517 struct drm_i915_private *dev_priv = engine->i915;
518 struct intel_ring *ring = engine->buffer;
519 int ret = 0;
520
521 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
522
523 if (!stop_ring(engine)) {
524 /* G45 ring initialization often fails to reset head to zero */
525 DRM_DEBUG_KMS("%s head not reset to zero "
526 "ctl %08x head %08x tail %08x start %08x\n",
527 engine->name,
528 I915_READ_CTL(engine),
529 I915_READ_HEAD(engine),
530 I915_READ_TAIL(engine),
531 I915_READ_START(engine));
532
533 if (!stop_ring(engine)) {
534 DRM_ERROR("failed to set %s head to zero "
535 "ctl %08x head %08x tail %08x start %08x\n",
536 engine->name,
537 I915_READ_CTL(engine),
538 I915_READ_HEAD(engine),
539 I915_READ_TAIL(engine),
540 I915_READ_START(engine));
541 ret = -EIO;
542 goto out;
543 }
544 }
545
546 if (HWS_NEEDS_PHYSICAL(dev_priv))
547 ring_setup_phys_status_page(engine);
548 else
549 intel_ring_setup_status_page(engine);
550
551 intel_engine_reset_breadcrumbs(engine);
552
553 /* Enforce ordering by reading HEAD register back */
554 I915_READ_HEAD(engine);
555
556 /* Initialize the ring. This must happen _after_ we've cleared the ring
557 * registers with the above sequence (the readback of the HEAD registers
558 * also enforces ordering), otherwise the hw might lose the new ring
559 * register values. */
560 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
561
562 /* WaClearRingBufHeadRegAtInit:ctg,elk */
563 if (I915_READ_HEAD(engine))
564 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
565 engine->name, I915_READ_HEAD(engine));
566
567 intel_ring_update_space(ring);
568 I915_WRITE_HEAD(engine, ring->head);
569 I915_WRITE_TAIL(engine, ring->tail);
570 (void)I915_READ_TAIL(engine);
571
572 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
573
574 /* If the head is still not zero, the ring is dead */
575 if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
576 RING_VALID, RING_VALID,
577 50)) {
578 DRM_ERROR("%s initialization failed "
579 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
580 engine->name,
581 I915_READ_CTL(engine),
582 I915_READ_CTL(engine) & RING_VALID,
583 I915_READ_HEAD(engine), ring->head,
584 I915_READ_TAIL(engine), ring->tail,
585 I915_READ_START(engine),
586 i915_ggtt_offset(ring->vma));
587 ret = -EIO;
588 goto out;
589 }
590
591 intel_engine_init_hangcheck(engine);
592
593out:
594 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
595
596 return ret;
597}
598
599static void reset_ring_common(struct intel_engine_cs *engine,
600 struct drm_i915_gem_request *request)
601{
602 struct intel_ring *ring = request->ring;
603
604 ring->head = request->postfix;
605 ring->last_retired_head = -1;
606}
607
608static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
609{
610 struct intel_ring *ring = req->ring;
611 struct i915_workarounds *w = &req->i915->workarounds;
612 int ret, i;
613
614 if (w->count == 0)
615 return 0;
616
617 ret = req->engine->emit_flush(req, EMIT_BARRIER);
618 if (ret)
619 return ret;
620
621 ret = intel_ring_begin(req, (w->count * 2 + 2));
622 if (ret)
623 return ret;
624
625 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
626 for (i = 0; i < w->count; i++) {
627 intel_ring_emit_reg(ring, w->reg[i].addr);
628 intel_ring_emit(ring, w->reg[i].value);
629 }
630 intel_ring_emit(ring, MI_NOOP);
631
632 intel_ring_advance(ring);
633
634 ret = req->engine->emit_flush(req, EMIT_BARRIER);
635 if (ret)
636 return ret;
637
638 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
639
640 return 0;
641}
642
643static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
644{
645 int ret;
646
647 ret = intel_ring_workarounds_emit(req);
648 if (ret != 0)
649 return ret;
650
651 ret = i915_gem_render_state_emit(req);
652 if (ret)
653 return ret;
654
655 return 0;
656}
657
658static int wa_add(struct drm_i915_private *dev_priv,
659 i915_reg_t addr,
660 const u32 mask, const u32 val)
661{
662 const u32 idx = dev_priv->workarounds.count;
663
664 if (WARN_ON(idx >= I915_MAX_WA_REGS))
665 return -ENOSPC;
666
667 dev_priv->workarounds.reg[idx].addr = addr;
668 dev_priv->workarounds.reg[idx].value = val;
669 dev_priv->workarounds.reg[idx].mask = mask;
670
671 dev_priv->workarounds.count++;
672
673 return 0;
674}
675
676#define WA_REG(addr, mask, val) do { \
677 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
678 if (r) \
679 return r; \
680 } while (0)
681
682#define WA_SET_BIT_MASKED(addr, mask) \
683 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
684
685#define WA_CLR_BIT_MASKED(addr, mask) \
686 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
687
688#define WA_SET_FIELD_MASKED(addr, mask, value) \
689 WA_REG(addr, mask, _MASKED_FIELD(mask, value))
690
691#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
692#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
693
694#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
695
696static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
697 i915_reg_t reg)
698{
699 struct drm_i915_private *dev_priv = engine->i915;
700 struct i915_workarounds *wa = &dev_priv->workarounds;
701 const uint32_t index = wa->hw_whitelist_count[engine->id];
702
703 if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
704 return -EINVAL;
705
706 WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
707 i915_mmio_reg_offset(reg));
708 wa->hw_whitelist_count[engine->id]++;
709
710 return 0;
711}
712
713static int gen8_init_workarounds(struct intel_engine_cs *engine)
714{
715 struct drm_i915_private *dev_priv = engine->i915;
716
717 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
718
719 /* WaDisableAsyncFlipPerfMode:bdw,chv */
720 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
721
722 /* WaDisablePartialInstShootdown:bdw,chv */
723 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
724 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
725
726 /* Use Force Non-Coherent whenever executing a 3D context. This is a
727 * workaround for a possible hang in the unlikely event a TLB
728 * invalidation occurs during a PSD flush.
729 */
730 /* WaForceEnableNonCoherent:bdw,chv */
731 /* WaHdcDisableFetchWhenMasked:bdw,chv */
732 WA_SET_BIT_MASKED(HDC_CHICKEN0,
733 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
734 HDC_FORCE_NON_COHERENT);
735
736 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
737 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
738 * polygons in the same 8x4 pixel/sample area to be processed without
739 * stalling waiting for the earlier ones to write to Hierarchical Z
740 * buffer."
741 *
742 * This optimization is off by default for BDW and CHV; turn it on.
743 */
744 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
745
746 /* Wa4x4STCOptimizationDisable:bdw,chv */
747 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
748
749 /*
750 * BSpec recommends 8x4 when MSAA is used,
751 * however in practice 16x4 seems fastest.
752 *
753 * Note that PS/WM thread counts depend on the WIZ hashing
754 * disable bit, which we don't touch here, but it's good
755 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
756 */
757 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
758 GEN6_WIZ_HASHING_MASK,
759 GEN6_WIZ_HASHING_16x4);
760
761 return 0;
762}
763
764static int bdw_init_workarounds(struct intel_engine_cs *engine)
765{
766 struct drm_i915_private *dev_priv = engine->i915;
767 int ret;
768
769 ret = gen8_init_workarounds(engine);
770 if (ret)
771 return ret;
772
773 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
774 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
775
776 /* WaDisableDopClockGating:bdw */
777 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
778 DOP_CLOCK_GATING_DISABLE);
779
780 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
781 GEN8_SAMPLER_POWER_BYPASS_DIS);
782
783 WA_SET_BIT_MASKED(HDC_CHICKEN0,
784 /* WaForceContextSaveRestoreNonCoherent:bdw */
785 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
786 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
787 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
788
789 return 0;
790}
791
792static int chv_init_workarounds(struct intel_engine_cs *engine)
793{
794 struct drm_i915_private *dev_priv = engine->i915;
795 int ret;
796
797 ret = gen8_init_workarounds(engine);
798 if (ret)
799 return ret;
800
801 /* WaDisableThreadStallDopClockGating:chv */
802 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
803
804 /* Improve HiZ throughput on CHV. */
805 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
806
807 return 0;
808}
809
810static int gen9_init_workarounds(struct intel_engine_cs *engine)
811{
812 struct drm_i915_private *dev_priv = engine->i915;
813 int ret;
814
815 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
816 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
817
818 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
819 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
820 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
821
822 /* WaDisableKillLogic:bxt,skl,kbl */
823 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
824 ECOCHK_DIS_TLB);
825
826 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
827 /* WaDisablePartialInstShootdown:skl,bxt,kbl */
828 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
829 FLOW_CONTROL_ENABLE |
830 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
831
832 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
833 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
834 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
835
836 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
837 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
838 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
839 GEN9_DG_MIRROR_FIX_ENABLE);
840
841 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
842 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
843 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
844 GEN9_RHWO_OPTIMIZATION_DISABLE);
845 /*
846 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set,
847 * but we do that in the per-ctx batchbuffer as there is an issue
848 * with this register not getting restored on context restore
849 */
850 }
851
852 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
853 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
854 GEN9_ENABLE_GPGPU_PREEMPTION);
855
856 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
857 /* WaDisablePartialResolveInVc:skl,bxt,kbl */
858 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
859 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
860
861 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
862 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
863 GEN9_CCS_TLB_PREFETCH_ENABLE);
864
865 /* WaDisableMaskBasedCammingInRCC:bxt */
866 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
867 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
868 PIXEL_MASK_CAMMING_DISABLE);
869
870 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
871 WA_SET_BIT_MASKED(HDC_CHICKEN0,
872 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
873 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
874
875 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
876 * both tied to WaForceContextSaveRestoreNonCoherent
877 * in some hsds for skl. We keep the tie for all gen9. The
878 * documentation is a bit hazy and so we want to get common behaviour,
879 * even though there is no clear evidence we would need both on kbl/bxt.
880 * This area has been a source of system hangs so we play it safe
881 * and mimic skl behaviour regardless of what bspec says.
882 *
883 * Use Force Non-Coherent whenever executing a 3D context. This
884 * is a workaround for a possible hang in the unlikely event
885 * a TLB invalidation occurs during a PSD flush.
886 */
887
888 /* WaForceEnableNonCoherent:skl,bxt,kbl */
889 WA_SET_BIT_MASKED(HDC_CHICKEN0,
890 HDC_FORCE_NON_COHERENT);
891
892 /* WaDisableHDCInvalidation:skl,bxt,kbl */
893 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
894 BDW_DISABLE_HDC_INVALIDATION);
895
896 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
897 if (IS_SKYLAKE(dev_priv) ||
898 IS_KABYLAKE(dev_priv) ||
899 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
900 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
901 GEN8_SAMPLER_POWER_BYPASS_DIS);
902
903 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
904 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
905
906 /* WaOCLCoherentLineFlush:skl,bxt,kbl */
907 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
908 GEN8_LQSC_FLUSH_COHERENT_LINES));
909
910 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
911 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
912 if (ret)
913 return ret;
914
915 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
916 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
917 if (ret)
918 return ret;
919
920 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
921 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
922 if (ret)
923 return ret;
924
925 return 0;
926}
927
928static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
929{
930 struct drm_i915_private *dev_priv = engine->i915;
931 u8 vals[3] = { 0, 0, 0 };
932 unsigned int i;
933
934 for (i = 0; i < 3; i++) {
935 u8 ss;
936
937 /*
938 * Only consider slices where one, and only one, subslice has 7
939 * EUs
940 */
941 if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
942 continue;
943
944 /*
945 * subslice_7eu[i] != 0 (because of the check above) and
946 * ss_max == 4 (maximum number of subslices possible per slice)
947 *
948 * -> 0 <= ss <= 3;
949 */
950 ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
951 vals[i] = 3 - ss;
952 }
953
954 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
955 return 0;
956
957 /* Tune IZ hashing. See intel_device_info_runtime_init() */
958 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
959 GEN9_IZ_HASHING_MASK(2) |
960 GEN9_IZ_HASHING_MASK(1) |
961 GEN9_IZ_HASHING_MASK(0),
962 GEN9_IZ_HASHING(2, vals[2]) |
963 GEN9_IZ_HASHING(1, vals[1]) |
964 GEN9_IZ_HASHING(0, vals[0]));
965
966 return 0;
967}
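
/*
 * Worked example (illustrative only): if slice 0 reports
 * sseu.subslice_7eu[0] == BIT(2), i.e. only subslice 2 has the full
 * complement of 7 EUs, then is_power_of_2() passes,
 * ss = ffs(BIT(2)) - 1 = 2 and vals[0] = 3 - ss = 1, so
 * GEN9_IZ_HASHING(0, 1) is programmed for that slice. Slices with zero
 * or with several 7-EU subslices are skipped and keep the default
 * hashing (vals[i] == 0).
 */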
968
969static int skl_init_workarounds(struct intel_engine_cs *engine)
970{
971 struct drm_i915_private *dev_priv = engine->i915;
972 int ret;
973
974 ret = gen9_init_workarounds(engine);
975 if (ret)
976 return ret;
977
978 /*
979 * The actual WA is to disable per-context preemption granularity control
980 * until D0, which is the default case, so this is equivalent to
981 * !WaDisablePerCtxtPreemptionGranularityControl:skl
982 */
983 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
984 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
985
986 /* WaEnableGapsTsvCreditFix:skl */
987 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
988 GEN9_GAPS_TSV_CREDIT_DISABLE));
989
990 /* WaDisableGafsUnitClkGating:skl */
991 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
992
993 /* WaInPlaceDecompressionHang:skl */
994 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
995 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
996 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
997
998 /* WaDisableLSQCROPERFforOCL:skl */
999 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1000 if (ret)
1001 return ret;
1002
1003 return skl_tune_iz_hashing(engine);
1004}
1005
1006static int bxt_init_workarounds(struct intel_engine_cs *engine)
1007{
1008 struct drm_i915_private *dev_priv = engine->i915;
1009 int ret;
1010
1011 ret = gen9_init_workarounds(engine);
1012 if (ret)
1013 return ret;
1014
1015 /* WaStoreMultiplePTEenable:bxt */
1016 /* This is a requirement according to the hardware specification */
1017 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1018 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1019
1020 /* WaSetClckGatingDisableMedia:bxt */
1021 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1022 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1023 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1024 }
1025
1026 /* WaDisableThreadStallDopClockGating:bxt */
1027 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1028 STALL_DOP_GATING_DISABLE);
1029
1030 /* WaDisablePooledEuLoadBalancingFix:bxt */
1031 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1032 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1033 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1034 }
1035
1036 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1037 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1038 WA_SET_BIT_MASKED(
1039 GEN7_HALF_SLICE_CHICKEN1,
1040 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1041 }
1042
1043 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1044 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1045 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1046 /* WaDisableLSQCROPERFforOCL:bxt */
1047 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1048 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1049 if (ret)
1050 return ret;
1051
1052 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1053 if (ret)
1054 return ret;
1055 }
1056
1057 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1058 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1059 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1060 L3_HIGH_PRIO_CREDITS(2));
1061
1062 /* WaToEnableHwFixForPushConstHWBug:bxt */
1063 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1064 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1065 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1066
1067 /* WaInPlaceDecompressionHang:bxt */
1068 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1069 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1070 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1071
1072 return 0;
1073}
1074
1075static int kbl_init_workarounds(struct intel_engine_cs *engine)
1076{
1077 struct drm_i915_private *dev_priv = engine->i915;
1078 int ret;
1079
1080 ret = gen9_init_workarounds(engine);
1081 if (ret)
1082 return ret;
1083
1084 /* WaEnableGapsTsvCreditFix:kbl */
1085 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1086 GEN9_GAPS_TSV_CREDIT_DISABLE));
1087
1088 /* WaDisableDynamicCreditSharing:kbl */
1089 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1090 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1091 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1092
1093 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1094 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1095 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1096 HDC_FENCE_DEST_SLM_DISABLE);
1097
1098 /* WaToEnableHwFixForPushConstHWBug:kbl */
1099 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1100 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1101 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1102
1103 /* WaDisableGafsUnitClkGating:kbl */
1104 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1105
1106 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1107 WA_SET_BIT_MASKED(
1108 GEN7_HALF_SLICE_CHICKEN1,
1109 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1110
1111 /* WaInPlaceDecompressionHang:kbl */
1112 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1113 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1114
1115 /* WaDisableLSQCROPERFforOCL:kbl */
1116 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1117 if (ret)
1118 return ret;
1119
1120 return 0;
1121}
1122
1123int init_workarounds_ring(struct intel_engine_cs *engine)
1124{
1125 struct drm_i915_private *dev_priv = engine->i915;
1126
1127 WARN_ON(engine->id != RCS);
1128
1129 dev_priv->workarounds.count = 0;
1130 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1131
1132 if (IS_BROADWELL(dev_priv))
1133 return bdw_init_workarounds(engine);
1134
1135 if (IS_CHERRYVIEW(dev_priv))
1136 return chv_init_workarounds(engine);
1137
1138 if (IS_SKYLAKE(dev_priv))
1139 return skl_init_workarounds(engine);
1140
1141 if (IS_BROXTON(dev_priv))
1142 return bxt_init_workarounds(engine);
1143
1144 if (IS_KABYLAKE(dev_priv))
1145 return kbl_init_workarounds(engine);
1146
1147 return 0;
1148}
1149
1150static int init_render_ring(struct intel_engine_cs *engine)
1151{
1152 struct drm_i915_private *dev_priv = engine->i915;
1153 int ret = init_ring_common(engine);
1154 if (ret)
1155 return ret;
1156
1157 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1158 if (IS_GEN(dev_priv, 4, 6))
1159 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1160
1161 /* We need to disable the AsyncFlip performance optimisations in order
1162 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1163 * programmed to '1' on all products.
1164 *
1165 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1166 */
1167 if (IS_GEN(dev_priv, 6, 7))
1168 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1169
1170 /* Required for the hardware to program scanline values for waiting */
1171 /* WaEnableFlushTlbInvalidationMode:snb */
1172 if (IS_GEN6(dev_priv))
1173 I915_WRITE(GFX_MODE,
1174 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1175
1176 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1177 if (IS_GEN7(dev_priv))
1178 I915_WRITE(GFX_MODE_GEN7,
1179 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1180 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1181
1182 if (IS_GEN6(dev_priv)) {
1183 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1184 * "If this bit is set, STCunit will have LRA as replacement
1185 * policy. [...] This bit must be reset. LRA replacement
1186 * policy is not supported."
1187 */
1188 I915_WRITE(CACHE_MODE_0,
1189 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1190 }
1191
1192 if (IS_GEN(dev_priv, 6, 7))
1193 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1194
1195 if (INTEL_INFO(dev_priv)->gen >= 6)
1196 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1197
1198 return init_workarounds_ring(engine);
1199}
1200
1201static void render_ring_cleanup(struct intel_engine_cs *engine)
1202{
1203 struct drm_i915_private *dev_priv = engine->i915;
1204
1205 i915_vma_unpin_and_release(&dev_priv->semaphore);
1206}
1207
1208static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
1209{
1210 struct drm_i915_private *dev_priv = req->i915;
1211 struct intel_engine_cs *waiter;
1212 enum intel_engine_id id;
1213
1214 for_each_engine(waiter, dev_priv, id) {
1215 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1216 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1217 continue;
1218
1219 *out++ = GFX_OP_PIPE_CONTROL(6);
1220 *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
1221 PIPE_CONTROL_QW_WRITE |
1222 PIPE_CONTROL_CS_STALL);
1223 *out++ = lower_32_bits(gtt_offset);
1224 *out++ = upper_32_bits(gtt_offset);
1225 *out++ = req->global_seqno;
1226 *out++ = 0;
1227 *out++ = (MI_SEMAPHORE_SIGNAL |
1228 MI_SEMAPHORE_TARGET(waiter->hw_id));
1229 *out++ = 0;
1230 }
1231
1232 return out;
1233}
1234
1235static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
1236{
1237 struct drm_i915_private *dev_priv = req->i915;
1238 struct intel_engine_cs *waiter;
1239 enum intel_engine_id id;
1240
1241 for_each_engine(waiter, dev_priv, id) {
1242 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1243 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1244 continue;
1245
1246 *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
1247 *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
1248 *out++ = upper_32_bits(gtt_offset);
1249 *out++ = req->global_seqno;
1250 *out++ = (MI_SEMAPHORE_SIGNAL |
1251 MI_SEMAPHORE_TARGET(waiter->hw_id));
1252 *out++ = 0;
1253 }
1254
1255 return out;
1256}
1257
1258static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
1259{
1260 struct drm_i915_private *dev_priv = req->i915;
1261 struct intel_engine_cs *engine;
1262 enum intel_engine_id id;
1263 int num_rings = 0;
1264
1265 for_each_engine(engine, dev_priv, id) {
1266 i915_reg_t mbox_reg;
1267
1268 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
1269 continue;
1270
1271 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
1272 if (i915_mmio_reg_valid(mbox_reg)) {
1273 *out++ = MI_LOAD_REGISTER_IMM(1);
1274 *out++ = i915_mmio_reg_offset(mbox_reg);
1275 *out++ = req->global_seqno;
1276 num_rings++;
1277 }
1278 }
1279 if (num_rings & 1)
1280 *out++ = MI_NOOP;
1281
1282 return out;
1283}
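
/*
 * Illustrative layout: for each remote engine with a valid mailbox,
 * gen6_signal() emits three dwords,
 *
 *	MI_LOAD_REGISTER_IMM(1)
 *	<mailbox register, e.g. GEN6_VRSYNC when RCS signals VCS>
 *	<req->global_seqno>
 *
 * and appends one MI_NOOP when an odd number of mailboxes was written,
 * padding the total to an even number of dwords. This is what the
 * num_rings * 3 (+1 when odd) reservation in intel_ring_default_vfuncs()
 * accounts for.
 */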
1284
1285static void i9xx_submit_request(struct drm_i915_gem_request *request)
1286{
1287 struct drm_i915_private *dev_priv = request->i915;
1288
1289 i915_gem_request_submit(request);
1290
1291 I915_WRITE_TAIL(request->engine, request->tail);
1292}
1293
1294static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
1295 u32 *out)
1296{
1297 *out++ = MI_STORE_DWORD_INDEX;
1298 *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
1299 *out++ = req->global_seqno;
1300 *out++ = MI_USER_INTERRUPT;
1301
1302 req->tail = intel_ring_offset(req->ring, out);
1303}
1304
1305static const int i9xx_emit_breadcrumb_sz = 4;
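
/*
 * The size above must match the dwords written by i9xx_emit_breadcrumb():
 * MI_STORE_DWORD_INDEX, the HWS index, the seqno and MI_USER_INTERRUPT --
 * four in total.
 */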
1306
1307/**
1308 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
1309 *
1310 * @req: request to write to the ring
1311 *
1312 * Update the mailbox registers in the *other* rings with the current seqno.
1313 * This acts like a signal in the canonical semaphore.
1314 */
1315static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
1316 u32 *out)
1317{
1318 return i9xx_emit_breadcrumb(req,
1319 req->engine->semaphore.signal(req, out));
1320}
1321
1322static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
1323 u32 *out)
1324{
1325 struct intel_engine_cs *engine = req->engine;
1326
1327 if (engine->semaphore.signal)
1328 out = engine->semaphore.signal(req, out);
1329
1330 *out++ = GFX_OP_PIPE_CONTROL(6);
1331 *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
1332 PIPE_CONTROL_CS_STALL |
1333 PIPE_CONTROL_QW_WRITE);
1334 *out++ = intel_hws_seqno_address(engine);
1335 *out++ = 0;
1336 *out++ = req->global_seqno;
1337 /* We're thrashing one dword of HWS. */
1338 *out++ = 0;
1339 *out++ = MI_USER_INTERRUPT;
1340 *out++ = MI_NOOP;
1341
1342 req->tail = intel_ring_offset(req->ring, out);
1343}
1344
1345static const int gen8_render_emit_breadcrumb_sz = 8;
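
/*
 * Likewise, gen8_render_emit_breadcrumb() writes a six-dword PIPE_CONTROL
 * followed by MI_USER_INTERRUPT and MI_NOOP, i.e. eight dwords, before any
 * per-engine semaphore signals are added to the size by
 * intel_init_render_ring_buffer().
 */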
1346
1347/**
1348 * gen8_ring_sync_to - sync the waiter to the signaller on seqno
1349 *
1350 * @req: request that is waiting
1351 * @signal: request which has, or will, signal
1352 *
1353 */
1354
1355static int
1356gen8_ring_sync_to(struct drm_i915_gem_request *req,
1357 struct drm_i915_gem_request *signal)
1358{
1359 struct intel_ring *ring = req->ring;
1360 struct drm_i915_private *dev_priv = req->i915;
1361 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
1362 struct i915_hw_ppgtt *ppgtt;
1363 int ret;
1364
1365 ret = intel_ring_begin(req, 4);
1366 if (ret)
1367 return ret;
1368
1369 intel_ring_emit(ring,
1370 MI_SEMAPHORE_WAIT |
1371 MI_SEMAPHORE_GLOBAL_GTT |
1372 MI_SEMAPHORE_SAD_GTE_SDD);
1373 intel_ring_emit(ring, signal->global_seqno);
1374 intel_ring_emit(ring, lower_32_bits(offset));
1375 intel_ring_emit(ring, upper_32_bits(offset));
1376 intel_ring_advance(ring);
1377
1378 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1379 * pagetables and we must reload them before executing the batch.
1380 * We do this on the i915_switch_context() following the wait and
1381 * before the dispatch.
1382 */
1383 ppgtt = req->ctx->ppgtt;
1384 if (ppgtt && req->engine->id != RCS)
1385 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
1386 return 0;
1387}
1388
1389static int
1390gen6_ring_sync_to(struct drm_i915_gem_request *req,
1391 struct drm_i915_gem_request *signal)
1392{
1393 struct intel_ring *ring = req->ring;
1394 u32 dw1 = MI_SEMAPHORE_MBOX |
1395 MI_SEMAPHORE_COMPARE |
1396 MI_SEMAPHORE_REGISTER;
1397 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
1398 int ret;
1399
1400 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1401
1402 ret = intel_ring_begin(req, 4);
1403 if (ret)
1404 return ret;
1405
1406 intel_ring_emit(ring, dw1 | wait_mbox);
1407 /* Throughout all of the GEM code, seqno passed implies our current
1408 * seqno is >= the last seqno executed. However for hardware the
1409 * comparison is strictly greater than.
1410 */
1411 intel_ring_emit(ring, signal->global_seqno - 1);
1412 intel_ring_emit(ring, 0);
1413 intel_ring_emit(ring, MI_NOOP);
1414 intel_ring_advance(ring);
1415
1416 return 0;
1417}
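
/*
 * Worked example (illustrative): if the signalling request was assigned
 * global_seqno 42, its mailbox will eventually hold 42. The hardware
 * comparison releases the waiter only when the mailbox value is strictly
 * greater than the emitted operand, so emitting 41 (global_seqno - 1)
 * releases the waiter exactly when the mailbox reaches 42, matching the
 * software convention that a completed seqno compares >=.
 */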
1418
1419static void
1420gen5_seqno_barrier(struct intel_engine_cs *engine)
1421{
1422 /* MI_STORE commands are internally buffered by the GPU and not flushed
1423 * either by MI_FLUSH or SyncFlush or any other combination of
1424 * MI commands.
1425 *
1426 * "Only the submission of the store operation is guaranteed.
1427 * The write result will be complete (coherent) some time later
1428 * (this is practically a finite period but there is no guaranteed
1429 * latency)."
1430 *
1431 * Empirically, we observe that we need a delay of at least 75us to
1432 * be sure that the seqno write is visible by the CPU.
1433 */
1434 usleep_range(125, 250);
1435}
1436
1437static void
1438gen6_seqno_barrier(struct intel_engine_cs *engine)
1439{
1440 struct drm_i915_private *dev_priv = engine->i915;
1441
1442 /* Workaround to force correct ordering between irq and seqno writes on
1443 * ivb (and maybe also on snb) by reading from a CS register (like
1444 * ACTHD) before reading the status page.
1445 *
1446 * Note that this effectively stalls the read by the time it takes to
1447 * do a memory transaction, which more or less ensures that the write
1448 * from the GPU has sufficient time to invalidate the CPU cacheline.
1449 * Alternatively we could delay the interrupt from the CS ring to give
1450 * the write time to land, but that would incur a delay after every
1451 * batch i.e. much more frequent than a delay when waiting for the
1452 * interrupt (with the same net latency).
1453 *
1454 * Also note that to prevent whole machine hangs on gen7, we have to
1455 * take the spinlock to guard against concurrent cacheline access.
1456 */
1457 spin_lock_irq(&dev_priv->uncore.lock);
1458 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
1459 spin_unlock_irq(&dev_priv->uncore.lock);
1460}
1461
1462static void
1463gen5_irq_enable(struct intel_engine_cs *engine)
1464{
1465 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
1466}
1467
1468static void
1469gen5_irq_disable(struct intel_engine_cs *engine)
1470{
1471 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
1472}
1473
1474static void
1475i9xx_irq_enable(struct intel_engine_cs *engine)
1476{
1477 struct drm_i915_private *dev_priv = engine->i915;
1478
1479 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1480 I915_WRITE(IMR, dev_priv->irq_mask);
1481 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1482}
1483
1484static void
1485i9xx_irq_disable(struct intel_engine_cs *engine)
1486{
1487 struct drm_i915_private *dev_priv = engine->i915;
1488
1489 dev_priv->irq_mask |= engine->irq_enable_mask;
1490 I915_WRITE(IMR, dev_priv->irq_mask);
1491}
1492
1493static void
1494i8xx_irq_enable(struct intel_engine_cs *engine)
1495{
1496 struct drm_i915_private *dev_priv = engine->i915;
1497
1498 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1499 I915_WRITE16(IMR, dev_priv->irq_mask);
1500 POSTING_READ16(RING_IMR(engine->mmio_base));
1501}
1502
1503static void
1504i8xx_irq_disable(struct intel_engine_cs *engine)
1505{
1506 struct drm_i915_private *dev_priv = engine->i915;
1507
1508 dev_priv->irq_mask |= engine->irq_enable_mask;
1509 I915_WRITE16(IMR, dev_priv->irq_mask);
1510}
1511
1512static int
1513bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1514{
1515 struct intel_ring *ring = req->ring;
1516 int ret;
1517
1518 ret = intel_ring_begin(req, 2);
1519 if (ret)
1520 return ret;
1521
1522 intel_ring_emit(ring, MI_FLUSH);
1523 intel_ring_emit(ring, MI_NOOP);
1524 intel_ring_advance(ring);
1525 return 0;
1526}
1527
1528static void
1529gen6_irq_enable(struct intel_engine_cs *engine)
1530{
1531 struct drm_i915_private *dev_priv = engine->i915;
1532
1533 I915_WRITE_IMR(engine,
1534 ~(engine->irq_enable_mask |
1535 engine->irq_keep_mask));
1536 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1537}
1538
1539static void
1540gen6_irq_disable(struct intel_engine_cs *engine)
1541{
1542 struct drm_i915_private *dev_priv = engine->i915;
1543
1544 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1545 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1546}
1547
1548static void
1549hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1550{
1551 struct drm_i915_private *dev_priv = engine->i915;
1552
1553 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1554 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
1555}
1556
1557static void
1558hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1559{
1560 struct drm_i915_private *dev_priv = engine->i915;
1561
1562 I915_WRITE_IMR(engine, ~0);
1563 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
1564}
1565
1566static void
1567gen8_irq_enable(struct intel_engine_cs *engine)
1568{
1569 struct drm_i915_private *dev_priv = engine->i915;
1570
1571 I915_WRITE_IMR(engine,
1572 ~(engine->irq_enable_mask |
1573 engine->irq_keep_mask));
1574 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1575}
1576
1577static void
1578gen8_irq_disable(struct intel_engine_cs *engine)
1579{
1580 struct drm_i915_private *dev_priv = engine->i915;
1581
1582 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1583}
1584
1585static int
1586i965_emit_bb_start(struct drm_i915_gem_request *req,
1587 u64 offset, u32 length,
1588 unsigned int dispatch_flags)
1589{
1590 struct intel_ring *ring = req->ring;
1591 int ret;
1592
1593 ret = intel_ring_begin(req, 2);
1594 if (ret)
1595 return ret;
1596
1597 intel_ring_emit(ring,
1598 MI_BATCH_BUFFER_START |
1599 MI_BATCH_GTT |
1600 (dispatch_flags & I915_DISPATCH_SECURE ?
1601 0 : MI_BATCH_NON_SECURE_I965));
1602 intel_ring_emit(ring, offset);
1603 intel_ring_advance(ring);
1604
1605 return 0;
1606}
1607
1608 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1609#define I830_BATCH_LIMIT (256*1024)
1610#define I830_TLB_ENTRIES (2)
1611#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1612static int
1613i830_emit_bb_start(struct drm_i915_gem_request *req,
1614 u64 offset, u32 len,
1615 unsigned int dispatch_flags)
1616{
1617 struct intel_ring *ring = req->ring;
1618 u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
1619 int ret;
1620
1621 ret = intel_ring_begin(req, 6);
1622 if (ret)
1623 return ret;
1624
1625 /* Evict the invalid PTE TLBs */
1626 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1627 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1628 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1629 intel_ring_emit(ring, cs_offset);
1630 intel_ring_emit(ring, 0xdeadbeef);
1631 intel_ring_emit(ring, MI_NOOP);
1632 intel_ring_advance(ring);
1633
1634 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1635 if (len > I830_BATCH_LIMIT)
1636 return -ENOSPC;
1637
1638 ret = intel_ring_begin(req, 6 + 2);
1639 if (ret)
1640 return ret;
1641
1642 /* Blit the batch (which has now all relocs applied) to the
1643 * stable batch scratch bo area (so that the CS never
1644 * stumbles over its tlb invalidation bug) ...
1645 */
1646 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1647 intel_ring_emit(ring,
1648 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
1649 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1650 intel_ring_emit(ring, cs_offset);
1651 intel_ring_emit(ring, 4096);
1652 intel_ring_emit(ring, offset);
1653
1654 intel_ring_emit(ring, MI_FLUSH);
1655 intel_ring_emit(ring, MI_NOOP);
1656 intel_ring_advance(ring);
1657
1658 /* ... and execute it. */
1659 offset = cs_offset;
1660 }
1661
1662 ret = intel_ring_begin(req, 2);
1663 if (ret)
1664 return ret;
1665
1666 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1667 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1668 0 : MI_BATCH_NON_SECURE));
1669 intel_ring_advance(ring);
1670
1671 return 0;
1672}
1673
1674static int
1675i915_emit_bb_start(struct drm_i915_gem_request *req,
1676 u64 offset, u32 len,
1677 unsigned int dispatch_flags)
1678{
1679 struct intel_ring *ring = req->ring;
1680 int ret;
1681
1682 ret = intel_ring_begin(req, 2);
1683 if (ret)
1684 return ret;
1685
1686 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1687 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1688 0 : MI_BATCH_NON_SECURE));
1689 intel_ring_advance(ring);
1690
1691 return 0;
1692}
1693
1694static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1695{
1696 struct drm_i915_private *dev_priv = engine->i915;
1697
1698 if (!dev_priv->status_page_dmah)
1699 return;
1700
1701 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1702 engine->status_page.page_addr = NULL;
1703}
1704
1705static void cleanup_status_page(struct intel_engine_cs *engine)
1706{
1707 struct i915_vma *vma;
1708 struct drm_i915_gem_object *obj;
1709
1710 vma = fetch_and_zero(&engine->status_page.vma);
1711 if (!vma)
1712 return;
1713
1714 obj = vma->obj;
1715
1716 i915_vma_unpin(vma);
1717 i915_vma_close(vma);
1718
1719 i915_gem_object_unpin_map(obj);
1720 __i915_gem_object_release_unless_active(obj);
1721}
1722
1723static int init_status_page(struct intel_engine_cs *engine)
1724{
1725 struct drm_i915_gem_object *obj;
1726 struct i915_vma *vma;
1727 unsigned int flags;
1728 void *vaddr;
1729 int ret;
1730
1731 obj = i915_gem_object_create_internal(engine->i915, 4096);
1732 if (IS_ERR(obj)) {
1733 DRM_ERROR("Failed to allocate status page\n");
1734 return PTR_ERR(obj);
1735 }
1736
1737 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1738 if (ret)
1739 goto err;
1740
1741 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1742 if (IS_ERR(vma)) {
1743 ret = PTR_ERR(vma);
1744 goto err;
1745 }
1746
1747 flags = PIN_GLOBAL;
1748 if (!HAS_LLC(engine->i915))
1749 /* On g33, we cannot place HWS above 256MiB, so
1750 * restrict its pinning to the low mappable arena.
1751 * Though this restriction is not documented for
1752 * gen4, gen5, or byt, they also behave similarly
1753 * and hang if the HWS is placed at the top of the
1754 * GTT. To generalise, it appears that all !llc
1755 * platforms have issues with us placing the HWS
1756 * above the mappable region (even though we never
1757 * actually map it).
1758 */
1759 flags |= PIN_MAPPABLE;
1760 ret = i915_vma_pin(vma, 0, 4096, flags);
1761 if (ret)
1762 goto err;
1763
1764 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1765 if (IS_ERR(vaddr)) {
1766 ret = PTR_ERR(vaddr);
1767 goto err_unpin;
1768 }
1769
1770 engine->status_page.vma = vma;
1771 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1772 engine->status_page.page_addr = memset(vaddr, 0, 4096);
1773
1774 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1775 engine->name, i915_ggtt_offset(vma));
1776 return 0;
1777
1778err_unpin:
1779 i915_vma_unpin(vma);
1780err:
1781 i915_gem_object_put(obj);
1782 return ret;
1783}
1784
1785static int init_phys_status_page(struct intel_engine_cs *engine)
1786{
1787 struct drm_i915_private *dev_priv = engine->i915;
1788
1789 dev_priv->status_page_dmah =
1790 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1791 if (!dev_priv->status_page_dmah)
1792 return -ENOMEM;
1793
1794 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1795 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1796
1797 return 0;
1798}
1799
1800int intel_ring_pin(struct intel_ring *ring)
1801{
1802 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1803 unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
1804 enum i915_map_type map;
1805 struct i915_vma *vma = ring->vma;
1806 void *addr;
1807 int ret;
1808
1809 GEM_BUG_ON(ring->vaddr);
1810
1811 map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
1812
1813 if (vma->obj->stolen)
1814 flags |= PIN_MAPPABLE;
1815
1816 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1817 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1818 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1819 else
1820 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1821 if (unlikely(ret))
1822 return ret;
1823 }
1824
1825 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1826 if (unlikely(ret))
1827 return ret;
1828
1829 if (i915_vma_is_map_and_fenceable(vma))
1830 addr = (void __force *)i915_vma_pin_iomap(vma);
1831 else
1832 addr = i915_gem_object_pin_map(vma->obj, map);
1833 if (IS_ERR(addr))
1834 goto err;
1835
1836 ring->vaddr = addr;
1837 return 0;
1838
1839err:
1840 i915_vma_unpin(vma);
1841 return PTR_ERR(addr);
1842}
1843
1844void intel_ring_unpin(struct intel_ring *ring)
1845{
1846 GEM_BUG_ON(!ring->vma);
1847 GEM_BUG_ON(!ring->vaddr);
1848
1849 if (i915_vma_is_map_and_fenceable(ring->vma))
1850 i915_vma_unpin_iomap(ring->vma);
1851 else
1852 i915_gem_object_unpin_map(ring->vma->obj);
1853 ring->vaddr = NULL;
1854
1855 i915_vma_unpin(ring->vma);
1856}
1857
1858static struct i915_vma *
1859intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1860{
1861 struct drm_i915_gem_object *obj;
1862 struct i915_vma *vma;
1863
1864 obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
1865 if (!obj)
1866 obj = i915_gem_object_create(&dev_priv->drm, size);
1867 if (IS_ERR(obj))
1868 return ERR_CAST(obj);
1869
1870 /* mark ring buffers as read-only from GPU side by default */
1871 obj->gt_ro = 1;
1872
1873 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
1874 if (IS_ERR(vma))
1875 goto err;
1876
1877 return vma;
1878
1879err:
1880 i915_gem_object_put(obj);
1881 return vma;
1882}
1883
1884struct intel_ring *
1885intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1886{
1887 struct intel_ring *ring;
1888 struct i915_vma *vma;
1889
1890 GEM_BUG_ON(!is_power_of_2(size));
1891 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1892
1893 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1894 if (!ring)
1895 return ERR_PTR(-ENOMEM);
1896
1897 ring->engine = engine;
1898
1899 INIT_LIST_HEAD(&ring->request_list);
1900
1901 ring->size = size;
1902 /* Workaround an erratum on the i830 which causes a hang if
1903 * the TAIL pointer points to within the last 2 cachelines
1904 * of the buffer.
1905 */
1906 ring->effective_size = size;
1907 if (IS_I830(engine->i915) || IS_845G(engine->i915))
1908 ring->effective_size -= 2 * CACHELINE_BYTES;
1909
1910 ring->last_retired_head = -1;
1911 intel_ring_update_space(ring);
1912
1913 vma = intel_ring_create_vma(engine->i915, size);
1914 if (IS_ERR(vma)) {
1915 kfree(ring);
1916 return ERR_CAST(vma);
1917 }
1918 ring->vma = vma;
1919
1920 return ring;
1921}
1922
1923void
1924intel_ring_free(struct intel_ring *ring)
1925{
1926 struct drm_i915_gem_object *obj = ring->vma->obj;
1927
1928 i915_vma_close(ring->vma);
1929 __i915_gem_object_release_unless_active(obj);
1930
1931 kfree(ring);
1932}
1933
1934static int intel_ring_context_pin(struct i915_gem_context *ctx,
1935 struct intel_engine_cs *engine)
1936{
1937 struct intel_context *ce = &ctx->engine[engine->id];
1938 int ret;
1939
1940 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1941
1942 if (ce->pin_count++)
1943 return 0;
1944
1945 if (ce->state) {
1946 struct i915_vma *vma;
1947
1948 vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
1949 if (IS_ERR(vma)) {
1950 ret = PTR_ERR(vma);
1951 goto error;
1952 }
1953 }
1954
1955 /* The kernel context is only used as a placeholder for flushing the
1956 * active context. It is never used for submitting user rendering and
1957 * as such never requires the golden render context, and so we can skip
1958 * emitting it when we switch to the kernel context. This is required
1959 * as during eviction we cannot allocate and pin the renderstate in
1960 * order to initialise the context.
1961 */
1962 if (ctx == ctx->i915->kernel_context)
1963 ce->initialised = true;
1964
1965 i915_gem_context_get(ctx);
1966 return 0;
1967
1968error:
1969 ce->pin_count = 0;
1970 return ret;
1971}
1972
1973static void intel_ring_context_unpin(struct i915_gem_context *ctx,
1974 struct intel_engine_cs *engine)
1975{
1976 struct intel_context *ce = &ctx->engine[engine->id];
1977
1978 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1979
1980 if (--ce->pin_count)
1981 return;
1982
1983 if (ce->state)
1984 i915_vma_unpin(ce->state);
1985
1986 i915_gem_context_put(ctx);
1987}
1988
1989static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1990{
1991 struct drm_i915_private *dev_priv = engine->i915;
1992 struct intel_ring *ring;
1993 int ret;
1994
1995 WARN_ON(engine->buffer);
1996
1997 intel_engine_setup_common(engine);
1998
1999 ret = intel_engine_init_common(engine);
2000 if (ret)
2001 goto error;
2002
2003 /* We may need to do things with the shrinker which
2004 * require us to immediately switch back to the default
2005 * context. This can cause a problem as pinning the
2006 * default context also requires GTT space which may not
2007 * be available. To avoid this we always pin the default
2008 * context.
2009 */
2010 ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2011 if (ret)
2012 goto error;
2013
2014 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
2015 if (IS_ERR(ring)) {
2016 ret = PTR_ERR(ring);
2017 goto error;
2018 }
2019
2020 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2021 WARN_ON(engine->id != RCS);
2022 ret = init_phys_status_page(engine);
2023 if (ret)
2024 goto error;
2025 } else {
2026 ret = init_status_page(engine);
2027 if (ret)
2028 goto error;
2029 }
2030
2031 ret = intel_ring_pin(ring);
2032 if (ret) {
2033 intel_ring_free(ring);
2034 goto error;
2035 }
2036 engine->buffer = ring;
2037
2038 return 0;
2039
2040error:
2041 intel_engine_cleanup(engine);
2042 return ret;
2043}
2044
2045void intel_engine_cleanup(struct intel_engine_cs *engine)
2046{
2047 struct drm_i915_private *dev_priv;
2048
2049 dev_priv = engine->i915;
2050
2051 if (engine->buffer) {
2052 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2053 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2054
2055 intel_ring_unpin(engine->buffer);
2056 intel_ring_free(engine->buffer);
2057 engine->buffer = NULL;
2058 }
2059
2060 if (engine->cleanup)
2061 engine->cleanup(engine);
2062
2063 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2064 WARN_ON(engine->id != RCS);
2065 cleanup_phys_status_page(engine);
2066 } else {
2067 cleanup_status_page(engine);
2068 }
2069
2070 intel_engine_cleanup_common(engine);
2071
2072 intel_ring_context_unpin(dev_priv->kernel_context, engine);
2073
2074 engine->i915 = NULL;
2075 dev_priv->engine[engine->id] = NULL;
2076 kfree(engine);
2077}
2078
2079void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
2080{
2081 struct intel_engine_cs *engine;
2082 enum intel_engine_id id;
2083
2084 for_each_engine(engine, dev_priv, id) {
2085 engine->buffer->head = engine->buffer->tail;
2086 engine->buffer->last_retired_head = -1;
2087 }
2088}
2089
2090int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2091{
2092 int ret;
2093
2094 /* Flush enough space to reduce the likelihood of waiting after
2095 * we start building the request - in which case we will just
2096 * have to repeat work.
2097 */
2098 request->reserved_space += LEGACY_REQUEST_SIZE;
2099
2100 request->ring = request->engine->buffer;
2101
2102 ret = intel_ring_begin(request, 0);
2103 if (ret)
2104 return ret;
2105
2106 request->reserved_space -= LEGACY_REQUEST_SIZE;
2107 return 0;
2108}
2109
2110static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2111{
2112 struct intel_ring *ring = req->ring;
2113 struct drm_i915_gem_request *target;
2114 long timeout;
2115
2116 lockdep_assert_held(&req->i915->drm.struct_mutex);
2117
2118 intel_ring_update_space(ring);
2119 if (ring->space >= bytes)
2120 return 0;
2121
2122 /*
2123 * Space is reserved in the ringbuffer for finalising the request,
2124 * as that cannot be allowed to fail. During request finalisation,
2125 * reserved_space is set to 0 to stop the overallocation and the
2126 * assumption is that then we never need to wait (which has the
2127 * risk of failing with EINTR).
2128 *
2129 * See also i915_gem_request_alloc() and i915_add_request().
2130 */
2131 GEM_BUG_ON(!req->reserved_space);
2132
2133 list_for_each_entry(target, &ring->request_list, ring_link) {
2134 unsigned space;
2135
2136 /* Would completion of this request free enough space? */
2137 space = __intel_ring_space(target->postfix, ring->tail,
2138 ring->size);
2139 if (space >= bytes)
2140 break;
2141 }
2142
2143 if (WARN_ON(&target->ring_link == &ring->request_list))
2144 return -ENOSPC;
2145
2146 timeout = i915_wait_request(target,
2147 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
2148 MAX_SCHEDULE_TIMEOUT);
2149 if (timeout < 0)
2150 return timeout;
2151
2152 i915_gem_request_retire_upto(target);
2153
2154 intel_ring_update_space(ring);
2155 GEM_BUG_ON(ring->space < bytes);
2156 return 0;
2157}
2158
2159int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2160{
2161 struct intel_ring *ring = req->ring;
2162 int remain_actual = ring->size - ring->tail;
2163 int remain_usable = ring->effective_size - ring->tail;
2164 int bytes = num_dwords * sizeof(u32);
2165 int total_bytes, wait_bytes;
2166 bool need_wrap = false;
2167
2168 total_bytes = bytes + req->reserved_space;
2169
2170 if (unlikely(bytes > remain_usable)) {
2171 /*
2172 * Not enough space for the basic request. So need to flush
2173 * out the remainder and then wait for base + reserved.
2174 */
2175 wait_bytes = remain_actual + total_bytes;
2176 need_wrap = true;
2177 } else if (unlikely(total_bytes > remain_usable)) {
2178 /*
2179 * The base request will fit but the reserved space
2180 * falls off the end. So we don't need an immediate wrap
2181 * and only need to effectively wait for the reserved
2182 * size space from the start of ringbuffer.
2183 */
2184 wait_bytes = remain_actual + req->reserved_space;
2185 } else {
2186 /* No wrapping required, just waiting. */
2187 wait_bytes = total_bytes;
2188 }
2189
2190 if (wait_bytes > ring->space) {
2191 int ret = wait_for_space(req, wait_bytes);
2192 if (unlikely(ret))
2193 return ret;
2194 }
2195
2196 if (unlikely(need_wrap)) {
2197 GEM_BUG_ON(remain_actual > ring->space);
2198 GEM_BUG_ON(ring->tail + remain_actual > ring->size);
2199
2200 /* Fill the tail with MI_NOOP */
2201 memset(ring->vaddr + ring->tail, 0, remain_actual);
2202 ring->tail = 0;
2203 ring->space -= remain_actual;
2204 }
2205
2206 ring->space -= bytes;
2207 GEM_BUG_ON(ring->space < 0);
2208 return 0;
2209}
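
/*
 * Worked example (illustrative figures, assuming a 4096 byte ring on i830
 * with effective_size = 4096 - 128 = 3968, tail = 3900, an 8 dword request
 * (bytes = 32) and an assumed reserved_space of 160 bytes):
 *
 *	remain_actual = 4096 - 3900 = 196
 *	remain_usable = 3968 - 3900 = 68
 *	bytes         = 32  (fits, so no immediate wrap is forced)
 *	total_bytes   = 192 (the reserved space falls off the end)
 *
 * which takes the middle branch: wait_bytes = 196 + 160 = 356 and the tail
 * stays put. Only an emission whose own bytes exceed remain_usable triggers
 * the MI_NOOP fill and the tail reset to 0.
 */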
2210
2211/* Align the ring tail to a cacheline boundary */
2212int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2213{
2214 struct intel_ring *ring = req->ring;
2215 int num_dwords =
2216 (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
2217 int ret;
2218
2219 if (num_dwords == 0)
2220 return 0;
2221
2222 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
2223 ret = intel_ring_begin(req, num_dwords);
2224 if (ret)
2225 return ret;
2226
2227 while (num_dwords--)
2228 intel_ring_emit(ring, MI_NOOP);
2229
2230 intel_ring_advance(ring);
2231
2232 return 0;
2233}
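
/*
 * Worked example (illustrative): with 64 byte cachelines, a tail of 0x128
 * gives (0x128 & 63) / 4 = 10 dwords already emitted past the last
 * cacheline boundary, so 16 - 10 = 6 MI_NOOPs are written and the tail
 * advances to 0x140, the next 64 byte boundary.
 */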
2234
2235static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
2236{
2237 struct drm_i915_private *dev_priv = request->i915;
2238
2239 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2240
2241 /* Every tail move must follow the sequence below */
2242
2243 /* Disable notification that the ring is IDLE. The GT
2244 * will then assume that it is busy and bring it out of rc6.
2245 */
2246 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2247 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2248
2249 /* Clear the context id. Here be magic! */
2250 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2251
2252 /* Wait for the ring not to be idle, i.e. for it to wake up. */
2253 if (intel_wait_for_register_fw(dev_priv,
2254 GEN6_BSD_SLEEP_PSMI_CONTROL,
2255 GEN6_BSD_SLEEP_INDICATOR,
2256 0,
2257 50))
2258 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2259
2260 /* Now that the ring is fully powered up, update the tail */
2261 i9xx_submit_request(request);
2262
2263 /* Let the ring send IDLE messages to the GT again,
2264 * and so let it sleep to conserve power when idle.
2265 */
2266 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2267 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2268
2269 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2270}
2271
2272static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2273{
2274 struct intel_ring *ring = req->ring;
2275 uint32_t cmd;
2276 int ret;
2277
2278 ret = intel_ring_begin(req, 4);
2279 if (ret)
2280 return ret;
2281
2282 cmd = MI_FLUSH_DW;
2283 if (INTEL_GEN(req->i915) >= 8)
2284 cmd += 1;
2285
2286 /* We always require a command barrier so that subsequent
2287 * commands, such as breadcrumb interrupts, are strictly ordered
2288 * wrt the contents of the write cache being flushed to memory
2289 * (and thus being coherent from the CPU).
2290 */
2291 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2292
2293 /*
2294 * Bspec vol 1c.5 - video engine command streamer:
2295 * "If ENABLED, all TLBs will be invalidated once the flush
2296 * operation is complete. This bit is only valid when the
2297 * Post-Sync Operation field is a value of 1h or 3h."
2298 */
2299 if (mode & EMIT_INVALIDATE)
2300 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2301
2302 intel_ring_emit(ring, cmd);
2303 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2304 if (INTEL_GEN(req->i915) >= 8) {
2305 intel_ring_emit(ring, 0); /* upper addr */
2306 intel_ring_emit(ring, 0); /* value */
2307 } else {
2308 intel_ring_emit(ring, 0);
2309 intel_ring_emit(ring, MI_NOOP);
2310 }
2311 intel_ring_advance(ring);
2312 return 0;
2313}
2314
2315static int
2316gen8_emit_bb_start(struct drm_i915_gem_request *req,
2317 u64 offset, u32 len,
2318 unsigned int dispatch_flags)
2319{
2320 struct intel_ring *ring = req->ring;
2321 bool ppgtt = USES_PPGTT(req->i915) &&
2322 !(dispatch_flags & I915_DISPATCH_SECURE);
2323 int ret;
2324
2325 ret = intel_ring_begin(req, 4);
2326 if (ret)
2327 return ret;
2328
2329 /* FIXME(BDW): Address space and security selectors. */
2330 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
2331 (dispatch_flags & I915_DISPATCH_RS ?
2332 MI_BATCH_RESOURCE_STREAMER : 0));
2333 intel_ring_emit(ring, lower_32_bits(offset));
2334 intel_ring_emit(ring, upper_32_bits(offset));
2335 intel_ring_emit(ring, MI_NOOP);
2336 intel_ring_advance(ring);
2337
2338 return 0;
2339}
2340
2341static int
2342hsw_emit_bb_start(struct drm_i915_gem_request *req,
2343 u64 offset, u32 len,
2344 unsigned int dispatch_flags)
2345{
2346 struct intel_ring *ring = req->ring;
2347 int ret;
2348
2349 ret = intel_ring_begin(req, 2);
2350 if (ret)
2351 return ret;
2352
2353 intel_ring_emit(ring,
2354 MI_BATCH_BUFFER_START |
2355 (dispatch_flags & I915_DISPATCH_SECURE ?
2356 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
2357 (dispatch_flags & I915_DISPATCH_RS ?
2358 MI_BATCH_RESOURCE_STREAMER : 0));
2359 /* bit0-7 is the length on GEN6+ */
2360 intel_ring_emit(ring, offset);
2361 intel_ring_advance(ring);
2362
2363 return 0;
2364}
2365
2366static int
2367gen6_emit_bb_start(struct drm_i915_gem_request *req,
2368 u64 offset, u32 len,
2369 unsigned int dispatch_flags)
2370{
2371 struct intel_ring *ring = req->ring;
2372 int ret;
2373
2374 ret = intel_ring_begin(req, 2);
2375 if (ret)
2376 return ret;
2377
2378 intel_ring_emit(ring,
2379 MI_BATCH_BUFFER_START |
2380 (dispatch_flags & I915_DISPATCH_SECURE ?
2381 0 : MI_BATCH_NON_SECURE_I965));
2382 /* bit0-7 is the length on GEN6+ */
2383 intel_ring_emit(ring, offset);
2384 intel_ring_advance(ring);
2385
2386 return 0;
2387}
2388
2389/* Blitter support (SandyBridge+) */
2390
2391static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2392{
2393 struct intel_ring *ring = req->ring;
2394 uint32_t cmd;
2395 int ret;
2396
2397 ret = intel_ring_begin(req, 4);
2398 if (ret)
2399 return ret;
2400
2401 cmd = MI_FLUSH_DW;
2402 if (INTEL_GEN(req->i915) >= 8)
2403 cmd += 1;
2404
2405 /* We always require a command barrier so that subsequent
2406 * commands, such as breadcrumb interrupts, are strictly ordered
2407 * wrt the contents of the write cache being flushed to memory
2408 * (and thus being coherent from the CPU).
2409 */
2410 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2411
2412 /*
2413 * Bspec vol 1c.3 - blitter engine command streamer:
2414 * "If ENABLED, all TLBs will be invalidated once the flush
2415 * operation is complete. This bit is only valid when the
2416 * Post-Sync Operation field is a value of 1h or 3h."
2417 */
2418 if (mode & EMIT_INVALIDATE)
2419 cmd |= MI_INVALIDATE_TLB;
2420 intel_ring_emit(ring, cmd);
2421 intel_ring_emit(ring,
2422 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2423 if (INTEL_GEN(req->i915) >= 8) {
2424 intel_ring_emit(ring, 0); /* upper addr */
2425 intel_ring_emit(ring, 0); /* value */
2426 } else {
2427 intel_ring_emit(ring, 0);
2428 intel_ring_emit(ring, MI_NOOP);
2429 }
2430 intel_ring_advance(ring);
2431
2432 return 0;
2433}
2434
2435static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2436 struct intel_engine_cs *engine)
2437{
2438 struct drm_i915_gem_object *obj;
2439 int ret, i;
2440
2441 if (!i915.semaphores)
2442 return;
2443
2444 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
2445 struct i915_vma *vma;
2446
2447 obj = i915_gem_object_create(&dev_priv->drm, 4096);
2448 if (IS_ERR(obj))
2449 goto err;
2450
2451 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
2452 if (IS_ERR(vma))
2453 goto err_obj;
2454
2455 ret = i915_gem_object_set_to_gtt_domain(obj, false);
2456 if (ret)
2457 goto err_obj;
2458
2459 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
2460 if (ret)
2461 goto err_obj;
2462
2463 dev_priv->semaphore = vma;
2464 }
2465
2466 if (INTEL_GEN(dev_priv) >= 8) {
2467 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
2468
2469 engine->semaphore.sync_to = gen8_ring_sync_to;
2470 engine->semaphore.signal = gen8_xcs_signal;
2471
2472 for (i = 0; i < I915_NUM_ENGINES; i++) {
2473 u32 ring_offset;
2474
2475 if (i != engine->id)
2476 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2477 else
2478 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2479
2480 engine->semaphore.signal_ggtt[i] = ring_offset;
2481 }
2482 } else if (INTEL_GEN(dev_priv) >= 6) {
2483 engine->semaphore.sync_to = gen6_ring_sync_to;
2484 engine->semaphore.signal = gen6_signal;
2485
2486 /*
2487 * The current semaphore is only applied on pre-gen8
2488 * platform. And there is no VCS2 ring on the pre-gen8
2489 * platform. So the semaphore between RCS and VCS2 is
2490 * initialized as INVALID. Gen8 will initialize the
2491 * sema between VCS2 and RCS later.
2492 */
2493 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
2494 static const struct {
2495 u32 wait_mbox;
2496 i915_reg_t mbox_reg;
2497 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2498 [RCS_HW] = {
2499 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2500 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2501 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2502 },
2503 [VCS_HW] = {
2504 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2505 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2506 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2507 },
2508 [BCS_HW] = {
2509 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2510 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2511 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2512 },
2513 [VECS_HW] = {
2514 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2515 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2516 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2517 },
2518 };
2519 u32 wait_mbox;
2520 i915_reg_t mbox_reg;
2521
2522 if (i == engine->hw_id) {
2523 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2524 mbox_reg = GEN6_NOSYNC;
2525 } else {
2526 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2527 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2528 }
2529
2530 engine->semaphore.mbox.wait[i] = wait_mbox;
2531 engine->semaphore.mbox.signal[i] = mbox_reg;
2532 }
2533 }
2534
2535 return;
2536
2537err_obj:
2538 i915_gem_object_put(obj);
2539err:
2540 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2541 i915.semaphores = 0;
2542}
2543
2544static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2545 struct intel_engine_cs *engine)
2546{
2547 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2548
2549 if (INTEL_GEN(dev_priv) >= 8) {
2550 engine->irq_enable = gen8_irq_enable;
2551 engine->irq_disable = gen8_irq_disable;
2552 engine->irq_seqno_barrier = gen6_seqno_barrier;
2553 } else if (INTEL_GEN(dev_priv) >= 6) {
2554 engine->irq_enable = gen6_irq_enable;
2555 engine->irq_disable = gen6_irq_disable;
2556 engine->irq_seqno_barrier = gen6_seqno_barrier;
2557 } else if (INTEL_GEN(dev_priv) >= 5) {
2558 engine->irq_enable = gen5_irq_enable;
2559 engine->irq_disable = gen5_irq_disable;
2560 engine->irq_seqno_barrier = gen5_seqno_barrier;
2561 } else if (INTEL_GEN(dev_priv) >= 3) {
2562 engine->irq_enable = i9xx_irq_enable;
2563 engine->irq_disable = i9xx_irq_disable;
2564 } else {
2565 engine->irq_enable = i8xx_irq_enable;
2566 engine->irq_disable = i8xx_irq_disable;
2567 }
2568}
2569
2570static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2571 struct intel_engine_cs *engine)
2572{
2573 intel_ring_init_irq(dev_priv, engine);
2574 intel_ring_init_semaphores(dev_priv, engine);
2575
2576 engine->init_hw = init_ring_common;
2577 engine->reset_hw = reset_ring_common;
2578
2579 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2580 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (i915.semaphores) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
		if (INTEL_GEN(dev_priv) >= 8) {
			engine->emit_breadcrumb_sz += num_rings * 6;
		} else {
			engine->emit_breadcrumb_sz += num_rings * 3;
			if (num_rings & 1)
				engine->emit_breadcrumb_sz++;
		}
	}
	engine->submit_request = i9xx_submit_request;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915.semaphores) {
			int num_rings;

			engine->semaphore.signal = gen8_rcs_signal;

			num_rings =
				hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
			/* the PIPE_CONTROL-based RCS signal takes 8 dwords per target */
			engine->emit_breadcrumb_sz += num_rings * 8;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

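	/*
	 * Haswell batches may also enable the resource streamer;
	 * hsw_emit_bb_start is assumed to handle that dispatch flag in
	 * addition to the usual secure/non-secure selection.
	 */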
	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

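	/*
	 * Scratch space (sizes taken from the constants below): gen6+
	 * uses a single page as the write target for PIPE_CONTROL
	 * flushes, while i830/845 with the broken CS TLB presumably
	 * needs the larger I830_WA_SIZE area that batches are copied
	 * into before execution.
	 */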
	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, 4096);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->submit_request = gen6_bsd_submit_request;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

/**
 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_bsd_ring_flush;

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

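	/*
	 * Pre-gen8 VEBOX interrupts are delivered through the PM
	 * interrupt registers rather than the GT ones (an assumption
	 * based on the PM_* mask below), so the engine gets dedicated
	 * enable/disable hooks.
	 */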
	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}