/* nouveau_display.c — Linux kernel v6.13.7 */
  1/*
  2 * Copyright (C) 2008 Maarten Maathuis.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26
 27#include <acpi/video.h>
 28
 29#include <drm/drm_atomic.h>
 30#include <drm/drm_atomic_helper.h>
 31#include <drm/drm_client_event.h>
 32#include <drm/drm_crtc_helper.h>
 33#include <drm/drm_fourcc.h>
 34#include <drm/drm_gem_framebuffer_helper.h>
 35#include <drm/drm_probe_helper.h>
 36#include <drm/drm_vblank.h>
 37
 38#include "nouveau_crtc.h"
 39#include "nouveau_gem.h"
 40#include "nouveau_connector.h"
 
 
 
 41#include "nv50_display.h"
 42
 43#include <nvif/class.h>
 44#include <nvif/if0011.h>
 45#include <nvif/if0013.h>
 46#include <dispnv50/crc.h>
 47#include <dispnv50/tile.h>
 48
 49int
 50nouveau_display_vblank_enable(struct drm_crtc *crtc)
 51{
 52	struct nouveau_crtc *nv_crtc;
 53
 54	nv_crtc = nouveau_crtc(crtc);
 55	nvif_event_allow(&nv_crtc->vblank);
 56
 57	return 0;
 58}
 59
 60void
 61nouveau_display_vblank_disable(struct drm_crtc *crtc)
 62{
 63	struct nouveau_crtc *nv_crtc;
 64
 65	nv_crtc = nouveau_crtc(crtc);
 66	nvif_event_block(&nv_crtc->vblank);
 67}
 68
 69static inline int
 70calc(int blanks, int blanke, int total, int line)
 71{
 72	if (blanke >= blanks) {
 73		if (line >= blanks)
 74			line -= total;
 75	} else {
 76		if (line >= blanks)
 77			line -= total;
 78		line -= blanke + 1;
 79	}
 80	return line;
 81}
 82
 83static bool
 84nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
 85				ktime_t *stime, ktime_t *etime)
 86{
 87	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
 88	struct nvif_head *head = &nouveau_crtc(crtc)->head;
 89	struct nvif_head_scanoutpos_v0 args;
 90	int retry = 20;
 91	bool ret = false;
 92
 93	args.version = 0;
 94
 95	do {
 96		ret = nvif_mthd(&head->object, NVIF_HEAD_V0_SCANOUTPOS, &args, sizeof(args));
 97		if (ret != 0)
 98			return false;
 99
100		if (args.vline) {
101			ret = true;
102			break;
103		}
104
105		if (retry) ndelay(vblank->linedur_ns);
106	} while (retry--);
107
108	*hpos = args.hline;
109	*vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
110	if (stime) *stime = ns_to_ktime(args.time[0]);
111	if (etime) *etime = ns_to_ktime(args.time[1]);
112
113	return ret;
114}
115
116bool
117nouveau_display_scanoutpos(struct drm_crtc *crtc,
118			   bool in_vblank_irq, int *vpos, int *hpos,
119			   ktime_t *stime, ktime_t *etime,
120			   const struct drm_display_mode *mode)
121{
122	return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
123					       stime, etime);
124}
125
/* Framebuffer vfuncs: GEM-backed FBs use the generic GEM-FB helpers for
 * destruction and userspace handle creation. */
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
130
/*
 * Translate a DRM format modifier into the hardware tile_mode/kind pair.
 *
 * DRM_FORMAT_MOD_LINEAR zeroes both outputs (tile_mode is unused for
 * linear).  Otherwise the low nibble is the tile mode and bits 12-19 the
 * memory kind; a legacy modifier with a zero kind field inherits the kind
 * bits from the first modifier this device advertises.
 */
static void
nouveau_decode_mod(struct nouveau_drm *drm,
		   uint64_t modifier,
		   uint32_t *tile_mode,
		   uint8_t *kind)
{
	struct nouveau_display *disp = nouveau_display(drm->dev);
	BUG_ON(!tile_mode || !kind);

	if (modifier == DRM_FORMAT_MOD_LINEAR) {
		/* tile_mode will not be used in this case */
		*tile_mode = 0;
		*kind = 0;
	} else {
		/*
		 * Extract the block height and kind from the corresponding
		 * modifier fields.  See drm_fourcc.h for details.
		 */

		if ((modifier & (0xffull << 12)) == 0ull) {
			/* Legacy modifier.  Translate to this dev's 'kind.' */
			modifier |= disp->format_modifiers[0] & (0xffull << 12);
		}

		*tile_mode = (uint32_t)(modifier & 0xF);
		*kind = (uint8_t)((modifier >> 12) & 0xFF);

		/* Chipsets 0xc0+ keep the tile mode in the high nibble. */
		if (drm->client.device.info.chipset >= 0xc0)
			*tile_mode <<= 4;
	}
}
162
163void
164nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
165			       uint32_t *tile_mode,
166			       uint8_t *kind)
167{
168	if (fb->flags & DRM_MODE_FB_MODIFIERS) {
169		struct nouveau_drm *drm = nouveau_drm(fb->dev);
170
171		nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
172	} else {
173		const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
174
175		*tile_mode = nvbo->mode;
176		*kind = nvbo->kind;
177	}
178}
179
/* Legacy 16Bx2 block modifiers (block heights 0-5) accepted in addition to
 * the device's advertised list; terminated by DRM_FORMAT_MOD_INVALID. */
static const u64 legacy_modifiers[] = {
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	DRM_FORMAT_MOD_INVALID
};
189
190static int
191nouveau_validate_decode_mod(struct nouveau_drm *drm,
192			    uint64_t modifier,
193			    uint32_t *tile_mode,
194			    uint8_t *kind)
195{
196	struct nouveau_display *disp = nouveau_display(drm->dev);
197	int mod;
198
199	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
200		return -EINVAL;
201	}
202
203	BUG_ON(!disp->format_modifiers);
204
205	for (mod = 0;
206	     (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
207	     (disp->format_modifiers[mod] != modifier);
208	     mod++);
209
210	if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
211		for (mod = 0;
212		     (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
213		     (legacy_modifiers[mod] != modifier);
214		     mod++);
215		if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
216			return -EINVAL;
217	}
218
219	nouveau_decode_mod(drm, modifier, tile_mode, kind);
220
221	return 0;
222}
223
/*
 * Verify that a block-linear plane fits inside its backing buffer object.
 *
 * @offset/@stride/@h describe the plane; @tile_mode selects the block
 * height.  The plane size is computed in whole blocks (width x height x
 * GOBs per block x GOB size) and checked against the BO size.
 *
 * Returns 0 if it fits, -EINVAL for an invalid tile_mode, -ERANGE if the
 * computed size plus @offset exceeds the BO.
 */
static int
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
		      uint32_t offset, uint32_t stride, uint32_t h,
		      uint32_t tile_mode)
{
	uint32_t gob_size, bw, bh, gobs_in_block;
	uint64_t bl_size;

	/* Only block-linear capable (Tesla+) hardware may reach here. */
	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

	if (nouveau_check_tile_mode(tile_mode, drm->client.device.info.chipset))
		return -EINVAL;

	gobs_in_block = nouveau_get_gobs_in_block(tile_mode, drm->client.device.info.chipset);
	bw = nouveau_get_width_in_blocks(stride);
	bh = nouveau_get_height_in_blocks(h, gobs_in_block, drm->client.device.info.family);
	gob_size = nouveau_get_gob_size(drm->client.device.info.family);

	bl_size = bw * bh * gobs_in_block * gob_size;

	DRM_DEBUG_KMS("offset=%u stride=%u h=%u gobs_in_block=%u bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
		      offset, stride, h, gobs_in_block, bw, bh, gob_size,
		      bl_size, nvbo->bo.base.size);

	if (bl_size + offset > nvbo->bo.base.size)
		return -ERANGE;

	return 0;
}
253
/*
 * Validate a userspace framebuffer description and wrap the backing GEM
 * object in a newly allocated drm_framebuffer.
 *
 * Enforces pre-Tesla YUV pitch restrictions, validates/decodes any format
 * modifier, and checks that every plane fits inside the BO (block-linear
 * size rules when a tiling kind is active, pitch*height otherwise).
 *
 * On success *pfb points at the new framebuffer and 0 is returned;
 * fb->obj[0] is set to @gem without taking an additional reference, so
 * the caller's reference is adopted by the framebuffer.
 */
int
nouveau_framebuffer_new(struct drm_device *dev,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *gem,
			struct drm_framebuffer **pfb)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct drm_framebuffer *fb;
	const struct drm_format_info *info;
	unsigned int height, i;
	uint32_t tile_mode;
	uint8_t kind;
	int ret;

	/* YUV overlays have special requirements pre-NV50 */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
	    (mode_cmd->pixel_format == DRM_FORMAT_YUYV ||
	     mode_cmd->pixel_format == DRM_FORMAT_UYVY ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV12 ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV21) &&
	    (mode_cmd->pitches[0] & 0x3f || /* align 64 */
	     mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
	     (mode_cmd->pitches[1] && /* pitches for planes must match */
	      mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
		DRM_DEBUG_KMS("Unsuitable framebuffer: format: %p4cc; pitches: 0x%x\n 0x%x\n",
			      &mode_cmd->pixel_format,
			      mode_cmd->pitches[0], mode_cmd->pitches[1]);
		return -EINVAL;
	}

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
						&tile_mode, &kind)) {
			DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
				      mode_cmd->modifier[0]);
			return -EINVAL;
		}
	} else {
		/* No modifier supplied: take the layout from the BO itself. */
		tile_mode = nvbo->mode;
		kind = nvbo->kind;
	}

	info = drm_get_format_info(dev, mode_cmd);

	/* Every plane must fit inside the backing object. */
	for (i = 0; i < info->num_planes; i++) {
		height = drm_format_info_plane_height(info,
						      mode_cmd->height,
						      i);

		if (kind) {
			/* Tiled: account for whole-block rounding. */
			ret = nouveau_check_bl_size(drm, nvbo,
						    mode_cmd->offsets[i],
						    mode_cmd->pitches[i],
						    height, tile_mode);
			if (ret)
				return ret;
		} else {
			/* Linear: simple pitch * height bound. */
			uint32_t size = mode_cmd->pitches[i] * height;

			if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
				return -ERANGE;
		}
	}

	if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = gem;

	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
	if (ret)
		kfree(fb);
	return ret;
}
331
332struct drm_framebuffer *
333nouveau_user_framebuffer_create(struct drm_device *dev,
334				struct drm_file *file_priv,
335				const struct drm_mode_fb_cmd2 *mode_cmd)
336{
337	struct drm_framebuffer *fb;
338	struct drm_gem_object *gem;
339	int ret;
340
341	gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
342	if (!gem)
343		return ERR_PTR(-ENOENT);
344
345	ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
346	if (ret == 0)
347		return fb;
348
349	drm_gem_object_put(gem);
350	return ERR_PTR(ret);
 
 
 
 
 
351}
352
/* Mode-config vfuncs; only fb_create is needed here. */
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
	.fb_create = nouveau_user_framebuffer_create,
};
356
357
/* One entry of a generation-filtered enum property table: gen_mask is a
 * bitmask of display generations (see nouveau_display_create_properties)
 * on which this value is exposed. */
struct nouveau_drm_prop_enum_list {
	u8 gen_mask;
	int type;
	char *name;
};

/* Underscan property values (all generations that have the property). */
static struct nouveau_drm_prop_enum_list underscan[] = {
	{ 6, UNDERSCAN_AUTO, "auto" },
	{ 6, UNDERSCAN_OFF, "off" },
	{ 6, UNDERSCAN_ON, "on" },
	{}
};

/* Dithering mode values; availability varies per generation mask. */
static struct nouveau_drm_prop_enum_list dither_mode[] = {
	{ 7, DITHERING_MODE_AUTO, "auto" },
	{ 7, DITHERING_MODE_OFF, "off" },
	{ 1, DITHERING_MODE_ON, "on" },
	{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
	{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
	{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
	{}
};

/* Dithering depth values. */
static struct nouveau_drm_prop_enum_list dither_depth[] = {
	{ 6, DITHERING_DEPTH_AUTO, "auto" },
	{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
	{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
	{}
};
387
/*
 * PROP_ENUM(p, gen, n, list) - create a DRM enum property named @n on
 * @dev (taken from the enclosing scope) and store it in @p, adding only
 * those entries of @list whose gen_mask has bit @gen set.  @p is left
 * untouched when no entry matches.
 */
#define PROP_ENUM(p,gen,n,list) do {                                           \
	struct nouveau_drm_prop_enum_list *l = (list);                         \
	int c = 0;                                                             \
	while (l->gen_mask) {                                                  \
		if (l->gen_mask & (1 << (gen)))                                \
			c++;                                                   \
		l++;                                                           \
	}                                                                      \
	if (c) {                                                               \
		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
		l = (list);                                                    \
		while (p && l->gen_mask) {                                     \
			if (l->gen_mask & (1 << (gen))) {                      \
				drm_property_add_enum(p, l->type, l->name);    \
			}                                                      \
			l++;                                                   \
		}                                                              \
	}                                                                      \
} while(0)
407
/*
 * Force a full hotplug reprobe after resume: mark every connector as
 * pending and kick the deferred HPD worker.  No-op on headless devices.
 */
void
nouveau_display_hpd_resume(struct nouveau_drm *drm)
{
	if (drm->headless)
		return;

	spin_lock_irq(&drm->hpd_lock);
	drm->hpd_pending = ~0;	/* all connectors */
	spin_unlock_irq(&drm->hpd_lock);

	schedule_work(&drm->hpd_work);
}
420
/*
 * Deferred hotplug handler: wakes the GPU, reprobes every non-MST
 * connector flagged in drm->hpd_pending, and emits hotplug uevents for
 * connectors whose state actually changed.  DP IRQs that turn out to be
 * routine link maintenance (nouveau_dp_link_check) are filtered without
 * a reprobe.
 */
static void
nouveau_display_hpd_work(struct work_struct *work)
{
	struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
	struct drm_device *dev = drm->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u32 pending;
	int changed = 0;
	struct drm_connector *first_changed_connector = NULL;

	pm_runtime_get_sync(dev->dev);

	/* Snapshot and clear the pending mask under the lock. */
	spin_lock_irq(&drm->hpd_lock);
	pending = drm->hpd_pending;
	drm->hpd_pending = 0;
	spin_unlock_irq(&drm->hpd_lock);

	/* Nothing to do, exit early without updating the last busy counter */
	if (!pending)
		goto noop;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);

	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *nv_connector = nouveau_connector(connector);
		enum drm_connector_status old_status = connector->status;
		u64 bits, old_epoch_counter = connector->epoch_counter;

		if (!(pending & drm_connector_mask(connector)))
			continue;

		/* Consume this connector's event bits under the lock. */
		spin_lock_irq(&drm->hpd_lock);
		bits = nv_connector->hpd_pending;
		nv_connector->hpd_pending = 0;
		spin_unlock_irq(&drm->hpd_lock);

		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] plug:%d unplug:%d irq:%d\n",
			    connector->base.id, connector->name,
			    !!(bits & NVIF_CONN_EVENT_V0_PLUG),
			    !!(bits & NVIF_CONN_EVENT_V0_UNPLUG),
			    !!(bits & NVIF_CONN_EVENT_V0_IRQ));

		if (bits & NVIF_CONN_EVENT_V0_IRQ) {
			/* Pure link-maintenance IRQ: nothing to reprobe. */
			if (nouveau_dp_link_check(nv_connector))
				continue;
		}

		connector->status = drm_helper_probe_detect(connector, NULL, false);
		/* Epoch counter unchanged means detect saw no real change. */
		if (old_epoch_counter == connector->epoch_counter)
			continue;

		changed++;
		if (!first_changed_connector) {
			drm_connector_get(connector);
			first_changed_connector = connector;
		}

		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.id, connector->name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->status),
			    old_epoch_counter, connector->epoch_counter);
	}

	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	/* Single change: targeted uevent; multiple: global hotplug event. */
	if (changed == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed > 0)
		drm_kms_helper_hotplug_event(dev);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	pm_runtime_mark_last_busy(drm->dev->dev);
noop:
	pm_runtime_put_autosuspend(dev->dev);
}
502
503#ifdef CONFIG_ACPI
504
/*
 * ACPI bus notifier: on an ACPI video PROBE event, ensure the GPU is (or
 * is being) woken so connector hotplug gets reprobed, then swallow the
 * event (NOTIFY_BAD) so acpi-video does not synthesize a keypress for it.
 * All other events pass through as NOTIFY_DONE.
 */
static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
	struct acpi_bus_event *info = data;
	int ret;

	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
			ret = pm_runtime_get(drm->dev->dev);
			if (ret == 1 || ret == -EACCES) {
				/* If the GPU is already awake, or in a state
				 * where we can't wake it up, it can handle
				 * its own hotplug events.
				 */
				pm_runtime_put_autosuspend(drm->dev->dev);
			} else if (ret == 0 || ret == -EINPROGRESS) {
				/* We've started resuming the GPU already, so
				 * it will handle scheduling a full reprobe
				 * itself
				 */
				NV_DEBUG(drm, "ACPI requested connector reprobe\n");
				pm_runtime_put_noidle(drm->dev->dev);
			} else {
				NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
					ret);
			}

			/* acpi-video should not generate keypresses for this */
			return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}
541#endif
542
/*
 * Bring up the display: re-arm per-connector hotplug/IRQ events, run the
 * backend init, then enable connector polling.  @resume/@runtime are
 * forwarded to the backend.  Returns 0 or the backend's error code.
 */
int
nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int ret;

	/*
	 * Enable hotplug interrupts (done as early as possible, since we need
	 * them for MST)
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_event_allow(&conn->hpd);
		nvif_event_allow(&conn->irq);
	}
	drm_connector_list_iter_end(&conn_iter);

	ret = disp->init(dev, resume, runtime);
	if (ret)
		return ret;

	/* enable connector detection and polling for connectors without HPD
	 * support
	 */
	drm_kms_helper_poll_enable(dev);

	return ret;
}
574
/*
 * Tear down the display: on a real shutdown (!suspend) disable all CRTCs
 * first, then block hotplug events, flush the HPD worker (unless runtime
 * PM or headless), stop polling, and run the backend fini.
 */
void
nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!suspend) {
		if (drm_drv_uses_atomic_modeset(dev))
			drm_atomic_helper_shutdown(dev);
		else
			drm_helper_force_disable_all(dev);
	}

	/* disable hotplug interrupts */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_event_block(&conn->irq);
		nvif_event_block(&conn->hpd);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!runtime && !drm->headless)
		cancel_work_sync(&drm->hpd_work);

	drm_kms_helper_poll_disable(dev);
	disp->fini(dev, runtime, suspend);
}
605
606static void
607nouveau_display_create_properties(struct drm_device *dev)
608{
609	struct nouveau_display *disp = nouveau_display(dev);
610	int gen;
 
 
 
 
 
611
612	if (disp->disp.object.oclass < NV50_DISP)
613		gen = 0;
614	else
615	if (disp->disp.object.oclass < GF110_DISP)
616		gen = 1;
617	else
618		gen = 2;
619
620	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
621	PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
622	PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
623
624	disp->underscan_hborder_property =
625		drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
626
627	disp->underscan_vborder_property =
628		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
629
630	if (gen < 1)
631		return;
632
633	/* -90..+90 */
634	disp->vibrant_hue_property =
635		drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
636
637	/* -100..+100 */
638	disp->color_vibrance_property =
639		drm_property_create_range(dev, 0, "color vibrance", 0, 200);
640}
641
/*
 * One-time display construction: sets up DRM mode-config limits (max
 * resolution scales with chip family), creates the NVIF display object,
 * and hands off to the NV04 or NV50 backend when display hardware and
 * outputs are present.  A missing display engine (-ENODEV) is not fatal;
 * the device is marked headless instead.  Returns 0 or a negative errno.
 */
int
nouveau_display_create(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_display *disp;
	int ret;

	disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	drm_mode_config_init(dev);
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dvi_i_properties(dev);

	dev->mode_config.funcs = &nouveau_mode_config_funcs;

	/* Maximum mode size grows with hardware generation. */
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	} else {
		dev->mode_config.max_width = 16384;
		dev->mode_config.max_height = 16384;
	}

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	/* Async flips only on chipset 0x11 and later. */
	if (drm->client.device.info.chipset < 0x11)
		dev->mode_config.async_page_flip = false;
	else
		dev->mode_config.async_page_flip = true;

	drm_kms_helper_poll_init(dev);
	drm_kms_helper_poll_disable(dev);

	if (nouveau_modeset != 2) {
		ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0, &disp->disp);
		/* no display hw */
		if (ret == -ENODEV) {
			ret = 0;
			drm->headless = true;
			goto disp_create_err;
		}

		if (!ret && (disp->disp.outp_mask || drm->vbios.dcb.entries)) {
			nouveau_display_create_properties(dev);
			if (disp->disp.object.oclass < NV50_DISP) {
				dev->mode_config.fb_modifiers_not_supported = true;
				ret = nv04_display_create(dev);
			} else {
				ret = nv50_display_create(dev);
			}
		}
	} else {
		ret = 0;
	}

	if (ret)
		goto disp_create_err;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_crtc) {
		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
		if (ret)
			goto vblank_err;

		if (disp->disp.object.oclass >= NV50_DISP)
			nv50_crc_init(dev);
	}

	INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
	spin_lock_init(&drm->hpd_lock);
#ifdef CONFIG_ACPI
	drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
	register_acpi_notifier(&drm->acpi_nb);
#endif

	return 0;

vblank_err:
	disp->dtor(dev);
disp_create_err:
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	return ret;
}
740
/*
 * Undo nouveau_display_create() in reverse order: unregister the ACPI
 * notifier, stop polling, clean up mode config, run the backend dtor (if
 * one was installed), destroy the NVIF display object and free disp.
 */
void
nouveau_display_destroy(struct drm_device *dev)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);

#ifdef CONFIG_ACPI
	unregister_acpi_notifier(&drm->acpi_nb);
#endif

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);

	if (disp->dtor)
		disp->dtor(dev);

	nvif_disp_dtor(&disp->disp);

	drm->display = NULL;
	kfree(disp);
}
762
/*
 * Suspend the display.  For a full (non-runtime) suspend on atomic
 * drivers the current atomic state is saved in disp->suspend for
 * nouveau_display_resume() to restore.  Returns 0, or the error from
 * drm_atomic_helper_suspend() (in which case fini is not run).
 */
int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	drm_client_dev_suspend(dev, false);

	if (drm_drv_uses_atomic_modeset(dev)) {
		if (!runtime) {
			disp->suspend = drm_atomic_helper_suspend(dev);
			if (IS_ERR(disp->suspend)) {
				int ret = PTR_ERR(disp->suspend);
				disp->suspend = NULL;
				return ret;
			}
		}
	}

	nouveau_display_fini(dev, true, runtime);
	return 0;
}
784
/*
 * Resume the display: re-init the hardware, then (atomic drivers) restore
 * the state saved by nouveau_display_suspend(), and resume DRM clients.
 */
void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	nouveau_display_init(dev, true, runtime);

	if (drm_drv_uses_atomic_modeset(dev)) {
		if (disp->suspend) {
			drm_atomic_helper_resume(dev, disp->suspend);
			disp->suspend = NULL;
		}
	}

	drm_client_dev_resume(dev, false);
}
801
802int
803nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
804			    struct drm_mode_create_dumb *args)
805{
806	struct nouveau_cli *cli = nouveau_cli(file_priv);
807	struct nouveau_bo *bo;
808	uint32_t domain;
809	int ret;
810
811	args->pitch = roundup(args->width * (args->bpp / 8), 256);
812	args->size = args->pitch * args->height;
813	args->size = roundup(args->size, PAGE_SIZE);
814
815	/* Use VRAM if there is any ; otherwise fallback to system memory */
816	if (nouveau_drm(dev)->client.device.info.ram_size != 0)
817		domain = NOUVEAU_GEM_DOMAIN_VRAM;
818	else
819		domain = NOUVEAU_GEM_DOMAIN_GART;
820
821	ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
822	if (ret)
823		return ret;
824
825	ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
826	drm_gem_object_put(&bo->bo.base);
827	return ret;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
828}
/* An older copy of this file, from Linux v3.5.6, follows. */
  1/*
  2 * Copyright (C) 2008 Maarten Maathuis.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining
  6 * a copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sublicense, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * The above copyright notice and this permission notice (including the
 14 * next paragraph) shall be included in all copies or substantial
 15 * portions of the Software.
 16 *
 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 24 *
 25 */
 26
 27#include "drmP.h"
 28#include "drm_crtc_helper.h"
 29#include "nouveau_drv.h"
 30#include "nouveau_fb.h"
 31#include "nouveau_fbcon.h"
 32#include "nouveau_hw.h"
 
 
 
 
 
 33#include "nouveau_crtc.h"
 34#include "nouveau_dma.h"
 35#include "nouveau_connector.h"
 36#include "nouveau_software.h"
 37#include "nouveau_gpio.h"
 38#include "nouveau_fence.h"
 39#include "nv50_display.h"
 40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Framebuffer .destroy: drop the reference on the backing GEM object (if
 * any), unregister the FB from the core, and free the wrapper.
 */
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);

	if (fb->nvbo)
		drm_gem_object_unreference_unlocked(fb->nvbo->gem);

	drm_framebuffer_cleanup(drm_fb);
	kfree(fb);
}
 52
 
 
 
 
 
 
 
 
 
 
/*
 * Framebuffer .create_handle: hand userspace a GEM handle for the FB's
 * backing buffer object.
 */
static int
nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
				       struct drm_file *file_priv,
				       unsigned int *handle)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);

	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
}
 62
/* Framebuffer vfuncs for the nouveau_framebuffer wrapper (pre-GEM-FB-helper). */
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
	.destroy = nouveau_user_framebuffer_destroy,
	.create_handle = nouveau_user_framebuffer_create_handle,
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 67
/*
 * Initialize @nv_fb around @nvbo: register with the DRM core, fill the
 * mode fields, and on NV50+ precompute the EVO scanout parameters (DMA
 * object, surface format, pitch word) from the BO's tile layout and the
 * FB depth.  Returns 0, a drm_framebuffer_init() error, or -EINVAL for
 * an unsupported depth.
 */
int
nouveau_framebuffer_init(struct drm_device *dev,
			 struct nouveau_framebuffer *nv_fb,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = &nv_fb->base;
	int ret;

	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
	if (ret) {
		return ret;
	}

	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
	nv_fb->nvbo = nvbo;

	if (dev_priv->card_type >= NV_50) {
		/* Pick the EVO DMA object matching the BO's tiling layout. */
		u32 tile_flags = nouveau_bo_tile_layout(nvbo);
		if (tile_flags == 0x7a00 ||
		    tile_flags == 0xfe00)
			nv_fb->r_dma = NvEvoFB32;
		else
		if (tile_flags == 0x7000)
			nv_fb->r_dma = NvEvoFB16;
		else
			nv_fb->r_dma = NvEvoVRAM_LP;

		switch (fb->depth) {
		case  8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
		case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
		case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
		case 24:
		case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
		case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
		default:
			 NV_ERROR(dev, "unknown depth %d\n", fb->depth);
			 return -EINVAL;
		}

		if (dev_priv->chipset == 0x50)
			nv_fb->r_format |= (tile_flags << 8);

		if (!tile_flags) {
			/* Linear: pitch in bytes plus a card-type flag. */
			if (dev_priv->card_type < NV_D0)
				nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
			else
				nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
		} else {
			/* Tiled: pitch in units of 4 bytes plus tile mode. */
			u32 mode = nvbo->tile_mode;
			if (dev_priv->card_type >= NV_C0)
				mode >>= 4;
			nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
		}
	}

	return 0;
}
127
128static struct drm_framebuffer *
129nouveau_user_framebuffer_create(struct drm_device *dev,
130				struct drm_file *file_priv,
131				struct drm_mode_fb_cmd2 *mode_cmd)
132{
133	struct nouveau_framebuffer *nouveau_fb;
134	struct drm_gem_object *gem;
135	int ret;
136
137	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
138	if (!gem)
139		return ERR_PTR(-ENOENT);
140
141	nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
142	if (!nouveau_fb)
143		return ERR_PTR(-ENOMEM);
144
145	ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
146	if (ret) {
147		drm_gem_object_unreference(gem);
148		return ERR_PTR(ret);
149	}
150
151	return &nouveau_fb->base;
152}
153
/* Mode-config vfuncs: FB creation plus fbcon poll notification. */
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
158
159
/* One entry of a generation-filtered enum property table: gen_mask is a
 * bitmask of display generations on which this value is exposed. */
struct nouveau_drm_prop_enum_list {
	u8 gen_mask;
	int type;
	char *name;
};

/* Underscan property values. */
static struct nouveau_drm_prop_enum_list underscan[] = {
	{ 6, UNDERSCAN_AUTO, "auto" },
	{ 6, UNDERSCAN_OFF, "off" },
	{ 6, UNDERSCAN_ON, "on" },
	{}
};

/* Dithering mode values; availability varies per generation mask. */
static struct nouveau_drm_prop_enum_list dither_mode[] = {
	{ 7, DITHERING_MODE_AUTO, "auto" },
	{ 7, DITHERING_MODE_OFF, "off" },
	{ 1, DITHERING_MODE_ON, "on" },
	{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
	{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
	{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
	{}
};

/* Dithering depth values. */
static struct nouveau_drm_prop_enum_list dither_depth[] = {
	{ 6, DITHERING_DEPTH_AUTO, "auto" },
	{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
	{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
	{}
};
189
/*
 * PROP_ENUM(p, gen, n, list) - create a DRM enum property named @n on
 * @dev (from the enclosing scope) and store it in @p, adding only the
 * entries of @list whose gen_mask has bit @gen set; enum values are
 * indexed sequentially via @c.  @p is left untouched if nothing matches.
 */
#define PROP_ENUM(p,gen,n,list) do {                                           \
	struct nouveau_drm_prop_enum_list *l = (list);                         \
	int c = 0;                                                             \
	while (l->gen_mask) {                                                  \
		if (l->gen_mask & (1 << (gen)))                                \
			c++;                                                   \
		l++;                                                           \
	}                                                                      \
	if (c) {                                                               \
		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
		l = (list);                                                    \
		c = 0;                                                         \
		while (p && l->gen_mask) {                                     \
			if (l->gen_mask & (1 << (gen))) {                      \
				drm_property_add_enum(p, c, l->type, l->name); \
				c++;                                           \
			}                                                      \
			l++;                                                   \
		}                                                              \
	}                                                                      \
} while(0)
211
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212int
213nouveau_display_init(struct drm_device *dev)
214{
215	struct drm_nouveau_private *dev_priv = dev->dev_private;
216	struct nouveau_display_engine *disp = &dev_priv->engine.display;
217	struct drm_connector *connector;
 
218	int ret;
219
220	ret = disp->init(dev);
 
 
 
 
 
 
 
 
 
 
 
 
221	if (ret)
222		return ret;
223
224	/* power on internal panel if it's not already.  the init tables of
225	 * some vbios default this to off for some reason, causing the
226	 * panel to not work after resume
227	 */
228	if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) {
229		nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true);
230		msleep(300);
231	}
232
233	/* enable polling for external displays */
234	drm_kms_helper_poll_enable(dev);
235
236	/* enable hotplug interrupts */
237	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
238		struct nouveau_connector *conn = nouveau_connector(connector);
239		nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
240	}
241
242	return ret;
243}
244
245void
246nouveau_display_fini(struct drm_device *dev)
247{
248	struct drm_nouveau_private *dev_priv = dev->dev_private;
249	struct nouveau_display_engine *disp = &dev_priv->engine.display;
250	struct drm_connector *connector;
 
 
 
 
 
 
 
 
251
252	/* disable hotplug interrupts */
253	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
254		struct nouveau_connector *conn = nouveau_connector(connector);
255		nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
 
256	}
 
 
 
 
257
258	drm_kms_helper_poll_disable(dev);
259	disp->fini(dev);
260}
261
262int
263nouveau_display_create(struct drm_device *dev)
264{
265	struct drm_nouveau_private *dev_priv = dev->dev_private;
266	struct nouveau_display_engine *disp = &dev_priv->engine.display;
267	int ret, gen;
268
269	drm_mode_config_init(dev);
270	drm_mode_create_scaling_mode_property(dev);
271	drm_mode_create_dvi_i_properties(dev);
272
273	if (dev_priv->card_type < NV_50)
274		gen = 0;
275	else
276	if (dev_priv->card_type < NV_D0)
277		gen = 1;
278	else
279		gen = 2;
280
281	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
282	PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
283	PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
284
285	disp->underscan_hborder_property =
286		drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
287
288	disp->underscan_vborder_property =
289		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
290
291	if (gen == 1) {
292		disp->vibrant_hue_property =
293			drm_property_create(dev, DRM_MODE_PROP_RANGE,
294					    "vibrant hue", 2);
295		disp->vibrant_hue_property->values[0] = 0;
296		disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
297
298		disp->color_vibrance_property =
299			drm_property_create(dev, DRM_MODE_PROP_RANGE,
300					    "color vibrance", 2);
301		disp->color_vibrance_property->values[0] = 0;
302		disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
303	}
 
 
 
 
 
 
 
 
 
 
 
 
 
304
305	dev->mode_config.funcs = &nouveau_mode_config_funcs;
306	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
307
308	dev->mode_config.min_width = 0;
309	dev->mode_config.min_height = 0;
310	if (dev_priv->card_type < NV_10) {
311		dev->mode_config.max_width = 2048;
312		dev->mode_config.max_height = 2048;
313	} else
314	if (dev_priv->card_type < NV_50) {
315		dev->mode_config.max_width = 4096;
316		dev->mode_config.max_height = 4096;
317	} else {
 
318		dev->mode_config.max_width = 8192;
319		dev->mode_config.max_height = 8192;
 
 
 
320	}
321
322	dev->mode_config.preferred_depth = 24;
323	dev->mode_config.prefer_shadow = 1;
324
 
 
 
 
 
325	drm_kms_helper_poll_init(dev);
326	drm_kms_helper_poll_disable(dev);
327
328	ret = disp->create(dev);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
329	if (ret)
330		goto disp_create_err;
331
 
 
332	if (dev->mode_config.num_crtc) {
333		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
334		if (ret)
335			goto vblank_err;
 
 
 
336	}
337
 
 
 
 
 
 
 
338	return 0;
339
340vblank_err:
341	disp->destroy(dev);
342disp_create_err:
343	drm_kms_helper_poll_fini(dev);
344	drm_mode_config_cleanup(dev);
345	return ret;
346}
347
348void
349nouveau_display_destroy(struct drm_device *dev)
350{
351	struct drm_nouveau_private *dev_priv = dev->dev_private;
352	struct nouveau_display_engine *disp = &dev_priv->engine.display;
353
354	drm_vblank_cleanup(dev);
355
356	disp->destroy(dev);
 
 
357
358	drm_kms_helper_poll_fini(dev);
359	drm_mode_config_cleanup(dev);
360}
361
362int
363nouveau_vblank_enable(struct drm_device *dev, int crtc)
364{
365	struct drm_nouveau_private *dev_priv = dev->dev_private;
366
367	if (dev_priv->card_type >= NV_50)
368		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
369			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
370	else
371		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
372			    NV_PCRTC_INTR_0_VBLANK);
373
374	return 0;
 
375}
376
377void
378nouveau_vblank_disable(struct drm_device *dev, int crtc)
379{
380	struct drm_nouveau_private *dev_priv = dev->dev_private;
381
382	if (dev_priv->card_type >= NV_50)
383		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
384			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
385	else
386		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
387}
388
/* Pin the incoming buffer into VRAM and reserve both buffers so they
 * cannot move or be freed while the flip is in flight.  On any failure,
 * everything acquired so far is released and the error is returned.
 *
 * NOTE(review): both reservations are taken without a ticket, always
 * new-then-old — presumably safe because flips on the same buffers are
 * serialized elsewhere; verify against the callers.
 */
static int
nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
			  struct nouveau_bo *new_bo)
{
	int ret;

	/* Pin first: the buffer must stay resident in VRAM for scanout. */
	ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
	if (ret)
		return ret;

	ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
	if (ret)
		goto fail;

	ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
	if (ret)
		goto fail_unreserve;

	return 0;

fail_unreserve:
	ttm_bo_unreserve(&new_bo->bo);
fail:
	nouveau_bo_unpin(new_bo);
	return ret;
}
415
/* Release the reservations taken by nouveau_page_flip_reserve() after the
 * flip has been emitted.  @fence (may be NULL on the error path) is
 * attached to each buffer before its reservation is dropped, so later
 * users wait for the flip; the outgoing buffer is also unpinned since it
 * is no longer being scanned out.
 */
static void
nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
			    struct nouveau_bo *new_bo,
			    struct nouveau_fence *fence)
{
	nouveau_bo_fence(new_bo, fence);
	ttm_bo_unreserve(&new_bo->bo);

	nouveau_bo_fence(old_bo, fence);
	ttm_bo_unreserve(&old_bo->bo);

	nouveau_bo_unpin(old_bo);
}
429
/* Queue flip state @s on @chan's software-method flip list and emit the
 * flip request into the channel's ring; *pfence is set to a new fence that
 * signals once the request has executed.  On failure @s is unlinked again
 * and the error returned (the caller owns @s either way).
 *
 * Must be called with the channel mutex held (see nouveau_crtc_page_flip).
 */
static int
nouveau_page_flip_emit(struct nouveau_channel *chan,
		       struct nouveau_bo *old_bo,
		       struct nouveau_bo *new_bo,
		       struct nouveau_page_flip_state *s,
		       struct nouveau_fence **pfence)
{
	struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	unsigned long flags;
	int ret;

	/* Queue it to the pending list; event_lock also guards the list in
	 * nouveau_finish_page_flip(). */
	spin_lock_irqsave(&dev->event_lock, flags);
	list_add_tail(&s->head, &swch->flip);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Synchronize with the old framebuffer */
	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
	if (ret)
		goto fail;

	/* Emit the pageflip */
	ret = RING_SPACE(chan, 3);
	if (ret)
		goto fail;

	if (dev_priv->card_type < NV_C0) {
		/* Pre-Fermi: software-method page flip. */
		BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
		OUT_RING  (chan, 0x00000000);
		OUT_RING  (chan, 0x00000000);
	} else {
		/* Fermi+: ref-count write plus immediate-mode flip method. */
		BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		OUT_RING  (chan, 0);
		BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
	}
	FIRE_RING (chan);

	ret = nouveau_fence_new(chan, pfence);
	if (ret)
		goto fail;

	return 0;
fail:
	/* Undo the queueing above so finish_page_flip never sees a flip
	 * that was never emitted. */
	spin_lock_irqsave(&dev->event_lock, flags);
	list_del(&s->head);
	spin_unlock_irqrestore(&dev->event_lock, flags);
	return ret;
}
480
/* DRM ->page_flip hook: flip @crtc to @fb, optionally delivering @event
 * to userspace on completion.  Pins/reserves both buffers, picks a
 * channel (preferring the one the new buffer's pending fence belongs to,
 * so the flip is naturally ordered after rendering), emits the flip and
 * fences both buffers with the result.
 */
int
nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
	struct nouveau_page_flip_state *s;
	struct nouveau_channel *chan = NULL;
	struct nouveau_fence *fence;
	int ret;

	/* No GPU channel means no way to emit the flip. */
	if (!dev_priv->channel)
		return -ENODEV;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* Don't let the buffers go away while we flip */
	ret = nouveau_page_flip_reserve(old_bo, new_bo);
	if (ret)
		goto fail_free;

	/* Initialize a page flip struct */
	*s = (struct nouveau_page_flip_state)
		{ { }, event, nouveau_crtc(crtc)->index,
		  fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
		  new_bo->bo.offset };

	/* Choose the channel the flip will be handled in: the channel that
	 * owns the new buffer's outstanding fence if any, else the kernel
	 * channel. */
	fence = new_bo->bo.sync_obj;
	if (fence)
		chan = nouveau_channel_get_unlocked(fence->channel);
	if (!chan)
		chan = nouveau_channel_get_unlocked(dev_priv->channel);
	mutex_lock(&chan->mutex);

	/* Emit a page flip */
	if (dev_priv->card_type >= NV_50) {
		if (dev_priv->card_type >= NV_D0)
			ret = nvd0_display_flip_next(crtc, fb, chan, 0);
		else
			ret = nv50_display_flip_next(crtc, fb, chan);
		if (ret) {
			nouveau_channel_put(&chan);
			goto fail_unreserve;
		}
	}

	/* nouveau_channel_put() also drops chan->mutex. */
	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
	nouveau_channel_put(&chan);
	if (ret)
		goto fail_unreserve;

	/* Update the crtc struct and cleanup */
	crtc->fb = fb;

	nouveau_page_flip_unreserve(old_bo, new_bo, fence);
	nouveau_fence_unref(&fence);
	return 0;

fail_unreserve:
	nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
fail_free:
	kfree(s);
	return ret;
}
550
/* Complete the oldest pending page flip on @chan: deliver the vblank
 * event to userspace (if one was requested), unlink the flip state and,
 * when @ps is non-NULL, copy it out for the caller before freeing it.
 * Returns -EINVAL if no flip is pending (unexpected interrupt).
 *
 * Runs entirely under dev->event_lock, pairing with the queueing in
 * nouveau_page_flip_emit().
 */
int
nouveau_finish_page_flip(struct nouveau_channel *chan,
			 struct nouveau_page_flip_state *ps)
{
	struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW];
	struct drm_device *dev = chan->dev;
	struct nouveau_page_flip_state *s;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	if (list_empty(&swch->flip)) {
		NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return -EINVAL;
	}

	/* Flips complete in FIFO order, so the head entry is the one done. */
	s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head);
	if (s->event) {
		struct drm_pending_vblank_event *e = s->event;
		struct timeval now;

		/* Timestamp the event and hand it to the waiting file. */
		do_gettimeofday(&now);
		e->event.sequence = 0;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;
		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	list_del(&s->head);
	if (ps)
		*ps = *s;
	kfree(s);

	spin_unlock_irqrestore(&dev->event_lock, flags);
	return 0;
}
589
590int
591nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
592			    struct drm_mode_create_dumb *args)
593{
 
594	struct nouveau_bo *bo;
 
595	int ret;
596
597	args->pitch = roundup(args->width * (args->bpp / 8), 256);
598	args->size = args->pitch * args->height;
599	args->size = roundup(args->size, PAGE_SIZE);
600
601	ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
 
 
 
 
 
 
602	if (ret)
603		return ret;
604
605	ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
606	drm_gem_object_unreference_unlocked(bo->gem);
607	return ret;
608}
609
/* DRM_IOCTL_MODE_DESTROY_DUMB handler: dropping the handle releases the
 * file's reference; the BO is freed once all references are gone. */
int
nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}
616
617int
618nouveau_display_dumb_map_offset(struct drm_file *file_priv,
619				struct drm_device *dev,
620				uint32_t handle, uint64_t *poffset)
621{
622	struct drm_gem_object *gem;
623
624	gem = drm_gem_object_lookup(dev, file_priv, handle);
625	if (gem) {
626		struct nouveau_bo *bo = gem->driver_private;
627		*poffset = bo->bo.addr_space_offset;
628		drm_gem_object_unreference_unlocked(gem);
629		return 0;
630	}
631
632	return -ENOENT;
633}