Linux Audio

Check our new training course

Loading...
v4.17
 
  1/**************************************************************************
  2 * Copyright (c) 2007-2011, Intel Corporation.
  3 * All Rights Reserved.
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms and conditions of the GNU General Public License,
  7 * version 2, as published by the Free Software Foundation.
  8 *
  9 * This program is distributed in the hope it will be useful, but WITHOUT
 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 12 * more details.
 13 *
 14 * You should have received a copy of the GNU General Public License along with
 15 * this program; if not, write to the Free Software Foundation, Inc.,
 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 17 *
 18 **************************************************************************/
 19
 20#include <linux/module.h>
 21#include <linux/kernel.h>
 22#include <linux/errno.h>
 23#include <linux/string.h>
 24#include <linux/pfn_t.h>
 25#include <linux/mm.h>
 26#include <linux/tty.h>
 
 27#include <linux/slab.h>
 28#include <linux/delay.h>
 29#include <linux/init.h>
 30#include <linux/console.h>
 31
 32#include <drm/drmP.h>
 33#include <drm/drm.h>
 34#include <drm/drm_crtc.h>
 35#include <drm/drm_fb_helper.h>
 
 
 36
 37#include "psb_drv.h"
 38#include "psb_intel_reg.h"
 39#include "psb_intel_drv.h"
 40#include "framebuffer.h"
 41#include "gtt.h"
 42
/* Forward declarations for the framebuffer function table below */
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
					      struct drm_file *file_priv,
					      unsigned int *handle);

/* DRM framebuffer vtable shared by user-created and fbdev framebuffers */
static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = psb_user_framebuffer_destroy,
	.create_handle = psb_user_framebuffer_create_handle,
};
 52
/* Scale a 16-bit colormap component down to a _width-bit hardware field */
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
 54
 55static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 56			   unsigned blue, unsigned transp,
 57			   struct fb_info *info)
 58{
 59	struct psb_fbdev *fbdev = info->par;
 60	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
 61	uint32_t v;
 62
 63	if (!fb)
 64		return -ENOMEM;
 65
 66	if (regno > 255)
 67		return 1;
 68
 69	red = CMAP_TOHW(red, info->var.red.length);
 70	blue = CMAP_TOHW(blue, info->var.blue.length);
 71	green = CMAP_TOHW(green, info->var.green.length);
 72	transp = CMAP_TOHW(transp, info->var.transp.length);
 73
 74	v = (red << info->var.red.offset) |
 75	    (green << info->var.green.offset) |
 76	    (blue << info->var.blue.offset) |
 77	    (transp << info->var.transp.offset);
 78
 79	if (regno < 16) {
 80		switch (fb->format->cpp[0] * 8) {
 81		case 16:
 82			((uint32_t *) info->pseudo_palette)[regno] = v;
 83			break;
 84		case 24:
 85		case 32:
 86			((uint32_t *) info->pseudo_palette)[regno] = v;
 87			break;
 88		}
 89	}
 90
 91	return 0;
 92}
 93
 94static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 95{
 96	struct psb_fbdev *fbdev = info->par;
 97	struct psb_framebuffer *psbfb = &fbdev->pfb;
 98	struct drm_device *dev = psbfb->base.dev;
 
 99
100	/*
101	 *	We have to poke our nose in here. The core fb code assumes
102	 *	panning is part of the hardware that can be invoked before
103	 *	the actual fb is mapped. In our case that isn't quite true.
104	 */
105	if (psbfb->gtt->npage) {
106		/* GTT roll shifts in 4K pages, we need to shift the right
107		   number of pages */
108		int pages = info->fix.line_length >> 12;
109		psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
110	}
111        return 0;
112}
113
/*
 * psbfb_vm_fault - fault handler for the fbdev mmap
 *
 * Inserts every page of the stolen-memory backed framebuffer into the
 * faulting VMA in one pass, so later accesses take no further faults.
 * Returns VM_FAULT_NOPAGE on success (PTEs installed directly) or a
 * VM_FAULT_* error code.
 */
static int psbfb_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct psb_framebuffer *psbfb = vma->vm_private_data;
	struct drm_device *dev = psbfb->base.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int page_num;
	int i;
	unsigned long address;
	int ret;
	unsigned long pfn;
	/* Physical base of this framebuffer inside stolen memory */
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
				  psbfb->gtt->offset;

	page_num = vma_pages(vma);
	/* Start of the whole mapping, independent of which page faulted */
	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vm_insert_mixed(vma, address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		/*
		 * -EBUSY means the PTE already exists (e.g. a racing fault
		 * installed it); any error after the first page is likewise
		 * treated as partial success and we stop inserting.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			/* Nothing mapped yet: report the failure */
			ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			return ret;
		}
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return VM_FAULT_NOPAGE;
}
149
/* No per-mapping state to track: open/close are intentionally empty */
static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

/* VMA ops installed by psbfb_mmap(); pages are inserted lazily on fault */
static const struct vm_operations_struct psbfb_vm_ops = {
	.fault	= psbfb_vm_fault,
	.open	= psbfb_vm_open,
	.close	= psbfb_vm_close
};
163
164static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
165{
166	struct psb_fbdev *fbdev = info->par;
167	struct psb_framebuffer *psbfb = &fbdev->pfb;
168
169	if (vma->vm_pgoff != 0)
170		return -EINVAL;
171	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
172		return -EINVAL;
173
174	if (!psbfb->addr_space)
175		psbfb->addr_space = vma->vm_file->f_mapping;
176	/*
177	 * If this is a GEM object then info->screen_base is the virtual
178	 * kernel remapping of the object. FIXME: Review if this is
179	 * suitable for our mmap work
180	 */
181	vma->vm_ops = &psbfb_vm_ops;
182	vma->vm_private_data = (void *)psbfb;
183	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
184	return 0;
185}
186
/* fbdev ops used when the 2D engine can accelerate copies (accel_2d set) */
static struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
};

/* fbdev ops used when scrolling is done by rolling the GTT mapping */
static struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
};

/* Unaccelerated fbdev ops: everything done in software */
static struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};
218
/**
 *	psb_framebuffer_init	-	initialize a framebuffer
 *	@dev: our DRM device
 *	@fb: framebuffer to set up
 *	@mode_cmd: mode description
 *	@gt: backing object
 *
 *	Configure and fill in the boilerplate for our frame buffer. Return
 *	0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
					struct psb_framebuffer *fb,
					const struct drm_mode_fb_cmd2 *mode_cmd,
					struct gtt_range *gt)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_format_info(mode_cmd->pixel_format);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

	/* The display hardware requires a 64-byte aligned pitch */
	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
	/* Remember the GTT range backing this framebuffer */
	fb->gtt = gt;
	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}
257
258/**
259 *	psb_framebuffer_create	-	create a framebuffer backed by gt
260 *	@dev: our DRM device
261 *	@mode_cmd: the description of the requested mode
262 *	@gt: the backing object
263 *
264 *	Create a framebuffer object backed by the gt, and fill in the
265 *	boilerplate required
266 *
267 *	TODO: review object references
268 */
269
270static struct drm_framebuffer *psb_framebuffer_create
271			(struct drm_device *dev,
272			 const struct drm_mode_fb_cmd2 *mode_cmd,
273			 struct gtt_range *gt)
274{
275	struct psb_framebuffer *fb;
276	int ret;
277
278	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
279	if (!fb)
280		return ERR_PTR(-ENOMEM);
281
282	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
283	if (ret) {
284		kfree(fb);
285		return ERR_PTR(ret);
286	}
287	return &fb->base;
288}
289
290/**
291 *	psbfb_alloc		-	allocate frame buffer memory
292 *	@dev: the DRM device
293 *	@aligned_size: space needed
294 *
295 *	Allocate the frame buffer. In the usual case we get a GTT range that
296 *	is stolen memory backed and life is simple. If there isn't sufficient
297 *	we fail as we don't have the virtual mapping space to really vmap it
298 *	and the kernel console code can't handle non linear framebuffers.
299 *
300 *	Re-address this as and if the framebuffer layer grows this ability.
301 */
302static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
303{
304	struct gtt_range *backing;
305	/* Begin by trying to use stolen memory backing */
306	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
307	if (backing) {
308		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
309		return backing;
310	}
311	return NULL;
312}
313
/**
 *	psbfb_create		-	create a framebuffer
 *	@fbdev: the framebuffer device
 *	@sizes: specification of the layout
 *
 *	Create a framebuffer to the specifications provided, allocate the
 *	backing in stolen memory, and register the fbdev info with the
 *	ops table best matching the allocation we achieved.
 */
static int psbfb_create(struct psb_fbdev *fbdev,
				struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fbdev->psb_fb_helper.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct gtt_range *backing;
	u32 bpp, depth;
	int gtt_roll = 0;
	int pitch_lines = 0;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

	/*
	 * Progressively relax the power-of-two pitch alignment (starting
	 * at page size) until the allocation fits in stolen memory.
	 */
	do {
		/*
		 * Acceleration via the GTT requires pitch to be
		 * power of two aligned. Preferably page but less
		 * is ok with some fonts
		 */
        	mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);

        	size = mode_cmd.pitches[0] * mode_cmd.height;
        	size = ALIGN(size, PAGE_SIZE);

		/* Allocate the fb in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);

		if (pitch_lines)
			pitch_lines *= 2;
		else
			pitch_lines = 1;
		gtt_roll++;
	} while (backing == NULL && pitch_lines <= 16);

	/* The final pitch we accepted if we succeeded */
	pitch_lines /= 2;

	if (backing == NULL) {
		/*
		 *	We couldn't get the space we wanted, fall back to the
		 *	display engine requirement instead.  The HW requires
		 *	the pitch to be 64 byte aligned
		 */

		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
		pitch_lines = 64;

		mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the framebuffer in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);
		if (backing == NULL)
			return -ENOMEM;
	}

	/* Clear the framebuffer so the console starts out blank */
	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	info->par = fbdev;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
	if (ret)
		goto out;

	fb = &psbfb->base;
	psbfb->fbdev = info;

	fbdev->psb_fb_helper.fb = fb;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	strcpy(info->fix.id, "psbdrmfb");

	info->flags = FBINFO_DEFAULT;
	/* Pick the best ops the allocation we achieved can support */
	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
		info->fbops = &psbfb_ops;
	else if (gtt_roll) {	/* GTT rolling seems best */
		info->fbops = &psbfb_roll_ops;
		info->flags |= FBINFO_HWACCEL_YPAN;
	} else	/* Software */
		info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev->mode_config.fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = gtt_roll;
	info->fix.ypanstep = 0;

	/* Accessed stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev->mode_config.fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
				sizes->fb_width, sizes->fb_height);

	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n",
					psbfb->base.width, psbfb->base.height);

	return 0;
out:
	psb_gtt_free_range(dev, backing);
	return ret;
}
453
454/**
455 *	psb_user_framebuffer_create	-	create framebuffer
456 *	@dev: our DRM device
457 *	@filp: client file
458 *	@cmd: mode request
459 *
460 *	Create a new framebuffer backed by a userspace GEM object
461 */
462static struct drm_framebuffer *psb_user_framebuffer_create
463			(struct drm_device *dev, struct drm_file *filp,
464			 const struct drm_mode_fb_cmd2 *cmd)
465{
466	struct gtt_range *r;
467	struct drm_gem_object *obj;
468
469	/*
470	 *	Find the GEM object and thus the gtt range object that is
471	 *	to back this space
472	 */
473	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
474	if (obj == NULL)
475		return ERR_PTR(-ENOENT);
476
477	/* Let the core code do all the work */
478	r = container_of(obj, struct gtt_range, gem);
479	return psb_framebuffer_create(dev, cmd, r);
480}
481
482static int psbfb_probe(struct drm_fb_helper *helper,
483				struct drm_fb_helper_surface_size *sizes)
484{
485	struct psb_fbdev *psb_fbdev =
486		container_of(helper, struct psb_fbdev, psb_fb_helper);
487	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
488	struct drm_psb_private *dev_priv = dev->dev_private;
489	int bytespp;
490
491	bytespp = sizes->surface_bpp / 8;
492	if (bytespp == 3)	/* no 24bit packed */
493		bytespp = 4;
494
495	/* If the mode will not fit in 32bit then switch to 16bit to get
496	   a console on full resolution. The X mode setting server will
497	   allocate its own 32bit GEM framebuffer */
498	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
499	                dev_priv->vram_stolen_size) {
500                sizes->surface_bpp = 16;
501                sizes->surface_depth = 16;
502        }
503
504	return psbfb_create(psb_fbdev, sizes);
505}
506
/* fb helper hooks: we only need the probe callback */
static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};
510
/*
 * psb_fbdev_destroy - unregister and tear down the fbdev framebuffer
 * @dev: our DRM device
 * @fbdev: the fbdev wrapper to destroy
 *
 * Unregisters the fbdev info, finalises the fb helper and releases the
 * framebuffer plus its GTT backing reference.  Always returns 0.
 */
static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);

	drm_fb_helper_fini(&fbdev->psb_fb_helper);
	drm_framebuffer_unregister_private(&psbfb->base);
	drm_framebuffer_cleanup(&psbfb->base);

	/* Drop the reference on the GTT backing taken at creation time */
	if (psbfb->gtt)
		drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
	return 0;
}
525
526int psb_fbdev_init(struct drm_device *dev)
527{
528	struct psb_fbdev *fbdev;
529	struct drm_psb_private *dev_priv = dev->dev_private;
530	int ret;
531
532	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
533	if (!fbdev) {
534		dev_err(dev->dev, "no memory\n");
535		return -ENOMEM;
536	}
537
538	dev_priv->fbdev = fbdev;
539
540	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
541
542	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
543				 INTELFB_CONN_LIMIT);
544	if (ret)
545		goto free;
546
547	ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
548	if (ret)
549		goto fini;
550
551	/* disable all the possible outputs/crtcs before entering KMS mode */
552	drm_helper_disable_unused_functions(dev);
553
554	ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
555	if (ret)
556		goto fini;
557
558	return 0;
559
560fini:
561	drm_fb_helper_fini(&fbdev->psb_fb_helper);
562free:
563	kfree(fbdev);
564	return ret;
565}
566
567static void psb_fbdev_fini(struct drm_device *dev)
568{
569	struct drm_psb_private *dev_priv = dev->dev_private;
570
571	if (!dev_priv->fbdev)
572		return;
573
574	psb_fbdev_destroy(dev, dev_priv->fbdev);
575	kfree(dev_priv->fbdev);
576	dev_priv->fbdev = NULL;
577}
578
/**
 *	psb_user_framebuffer_create_handle - add handle to a framebuffer
 *	@fb: framebuffer
 *	@file_priv: our DRM file
 *	@handle: returned handle
 *
 *	Our framebuffer object is a GTT range which also contains a GEM
 *	object. We need to turn it into a handle for userspace. GEM will do
 *	the work for us
 */
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
					      struct drm_file *file_priv,
					      unsigned int *handle)
{
	struct psb_framebuffer *psbfb = to_psb_fb(fb);
	struct gtt_range *r = psbfb->gtt;
	return drm_gem_handle_create(file_priv, &r->gem, handle);
}
597
/**
 *	psb_user_framebuffer_destroy	-	destruct user created fb
 *	@fb: framebuffer
 *
 *	User framebuffers are backed by GEM objects so all we have to do is
 *	clean up a bit and drop the reference, GEM will handle the fallout
 */
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct psb_framebuffer *psbfb = to_psb_fb(fb);
	struct gtt_range *r = psbfb->gtt;

	/* Let DRM do its clean up */
	drm_framebuffer_cleanup(fb);
	/*  We are no longer using the resource in GEM */
	drm_gem_object_unreference_unlocked(&r->gem);
	kfree(fb);
}
616
/* Mode config hooks: user fb creation plus fbdev hotplug notification */
static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
621
/*
 * psb_setup_outputs - initialise and wire up the display outputs
 * @dev: our DRM device
 *
 * Creates the shared mode properties, asks the per-chip ops to register
 * its outputs, then fills in possible_crtcs/possible_clones for every
 * registered encoder based on its output type.
 */
static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			/* Per-chip: which pipes can drive SDVO */
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = (1 << INTEL_OUTPUT_SDVO);
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = (1 << INTEL_OUTPUT_LVDS);
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_MIPI);
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_EDP);
			/* last case: no break needed */
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
		    gma_connector_clones(dev, clone_mask);
	}
}
680
/*
 * psb_modeset_init - bring up KMS mode setting
 * @dev: our DRM device
 *
 * Initialises the DRM mode config, reads the stolen memory base from
 * PCI config space, creates the CRTCs and outputs, and applies any
 * per-chip errata.  Sets dev_priv->modeset so cleanup knows what to
 * tear down.
 */
void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2*/
	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
					&(dev->mode_config.fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	/* Record that modeset came up so psb_modeset_cleanup() tears down */
	dev_priv->modeset = true;
}
713
714void psb_modeset_cleanup(struct drm_device *dev)
715{
716	struct drm_psb_private *dev_priv = dev->dev_private;
717	if (dev_priv->modeset) {
718		drm_kms_helper_poll_fini(dev);
719		psb_fbdev_fini(dev);
720		drm_mode_config_cleanup(dev);
721	}
722}
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/**************************************************************************
  3 * Copyright (c) 2007-2011, Intel Corporation.
  4 * All Rights Reserved.
  5 *
 
 
 
 
 
 
 
 
 
 
 
 
 
  6 **************************************************************************/
  7
  8#include <linux/console.h>
  9#include <linux/delay.h>
 10#include <linux/errno.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/mm.h>
 14#include <linux/module.h>
 15#include <linux/pfn_t.h>
 16#include <linux/slab.h>
 17#include <linux/string.h>
 18#include <linux/tty.h>
 
 19
 
 20#include <drm/drm.h>
 21#include <drm/drm_crtc.h>
 22#include <drm/drm_fb_helper.h>
 23#include <drm/drm_fourcc.h>
 24#include <drm/drm_gem_framebuffer_helper.h>
 25
 
 
 
 26#include "framebuffer.h"
 27#include "gtt.h"
 28#include "psb_drv.h"
 29#include "psb_intel_drv.h"
 30#include "psb_intel_reg.h"
 
 
 31
/* DRM framebuffer vtable: generic GEM-backed destroy/handle helpers */
static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
 36
/* Scale a 16-bit colormap component down to a _width-bit hardware field */
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
 38
 39static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 40			   unsigned blue, unsigned transp,
 41			   struct fb_info *info)
 42{
 43	struct psb_fbdev *fbdev = info->par;
 44	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
 45	uint32_t v;
 46
 47	if (!fb)
 48		return -ENOMEM;
 49
 50	if (regno > 255)
 51		return 1;
 52
 53	red = CMAP_TOHW(red, info->var.red.length);
 54	blue = CMAP_TOHW(blue, info->var.blue.length);
 55	green = CMAP_TOHW(green, info->var.green.length);
 56	transp = CMAP_TOHW(transp, info->var.transp.length);
 57
 58	v = (red << info->var.red.offset) |
 59	    (green << info->var.green.offset) |
 60	    (blue << info->var.blue.offset) |
 61	    (transp << info->var.transp.offset);
 62
 63	if (regno < 16) {
 64		switch (fb->format->cpp[0] * 8) {
 65		case 16:
 66			((uint32_t *) info->pseudo_palette)[regno] = v;
 67			break;
 68		case 24:
 69		case 32:
 70			((uint32_t *) info->pseudo_palette)[regno] = v;
 71			break;
 72		}
 73	}
 74
 75	return 0;
 76}
 77
/*
 * psbfb_pan - fb_pan_display handler: pan by rolling the GTT mapping
 * @var: requested panning state (yoffset)
 * @info: fbdev info for our framebuffer
 */
static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct psb_fbdev *fbdev = info->par;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_device *dev = psbfb->base.dev;
	/* The backing GTT range lives in the fb's GEM object slot 0 */
	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);

	/*
	 *	We have to poke our nose in here. The core fb code assumes
	 *	panning is part of the hardware that can be invoked before
	 *	the actual fb is mapped. In our case that isn't quite true.
	 */
	if (gtt->npage) {
		/* GTT roll shifts in 4K pages, we need to shift the right
		   number of pages */
		int pages = info->fix.line_length >> 12;
		psb_gtt_roll(dev, gtt, var->yoffset * pages);
	}
	return 0;
}
 98
/*
 * psbfb_vm_fault - fault handler for the fbdev mmap
 *
 * Inserts every page of the stolen-memory backed framebuffer into the
 * faulting VMA in one pass so later accesses take no further faults.
 * Returns the last vmf_insert_mixed() status (VM_FAULT_NOPAGE on
 * success) or VM_FAULT_SIGBUS for an empty VMA.
 */
static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct psb_framebuffer *psbfb = vma->vm_private_data;
	struct drm_device *dev = psbfb->base.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	/* The backing GTT range lives in the fb's GEM object slot 0 */
	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
	int page_num;
	int i;
	unsigned long address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long pfn;
	/* Physical base of this framebuffer inside stolen memory */
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
				  gtt->offset;

	page_num = vma_pages(vma);
	/* Start of the whole mapping, independent of which page faulted */
	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vmf_insert_mixed(vma, address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		/* Stop on the first insertion error and report it */
		if (unlikely(ret & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return ret;
}
131
/* No per-mapping state to track: open/close are intentionally empty */
static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

/* VMA ops installed by psbfb_mmap(); pages are inserted lazily on fault */
static const struct vm_operations_struct psbfb_vm_ops = {
	.fault	= psbfb_vm_fault,
	.open	= psbfb_vm_open,
	.close	= psbfb_vm_close
};
145
146static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
147{
148	struct psb_fbdev *fbdev = info->par;
149	struct psb_framebuffer *psbfb = &fbdev->pfb;
150
151	if (vma->vm_pgoff != 0)
152		return -EINVAL;
153	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
154		return -EINVAL;
155
156	if (!psbfb->addr_space)
157		psbfb->addr_space = vma->vm_file->f_mapping;
158	/*
159	 * If this is a GEM object then info->screen_base is the virtual
160	 * kernel remapping of the object. FIXME: Review if this is
161	 * suitable for our mmap work
162	 */
163	vma->vm_ops = &psbfb_vm_ops;
164	vma->vm_private_data = (void *)psbfb;
165	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
166	return 0;
167}
168
/* fbdev ops used when the 2D engine can accelerate copies (accel_2d set) */
static struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
};

/* fbdev ops used when scrolling is done by rolling the GTT mapping */
static struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
};

/* Unaccelerated fbdev ops: everything done in software */
static struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};
200
/**
 *	psb_framebuffer_init	-	initialize a framebuffer
 *	@dev: our DRM device
 *	@fb: framebuffer to set up
 *	@mode_cmd: mode description
 *	@gt: backing object
 *
 *	Configure and fill in the boilerplate for our frame buffer. Return
 *	0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
					struct psb_framebuffer *fb,
					const struct drm_mode_fb_cmd2 *mode_cmd,
					struct gtt_range *gt)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_get_format_info(dev, mode_cmd);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

	/* The display hardware requires a 64-byte aligned pitch */
	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
	/* Stash the GEM object so the generic GEM fb helpers can find it */
	fb->base.obj[0] = &gt->gem;
	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}
239
/**
 *	psb_framebuffer_create	-	create a framebuffer backed by gt
 *	@dev: our DRM device
 *	@mode_cmd: the description of the requested mode
 *	@gt: the backing object
 *
 *	Create a framebuffer object backed by the gt, and fill in the
 *	boilerplate required.  Returns the embedded drm_framebuffer or an
 *	ERR_PTR on failure.
 *
 *	TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct gtt_range *gt)
{
	struct psb_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
	if (ret) {
		/* Init failed: nothing was registered, just free the shell */
		kfree(fb);
		return ERR_PTR(ret);
	}
	return &fb->base;
}
271
/**
 *	psbfb_alloc		-	allocate frame buffer memory
 *	@dev: the DRM device
 *	@aligned_size: space needed
 *
 *	Allocate the frame buffer. In the usual case we get a GTT range that
 *	is stolen memory backed and life is simple. If there isn't sufficient
 *	we fail as we don't have the virtual mapping space to really vmap it
 *	and the kernel console code can't handle non linear framebuffers.
 *
 *	Re-address this as and if the framebuffer layer grows this ability.
 */
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
	struct gtt_range *backing;
	/* Begin by trying to use stolen memory backing */
	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
	if (backing) {
		/* Wrap the range in a GEM object so fb teardown can unref it */
		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
		return backing;
	}
	return NULL;
}
295
296/**
297 *	psbfb_create		-	create a framebuffer
298 *	@fbdev: the framebuffer device
299 *	@sizes: specification of the layout
300 *
301 *	Create a framebuffer to the specifications provided
302 */
303static int psbfb_create(struct psb_fbdev *fbdev,
304				struct drm_fb_helper_surface_size *sizes)
305{
306	struct drm_device *dev = fbdev->psb_fb_helper.dev;
307	struct drm_psb_private *dev_priv = dev->dev_private;
308	struct fb_info *info;
309	struct drm_framebuffer *fb;
310	struct psb_framebuffer *psbfb = &fbdev->pfb;
311	struct drm_mode_fb_cmd2 mode_cmd;
312	int size;
313	int ret;
314	struct gtt_range *backing;
315	u32 bpp, depth;
316	int gtt_roll = 0;
317	int pitch_lines = 0;
318
319	mode_cmd.width = sizes->surface_width;
320	mode_cmd.height = sizes->surface_height;
321	bpp = sizes->surface_bpp;
322	depth = sizes->surface_depth;
323
324	/* No 24bit packed */
325	if (bpp == 24)
326		bpp = 32;
327
328	do {
329		/*
330		 * Acceleration via the GTT requires pitch to be
331		 * power of two aligned. Preferably page but less
332		 * is ok with some fonts
333		 */
334        	mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
335
336        	size = mode_cmd.pitches[0] * mode_cmd.height;
337        	size = ALIGN(size, PAGE_SIZE);
338
339		/* Allocate the fb in the GTT with stolen page backing */
340		backing = psbfb_alloc(dev, size);
341
342		if (pitch_lines)
343			pitch_lines *= 2;
344		else
345			pitch_lines = 1;
346		gtt_roll++;
347	} while (backing == NULL && pitch_lines <= 16);
348
349	/* The final pitch we accepted if we succeeded */
350	pitch_lines /= 2;
351
352	if (backing == NULL) {
353		/*
354		 *	We couldn't get the space we wanted, fall back to the
355		 *	display engine requirement instead.  The HW requires
356		 *	the pitch to be 64 byte aligned
357		 */
358
359		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
360		pitch_lines = 64;
361
362		mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
363
364		size = mode_cmd.pitches[0] * mode_cmd.height;
365		size = ALIGN(size, PAGE_SIZE);
366
367		/* Allocate the framebuffer in the GTT with stolen page backing */
368		backing = psbfb_alloc(dev, size);
369		if (backing == NULL)
370			return -ENOMEM;
371	}
372
373	memset(dev_priv->vram_addr + backing->offset, 0, size);
374
375	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
376	if (IS_ERR(info)) {
377		ret = PTR_ERR(info);
378		goto out;
379	}
 
380
381	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
382
383	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
384	if (ret)
385		goto out;
386
387	fb = &psbfb->base;
388	psbfb->fbdev = info;
389
390	fbdev->psb_fb_helper.fb = fb;
391
 
 
 
 
392	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
393		info->fbops = &psbfb_ops;
394	else if (gtt_roll) {	/* GTT rolling seems best */
395		info->fbops = &psbfb_roll_ops;
396		info->flags |= FBINFO_HWACCEL_YPAN;
397	} else	/* Software */
398		info->fbops = &psbfb_unaccel_ops;
399
400	info->fix.smem_start = dev->mode_config.fb_base;
401	info->fix.smem_len = size;
402	info->fix.ywrapstep = gtt_roll;
403	info->fix.ypanstep = 0;
404
405	/* Accessed stolen memory directly */
406	info->screen_base = dev_priv->vram_addr + backing->offset;
407	info->screen_size = size;
408
409	if (dev_priv->gtt.stolen_size) {
410		info->apertures->ranges[0].base = dev->mode_config.fb_base;
411		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
412	}
413
414	drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
 
415
416	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
417	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
418
419	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
420
421	dev_dbg(dev->dev, "allocated %dx%d fb\n",
422					psbfb->base.width, psbfb->base.height);
423
424	return 0;
425out:
426	psb_gtt_free_range(dev, backing);
427	return ret;
428}
429
430/**
431 *	psb_user_framebuffer_create	-	create framebuffer
432 *	@dev: our DRM device
433 *	@filp: client file
434 *	@cmd: mode request
435 *
436 *	Create a new framebuffer backed by a userspace GEM object
437 */
438static struct drm_framebuffer *psb_user_framebuffer_create
439			(struct drm_device *dev, struct drm_file *filp,
440			 const struct drm_mode_fb_cmd2 *cmd)
441{
442	struct gtt_range *r;
443	struct drm_gem_object *obj;
444
445	/*
446	 *	Find the GEM object and thus the gtt range object that is
447	 *	to back this space
448	 */
449	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
450	if (obj == NULL)
451		return ERR_PTR(-ENOENT);
452
453	/* Let the core code do all the work */
454	r = container_of(obj, struct gtt_range, gem);
455	return psb_framebuffer_create(dev, cmd, r);
456}
457
458static int psbfb_probe(struct drm_fb_helper *helper,
459				struct drm_fb_helper_surface_size *sizes)
460{
461	struct psb_fbdev *psb_fbdev =
462		container_of(helper, struct psb_fbdev, psb_fb_helper);
463	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
464	struct drm_psb_private *dev_priv = dev->dev_private;
465	int bytespp;
466
467	bytespp = sizes->surface_bpp / 8;
468	if (bytespp == 3)	/* no 24bit packed */
469		bytespp = 4;
470
471	/* If the mode will not fit in 32bit then switch to 16bit to get
472	   a console on full resolution. The X mode setting server will
473	   allocate its own 32bit GEM framebuffer */
474	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
475	                dev_priv->vram_stolen_size) {
476                sizes->surface_bpp = 16;
477                sizes->surface_depth = 16;
478        }
479
480	return psbfb_create(psb_fbdev, sizes);
481}
482
/* fbdev helper callbacks: we only need the framebuffer probe hook */
static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};
486
/*
 *	psb_fbdev_destroy	-	tear down the kernel framebuffer
 *	@dev: our DRM device
 *	@fbdev: the fbdev wrapper being destroyed
 *
 *	Unregister and finalise the fb helper, then clean up the private
 *	framebuffer and drop the reference on its backing GEM object.
 *	Always returns 0.
 */
static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	/* Unregister from fbdev before the helper state is finalised */
	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);

	drm_fb_helper_fini(&fbdev->psb_fb_helper);
	drm_framebuffer_unregister_private(&psbfb->base);
	drm_framebuffer_cleanup(&psbfb->base);

	/* Release the stolen-memory backing object, if one was attached */
	if (psbfb->base.obj[0])
		drm_gem_object_put_unlocked(psbfb->base.obj[0]);
	return 0;
}
501
502int psb_fbdev_init(struct drm_device *dev)
503{
504	struct psb_fbdev *fbdev;
505	struct drm_psb_private *dev_priv = dev->dev_private;
506	int ret;
507
508	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
509	if (!fbdev) {
510		dev_err(dev->dev, "no memory\n");
511		return -ENOMEM;
512	}
513
514	dev_priv->fbdev = fbdev;
515
516	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
517
518	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
519				 INTELFB_CONN_LIMIT);
520	if (ret)
521		goto free;
522
523	ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
524	if (ret)
525		goto fini;
526
527	/* disable all the possible outputs/crtcs before entering KMS mode */
528	drm_helper_disable_unused_functions(dev);
529
530	ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
531	if (ret)
532		goto fini;
533
534	return 0;
535
536fini:
537	drm_fb_helper_fini(&fbdev->psb_fb_helper);
538free:
539	kfree(fbdev);
540	return ret;
541}
542
543static void psb_fbdev_fini(struct drm_device *dev)
544{
545	struct drm_psb_private *dev_priv = dev->dev_private;
546
547	if (!dev_priv->fbdev)
548		return;
549
550	psb_fbdev_destroy(dev, dev_priv->fbdev);
551	kfree(dev_priv->fbdev);
552	dev_priv->fbdev = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
553}
554
/* Mode config callbacks: user framebuffer creation plus the generic
   fb helper hotplug handling */
static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
559
560static void psb_setup_outputs(struct drm_device *dev)
561{
562	struct drm_psb_private *dev_priv = dev->dev_private;
563	struct drm_connector *connector;
564
565	drm_mode_create_scaling_mode_property(dev);
566
567	/* It is ok for this to fail - we just don't get backlight control */
568	if (!dev_priv->backlight_property)
569		dev_priv->backlight_property = drm_property_create_range(dev, 0,
570							"backlight", 0, 100);
571	dev_priv->ops->output_init(dev);
572
573	list_for_each_entry(connector, &dev->mode_config.connector_list,
574			    head) {
575		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
576		struct drm_encoder *encoder = &gma_encoder->base;
577		int crtc_mask = 0, clone_mask = 0;
578
579		/* valid crtcs */
580		switch (gma_encoder->type) {
581		case INTEL_OUTPUT_ANALOG:
582			crtc_mask = (1 << 0);
583			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
584			break;
585		case INTEL_OUTPUT_SDVO:
586			crtc_mask = dev_priv->ops->sdvo_mask;
587			clone_mask = (1 << INTEL_OUTPUT_SDVO);
588			break;
589		case INTEL_OUTPUT_LVDS:
590		        crtc_mask = dev_priv->ops->lvds_mask;
591			clone_mask = (1 << INTEL_OUTPUT_LVDS);
592			break;
593		case INTEL_OUTPUT_MIPI:
594			crtc_mask = (1 << 0);
595			clone_mask = (1 << INTEL_OUTPUT_MIPI);
596			break;
597		case INTEL_OUTPUT_MIPI2:
598			crtc_mask = (1 << 2);
599			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
600			break;
601		case INTEL_OUTPUT_HDMI:
602		        crtc_mask = dev_priv->ops->hdmi_mask;
603			clone_mask = (1 << INTEL_OUTPUT_HDMI);
604			break;
605		case INTEL_OUTPUT_DISPLAYPORT:
606			crtc_mask = (1 << 0) | (1 << 1);
607			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
608			break;
609		case INTEL_OUTPUT_EDP:
610			crtc_mask = (1 << 1);
611			clone_mask = (1 << INTEL_OUTPUT_EDP);
612		}
613		encoder->possible_crtcs = crtc_mask;
614		encoder->possible_clones =
615		    gma_connector_clones(dev, clone_mask);
616	}
617}
618
619void psb_modeset_init(struct drm_device *dev)
620{
621	struct drm_psb_private *dev_priv = dev->dev_private;
622	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
623	int i;
624
625	drm_mode_config_init(dev);
626
627	dev->mode_config.min_width = 0;
628	dev->mode_config.min_height = 0;
629
630	dev->mode_config.funcs = &psb_mode_funcs;
631
632	/* set memory base */
633	/* Oaktrail and Poulsbo should use BAR 2*/
634	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
635					&(dev->mode_config.fb_base));
636
637	/* num pipes is 2 for PSB but 1 for Mrst */
638	for (i = 0; i < dev_priv->num_pipe; i++)
639		psb_intel_crtc_init(dev, i, mode_dev);
640
641	dev->mode_config.max_width = 4096;
642	dev->mode_config.max_height = 4096;
643
644	psb_setup_outputs(dev);
645
646	if (dev_priv->ops->errata)
647	        dev_priv->ops->errata(dev);
648
649        dev_priv->modeset = true;
650}
651
652void psb_modeset_cleanup(struct drm_device *dev)
653{
654	struct drm_psb_private *dev_priv = dev->dev_private;
655	if (dev_priv->modeset) {
656		drm_kms_helper_poll_fini(dev);
657		psb_fbdev_fini(dev);
658		drm_mode_config_cleanup(dev);
659	}
660}