v5.4: drivers/gpu/drm/gma500/framebuffer.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/**************************************************************************
  3 * Copyright (c) 2007-2011, Intel Corporation.
  4 * All Rights Reserved.
  5 *
  6 **************************************************************************/
  7
  8#include <linux/console.h>
  9#include <linux/delay.h>
 10#include <linux/errno.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/mm.h>
 14#include <linux/module.h>
 15#include <linux/pfn_t.h>
 16#include <linux/slab.h>
 17#include <linux/string.h>
 18#include <linux/tty.h>
 19
 20#include <drm/drm.h>
 21#include <drm/drm_crtc.h>
 22#include <drm/drm_fb_helper.h>
 23#include <drm/drm_fourcc.h>
 24#include <drm/drm_gem_framebuffer_helper.h>
 25
 26#include "framebuffer.h"
 27#include "gtt.h"
 28#include "psb_drv.h"
 29#include "psb_intel_drv.h"
 30#include "psb_intel_reg.h"
 31
 32static const struct drm_framebuffer_funcs psb_fb_funcs = {
 33	.destroy = drm_gem_fb_destroy,
 34	.create_handle = drm_gem_fb_create_handle,
 35};
 36
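/*
 * CMAP_TOHW() rescales a 16-bit fbdev colour component into a hardware
 * field of _width bits, rounding so that 0xFFFF maps to the all-ones
 * value: e.g. CMAP_TOHW(0xFFFF, 5) == 0x1F and CMAP_TOHW(0, 5) == 0.
 */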
 37#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
 38
 39static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 40			   unsigned blue, unsigned transp,
 41			   struct fb_info *info)
 42{
 43	struct psb_fbdev *fbdev = info->par;
 44	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
 45	uint32_t v;
 46
 47	if (!fb)
 48		return -ENOMEM;
 49
 50	if (regno > 255)
 51		return 1;
 52
 53	red = CMAP_TOHW(red, info->var.red.length);
 54	blue = CMAP_TOHW(blue, info->var.blue.length);
 55	green = CMAP_TOHW(green, info->var.green.length);
 56	transp = CMAP_TOHW(transp, info->var.transp.length);
 57
 58	v = (red << info->var.red.offset) |
 59	    (green << info->var.green.offset) |
 60	    (blue << info->var.blue.offset) |
 61	    (transp << info->var.transp.offset);
 62
 63	if (regno < 16) {
 64		switch (fb->format->cpp[0] * 8) {
 65		case 16:
 66			((uint32_t *) info->pseudo_palette)[regno] = v;
 67			break;
 68		case 24:
 69		case 32:
 70			((uint32_t *) info->pseudo_palette)[regno] = v;
 71			break;
 72		}
 73	}
 74
 75	return 0;
 76}
 77
 78static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 79{
 80	struct psb_fbdev *fbdev = info->par;
 81	struct psb_framebuffer *psbfb = &fbdev->pfb;
 82	struct drm_device *dev = psbfb->base.dev;
 83	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
 84
 85	/*
 86	 *	We have to poke our nose in here. The core fb code assumes
 87	 *	panning is part of the hardware that can be invoked before
 88	 *	the actual fb is mapped. In our case that isn't quite true.
 89	 */
 90	if (gtt->npage) {
 91		/* GTT roll shifts in 4K pages, we need to shift the right
 92		   number of pages */
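		/*
		 * line_length is the pitch in bytes, so >> 12 yields whole
		 * 4K pages per scanline (this assumes the accelerated pitch
		 * is a multiple of the page size); yoffset * pages is then
		 * the page offset handed to psb_gtt_roll().
		 */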
 93		int pages = info->fix.line_length >> 12;
 94		psb_gtt_roll(dev, gtt, var->yoffset * pages);
 95	}
 96	return 0;
 97}
 98
 99static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
100{
101	struct vm_area_struct *vma = vmf->vma;
102	struct psb_framebuffer *psbfb = vma->vm_private_data;
103	struct drm_device *dev = psbfb->base.dev;
104	struct drm_psb_private *dev_priv = dev->dev_private;
105	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
106	int page_num;
107	int i;
108	unsigned long address;
109	vm_fault_t ret = VM_FAULT_SIGBUS;
110	unsigned long pfn;
111	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
112				  gtt->offset;
113
114	page_num = vma_pages(vma);
115	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
116
117	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
118
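	/*
	 * Map the whole stolen-memory backing into the VMA on the first
	 * fault rather than page by page; later accesses hit the PTEs
	 * inserted here and do not fault again.
	 */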
119	for (i = 0; i < page_num; i++) {
120		pfn = (phys_addr >> PAGE_SHIFT);
121
122		ret = vmf_insert_mixed(vma, address,
123				__pfn_to_pfn_t(pfn, PFN_DEV));
124		if (unlikely(ret & VM_FAULT_ERROR))
125			break;
126		address += PAGE_SIZE;
127		phys_addr += PAGE_SIZE;
128	}
129	return ret;
130}
131
132static void psbfb_vm_open(struct vm_area_struct *vma)
133{
134}
135
136static void psbfb_vm_close(struct vm_area_struct *vma)
137{
138}
139
140static const struct vm_operations_struct psbfb_vm_ops = {
141	.fault	= psbfb_vm_fault,
142	.open	= psbfb_vm_open,
143	.close	= psbfb_vm_close
144};
145
146static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
147{
148	struct psb_fbdev *fbdev = info->par;
149	struct psb_framebuffer *psbfb = &fbdev->pfb;
150
151	if (vma->vm_pgoff != 0)
152		return -EINVAL;
153	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
154		return -EINVAL;
155
156	if (!psbfb->addr_space)
157		psbfb->addr_space = vma->vm_file->f_mapping;
158	/*
159	 * If this is a GEM object then info->screen_base is the virtual
160	 * kernel remapping of the object. FIXME: Review if this is
161	 * suitable for our mmap work
162	 */
163	vma->vm_ops = &psbfb_vm_ops;
164	vma->vm_private_data = (void *)psbfb;
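	/*
	 * VM_MIXEDMAP lets psbfb_vm_fault() insert raw pfn mappings with
	 * vmf_insert_mixed(); VM_IO, VM_DONTEXPAND and VM_DONTDUMP mark
	 * this as a device mapping that must not be expanded or core
	 * dumped.
	 */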
165	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
166	return 0;
167}
168
169static struct fb_ops psbfb_ops = {
170	.owner = THIS_MODULE,
171	DRM_FB_HELPER_DEFAULT_OPS,
172	.fb_setcolreg = psbfb_setcolreg,
173	.fb_fillrect = drm_fb_helper_cfb_fillrect,
174	.fb_copyarea = psbfb_copyarea,
175	.fb_imageblit = drm_fb_helper_cfb_imageblit,
176	.fb_mmap = psbfb_mmap,
177	.fb_sync = psbfb_sync,
178};
179
180static struct fb_ops psbfb_roll_ops = {
181	.owner = THIS_MODULE,
182	DRM_FB_HELPER_DEFAULT_OPS,
183	.fb_setcolreg = psbfb_setcolreg,
184	.fb_fillrect = drm_fb_helper_cfb_fillrect,
185	.fb_copyarea = drm_fb_helper_cfb_copyarea,
186	.fb_imageblit = drm_fb_helper_cfb_imageblit,
187	.fb_pan_display = psbfb_pan,
188	.fb_mmap = psbfb_mmap,
189};
190
191static struct fb_ops psbfb_unaccel_ops = {
192	.owner = THIS_MODULE,
193	DRM_FB_HELPER_DEFAULT_OPS,
194	.fb_setcolreg = psbfb_setcolreg,
195	.fb_fillrect = drm_fb_helper_cfb_fillrect,
196	.fb_copyarea = drm_fb_helper_cfb_copyarea,
197	.fb_imageblit = drm_fb_helper_cfb_imageblit,
198	.fb_mmap = psbfb_mmap,
199};
200
201/**
202 *	psb_framebuffer_init	-	initialize a framebuffer
203 *	@dev: our DRM device
204 *	@fb: framebuffer to set up
205 *	@mode_cmd: mode description
206 *	@gt: backing object
207 *
208 *	Configure and fill in the boilerplate for our frame buffer. Return
209 *	0 on success or an error code if we fail.
210 */
211static int psb_framebuffer_init(struct drm_device *dev,
212					struct psb_framebuffer *fb,
213					const struct drm_mode_fb_cmd2 *mode_cmd,
214					struct gtt_range *gt)
215{
216	const struct drm_format_info *info;
217	int ret;
218
219	/*
220	 * Reject unknown formats, YUV formats, and formats with more than
221	 * 4 bytes per pixel.
222	 */
223	info = drm_get_format_info(dev, mode_cmd);
224	if (!info || !info->depth || info->cpp[0] > 4)
225		return -EINVAL;
226
227	if (mode_cmd->pitches[0] & 63)
228		return -EINVAL;
229
230	drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
231	fb->base.obj[0] = &gt->gem;
232	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
233	if (ret) {
234		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
235		return ret;
236	}
237	return 0;
238}
239
240/**
241 *	psb_framebuffer_create	-	create a framebuffer backed by gt
242 *	@dev: our DRM device
243 *	@mode_cmd: the description of the requested mode
244 *	@gt: the backing object
245 *
246 *	Create a framebuffer object backed by the gt, and fill in the
247 *	boilerplate required
248 *
249 *	TODO: review object references
250 */
251
252static struct drm_framebuffer *psb_framebuffer_create
253			(struct drm_device *dev,
254			 const struct drm_mode_fb_cmd2 *mode_cmd,
255			 struct gtt_range *gt)
256{
257	struct psb_framebuffer *fb;
258	int ret;
259
260	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
261	if (!fb)
262		return ERR_PTR(-ENOMEM);
263
264	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
265	if (ret) {
266		kfree(fb);
267		return ERR_PTR(ret);
268	}
269	return &fb->base;
270}
271
272/**
273 *	psbfb_alloc		-	allocate frame buffer memory
274 *	@dev: the DRM device
275 *	@aligned_size: space needed
276 *
277 *	Allocate the frame buffer. In the usual case we get a GTT range that
278 *	is stolen memory backed and life is simple. If there isn't sufficient
279 *	we fail as we don't have the virtual mapping space to really vmap it
280 *	and the kernel console code can't handle non linear framebuffers.
281 *
282 *	Re-address this as and if the framebuffer layer grows this ability.
283 */
284static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
285{
286	struct gtt_range *backing;
287	/* Begin by trying to use stolen memory backing */
288	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
289	if (backing) {
290		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
291		return backing;
292	}
293	return NULL;
294}
295
296/**
297 *	psbfb_create		-	create a framebuffer
298 *	@fbdev: the framebuffer device
299 *	@sizes: specification of the layout
300 *
301 *	Create a framebuffer to the specifications provided
302 */
303static int psbfb_create(struct psb_fbdev *fbdev,
304				struct drm_fb_helper_surface_size *sizes)
305{
306	struct drm_device *dev = fbdev->psb_fb_helper.dev;
307	struct drm_psb_private *dev_priv = dev->dev_private;
308	struct fb_info *info;
309	struct drm_framebuffer *fb;
310	struct psb_framebuffer *psbfb = &fbdev->pfb;
311	struct drm_mode_fb_cmd2 mode_cmd;
312	int size;
313	int ret;
314	struct gtt_range *backing;
315	u32 bpp, depth;
316	int gtt_roll = 0;
317	int pitch_lines = 0;
318
319	mode_cmd.width = sizes->surface_width;
320	mode_cmd.height = sizes->surface_height;
321	bpp = sizes->surface_bpp;
322	depth = sizes->surface_depth;
323
324	/* No 24bit packed */
325	if (bpp == 24)
326		bpp = 32;
327
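	/*
	 * Retry the stolen-memory allocation with progressively smaller
	 * power-of-two pitch alignments (4096 >> pitch_lines) until it
	 * succeeds or pitch_lines exceeds 16; gtt_roll counts the attempts
	 * and is cleared below if we fall back to the unaccelerated path.
	 */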
328	do {
329		/*
330		 * Acceleration via the GTT requires pitch to be
331		 * power of two aligned. Preferably page but less
332		 * is ok with some fonts
333		 */
334		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
335
336		size = mode_cmd.pitches[0] * mode_cmd.height;
337		size = ALIGN(size, PAGE_SIZE);
338
339		/* Allocate the fb in the GTT with stolen page backing */
340		backing = psbfb_alloc(dev, size);
341
342		if (pitch_lines)
343			pitch_lines *= 2;
344		else
345			pitch_lines = 1;
346		gtt_roll++;
347	} while (backing == NULL && pitch_lines <= 16);
348
349	/* The final pitch we accepted if we succeeded */
350	pitch_lines /= 2;
351
352	if (backing == NULL) {
353		/*
354		 *	We couldn't get the space we wanted, fall back to the
355		 *	display engine requirement instead.  The HW requires
356		 *	the pitch to be 64 byte aligned
357		 */
358
359		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
360		pitch_lines = 64;
361
362		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
363
364		size = mode_cmd.pitches[0] * mode_cmd.height;
365		size = ALIGN(size, PAGE_SIZE);
366
367		/* Allocate the framebuffer in the GTT with stolen page backing */
368		backing = psbfb_alloc(dev, size);
369		if (backing == NULL)
370			return -ENOMEM;
371	}
372
373	memset(dev_priv->vram_addr + backing->offset, 0, size);
374
375	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
376	if (IS_ERR(info)) {
377		ret = PTR_ERR(info);
378		goto out;
379	}
380
381	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
382
383	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
384	if (ret)
385		goto out;
386
387	fb = &psbfb->base;
388	psbfb->fbdev = info;
389
390	fbdev->psb_fb_helper.fb = fb;
391
392	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
393		info->fbops = &psbfb_ops;
394	else if (gtt_roll) {	/* GTT rolling seems best */
395		info->fbops = &psbfb_roll_ops;
396		info->flags |= FBINFO_HWACCEL_YPAN;
397	} else	/* Software */
398		info->fbops = &psbfb_unaccel_ops;
399
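	/*
	 * smem_start/smem_len describe the video memory aperture reported
	 * to fbdev userspace; ywrapstep ends up non-zero only when the
	 * power-of-two pitch allocation above succeeded.
	 */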
400	info->fix.smem_start = dev->mode_config.fb_base;
401	info->fix.smem_len = size;
402	info->fix.ywrapstep = gtt_roll;
403	info->fix.ypanstep = 0;
404
405	/* Accessed stolen memory directly */
406	info->screen_base = dev_priv->vram_addr + backing->offset;
407	info->screen_size = size;
408
409	if (dev_priv->gtt.stolen_size) {
410		info->apertures->ranges[0].base = dev->mode_config.fb_base;
411		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
412	}
413
414	drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
415
416	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
417	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
418
419	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
420
421	dev_dbg(dev->dev, "allocated %dx%d fb\n",
422					psbfb->base.width, psbfb->base.height);
423
424	return 0;
425out:
426	psb_gtt_free_range(dev, backing);
427	return ret;
428}
429
430/**
431 *	psb_user_framebuffer_create	-	create framebuffer
432 *	@dev: our DRM device
433 *	@filp: client file
434 *	@cmd: mode request
435 *
436 *	Create a new framebuffer backed by a userspace GEM object
437 */
438static struct drm_framebuffer *psb_user_framebuffer_create
439			(struct drm_device *dev, struct drm_file *filp,
440			 const struct drm_mode_fb_cmd2 *cmd)
441{
442	struct gtt_range *r;
443	struct drm_gem_object *obj;
444
445	/*
446	 *	Find the GEM object and thus the gtt range object that is
447	 *	to back this space
448	 */
449	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
450	if (obj == NULL)
451		return ERR_PTR(-ENOENT);
452
453	/* Let the core code do all the work */
454	r = container_of(obj, struct gtt_range, gem);
455	return psb_framebuffer_create(dev, cmd, r);
456}
457
458static int psbfb_probe(struct drm_fb_helper *helper,
459				struct drm_fb_helper_surface_size *sizes)
460{
461	struct psb_fbdev *psb_fbdev =
462		container_of(helper, struct psb_fbdev, psb_fb_helper);
463	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
464	struct drm_psb_private *dev_priv = dev->dev_private;
465	int bytespp;
466
467	bytespp = sizes->surface_bpp / 8;
468	if (bytespp == 3)	/* no 24bit packed */
469		bytespp = 4;
470
471	/* If the mode will not fit in 32bit then switch to 16bit to get
472	   a console on full resolution. The X mode setting server will
473	   allocate its own 32bit GEM framebuffer */
474	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
475	                dev_priv->vram_stolen_size) {
476		sizes->surface_bpp = 16;
477		sizes->surface_depth = 16;
478	}
479
480	return psbfb_create(psb_fbdev, sizes);
481}
482
483static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
484	.fb_probe = psbfb_probe,
485};
486
487static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
488{
489	struct psb_framebuffer *psbfb = &fbdev->pfb;
490
491	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
492
493	drm_fb_helper_fini(&fbdev->psb_fb_helper);
494	drm_framebuffer_unregister_private(&psbfb->base);
495	drm_framebuffer_cleanup(&psbfb->base);
496
497	if (psbfb->base.obj[0])
498		drm_gem_object_put_unlocked(psbfb->base.obj[0]);
499	return 0;
500}
501
502int psb_fbdev_init(struct drm_device *dev)
503{
504	struct psb_fbdev *fbdev;
505	struct drm_psb_private *dev_priv = dev->dev_private;
506	int ret;
507
508	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
509	if (!fbdev) {
510		dev_err(dev->dev, "no memory\n");
511		return -ENOMEM;
512	}
513
514	dev_priv->fbdev = fbdev;
515
516	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
517
518	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
519				 INTELFB_CONN_LIMIT);
520	if (ret)
521		goto free;
522
523	ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
524	if (ret)
525		goto fini;
526
527	/* disable all the possible outputs/crtcs before entering KMS mode */
528	drm_helper_disable_unused_functions(dev);
529
530	ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
531	if (ret)
532		goto fini;
533
534	return 0;
535
536fini:
537	drm_fb_helper_fini(&fbdev->psb_fb_helper);
538free:
539	kfree(fbdev);
540	return ret;
541}
542
543static void psb_fbdev_fini(struct drm_device *dev)
544{
545	struct drm_psb_private *dev_priv = dev->dev_private;
546
547	if (!dev_priv->fbdev)
548		return;
549
550	psb_fbdev_destroy(dev, dev_priv->fbdev);
551	kfree(dev_priv->fbdev);
552	dev_priv->fbdev = NULL;
553}
554
555static const struct drm_mode_config_funcs psb_mode_funcs = {
556	.fb_create = psb_user_framebuffer_create,
557	.output_poll_changed = drm_fb_helper_output_poll_changed,
558};
559
560static void psb_setup_outputs(struct drm_device *dev)
561{
562	struct drm_psb_private *dev_priv = dev->dev_private;
563	struct drm_connector *connector;
564
565	drm_mode_create_scaling_mode_property(dev);
566
567	/* It is ok for this to fail - we just don't get backlight control */
568	if (!dev_priv->backlight_property)
569		dev_priv->backlight_property = drm_property_create_range(dev, 0,
570							"backlight", 0, 100);
571	dev_priv->ops->output_init(dev);
572
573	list_for_each_entry(connector, &dev->mode_config.connector_list,
574			    head) {
575		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
576		struct drm_encoder *encoder = &gma_encoder->base;
577		int crtc_mask = 0, clone_mask = 0;
578
579		/* valid crtcs */
580		switch (gma_encoder->type) {
581		case INTEL_OUTPUT_ANALOG:
582			crtc_mask = (1 << 0);
583			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
584			break;
585		case INTEL_OUTPUT_SDVO:
586			crtc_mask = dev_priv->ops->sdvo_mask;
587			clone_mask = (1 << INTEL_OUTPUT_SDVO);
588			break;
589		case INTEL_OUTPUT_LVDS:
590			crtc_mask = dev_priv->ops->lvds_mask;
591			clone_mask = (1 << INTEL_OUTPUT_LVDS);
592			break;
593		case INTEL_OUTPUT_MIPI:
594			crtc_mask = (1 << 0);
595			clone_mask = (1 << INTEL_OUTPUT_MIPI);
596			break;
597		case INTEL_OUTPUT_MIPI2:
598			crtc_mask = (1 << 2);
599			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
600			break;
601		case INTEL_OUTPUT_HDMI:
602			crtc_mask = dev_priv->ops->hdmi_mask;
603			clone_mask = (1 << INTEL_OUTPUT_HDMI);
604			break;
605		case INTEL_OUTPUT_DISPLAYPORT:
606			crtc_mask = (1 << 0) | (1 << 1);
607			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
608			break;
609		case INTEL_OUTPUT_EDP:
610			crtc_mask = (1 << 1);
611			clone_mask = (1 << INTEL_OUTPUT_EDP);
612		}
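		/*
		 * possible_crtcs is a bitmask of the CRTC indices this
		 * encoder may drive; gma_connector_clones() appears to turn
		 * the clone_mask of output types into the matching bitmask
		 * of cloneable encoders.
		 */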
613		encoder->possible_crtcs = crtc_mask;
614		encoder->possible_clones =
615		    gma_connector_clones(dev, clone_mask);
616	}
617}
618
619void psb_modeset_init(struct drm_device *dev)
620{
621	struct drm_psb_private *dev_priv = dev->dev_private;
622	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
623	int i;
624
625	drm_mode_config_init(dev);
626
627	dev->mode_config.min_width = 0;
628	dev->mode_config.min_height = 0;
629
630	dev->mode_config.funcs = &psb_mode_funcs;
631
632	/* set memory base */
633	/* Oaktrail and Poulsbo should use BAR 2*/
634	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
635					&(dev->mode_config.fb_base));
636
637	/* num pipes is 2 for PSB but 1 for Mrst */
638	for (i = 0; i < dev_priv->num_pipe; i++)
639		psb_intel_crtc_init(dev, i, mode_dev);
640
641	dev->mode_config.max_width = 4096;
642	dev->mode_config.max_height = 4096;
643
644	psb_setup_outputs(dev);
645
646	if (dev_priv->ops->errata)
647		dev_priv->ops->errata(dev);
648
649	dev_priv->modeset = true;
650}
651
652void psb_modeset_cleanup(struct drm_device *dev)
653{
654	struct drm_psb_private *dev_priv = dev->dev_private;
655	if (dev_priv->modeset) {
656		drm_kms_helper_poll_fini(dev);
657		psb_fbdev_fini(dev);
658		drm_mode_config_cleanup(dev);
659	}
660}
v5.9: drivers/gpu/drm/gma500/framebuffer.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/**************************************************************************
  3 * Copyright (c) 2007-2011, Intel Corporation.
  4 * All Rights Reserved.
  5 *
  6 **************************************************************************/
  7
  8#include <linux/console.h>
  9#include <linux/delay.h>
 10#include <linux/errno.h>
 11#include <linux/init.h>
 12#include <linux/kernel.h>
 13#include <linux/mm.h>
 14#include <linux/module.h>
 15#include <linux/pfn_t.h>
 16#include <linux/slab.h>
 17#include <linux/string.h>
 18#include <linux/tty.h>
 19
 20#include <drm/drm.h>
 21#include <drm/drm_crtc.h>
 22#include <drm/drm_fb_helper.h>
 23#include <drm/drm_fourcc.h>
 24#include <drm/drm_gem_framebuffer_helper.h>
 25
 26#include "framebuffer.h"
 27#include "gtt.h"
 28#include "psb_drv.h"
 29#include "psb_intel_drv.h"
 30#include "psb_intel_reg.h"
 31
 32static const struct drm_framebuffer_funcs psb_fb_funcs = {
 33	.destroy = drm_gem_fb_destroy,
 34	.create_handle = drm_gem_fb_create_handle,
 35};
 36
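/*
 * CMAP_TOHW() rescales a 16-bit fbdev colour component into a hardware
 * field of _width bits, rounding so that 0xFFFF maps to the all-ones
 * value: e.g. CMAP_TOHW(0xFFFF, 5) == 0x1F and CMAP_TOHW(0, 5) == 0.
 */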
 37#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
 38
 39static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 40			   unsigned blue, unsigned transp,
 41			   struct fb_info *info)
 42{
 43	struct drm_fb_helper *fb_helper = info->par;
 44	struct drm_framebuffer *fb = fb_helper->fb;
 45	uint32_t v;
 46
 47	if (!fb)
 48		return -ENOMEM;
 49
 50	if (regno > 255)
 51		return 1;
 52
 53	red = CMAP_TOHW(red, info->var.red.length);
 54	blue = CMAP_TOHW(blue, info->var.blue.length);
 55	green = CMAP_TOHW(green, info->var.green.length);
 56	transp = CMAP_TOHW(transp, info->var.transp.length);
 57
 58	v = (red << info->var.red.offset) |
 59	    (green << info->var.green.offset) |
 60	    (blue << info->var.blue.offset) |
 61	    (transp << info->var.transp.offset);
 62
 63	if (regno < 16) {
 64		switch (fb->format->cpp[0] * 8) {
 65		case 16:
 66			((uint32_t *) info->pseudo_palette)[regno] = v;
 67			break;
 68		case 24:
 69		case 32:
 70			((uint32_t *) info->pseudo_palette)[regno] = v;
 71			break;
 72		}
 73	}
 74
 75	return 0;
 76}
 77
 78static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 79{
 80	struct drm_fb_helper *fb_helper = info->par;
 81	struct drm_framebuffer *fb = fb_helper->fb;
 82	struct drm_device *dev = fb->dev;
 83	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
 84
 85	/*
 86	 *	We have to poke our nose in here. The core fb code assumes
 87	 *	panning is part of the hardware that can be invoked before
 88	 *	the actual fb is mapped. In our case that isn't quite true.
 89	 */
 90	if (gtt->npage) {
 91		/* GTT roll shifts in 4K pages, we need to shift the right
 92		   number of pages */
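		/*
		 * line_length is the pitch in bytes, so >> 12 yields whole
		 * 4K pages per scanline (this assumes the accelerated pitch
		 * is a multiple of the page size); yoffset * pages is then
		 * the page offset handed to psb_gtt_roll().
		 */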
 93		int pages = info->fix.line_length >> 12;
 94		psb_gtt_roll(dev, gtt, var->yoffset * pages);
 95	}
 96	return 0;
 97}
 98
 99static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
100{
101	struct vm_area_struct *vma = vmf->vma;
102	struct drm_framebuffer *fb = vma->vm_private_data;
103	struct drm_device *dev = fb->dev;
104	struct drm_psb_private *dev_priv = dev->dev_private;
105	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
106	int page_num;
107	int i;
108	unsigned long address;
109	vm_fault_t ret = VM_FAULT_SIGBUS;
110	unsigned long pfn;
111	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
112				  gtt->offset;
113
114	page_num = vma_pages(vma);
115	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
116
117	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
118
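	/*
	 * Map the whole stolen-memory backing into the VMA on the first
	 * fault rather than page by page; later accesses hit the PTEs
	 * inserted here and do not fault again.
	 */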
119	for (i = 0; i < page_num; i++) {
120		pfn = (phys_addr >> PAGE_SHIFT);
121
122		ret = vmf_insert_mixed(vma, address,
123				__pfn_to_pfn_t(pfn, PFN_DEV));
124		if (unlikely(ret & VM_FAULT_ERROR))
125			break;
126		address += PAGE_SIZE;
127		phys_addr += PAGE_SIZE;
128	}
129	return ret;
130}
131
132static void psbfb_vm_open(struct vm_area_struct *vma)
133{
134}
135
136static void psbfb_vm_close(struct vm_area_struct *vma)
137{
138}
139
140static const struct vm_operations_struct psbfb_vm_ops = {
141	.fault	= psbfb_vm_fault,
142	.open	= psbfb_vm_open,
143	.close	= psbfb_vm_close
144};
145
146static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
147{
148	struct drm_fb_helper *fb_helper = info->par;
149	struct drm_framebuffer *fb = fb_helper->fb;
150
151	if (vma->vm_pgoff != 0)
152		return -EINVAL;
153	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
154		return -EINVAL;
155
156	/*
157	 * If this is a GEM object then info->screen_base is the virtual
158	 * kernel remapping of the object. FIXME: Review if this is
159	 * suitable for our mmap work
160	 */
161	vma->vm_ops = &psbfb_vm_ops;
162	vma->vm_private_data = (void *)fb;
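	/*
	 * VM_MIXEDMAP lets psbfb_vm_fault() insert raw pfn mappings with
	 * vmf_insert_mixed(); VM_IO, VM_DONTEXPAND and VM_DONTDUMP mark
	 * this as a device mapping that must not be expanded or core
	 * dumped.
	 */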
163	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
164	return 0;
165}
166
167static struct fb_ops psbfb_ops = {
168	.owner = THIS_MODULE,
169	DRM_FB_HELPER_DEFAULT_OPS,
170	.fb_setcolreg = psbfb_setcolreg,
171	.fb_fillrect = drm_fb_helper_cfb_fillrect,
172	.fb_copyarea = psbfb_copyarea,
173	.fb_imageblit = drm_fb_helper_cfb_imageblit,
174	.fb_mmap = psbfb_mmap,
175	.fb_sync = psbfb_sync,
176};
177
178static struct fb_ops psbfb_roll_ops = {
179	.owner = THIS_MODULE,
180	DRM_FB_HELPER_DEFAULT_OPS,
181	.fb_setcolreg = psbfb_setcolreg,
182	.fb_fillrect = drm_fb_helper_cfb_fillrect,
183	.fb_copyarea = drm_fb_helper_cfb_copyarea,
184	.fb_imageblit = drm_fb_helper_cfb_imageblit,
185	.fb_pan_display = psbfb_pan,
186	.fb_mmap = psbfb_mmap,
187};
188
189static struct fb_ops psbfb_unaccel_ops = {
190	.owner = THIS_MODULE,
191	DRM_FB_HELPER_DEFAULT_OPS,
192	.fb_setcolreg = psbfb_setcolreg,
193	.fb_fillrect = drm_fb_helper_cfb_fillrect,
194	.fb_copyarea = drm_fb_helper_cfb_copyarea,
195	.fb_imageblit = drm_fb_helper_cfb_imageblit,
196	.fb_mmap = psbfb_mmap,
197};
198
199/**
200 *	psb_framebuffer_init	-	initialize a framebuffer
201 *	@dev: our DRM device
202 *	@fb: framebuffer to set up
203 *	@mode_cmd: mode description
204 *	@obj: backing GEM object
205 *
206 *	Configure and fill in the boilerplate for our frame buffer. Return
207 *	0 on success or an error code if we fail.
208 */
209static int psb_framebuffer_init(struct drm_device *dev,
210					struct drm_framebuffer *fb,
211					const struct drm_mode_fb_cmd2 *mode_cmd,
212					struct drm_gem_object *obj)
213{
214	const struct drm_format_info *info;
215	int ret;
216
217	/*
218	 * Reject unknown formats, YUV formats, and formats with more than
219	 * 4 bytes per pixel.
220	 */
221	info = drm_get_format_info(dev, mode_cmd);
222	if (!info || !info->depth || info->cpp[0] > 4)
223		return -EINVAL;
224
225	if (mode_cmd->pitches[0] & 63)
226		return -EINVAL;
227
228	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
229	fb->obj[0] = obj;
230	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
231	if (ret) {
232		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
233		return ret;
234	}
235	return 0;
236}
237
238/**
239 *	psb_framebuffer_create	-	create a framebuffer backed by a GEM object
240 *	@dev: our DRM device
241 *	@mode_cmd: the description of the requested mode
242 *	@obj: the backing GEM object
243 *
244 *	Create a framebuffer object backed by the GEM object, and fill in the
245 *	boilerplate required
246 *
247 *	TODO: review object references
248 */
249
250static struct drm_framebuffer *psb_framebuffer_create
251			(struct drm_device *dev,
252			 const struct drm_mode_fb_cmd2 *mode_cmd,
253			 struct drm_gem_object *obj)
254{
255	struct drm_framebuffer *fb;
256	int ret;
257
258	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
259	if (!fb)
260		return ERR_PTR(-ENOMEM);
261
262	ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
263	if (ret) {
264		kfree(fb);
265		return ERR_PTR(ret);
266	}
267	return fb;
268}
269
270/**
271 *	psbfb_alloc		-	allocate frame buffer memory
272 *	@dev: the DRM device
273 *	@aligned_size: space needed
274 *
275 *	Allocate the frame buffer. In the usual case we get a GTT range that
276 *	is stolen memory backed and life is simple. If there isn't sufficient
277 *	we fail as we don't have the virtual mapping space to really vmap it
278 *	and the kernel console code can't handle non linear framebuffers.
279 *
280 *	Re-address this as and if the framebuffer layer grows this ability.
281 */
282static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
283{
284	struct gtt_range *backing;
285	/* Begin by trying to use stolen memory backing */
286	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
287	if (backing) {
288		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
289		return backing;
290	}
291	return NULL;
292}
293
294/**
295 *	psbfb_create		-	create a framebuffer
296 *	@fb_helper: the fb helper
297 *	@sizes: specification of the layout
298 *
299 *	Create a framebuffer to the specifications provided
300 */
301static int psbfb_create(struct drm_fb_helper *fb_helper,
302				struct drm_fb_helper_surface_size *sizes)
303{
304	struct drm_device *dev = fb_helper->dev;
305	struct drm_psb_private *dev_priv = dev->dev_private;
306	struct fb_info *info;
307	struct drm_framebuffer *fb;
308	struct drm_mode_fb_cmd2 mode_cmd;
309	int size;
310	int ret;
311	struct gtt_range *backing;
312	u32 bpp, depth;
313	int gtt_roll = 0;
314	int pitch_lines = 0;
315
316	mode_cmd.width = sizes->surface_width;
317	mode_cmd.height = sizes->surface_height;
318	bpp = sizes->surface_bpp;
319	depth = sizes->surface_depth;
320
321	/* No 24bit packed */
322	if (bpp == 24)
323		bpp = 32;
324
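	/*
	 * Retry the stolen-memory allocation with progressively smaller
	 * power-of-two pitch alignments (4096 >> pitch_lines) until it
	 * succeeds or pitch_lines exceeds 16; gtt_roll counts the attempts
	 * and is cleared below if we fall back to the unaccelerated path.
	 */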
325	do {
326		/*
327		 * Acceleration via the GTT requires pitch to be
328		 * power of two aligned. Preferably page but less
329		 * is ok with some fonts
330		 */
331		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
332
333		size = mode_cmd.pitches[0] * mode_cmd.height;
334		size = ALIGN(size, PAGE_SIZE);
335
336		/* Allocate the fb in the GTT with stolen page backing */
337		backing = psbfb_alloc(dev, size);
338
339		if (pitch_lines)
340			pitch_lines *= 2;
341		else
342			pitch_lines = 1;
343		gtt_roll++;
344	} while (backing == NULL && pitch_lines <= 16);
345
346	/* The final pitch we accepted if we succeeded */
347	pitch_lines /= 2;
348
349	if (backing == NULL) {
350		/*
351		 *	We couldn't get the space we wanted, fall back to the
352		 *	display engine requirement instead.  The HW requires
353		 *	the pitch to be 64 byte aligned
354		 */
355
356		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
357		pitch_lines = 64;
358
359		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
360
361		size = mode_cmd.pitches[0] * mode_cmd.height;
362		size = ALIGN(size, PAGE_SIZE);
363
364		/* Allocate the framebuffer in the GTT with stolen page backing */
365		backing = psbfb_alloc(dev, size);
366		if (backing == NULL)
367			return -ENOMEM;
368	}
369
370	memset(dev_priv->vram_addr + backing->offset, 0, size);
371
372	info = drm_fb_helper_alloc_fbi(fb_helper);
373	if (IS_ERR(info)) {
374		ret = PTR_ERR(info);
375		goto out;
376	}
377
378	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
379
380	fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
381	if (IS_ERR(fb)) {
382		ret = PTR_ERR(fb);
383		goto out;
384	}
385
386	fb_helper->fb = fb;
387
388	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
389		info->fbops = &psbfb_ops;
390	else if (gtt_roll) {	/* GTT rolling seems best */
391		info->fbops = &psbfb_roll_ops;
392		info->flags |= FBINFO_HWACCEL_YPAN;
393	} else	/* Software */
394		info->fbops = &psbfb_unaccel_ops;
395
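	/*
	 * smem_start/smem_len describe the video memory aperture reported
	 * to fbdev userspace; ywrapstep ends up non-zero only when the
	 * power-of-two pitch allocation above succeeded.
	 */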
396	info->fix.smem_start = dev->mode_config.fb_base;
397	info->fix.smem_len = size;
398	info->fix.ywrapstep = gtt_roll;
399	info->fix.ypanstep = 0;
400
401	/* Accessed stolen memory directly */
402	info->screen_base = dev_priv->vram_addr + backing->offset;
403	info->screen_size = size;
404
405	if (dev_priv->gtt.stolen_size) {
406		info->apertures->ranges[0].base = dev->mode_config.fb_base;
407		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
408	}
409
410	drm_fb_helper_fill_info(info, fb_helper, sizes);
411
412	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
413	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
414
415	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
416
417	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);
418
419	return 0;
420out:
421	psb_gtt_free_range(dev, backing);
422	return ret;
423}
424
425/**
426 *	psb_user_framebuffer_create	-	create framebuffer
427 *	@dev: our DRM device
428 *	@filp: client file
429 *	@cmd: mode request
430 *
431 *	Create a new framebuffer backed by a userspace GEM object
432 */
433static struct drm_framebuffer *psb_user_framebuffer_create
434			(struct drm_device *dev, struct drm_file *filp,
435			 const struct drm_mode_fb_cmd2 *cmd)
436{
437	struct drm_gem_object *obj;
438
439	/*
440	 *	Find the GEM object and thus the gtt range object that is
441	 *	to back this space
442	 */
443	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
444	if (obj == NULL)
445		return ERR_PTR(-ENOENT);
446
447	/* Let the core code do all the work */
448	return psb_framebuffer_create(dev, cmd, obj);
449}
450
451static int psbfb_probe(struct drm_fb_helper *fb_helper,
452				struct drm_fb_helper_surface_size *sizes)
453{
454	struct drm_device *dev = fb_helper->dev;
455	struct drm_psb_private *dev_priv = dev->dev_private;
456	unsigned int fb_size;
457	int bytespp;
458
459	bytespp = sizes->surface_bpp / 8;
460	if (bytespp == 3)	/* no 24bit packed */
461		bytespp = 4;
462
463	/* If the mode will not fit in 32bit then switch to 16bit to get
464	   a console on full resolution. The X mode setting server will
465	   allocate its own 32bit GEM framebuffer */
466	fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
467		  sizes->surface_height;
468	fb_size = ALIGN(fb_size, PAGE_SIZE);
469
470	if (fb_size > dev_priv->vram_stolen_size) {
471		sizes->surface_bpp = 16;
472		sizes->surface_depth = 16;
473	}
474
475	return psbfb_create(fb_helper, sizes);
476}
477
478static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
479	.fb_probe = psbfb_probe,
480};
481
482static int psb_fbdev_destroy(struct drm_device *dev,
483			     struct drm_fb_helper *fb_helper)
484{
485	struct drm_framebuffer *fb = fb_helper->fb;
486
487	drm_fb_helper_unregister_fbi(fb_helper);
488
489	drm_fb_helper_fini(fb_helper);
490	drm_framebuffer_unregister_private(fb);
491	drm_framebuffer_cleanup(fb);
492
493	if (fb->obj[0])
494		drm_gem_object_put(fb->obj[0]);
495	kfree(fb);
496
497	return 0;
498}
499
500int psb_fbdev_init(struct drm_device *dev)
501{
502	struct drm_fb_helper *fb_helper;
503	struct drm_psb_private *dev_priv = dev->dev_private;
504	int ret;
505
506	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
507	if (!fb_helper) {
508		dev_err(dev->dev, "no memory\n");
509		return -ENOMEM;
510	}
511
512	dev_priv->fb_helper = fb_helper;
513
514	drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
515
516	ret = drm_fb_helper_init(dev, fb_helper);
517	if (ret)
518		goto free;
519
520	/* disable all the possible outputs/crtcs before entering KMS mode */
521	drm_helper_disable_unused_functions(dev);
522
523	ret = drm_fb_helper_initial_config(fb_helper, 32);
524	if (ret)
525		goto fini;
526
527	return 0;
528
529fini:
530	drm_fb_helper_fini(fb_helper);
531free:
532	kfree(fb_helper);
533	return ret;
534}
535
536static void psb_fbdev_fini(struct drm_device *dev)
537{
538	struct drm_psb_private *dev_priv = dev->dev_private;
539
540	if (!dev_priv->fb_helper)
541		return;
542
543	psb_fbdev_destroy(dev, dev_priv->fb_helper);
544	kfree(dev_priv->fb_helper);
545	dev_priv->fb_helper = NULL;
546}
547
548static const struct drm_mode_config_funcs psb_mode_funcs = {
549	.fb_create = psb_user_framebuffer_create,
550	.output_poll_changed = drm_fb_helper_output_poll_changed,
551};
552
553static void psb_setup_outputs(struct drm_device *dev)
554{
555	struct drm_psb_private *dev_priv = dev->dev_private;
556	struct drm_connector *connector;
557
558	drm_mode_create_scaling_mode_property(dev);
559
560	/* It is ok for this to fail - we just don't get backlight control */
561	if (!dev_priv->backlight_property)
562		dev_priv->backlight_property = drm_property_create_range(dev, 0,
563							"backlight", 0, 100);
564	dev_priv->ops->output_init(dev);
565
566	list_for_each_entry(connector, &dev->mode_config.connector_list,
567			    head) {
568		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
569		struct drm_encoder *encoder = &gma_encoder->base;
570		int crtc_mask = 0, clone_mask = 0;
571
572		/* valid crtcs */
573		switch (gma_encoder->type) {
574		case INTEL_OUTPUT_ANALOG:
575			crtc_mask = (1 << 0);
576			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
577			break;
578		case INTEL_OUTPUT_SDVO:
579			crtc_mask = dev_priv->ops->sdvo_mask;
580			clone_mask = 0;
581			break;
582		case INTEL_OUTPUT_LVDS:
583			crtc_mask = dev_priv->ops->lvds_mask;
584			clone_mask = 0;
585			break;
586		case INTEL_OUTPUT_MIPI:
587			crtc_mask = (1 << 0);
588			clone_mask = 0;
589			break;
590		case INTEL_OUTPUT_MIPI2:
591			crtc_mask = (1 << 2);
592			clone_mask = 0;
593			break;
594		case INTEL_OUTPUT_HDMI:
595			crtc_mask = dev_priv->ops->hdmi_mask;
596			clone_mask = (1 << INTEL_OUTPUT_HDMI);
597			break;
598		case INTEL_OUTPUT_DISPLAYPORT:
599			crtc_mask = (1 << 0) | (1 << 1);
600			clone_mask = 0;
601			break;
602		case INTEL_OUTPUT_EDP:
603			crtc_mask = (1 << 1);
604			clone_mask = 0;
605		}
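		/*
		 * possible_crtcs is a bitmask of the CRTC indices this
		 * encoder may drive; gma_connector_clones() appears to turn
		 * the clone_mask of output types into the matching bitmask
		 * of cloneable encoders (most types now pass clone_mask == 0,
		 * i.e. no cloning).
		 */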
606		encoder->possible_crtcs = crtc_mask;
607		encoder->possible_clones =
608		    gma_connector_clones(dev, clone_mask);
609	}
610}
611
612void psb_modeset_init(struct drm_device *dev)
613{
614	struct drm_psb_private *dev_priv = dev->dev_private;
615	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
616	int i;
617
618	drm_mode_config_init(dev);
619
620	dev->mode_config.min_width = 0;
621	dev->mode_config.min_height = 0;
622
623	dev->mode_config.funcs = &psb_mode_funcs;
624
625	/* set memory base */
626	/* Oaktrail and Poulsbo should use BAR 2*/
627	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
628					&(dev->mode_config.fb_base));
629
630	/* num pipes is 2 for PSB but 1 for Mrst */
631	for (i = 0; i < dev_priv->num_pipe; i++)
632		psb_intel_crtc_init(dev, i, mode_dev);
633
634	dev->mode_config.max_width = 4096;
635	dev->mode_config.max_height = 4096;
636
637	psb_setup_outputs(dev);
638
639	if (dev_priv->ops->errata)
640		dev_priv->ops->errata(dev);
641
642	dev_priv->modeset = true;
643}
644
645void psb_modeset_cleanup(struct drm_device *dev)
646{
647	struct drm_psb_private *dev_priv = dev->dev_private;
648	if (dev_priv->modeset) {
649		drm_kms_helper_poll_fini(dev);
650		psb_fbdev_fini(dev);
651		drm_mode_config_cleanup(dev);
652	}
653}