/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/pfn_t.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>

#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
#include "framebuffer.h"
#include "gtt.h"

static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
					      struct drm_file *file_priv,
					      unsigned int *handle);

static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = psb_user_framebuffer_destroy,
	.create_handle = psb_user_framebuffer_create_handle,
};

#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
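/*
 * CMAP_TOHW() scales a 16-bit colour component down to a hardware field of
 * _width bits, rounding to the nearest value: for example CMAP_TOHW(0xffff, 5)
 * yields 0x1f for a 5-bit red or blue field.
 */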

static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct psb_fbdev *fbdev = info->par;
	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->bits_per_pixel) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct psb_fbdev *fbdev = info->par;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_device *dev = psbfb->base.dev;

	/*
	 * We have to poke our nose in here. The core fb code assumes
	 * panning is part of the hardware that can be invoked before
	 * the actual fb is mapped. In our case that isn't quite true.
	 */
	if (psbfb->gtt->npage) {
		/* GTT roll shifts in 4K pages, we need to shift the right
		   number of pages */
		int pages = info->fix.line_length >> 12;
		psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
	}
	return 0;
}

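/*
 * First-touch fault handler for the stolen memory framebuffer: rather than
 * faulting pages in one at a time, it walks the whole VMA and inserts a
 * mapping for every page of the backing stolen region.
 */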
static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct psb_framebuffer *psbfb = vma->vm_private_data;
	struct drm_device *dev = psbfb->base.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int page_num;
	int i;
	unsigned long address;
	int ret;
	unsigned long pfn;
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
				  psbfb->gtt->offset;

	page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vm_insert_mixed(vma, address,
				      __pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			return ret;
		}
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return VM_FAULT_NOPAGE;
}

static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct psbfb_vm_ops = {
	.fault = psbfb_vm_fault,
	.open = psbfb_vm_open,
	.close = psbfb_vm_close
};

static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct psb_fbdev *fbdev = info->par;
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	if (!psbfb->addr_space)
		psbfb->addr_space = vma->vm_file->f_mapping;
	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psbfb_vm_ops;
	vma->vm_private_data = (void *)psbfb;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
		       unsigned long arg)
{
	return -ENOTTY;
}

static struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
	.fb_ioctl = psbfb_ioctl,
};

static struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
	.fb_ioctl = psbfb_ioctl,
};

static struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_ioctl = psbfb_ioctl,
};
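
/*
 * Three fb_ops variants are provided: psbfb_ops when the 2D engine can be
 * used, psbfb_roll_ops when scrolling is done by rolling the GTT mapping
 * (hence the fb_pan_display hook), and psbfb_unaccel_ops as the plain
 * software fallback. psbfb_create() below picks one of them.
 */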

/**
 * psb_framebuffer_init - initialize a framebuffer
 * @dev: our DRM device
 * @fb: framebuffer to set up
 * @mode_cmd: mode description
 * @gt: backing object
 *
 * Configure and fill in the boilerplate for our frame buffer. Return
 * 0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
				struct psb_framebuffer *fb,
				const struct drm_mode_fb_cmd2 *mode_cmd,
				struct gtt_range *gt)
{
	u32 bpp, depth;
	int ret;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;
	switch (bpp) {
	case 8:
	case 16:
	case 24:
	case 32:
		break;
	default:
		return -EINVAL;
	}
	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
	fb->gtt = gt;
	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 * psb_framebuffer_create - create a framebuffer backed by gt
 * @dev: our DRM device
 * @mode_cmd: the description of the requested mode
 * @gt: the backing object
 *
 * Create a framebuffer object backed by the gt, and fill in the
 * boilerplate required
 *
 * TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct gtt_range *gt)
{
	struct psb_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}
	return &fb->base;
}

/**
 * psbfb_alloc - allocate frame buffer memory
 * @dev: the DRM device
 * @aligned_size: space needed
 *
 * Allocate the frame buffer. In the usual case we get a GTT range that
 * is stolen memory backed and life is simple. If there isn't sufficient
 * stolen memory we fail, as we don't have the virtual mapping space to
 * really vmap it and the kernel console code can't handle non-linear
 * framebuffers.
 *
 * Re-address this as and if the framebuffer layer grows this ability.
 */
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
	struct gtt_range *backing;
	/* Begin by trying to use stolen memory backing */
	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
	if (backing) {
		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
		return backing;
	}
	return NULL;
}

/**
 * psbfb_create - create a framebuffer
 * @fbdev: the framebuffer device
 * @sizes: specification of the layout
 *
 * Create a framebuffer to the specifications provided
 */
static int psbfb_create(struct psb_fbdev *fbdev,
			struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fbdev->psb_fb_helper.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct gtt_range *backing;
	u32 bpp, depth;
	int gtt_roll = 0;
	int pitch_lines = 0;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

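	/*
	 * Search for a pitch the accelerated paths can use: the first pass
	 * asks for a page (4096 byte) aligned pitch and each failed
	 * allocation retries with a smaller power-of-two alignment
	 * (4096 >> pitch_lines). A 1024 pixel wide 32bpp console, for
	 * example, fits the ideal 4096 byte pitch on the first pass.
	 */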
	do {
		/*
		 * Acceleration via the GTT requires pitch to be
		 * power of two aligned. Preferably page but less
		 * is ok with some fonts
		 */
		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the fb in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);

		if (pitch_lines)
			pitch_lines *= 2;
		else
			pitch_lines = 1;
		gtt_roll++;
	} while (backing == NULL && pitch_lines <= 16);

	/* The final pitch we accepted if we succeeded */
	pitch_lines /= 2;

	if (backing == NULL) {
		/*
		 * We couldn't get the space we wanted, fall back to the
		 * display engine requirement instead. The HW requires
		 * the pitch to be 64 byte aligned
		 */

		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
		pitch_lines = 64;

		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the framebuffer in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);
		if (backing == NULL)
			return -ENOMEM;
	}

	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out_err1;
	}
	info->par = fbdev;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
	if (ret)
		goto out_unref;

	fb = &psbfb->base;
	psbfb->fbdev = info;

	fbdev->psb_fb_helper.fb = fb;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	strcpy(info->fix.id, "psbdrmfb");

	info->flags = FBINFO_DEFAULT;
	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
		info->fbops = &psbfb_ops;
	else if (gtt_roll) {	/* GTT rolling seems best */
		info->fbops = &psbfb_roll_ops;
		info->flags |= FBINFO_HWACCEL_YPAN;
	} else	/* Software */
		info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev->mode_config.fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = gtt_roll;
	info->fix.ypanstep = 0;

	/* Accessed stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev->mode_config.fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
			       sizes->fb_width, sizes->fb_height);

	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n",
		psbfb->base.width, psbfb->base.height);

	return 0;
out_unref:
	if (backing->stolen)
		psb_gtt_free_range(dev, backing);
	else
		drm_gem_object_unreference_unlocked(&backing->gem);

	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
	/* backing has already been released above; don't fall through into
	   out_err1 and free it a second time */
	return ret;
out_err1:
	psb_gtt_free_range(dev, backing);
	return ret;
}

/**
 * psb_user_framebuffer_create - create framebuffer
 * @dev: our DRM device
 * @filp: client file
 * @cmd: mode request
 *
 * Create a new framebuffer backed by a userspace GEM object
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct gtt_range *r;
	struct drm_gem_object *obj;

	/*
	 * Find the GEM object and thus the gtt range object that is
	 * to back this space
	 */
	obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	/* Let the core code do all the work */
	r = container_of(obj, struct gtt_range, gem);
	return psb_framebuffer_create(dev, cmd, r);
}

static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			    u16 blue, int regno)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	gma_crtc->lut_r[regno] = red >> 8;
	gma_crtc->lut_g[regno] = green >> 8;
	gma_crtc->lut_b[regno] = blue >> 8;
}

static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
			    u16 *green, u16 *blue, int regno)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	*red = gma_crtc->lut_r[regno] << 8;
	*green = gma_crtc->lut_g[regno] << 8;
	*blue = gma_crtc->lut_b[regno] << 8;
}

static int psbfb_probe(struct drm_fb_helper *helper,
		       struct drm_fb_helper_surface_size *sizes)
{
	struct psb_fbdev *psb_fbdev =
		container_of(helper, struct psb_fbdev, psb_fb_helper);
	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int bytespp;

	bytespp = sizes->surface_bpp / 8;
	if (bytespp == 3)	/* no 24bit packed */
		bytespp = 4;

	/* If the mode will not fit in 32bit then switch to 16bit to get
	   a console on full resolution. The X mode setting server will
	   allocate its own 32bit GEM framebuffer */
	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
	    dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}

	return psbfb_create(psb_fbdev, sizes);
}

static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.gamma_set = psbfb_gamma_set,
	.gamma_get = psbfb_gamma_get,
	.fb_probe = psbfb_probe,
};

static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);
	drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);

	drm_fb_helper_fini(&fbdev->psb_fb_helper);
	drm_framebuffer_unregister_private(&psbfb->base);
	drm_framebuffer_cleanup(&psbfb->base);

	if (psbfb->gtt)
		drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
	return 0;
}

int psb_fbdev_init(struct drm_device *dev)
{
	struct psb_fbdev *fbdev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
	if (!fbdev) {
		dev_err(dev->dev, "no memory\n");
		return -ENOMEM;
	}

	dev_priv->fbdev = fbdev;

	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
				 dev_priv->ops->crtcs, INTELFB_CONN_LIMIT);
	if (ret)
		goto free;

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
	if (ret)
		goto fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(&fbdev->psb_fb_helper);
free:
	kfree(fbdev);
	return ret;
}

static void psb_fbdev_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->fbdev)
		return;

	psb_fbdev_destroy(dev, dev_priv->fbdev);
	kfree(dev_priv->fbdev);
	dev_priv->fbdev = NULL;
}

static void psbfb_output_poll_changed(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;

	drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
}

/**
 * psb_user_framebuffer_create_handle - add handle to a framebuffer
 * @fb: framebuffer
 * @file_priv: our DRM file
 * @handle: returned handle
 *
 * Our framebuffer object is a GTT range which also contains a GEM
 * object. We need to turn it into a handle for userspace. GEM will do
 * the work for us
 */
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
					      struct drm_file *file_priv,
					      unsigned int *handle)
{
	struct psb_framebuffer *psbfb = to_psb_fb(fb);
	struct gtt_range *r = psbfb->gtt;

	return drm_gem_handle_create(file_priv, &r->gem, handle);
}

/**
 * psb_user_framebuffer_destroy - destruct user created fb
 * @fb: framebuffer
 *
 * User framebuffers are backed by GEM objects so all we have to do is
 * clean up a bit and drop the reference, GEM will handle the fallout
 */
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct psb_framebuffer *psbfb = to_psb_fb(fb);
	struct gtt_range *r = psbfb->gtt;

	/* Let DRM do its clean up */
	drm_framebuffer_cleanup(fb);
	/* We are no longer using the resource in GEM */
	drm_gem_object_unreference_unlocked(&r->gem);
	kfree(fb);
}

static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = psbfb_output_poll_changed,
};

static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = (1 << INTEL_OUTPUT_SDVO);
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = (1 << INTEL_OUTPUT_LVDS);
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_MIPI);
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_EDP);
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
		    gma_connector_clones(dev, clone_mask);
	}
}

void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2 */
	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
					&(dev->mode_config.fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}

void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
		psb_fbdev_fini(dev);
		drm_mode_config_cleanup(dev);
	}
}
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>

#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "framebuffer.h"
#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
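/*
 * CMAP_TOHW() scales a 16-bit colour component down to a hardware field of
 * _width bits, rounding to the nearest value: for example CMAP_TOHW(0xffff, 5)
 * yields 0x1f for a 5-bit red or blue field.
 */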

static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	struct drm_device *dev = fb->dev;
	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);

	/*
	 * We have to poke our nose in here. The core fb code assumes
	 * panning is part of the hardware that can be invoked before
	 * the actual fb is mapped. In our case that isn't quite true.
	 */
	if (gtt->npage) {
		/* GTT roll shifts in 4K pages, we need to shift the right
		   number of pages */
		int pages = info->fix.line_length >> 12;

		psb_gtt_roll(dev, gtt, var->yoffset * pages);
	}
	return 0;
}

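/*
 * Fault handler for the stolen memory framebuffer: on the first fault it
 * maps every page of the VMA to the stolen region backing the framebuffer
 * instead of inserting a single page at a time.
 */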
static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_framebuffer *fb = vma->vm_private_data;
	struct drm_device *dev = fb->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
	int page_num;
	int i;
	unsigned long address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long pfn;
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
				  gtt->offset;

	page_num = vma_pages(vma);
	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vmf_insert_mixed(vma, address,
				       __pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(ret & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return ret;
}

static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct psbfb_vm_ops = {
	.fault = psbfb_vm_fault,
	.open = psbfb_vm_open,
	.close = psbfb_vm_close
};

static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psbfb_vm_ops;
	vma->vm_private_data = (void *)fb;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
};

static struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
};

static struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};
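
/*
 * Three fb_ops variants: psbfb_ops when the 2D engine is usable,
 * psbfb_roll_ops when scrolling is done by rolling the GTT mapping (hence
 * the fb_pan_display hook), and psbfb_unaccel_ops as the unaccelerated
 * fallback. psbfb_create() below selects one of them.
 */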

/**
 * psb_framebuffer_init - initialize a framebuffer
 * @dev: our DRM device
 * @fb: framebuffer to set up
 * @mode_cmd: mode description
 * @obj: backing GEM object
 *
 * Configure and fill in the boilerplate for our frame buffer. Return
 * 0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
				struct drm_framebuffer *fb,
				const struct drm_mode_fb_cmd2 *mode_cmd,
				struct drm_gem_object *obj)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_get_format_info(dev, mode_cmd);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = obj;
	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 * psb_framebuffer_create - create a framebuffer backed by a GEM object
 * @dev: our DRM device
 * @mode_cmd: the description of the requested mode
 * @obj: the backing GEM object
 *
 * Create a framebuffer object backed by the GEM object, and fill in the
 * boilerplate required
 *
 * TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}
	return fb;
}

/**
 * psbfb_alloc - allocate frame buffer memory
 * @dev: the DRM device
 * @aligned_size: space needed
 *
 * Allocate the frame buffer. In the usual case we get a GTT range that
 * is stolen memory backed and life is simple. If there isn't sufficient
 * we fail as we don't have the virtual mapping space to really vmap it
 * and the kernel console code can't handle non linear framebuffers.
 *
 * Re-address this as and if the framebuffer layer grows this ability.
 */
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
	struct gtt_range *backing;
	/* Begin by trying to use stolen memory backing */
	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
	if (backing) {
		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
		return backing;
	}
	return NULL;
}

/**
 * psbfb_create - create a framebuffer
 * @fb_helper: the fbdev helper
 * @sizes: specification of the layout
 *
 * Create a framebuffer to the specifications provided
 */
static int psbfb_create(struct drm_fb_helper *fb_helper,
			struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct gtt_range *backing;
	u32 bpp, depth;
	int gtt_roll = 0;
	int pitch_lines = 0;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

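	/*
	 * Search for a pitch the accelerated paths can use: the first pass
	 * asks for a page (4096 byte) aligned pitch and each failed
	 * allocation retries with a smaller power-of-two alignment
	 * (4096 >> pitch_lines). A 1024 pixel wide 32bpp console, for
	 * example, fits the ideal 4096 byte pitch on the first pass.
	 */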
	do {
		/*
		 * Acceleration via the GTT requires pitch to be
		 * power of two aligned. Preferably page but less
		 * is ok with some fonts
		 */
		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the fb in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);

		if (pitch_lines)
			pitch_lines *= 2;
		else
			pitch_lines = 1;
		gtt_roll++;
	} while (backing == NULL && pitch_lines <= 16);

	/* The final pitch we accepted if we succeeded */
	pitch_lines /= 2;

	if (backing == NULL) {
		/*
		 * We couldn't get the space we wanted, fall back to the
		 * display engine requirement instead. The HW requires
		 * the pitch to be 64 byte aligned
		 */

		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
		pitch_lines = 64;

		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the framebuffer in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);
		if (backing == NULL)
			return -ENOMEM;
	}

	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_fbi(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto out;
	}

	fb_helper->fb = fb;

	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
		info->fbops = &psbfb_ops;
	else if (gtt_roll) {	/* GTT rolling seems best */
		info->fbops = &psbfb_roll_ops;
		info->flags |= FBINFO_HWACCEL_YPAN;
	} else	/* Software */
		info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev->mode_config.fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = gtt_roll;
	info->fix.ypanstep = 0;

	/* Accessed stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev->mode_config.fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);

	return 0;
out:
	psb_gtt_free_range(dev, backing);
	return ret;
}

/**
 * psb_user_framebuffer_create - create framebuffer
 * @dev: our DRM device
 * @filp: client file
 * @cmd: mode request
 *
 * Create a new framebuffer backed by a userspace GEM object
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct drm_gem_object *obj;

	/*
	 * Find the GEM object and thus the gtt range object that is
	 * to back this space
	 */
	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	/* Let the core code do all the work */
	return psb_framebuffer_create(dev, cmd, obj);
}

static int psbfb_probe(struct drm_fb_helper *fb_helper,
		       struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned int fb_size;
	int bytespp;

	bytespp = sizes->surface_bpp / 8;
	if (bytespp == 3)	/* no 24bit packed */
		bytespp = 4;

	/* If the mode will not fit in 32bit then switch to 16bit to get
	   a console on full resolution. The X mode setting server will
	   allocate its own 32bit GEM framebuffer */
	fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
		  sizes->surface_height;
	fb_size = ALIGN(fb_size, PAGE_SIZE);

	if (fb_size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}

	return psbfb_create(fb_helper, sizes);
}

static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};

static int psb_fbdev_destroy(struct drm_device *dev,
			     struct drm_fb_helper *fb_helper)
{
	struct drm_framebuffer *fb = fb_helper->fb;

	drm_fb_helper_unregister_fbi(fb_helper);

	drm_fb_helper_fini(fb_helper);
	drm_framebuffer_unregister_private(fb);
	drm_framebuffer_cleanup(fb);

	if (fb->obj[0])
		drm_gem_object_put(fb->obj[0]);
	kfree(fb);

	return 0;
}

int psb_fbdev_init(struct drm_device *dev)
{
	struct drm_fb_helper *fb_helper;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper) {
		dev_err(dev->dev, "no memory\n");
		return -ENOMEM;
	}

	dev_priv->fb_helper = fb_helper;

	drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto free;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(fb_helper);
free:
	kfree(fb_helper);
	return ret;
}

static void psb_fbdev_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->fb_helper)
		return;

	psb_fbdev_destroy(dev, dev_priv->fb_helper);
	kfree(dev_priv->fb_helper);
	dev_priv->fb_helper = NULL;
}

static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = 0;
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
		    gma_connector_clones(dev, clone_mask);
	}
}

void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2 */
	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
					&(dev->mode_config.fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}

void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
		psb_fbdev_fini(dev);
		drm_mode_config_cleanup(dev);
	}
}