// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>

#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "framebuffer.h"
#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

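/*
 * Scale a 16-bit fbdev colour component down to a hardware field that is
 * _width bits wide, rounding to the nearest representable value.
 */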
#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct psb_fbdev *fbdev = info->par;
	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct psb_fbdev *fbdev = info->par;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_device *dev = psbfb->base.dev;
	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);

	/*
	 * We have to poke our nose in here. The core fb code assumes
	 * panning is part of the hardware that can be invoked before
	 * the actual fb is mapped. In our case that isn't quite true.
	 */
	if (gtt->npage) {
		/* GTT roll shifts in 4K pages, we need to shift the right
		   number of pages */
		int pages = info->fix.line_length >> 12;
		psb_gtt_roll(dev, gtt, var->yoffset * pages);
	}
	return 0;
}

static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct psb_framebuffer *psbfb = vma->vm_private_data;
	struct drm_device *dev = psbfb->base.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
	int page_num;
	int i;
	unsigned long address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long pfn;
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
				  gtt->offset;

	page_num = vma_pages(vma);
	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

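	/*
	 * Fault in the whole framebuffer in one go: the object is contiguous
	 * in stolen memory, so each successive page sits at the next physical
	 * address after stolen_base + gtt->offset.
	 */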
	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vmf_insert_mixed(vma, address,
				       __pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(ret & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return ret;
}

static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct psbfb_vm_ops = {
	.fault = psbfb_vm_fault,
	.open = psbfb_vm_open,
	.close = psbfb_vm_close
};

static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct psb_fbdev *fbdev = info->par;
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	if (!psbfb->addr_space)
		psbfb->addr_space = vma->vm_file->f_mapping;
	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
	vma->vm_ops = &psbfb_vm_ops;
	vma->vm_private_data = (void *)psbfb;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

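/*
 * Three fb_ops variants: 2D-engine accelerated, GTT-roll panning and plain
 * software rendering. psbfb_create() picks one depending on whether the 2D
 * engine is present and on the pitch alignment that was achieved.
 */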
static struct fb_ops psbfb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = psbfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
	.fb_sync = psbfb_sync,
};

static struct fb_ops psbfb_roll_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = psbfb_pan,
	.fb_mmap = psbfb_mmap,
};

static struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};

/**
 * psb_framebuffer_init - initialize a framebuffer
 * @dev: our DRM device
 * @fb: framebuffer to set up
 * @mode_cmd: mode description
 * @gt: backing object
 *
 * Configure and fill in the boilerplate for our frame buffer. Return
 * 0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
				struct psb_framebuffer *fb,
				const struct drm_mode_fb_cmd2 *mode_cmd,
				struct gtt_range *gt)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_get_format_info(dev, mode_cmd);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
	fb->base.obj[0] = &gt->gem;
	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 * psb_framebuffer_create - create a framebuffer backed by gt
 * @dev: our DRM device
 * @mode_cmd: the description of the requested mode
 * @gt: the backing object
 *
 * Create a framebuffer object backed by the gt, and fill in the
 * boilerplate required
 *
 * TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct gtt_range *gt)
{
	struct psb_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}
	return &fb->base;
}

/**
 * psbfb_alloc - allocate frame buffer memory
 * @dev: the DRM device
 * @aligned_size: space needed
 *
 * Allocate the frame buffer. In the usual case we get a GTT range that
 * is stolen memory backed and life is simple. If there isn't sufficient
 * stolen memory we fail, as we don't have the virtual mapping space to
 * really vmap it and the kernel console code can't handle non-linear
 * framebuffers.
 *
 * Re-address this if and when the framebuffer layer grows this ability.
 */
static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
	struct gtt_range *backing;
	/* Begin by trying to use stolen memory backing */
	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
	if (backing) {
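		/* Stolen memory has no shmem backing, so wrap the range in a
		   private GEM object */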
		drm_gem_private_object_init(dev, &backing->gem, aligned_size);
		return backing;
	}
	return NULL;
}

/**
 * psbfb_create - create a framebuffer
 * @fbdev: the framebuffer device
 * @sizes: specification of the layout
 *
 * Create a framebuffer to the specifications provided
 */
static int psbfb_create(struct psb_fbdev *fbdev,
			struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fbdev->psb_fb_helper.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct psb_framebuffer *psbfb = &fbdev->pfb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct gtt_range *backing;
	u32 bpp, depth;
	int gtt_roll = 0;
	int pitch_lines = 0;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

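	/*
	 * Try progressively relaxed pitch alignments (4096 >> pitch_lines
	 * bytes per pass) until an allocation from stolen memory succeeds
	 * or the retries are exhausted.
	 */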
	do {
		/*
		 * Acceleration via the GTT requires pitch to be
		 * power of two aligned. Preferably page but less
		 * is ok with some fonts
		 */
		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the fb in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);

		if (pitch_lines)
			pitch_lines *= 2;
		else
			pitch_lines = 1;
		gtt_roll++;
	} while (backing == NULL && pitch_lines <= 16);

	/* The final pitch we accepted if we succeeded */
	pitch_lines /= 2;

	if (backing == NULL) {
		/*
		 * We couldn't get the space we wanted, fall back to the
		 * display engine requirement instead. The HW requires
		 * the pitch to be 64 byte aligned
		 */

		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
		pitch_lines = 64;

		mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);

		size = mode_cmd.pitches[0] * mode_cmd.height;
		size = ALIGN(size, PAGE_SIZE);

		/* Allocate the framebuffer in the GTT with stolen page backing */
		backing = psbfb_alloc(dev, size);
		if (backing == NULL)
			return -ENOMEM;
	}

	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
	if (ret)
		goto out;

	fb = &psbfb->base;
	psbfb->fbdev = info;

	fbdev->psb_fb_helper.fb = fb;

	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
		info->fbops = &psbfb_ops;
	else if (gtt_roll) {	/* GTT rolling seems best */
		info->fbops = &psbfb_roll_ops;
		info->flags |= FBINFO_HWACCEL_YPAN;
	} else	/* Software */
		info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev->mode_config.fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = gtt_roll;
	info->fix.ypanstep = 0;

	/* Access stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev->mode_config.fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);

	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n",
		psbfb->base.width, psbfb->base.height);

	return 0;
out:
	psb_gtt_free_range(dev, backing);
	return ret;
}

/**
 * psb_user_framebuffer_create - create framebuffer
 * @dev: our DRM device
 * @filp: client file
 * @cmd: mode request
 *
 * Create a new framebuffer backed by a userspace GEM object
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct gtt_range *r;
	struct drm_gem_object *obj;

	/*
	 * Find the GEM object and thus the gtt range object that is
	 * to back this space
	 */
	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	/* Let the core code do all the work */
	r = container_of(obj, struct gtt_range, gem);
	return psb_framebuffer_create(dev, cmd, r);
}

static int psbfb_probe(struct drm_fb_helper *helper,
		       struct drm_fb_helper_surface_size *sizes)
{
	struct psb_fbdev *psb_fbdev =
		container_of(helper, struct psb_fbdev, psb_fb_helper);
	struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int bytespp;

	bytespp = sizes->surface_bpp / 8;
	if (bytespp == 3)	/* no 24bit packed */
		bytespp = 4;

	/* If the mode will not fit in 32bit then switch to 16bit to get
	   a console at full resolution. The X mode setting server will
	   allocate its own 32bit GEM framebuffer */
	if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
	    dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}

	return psbfb_create(psb_fbdev, sizes);
}

static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};

static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
{
	struct psb_framebuffer *psbfb = &fbdev->pfb;

	drm_fb_helper_unregister_fbi(&fbdev->psb_fb_helper);

	drm_fb_helper_fini(&fbdev->psb_fb_helper);
	drm_framebuffer_unregister_private(&psbfb->base);
	drm_framebuffer_cleanup(&psbfb->base);

	if (psbfb->base.obj[0])
		drm_gem_object_put_unlocked(psbfb->base.obj[0]);
	return 0;
}

int psb_fbdev_init(struct drm_device *dev)
{
	struct psb_fbdev *fbdev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
	if (!fbdev) {
		dev_err(dev->dev, "no memory\n");
		return -ENOMEM;
	}

	dev_priv->fbdev = fbdev;

	drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, &fbdev->psb_fb_helper,
				 INTELFB_CONN_LIMIT);
	if (ret)
		goto free;

	ret = drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
	if (ret)
		goto fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(&fbdev->psb_fb_helper);
free:
	kfree(fbdev);
	return ret;
}

static void psb_fbdev_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (!dev_priv->fbdev)
		return;

	psb_fbdev_destroy(dev, dev_priv->fbdev);
	kfree(dev_priv->fbdev);
	dev_priv->fbdev = NULL;
}

static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

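	/*
	 * For each encoder work out which CRTCs it may be driven by
	 * (possible_crtcs is a bitmask of CRTC indexes) and which other
	 * encoder types it may share a CRTC with (possible_clones).
	 */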
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = (1 << INTEL_OUTPUT_SDVO);
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = (1 << INTEL_OUTPUT_LVDS);
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_MIPI);
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = (1 << INTEL_OUTPUT_EDP);
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
			gma_connector_clones(dev, clone_mask);
	}
}

void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	int i;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2 */
	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
					&(dev->mode_config.fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}

void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
		psb_fbdev_fini(dev);
		drm_mode_config_cleanup(dev);
	}
}
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
 * Copyright (c) 2007-2011, Intel Corporation.
 * All Rights Reserved.
 *
 **************************************************************************/

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>

#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "framebuffer.h"
#include "gem.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

static const struct drm_framebuffer_funcs psb_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	uint32_t v;

	if (!fb)
		return -ENOMEM;

	if (regno > 255)
		return 1;

	red = CMAP_TOHW(red, info->var.red.length);
	blue = CMAP_TOHW(blue, info->var.blue.length);
	green = CMAP_TOHW(green, info->var.green.length);
	transp = CMAP_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	if (regno < 16) {
		switch (fb->format->cpp[0] * 8) {
		case 16:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		case 24:
		case 32:
			((uint32_t *) info->pseudo_palette)[regno] = v;
			break;
		}
	}

	return 0;
}

static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_framebuffer *fb = vma->vm_private_data;
	struct drm_device *dev = fb->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]);
	int page_num;
	int i;
	unsigned long address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long pfn;
	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset;

	page_num = vma_pages(vma);
	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	for (i = 0; i < page_num; i++) {
		pfn = (phys_addr >> PAGE_SHIFT);

		ret = vmf_insert_mixed(vma, address,
				       __pfn_to_pfn_t(pfn, PFN_DEV));
		if (unlikely(ret & VM_FAULT_ERROR))
			break;
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
	}
	return ret;
}

static void psbfb_vm_open(struct vm_area_struct *vma)
{
}

static void psbfb_vm_close(struct vm_area_struct *vma)
{
}

static const struct vm_operations_struct psbfb_vm_ops = {
	.fault = psbfb_vm_fault,
	.open = psbfb_vm_open,
	.close = psbfb_vm_close
};

static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;

	if (vma->vm_pgoff != 0)
		return -EINVAL;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;

	/*
	 * If this is a GEM object then info->screen_base is the virtual
	 * kernel remapping of the object. FIXME: Review if this is
	 * suitable for our mmap work
	 */
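	/*
	 * No pages are inserted here; psbfb_vm_fault() maps the stolen
	 * memory pages into the VMA on first access.
	 */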
	vma->vm_ops = &psbfb_vm_ops;
	vma->vm_private_data = (void *)fb;
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct fb_ops psbfb_unaccel_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_setcolreg = psbfb_setcolreg,
	.fb_read = drm_fb_helper_cfb_read,
	.fb_write = drm_fb_helper_cfb_write,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_mmap = psbfb_mmap,
};

/**
 * psb_framebuffer_init - initialize a framebuffer
 * @dev: our DRM device
 * @fb: framebuffer to set up
 * @mode_cmd: mode description
 * @obj: backing object
 *
 * Configure and fill in the boilerplate for our frame buffer. Return
 * 0 on success or an error code if we fail.
 */
static int psb_framebuffer_init(struct drm_device *dev,
				struct drm_framebuffer *fb,
				const struct drm_mode_fb_cmd2 *mode_cmd,
				struct drm_gem_object *obj)
{
	const struct drm_format_info *info;
	int ret;

	/*
	 * Reject unknown formats, YUV formats, and formats with more than
	 * 4 bytes per pixel.
	 */
	info = drm_get_format_info(dev, mode_cmd);
	if (!info || !info->depth || info->cpp[0] > 4)
		return -EINVAL;

	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = obj;
	ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs);
	if (ret) {
		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
		return ret;
	}
	return 0;
}

/**
 * psb_framebuffer_create - create a framebuffer backed by a GEM object
 * @dev: our DRM device
 * @mode_cmd: the description of the requested mode
 * @obj: the backing object
 *
 * Create a framebuffer object backed by @obj, and fill in the
 * boilerplate required
 *
 * TODO: review object references
 */

static struct drm_framebuffer *psb_framebuffer_create
			(struct drm_device *dev,
			 const struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = psb_framebuffer_init(dev, fb, mode_cmd, obj);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}
	return fb;
}

/**
 * psbfb_create - create a framebuffer
 * @fb_helper: the framebuffer helper
 * @sizes: specification of the layout
 *
 * Create a framebuffer to the specifications provided
 */
static int psbfb_create(struct drm_fb_helper *fb_helper,
			struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	int size;
	int ret;
	struct psb_gem_object *backing;
	struct drm_gem_object *obj;
	u32 bpp, depth;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	bpp = sizes->surface_bpp;
	depth = sizes->surface_depth;

	/* No 24bit packed */
	if (bpp == 24)
		bpp = 32;

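	/* The display engine requires the pitch to be 64-byte aligned */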
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	/* Allocate the framebuffer in the GTT with stolen page backing */
	backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE);
	if (IS_ERR(backing))
		return PTR_ERR(backing);
	obj = &backing->base;

	memset(dev_priv->vram_addr + backing->offset, 0, size);

	info = drm_fb_helper_alloc_info(fb_helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_drm_gem_object_put;
	}

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = psb_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto err_drm_gem_object_put;
	}

	fb_helper->fb = fb;

	info->fbops = &psbfb_unaccel_ops;

	info->fix.smem_start = dev_priv->fb_base;
	info->fix.smem_len = size;
	info->fix.ywrapstep = 0;
	info->fix.ypanstep = 0;

	/* Access stolen memory directly */
	info->screen_base = dev_priv->vram_addr + backing->offset;
	info->screen_size = size;

	if (dev_priv->gtt.stolen_size) {
		info->apertures->ranges[0].base = dev_priv->fb_base;
		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
	}

	drm_fb_helper_fill_info(info, fb_helper, sizes);

	info->fix.mmio_start = pci_resource_start(pdev, 0);
	info->fix.mmio_len = pci_resource_len(pdev, 0);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height);

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * psb_user_framebuffer_create - create framebuffer
 * @dev: our DRM device
 * @filp: client file
 * @cmd: mode request
 *
 * Create a new framebuffer backed by a userspace GEM object
 */
static struct drm_framebuffer *psb_user_framebuffer_create
			(struct drm_device *dev, struct drm_file *filp,
			 const struct drm_mode_fb_cmd2 *cmd)
{
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;

	/*
	 * Find the GEM object and thus the gtt range object that is
	 * to back this space
	 */
	obj = drm_gem_object_lookup(filp, cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	/* Let the core code do all the work */
	fb = psb_framebuffer_create(dev, cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_put(obj);

	return fb;
}

static int psbfb_probe(struct drm_fb_helper *fb_helper,
		       struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned int fb_size;
	int bytespp;

	bytespp = sizes->surface_bpp / 8;
	if (bytespp == 3)	/* no 24bit packed */
		bytespp = 4;

	/* If the mode will not fit in 32bit then switch to 16bit to get
	   a console at full resolution. The X mode setting server will
	   allocate its own 32bit GEM framebuffer */
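	/* Worst-case size with the 64-byte aligned pitch used by psbfb_create() */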
	fb_size = ALIGN(sizes->surface_width * bytespp, 64) *
		  sizes->surface_height;
	fb_size = ALIGN(fb_size, PAGE_SIZE);

	if (fb_size > dev_priv->vram_stolen_size) {
		sizes->surface_bpp = 16;
		sizes->surface_depth = 16;
	}

	return psbfb_create(fb_helper, sizes);
}

static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
	.fb_probe = psbfb_probe,
};

static int psb_fbdev_destroy(struct drm_device *dev,
			     struct drm_fb_helper *fb_helper)
{
	struct drm_framebuffer *fb = fb_helper->fb;

	drm_fb_helper_unregister_info(fb_helper);

	drm_fb_helper_fini(fb_helper);
	drm_framebuffer_unregister_private(fb);
	drm_framebuffer_cleanup(fb);

	if (fb->obj[0])
		drm_gem_object_put(fb->obj[0]);
	kfree(fb);

	return 0;
}

int psb_fbdev_init(struct drm_device *dev)
{
	struct drm_fb_helper *fb_helper;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int ret;

	fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
	if (!fb_helper) {
		dev_err(dev->dev, "no memory\n");
		return -ENOMEM;
	}

	dev_priv->fb_helper = fb_helper;

	drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto free;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(fb_helper, 32);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(fb_helper);
free:
	kfree(fb_helper);
	return ret;
}

static void psb_fbdev_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	if (!dev_priv->fb_helper)
		return;

	psb_fbdev_destroy(dev, dev_priv->fb_helper);
	kfree(dev_priv->fb_helper);
	dev_priv->fb_helper = NULL;
}

static const struct drm_mode_config_funcs psb_mode_funcs = {
	.fb_create = psb_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

static void psb_setup_outputs(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_mode_create_scaling_mode_property(dev);

	/* It is ok for this to fail - we just don't get backlight control */
	if (!dev_priv->backlight_property)
		dev_priv->backlight_property = drm_property_create_range(dev, 0,
							"backlight", 0, 100);
	dev_priv->ops->output_init(dev);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
		struct drm_encoder *encoder = &gma_encoder->base;
		int crtc_mask = 0, clone_mask = 0;

		/* valid crtcs */
		switch (gma_encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			crtc_mask = (1 << 0);
			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
			break;
		case INTEL_OUTPUT_SDVO:
			crtc_mask = dev_priv->ops->sdvo_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_LVDS:
			crtc_mask = dev_priv->ops->lvds_mask;
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI:
			crtc_mask = (1 << 0);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_MIPI2:
			crtc_mask = (1 << 2);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_HDMI:
			crtc_mask = dev_priv->ops->hdmi_mask;
			clone_mask = (1 << INTEL_OUTPUT_HDMI);
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			crtc_mask = (1 << 0) | (1 << 1);
			clone_mask = 0;
			break;
		case INTEL_OUTPUT_EDP:
			crtc_mask = (1 << 1);
			clone_mask = 0;
		}
		encoder->possible_crtcs = crtc_mask;
		encoder->possible_clones =
			gma_connector_clones(dev, clone_mask);
	}
	drm_connector_list_iter_end(&conn_iter);
}

void psb_modeset_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int i;

	if (drmm_mode_config_init(dev))
		return;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = &psb_mode_funcs;

	/* set memory base */
	/* Oaktrail and Poulsbo should use BAR 2 */
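	/* PSB_BSM holds the physical base of stolen memory, which also
	   serves as the framebuffer aperture base */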
	pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev_priv->fb_base));

	/* num pipes is 2 for PSB but 1 for Mrst */
	for (i = 0; i < dev_priv->num_pipe; i++)
		psb_intel_crtc_init(dev, i, mode_dev);

	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	psb_setup_outputs(dev);

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);

	dev_priv->modeset = true;
}

void psb_modeset_cleanup(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	if (dev_priv->modeset) {
		drm_kms_helper_poll_fini(dev);
		psb_fbdev_fini(dev);
	}
}