// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

#include "vmwgfx_bo.h"
#include <linux/highmem.h>

/*
 * Template that implements find_first_diff() for a generic
 * unsigned integer type. @size and return value are in bytes.
 */
#define VMW_FIND_FIRST_DIFF(_type)					\
static size_t vmw_find_first_diff_ ## _type				\
	(const _type * dst, const _type * src, size_t size)		\
{									\
	size_t i;							\
									\
	for (i = 0; i < size; i += sizeof(_type)) {			\
		if (*dst++ != *src++)					\
			break;						\
	}								\
									\
	return i;							\
}
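
/*
 * Illustration only (not compiled): VMW_FIND_FIRST_DIFF(u32) expands to
 * the function below, which returns the byte offset of the first
 * differing u32, or @size when the compared ranges are equal:
 *
 *	static size_t vmw_find_first_diff_u32
 *		(const u32 * dst, const u32 * src, size_t size)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < size; i += sizeof(u32)) {
 *			if (*dst++ != *src++)
 *				break;
 *		}
 *
 *		return i;
 *	}
 */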


/*
 * Template that implements find_last_diff() for a generic
 * unsigned integer type. The pointers point to the item just past the
 * *end* of the area to be examined. @size and return value are in
 * bytes.
 */
#define VMW_FIND_LAST_DIFF(_type)					\
static ssize_t vmw_find_last_diff_ ## _type(				\
	const _type * dst, const _type * src, size_t size)		\
{									\
	while (size) {							\
		if (*--dst != *--src)					\
			break;						\
									\
		size -= sizeof(_type);					\
	}								\
	return size;							\
}
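
/*
 * Worked example (illustrative): for two 8-byte buffers whose only
 * difference is at byte index 5, vmw_find_last_diff_u8() compares
 * backwards from the end, finds indices 7 and 6 equal and stops at
 * index 5, returning 6, i.e. the number of bytes up to and including
 * the last difference. A return value of 0 means the buffers are equal.
 */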


/*
 * Instantiate find diff functions for relevant unsigned integer sizes,
 * assuming that wider accesses are faster than narrower ones, provided
 * alignment allows it, up to the architecture native width, which is
 * assumed to be 32 bit unless CONFIG_64BIT is defined.
 */
VMW_FIND_FIRST_DIFF(u8);
VMW_FIND_LAST_DIFF(u8);

VMW_FIND_FIRST_DIFF(u16);
VMW_FIND_LAST_DIFF(u16);

VMW_FIND_FIRST_DIFF(u32);
VMW_FIND_LAST_DIFF(u32);

#ifdef CONFIG_64BIT
VMW_FIND_FIRST_DIFF(u64);
VMW_FIND_LAST_DIFF(u64);
#endif


/*
 * We use size-aligned copies. This computes the distance from the previous
 * sizeof(_type) boundary, i.e. (addr - round_down(addr, sizeof(_type))).
 */
#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
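
/*
 * Example (illustrative): SPILL(0x1003, u32) == (0x1003 & 3) == 3, so an
 * address of 0x1003 is three bytes past the previous u32 boundary, and
 * sizeof(u32) - 3 == 1 byte must be handled byte-wise before u32-wide
 * compares can start at the aligned address 0x1004.
 */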


/*
 * Template to compute find_first_diff() for a certain integer type
 * including a head copy for alignment, and adjustment of parameters
 * for tail find or increased resolution find using an unsigned integer find
 * of smaller width. If finding is complete, and resolution is sufficient,
 * the macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_FIRST_DIFF(_type)					\
do {									\
	unsigned int spill = SPILL(dst, _type);				\
	size_t diff_offs;						\
									\
	if (spill && spill == SPILL(src, _type) &&			\
	    sizeof(_type) - spill <= size) {				\
		spill = sizeof(_type) - spill;				\
		diff_offs = vmw_find_first_diff_u8(dst, src, spill);	\
		if (diff_offs < spill)					\
			return round_down(offset + diff_offs, granularity); \
									\
		dst += spill;						\
		src += spill;						\
		size -= spill;						\
		offset += spill;					\
		spill = 0;						\
	}								\
	if (!spill && !SPILL(src, _type)) {				\
		size_t to_copy = size & ~(sizeof(_type) - 1);		\
									\
		diff_offs = vmw_find_first_diff_ ## _type		\
			((_type *) dst, (_type *) src, to_copy);	\
		if (diff_offs >= size || granularity == sizeof(_type))	\
			return (offset + diff_offs);			\
									\
		dst += diff_offs;					\
		src += diff_offs;					\
		size -= diff_offs;					\
		offset += diff_offs;					\
	}								\
} while (0)								\
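
/*
 * Worked example (illustrative): with dst == 0x1003, src == 0x2003 and
 * size == 100, VMW_TRY_FIND_FIRST_DIFF(u32) sees spill == 3 on both
 * pointers, byte-compares the single head byte (sizeof(u32) - 3 == 1),
 * and then compares the remaining 96 bytes (99 & ~3) as u32s from the
 * now-aligned 0x1004/0x2004. If the two pointers spill differently, the
 * macro falls through so a narrower integer type can be tried instead.
 */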

/**
 * vmw_find_first_diff - find the first difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * Return: The offset from find start where the first difference was
 * encountered in bytes. If no difference was found, the function returns
 * a value >= @size.
 */
static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
				  size_t granularity)
{
	size_t offset = 0;

	/*
	 * Try finding with large integers if alignment allows, or we can
	 * fix it. Fall through if we need better resolution or alignment
	 * was bad.
	 */
#ifdef CONFIG_64BIT
	VMW_TRY_FIND_FIRST_DIFF(u64);
#endif
	VMW_TRY_FIND_FIRST_DIFF(u32);
	VMW_TRY_FIND_FIRST_DIFF(u16);

	return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
			  granularity);
}
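
/*
 * Worked example (illustrative): for 32-bit pixels (granularity == 4),
 * if the first differing byte sits at offset 10, vmw_find_first_diff()
 * returns round_down(10, 4) == 8, i.e. the start of the pixel that
 * contains the difference rather than the raw byte offset.
 */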


/*
 * Template to compute find_last_diff() for a certain integer type
 * including a tail copy for alignment, and adjustment of parameters
 * for head find or increased resolution find using an unsigned integer find
 * of smaller width. If finding is complete, and resolution is sufficient,
 * the macro executes a return statement. Otherwise it falls through.
 */
#define VMW_TRY_FIND_LAST_DIFF(_type)					\
do {									\
	unsigned int spill = SPILL(dst, _type);				\
	ssize_t location;						\
	ssize_t diff_offs;						\
									\
	if (spill && spill <= size && spill == SPILL(src, _type)) {	\
		diff_offs = vmw_find_last_diff_u8(dst, src, spill);	\
		if (diff_offs) {					\
			location = size - spill + diff_offs - 1;	\
			return round_down(location, granularity);	\
		}							\
									\
		dst -= spill;						\
		src -= spill;						\
		size -= spill;						\
		spill = 0;						\
	}								\
	if (!spill && !SPILL(src, _type)) {				\
		size_t to_copy = round_down(size, sizeof(_type));	\
									\
		diff_offs = vmw_find_last_diff_ ## _type		\
			((_type *) dst, (_type *) src, to_copy);	\
		location = size - to_copy + diff_offs - sizeof(_type);	\
		if (location < 0 || granularity == sizeof(_type))	\
			return location;				\
									\
		dst -= to_copy - diff_offs;				\
		src -= to_copy - diff_offs;				\
		size -= to_copy - diff_offs;				\
	}								\
} while (0)


/**
 * vmw_find_last_diff - find the last difference between dst and src
 *
 * @dst: The destination address
 * @src: The source address
 * @size: Number of bytes to compare
 * @granularity: The granularity needed for the return value in bytes.
 * Return: The offset from find start where the last difference was
 * encountered in bytes, or a negative value if no difference was found.
 */
static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
				  size_t granularity)
{
	dst += size;
	src += size;

#ifdef CONFIG_64BIT
	VMW_TRY_FIND_LAST_DIFF(u64);
#endif
	VMW_TRY_FIND_LAST_DIFF(u32);
	VMW_TRY_FIND_LAST_DIFF(u16);

	return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
			  granularity);
}
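
/*
 * Worked example (illustrative): with size == 16, granularity == 4 and
 * the last differing byte at offset 13, the byte-wise fallback reports
 * 14 bytes up to and including the difference, so vmw_find_last_diff()
 * yields round_down(14 - 1, 4) == 12, the start of the last modified
 * pixel.
 */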


/**
 * vmw_memcpy - A wrapper around kernel memcpy that allows it to be plugged
 * into a struct vmw_diff_cpy.
 *
 * @diff: The struct vmw_diff_cpy closure argument (unused).
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 */
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
{
	memcpy(dest, src, n);
}


/**
 * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @diff_offs: The offset from @diff->line_offset where the difference was
 * found.
 */
static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
{
	size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
	struct drm_rect *rect = &diff->rect;

	rect->x1 = min_t(int, rect->x1, offs);
	rect->x2 = max_t(int, rect->x2, offs + 1);
	rect->y1 = min_t(int, rect->y1, diff->line);
	rect->y2 = max_t(int, rect->y2, diff->line + 1);
}

/**
 * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
 *
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 *
 * In order to correctly track the modified content, the field @diff->line
 * must be pre-loaded with the current line number, the field
 * @diff->line_offset must be pre-loaded with the line offset in bytes where
 * the copy starts, and finally the field @diff->cpp needs to be pre-loaded
 * with the number of bytes per unit in the horizontal direction of the area
 * we're examining, typically bytes per pixel. The cpp determines the
 * granularity of the difference computation: a higher cpp generally leads
 * to faster execution at the cost of bounding-box width precision.
 */
void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		     size_t n)
{
	ssize_t csize, byte_len;

	if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
		return;

	/* TODO: Possibly use a single vmw_find_first_diff per line? */
	csize = vmw_find_first_diff(dest, src, n, diff->cpp);
	if (csize < n) {
		vmw_adjust_rect(diff, csize);
		byte_len = diff->cpp;

		/*
		 * Starting from where the first difference was found, find
		 * the location of the last difference, and then copy.
		 */
		diff->line_offset += csize;
		dest += csize;
		src += csize;
		n -= csize;
		csize = vmw_find_last_diff(dest, src, n, diff->cpp);
		if (csize >= 0) {
			byte_len += csize;
			vmw_adjust_rect(diff, csize);
		}
		memcpy(dest, src, byte_len);
	}
	diff->line_offset += n;
}
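
/*
 * Minimal usage sketch (illustrative, with made-up buffer and line
 * variables): before each line is copied, the caller pre-loads the
 * tracking fields exactly as vmw_bo_cpu_blit() does further down:
 *
 *	diff->cpp = 4;			// 32-bit pixels
 *	diff->line = line_number;
 *	diff->line_offset = 0;		// copy starts at column 0
 *	vmw_diff_memcpy(diff, dst_line, src_line, line_bytes);
 *
 * Afterwards, diff->rect bounds all pixels that actually changed.
 */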

/**
 * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
 *
 * @mapped_dst: Already mapped destination page index in @dst_pages.
 * @dst_addr: Kernel virtual address of mapped destination page.
 * @dst_pages: Array of destination bo pages.
 * @dst_num_pages: Number of destination bo pages.
 * @dst_prot: Destination bo page protection.
 * @mapped_src: Already mapped source page index in @src_pages.
 * @src_addr: Kernel virtual address of mapped source page.
 * @src_pages: Array of source bo pages.
 * @src_num_pages: Number of source bo pages.
 * @src_prot: Source bo page protection.
 * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
 */
struct vmw_bo_blit_line_data {
	u32 mapped_dst;
	u8 *dst_addr;
	struct page **dst_pages;
	u32 dst_num_pages;
	pgprot_t dst_prot;
	u32 mapped_src;
	u8 *src_addr;
	struct page **src_pages;
	u32 src_num_pages;
	pgprot_t src_prot;
	struct vmw_diff_cpy *diff;
};

/**
 * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
 *
 * @d: Blit data as described above.
 * @dst_offset: Destination copy start offset from start of bo.
 * @src_offset: Source copy start offset from start of bo.
 * @bytes_to_copy: Number of bytes to copy in this line.
 */
static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
				u32 dst_offset,
				u32 src_offset,
				u32 bytes_to_copy)
{
	struct vmw_diff_cpy *diff = d->diff;

	while (bytes_to_copy) {
		u32 copy_size = bytes_to_copy;
		u32 dst_page = dst_offset >> PAGE_SHIFT;
		u32 src_page = src_offset >> PAGE_SHIFT;
		u32 dst_page_offset = dst_offset & ~PAGE_MASK;
		u32 src_page_offset = src_offset & ~PAGE_MASK;
		bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
		bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
						 unmap_dst);

		copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
		copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);

		if (unmap_src) {
			kunmap_atomic(d->src_addr);
			d->src_addr = NULL;
		}

		if (unmap_dst) {
			kunmap_atomic(d->dst_addr);
			d->dst_addr = NULL;
		}

		if (!d->dst_addr) {
			if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
				return -EINVAL;

			d->dst_addr =
				kmap_atomic_prot(d->dst_pages[dst_page],
						 d->dst_prot);
			if (!d->dst_addr)
				return -ENOMEM;

			d->mapped_dst = dst_page;
		}

		if (!d->src_addr) {
			if (WARN_ON_ONCE(src_page >= d->src_num_pages))
				return -EINVAL;

			d->src_addr =
				kmap_atomic_prot(d->src_pages[src_page],
						 d->src_prot);
			if (!d->src_addr)
				return -ENOMEM;

			d->mapped_src = src_page;
		}
		diff->do_cpy(diff, d->dst_addr + dst_page_offset,
			     d->src_addr + src_page_offset, copy_size);

		bytes_to_copy -= copy_size;
		dst_offset += copy_size;
		src_offset += copy_size;
	}

	return 0;
}
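
/*
 * Worked example (illustrative, assuming 4 KiB pages): a 5000-byte copy
 * starting at byte 3000 into the first destination page is split into
 * two chunks: min(5000, 4096 - 3000) == 1096 bytes from the first page,
 * then 3904 bytes from the next, with the loop above remapping via
 * kmap_atomic_prot() at the page boundary.
 */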

static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
{
	struct vmw_private *vmw =
		container_of(bo->tbo.bdev, struct vmw_private, bdev);
	void *ptr = NULL;
	int ret;

	if (bo->tbo.base.import_attach) {
		ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
		if (ret) {
			drm_dbg_driver(&vmw->drm,
				       "Wasn't able to map external bo!\n");
			goto out;
		}
		ptr = map->vaddr;
	} else {
		ptr = vmw_bo_map_and_cache(bo);
	}

out:
	return ptr;
}

static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
{
	if (bo->tbo.base.import_attach)
		dma_buf_vunmap(bo->tbo.base.dma_buf, map);
	else
		vmw_bo_unmap(bo);
}

static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
				u32 dst_stride, struct vmw_bo *src,
				u32 src_offset, u32 src_stride,
				u32 width_in_bytes, u32 height,
				struct vmw_diff_cpy *diff)
{
	struct vmw_private *vmw =
		container_of(dst->tbo.bdev, struct vmw_private, bdev);
	size_t dst_size = dst->tbo.resource->size;
	size_t src_size = src->tbo.resource->size;
	struct iosys_map dst_map = {0};
	struct iosys_map src_map = {0};
	int ret, i;
	int x_in_bytes;
	u8 *vsrc;
	u8 *vdst;

	vsrc = map_external(src, &src_map);
	if (!vsrc) {
		drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
		ret = -ENOMEM;
		goto out;
	}

	vdst = map_external(dst, &dst_map);
	if (!vdst) {
		drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
		ret = -ENOMEM;
		goto out;
	}

	vsrc += src_offset;
	vdst += dst_offset;
	if (src_stride == dst_stride) {
		dst_size -= dst_offset;
		src_size -= src_offset;
		memcpy(vdst, vsrc,
		       min(dst_stride * height, min(dst_size, src_size)));
	} else {
		WARN_ON(dst_stride < width_in_bytes);
		for (i = 0; i < height; ++i) {
			memcpy(vdst, vsrc, width_in_bytes);
			vsrc += src_stride;
			vdst += dst_stride;
		}
	}

	x_in_bytes = (dst_offset % dst_stride);
	diff->rect.x1 = x_in_bytes / diff->cpp;
	diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
	diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
	diff->rect.y2 = diff->rect.y1 + height;

	ret = 0;
out:
	unmap_external(src, &src_map);
	unmap_external(dst, &dst_map);

	return ret;
}

/**
 * vmw_bo_cpu_blit - in-kernel cpu blit.
 *
 * @vmw_dst: Destination buffer object.
 * @dst_offset: Destination offset of blit start in bytes.
 * @dst_stride: Destination stride in bytes.
 * @vmw_src: Source buffer object.
 * @src_offset: Source offset of blit start in bytes.
 * @src_stride: Source stride in bytes.
 * @w: Width of blit.
 * @h: Height of blit.
 * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
 * Return: Zero on success. Negative error value on failure. Will print out
 * kernel warnings on caller bugs.
 *
 * Performs a CPU blit from one buffer object to another, avoiding a full
 * bo vmap, which may exhaust or fragment vmalloc space.
 * On supported architectures (x86), we're using kmap_atomic, which avoids
 * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
 * reference already set-up mappings.
 *
 * Neither of the buffer objects may be placed in PCI memory
 * (fixed memory in TTM terminology) when using this function.
 */
int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
		    u32 dst_offset, u32 dst_stride,
		    struct vmw_bo *vmw_src,
		    u32 src_offset, u32 src_stride,
		    u32 w, u32 h,
		    struct vmw_diff_cpy *diff)
{
	struct ttm_buffer_object *src = &vmw_src->tbo;
	struct ttm_buffer_object *dst = &vmw_dst->tbo;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	u32 j, initial_line = dst_offset / dst_stride;
	struct vmw_bo_blit_line_data d = {0};
	int ret = 0;
	struct page **dst_pages = NULL;
	struct page **src_pages = NULL;
	bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
	bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;

	if (WARN_ON(dst == src))
		return -EINVAL;

	/* Buffer objects need to be either pinned or reserved: */
	if (!(dst->pin_count))
		dma_resv_assert_held(dst->base.resv);
	if (!(src->pin_count))
		dma_resv_assert_held(src->base.resv);

	if (!ttm_tt_is_populated(dst->ttm)) {
		ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
		if (ret)
			return ret;
	}

	if (!ttm_tt_is_populated(src->ttm)) {
		ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);
		if (ret)
			return ret;
	}

	if (src_external || dst_external)
		return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
					    vmw_src, src_offset, src_stride,
					    w, h, diff);

	if (!src->ttm->pages && src->ttm->sg) {
		src_pages = kvmalloc_array(src->ttm->num_pages,
					   sizeof(struct page *), GFP_KERNEL);
		if (!src_pages)
			return -ENOMEM;
		ret = drm_prime_sg_to_page_array(src->ttm->sg, src_pages,
						 src->ttm->num_pages);
		if (ret)
			goto out;
	}
	if (!dst->ttm->pages && dst->ttm->sg) {
		dst_pages = kvmalloc_array(dst->ttm->num_pages,
					   sizeof(struct page *), GFP_KERNEL);
		if (!dst_pages) {
			ret = -ENOMEM;
			goto out;
		}
		ret = drm_prime_sg_to_page_array(dst->ttm->sg, dst_pages,
						 dst->ttm->num_pages);
		if (ret)
			goto out;
	}

	d.mapped_dst = 0;
	d.mapped_src = 0;
	d.dst_addr = NULL;
	d.src_addr = NULL;
	d.dst_pages = dst->ttm->pages ? dst->ttm->pages : dst_pages;
	d.src_pages = src->ttm->pages ? src->ttm->pages : src_pages;
	d.dst_num_pages = PFN_UP(dst->resource->size);
	d.src_num_pages = PFN_UP(src->resource->size);
	d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
	d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
	d.diff = diff;

	for (j = 0; j < h; ++j) {
		diff->line = j + initial_line;
		diff->line_offset = dst_offset % dst_stride;
		ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
		if (ret)
			goto out;

		dst_offset += dst_stride;
		src_offset += src_stride;
	}
out:
	if (d.src_addr)
		kunmap_atomic(d.src_addr);
	if (d.dst_addr)
		kunmap_atomic(d.dst_addr);
	kvfree(src_pages);
	kvfree(dst_pages);

	return ret;
}
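
/*
 * Minimal caller sketch (illustrative; the buffer objects, pitches and
 * the VMW_CPU_BLIT_DIFF_INITIALIZER helper are assumptions based on how
 * the driver's display code uses this interface, not part of this file):
 *
 *	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
 *	int ret;
 *
 *	ret = vmw_bo_cpu_blit(dst_bo, 0, dst_pitch,
 *			      src_bo, 0, src_pitch,
 *			      width * 4, height, &diff);
 *	if (!ret && drm_rect_visible(&diff.rect))
 *		;	// flush only the bounding box in diff.rect
 */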