/*
 * Copyright (C) 2016-2017 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>

#include <log/log.h>
#include <cutils/atomic.h>

#include <ion/ion.h>
#include <sys/ioctl.h>

#include <hardware/hardware.h>

#if GRALLOC_USE_GRALLOC1_API == 1
#include <hardware/gralloc1.h>
#else
#include <hardware/gralloc.h>
#endif

#include "mali_gralloc_module.h"
#include "mali_gralloc_private_interface_types.h"
#include "mali_gralloc_buffer.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"
#include "mali_gralloc_formats.h"
#include "mali_gralloc_usages.h"
#include "mali_gralloc_bufferdescriptor.h"
#include "ion_4.12.h"
#include "dma-heap.h"

#define ION_SYSTEM     (char*)"ion_system_heap"
#define ION_CMA        (char*)"linux,cma"

#define DMABUF_SYSTEM  (char*)"system"
#define DMABUF_CMA     (char*)"linux,cma"

static enum {
	INTERFACE_UNKNOWN,
	INTERFACE_ION_LEGACY,
	INTERFACE_ION_MODERN,
	INTERFACE_DMABUF_HEAPS
} interface_ver;

static int system_heap_id;
static int cma_heap_id;

static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds);
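
/*
 * Fill a freshly allocated AFBC buffer with valid header blocks (one 16-byte
 * header per 16x16 superblock) so the consumer never reads uninitialized
 * compression metadata. RGB-type formats use the layout-0 header template,
 * YUV-type formats the layout-1 template.
 */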
static void init_afbc(uint8_t *buf, uint64_t internal_format, int w, int h)
{
	uint32_t n_headers = (w * h) / 256;
	uint32_t body_offset = n_headers * 16;
	uint32_t headers[][4] = {
		{ body_offset, 0x1, 0x10000, 0x0 }, /* Layouts 0, 3, 4 */
		{ (body_offset + (1 << 28)), 0x80200040, 0x1004000, 0x20080 } /* Layouts 1, 5 */
	};
	uint32_t i, layout;

	/* For AFBC 1.2, the header buffer can be initialized to 0 for Layouts 0, 3, 4 */
	if (internal_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
	{
		memset(headers[0], 0, sizeof(uint32_t) * 4);
	}

	/* map format if necessary (also removes internal extension bits) */
	uint64_t base_format = internal_format & MALI_GRALLOC_INTFMT_FMT_MASK;

	switch (base_format)
	{
	case MALI_GRALLOC_FORMAT_INTERNAL_RGBA_8888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGBX_8888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGB_888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGB_565:
	case MALI_GRALLOC_FORMAT_INTERNAL_BGRA_8888:
		layout = 0;
		break;

	case MALI_GRALLOC_FORMAT_INTERNAL_YV12:
	case MALI_GRALLOC_FORMAT_INTERNAL_NV12:
	case MALI_GRALLOC_FORMAT_INTERNAL_NV21:
		layout = 1;
		break;

	default:
		layout = 0;
	}

	ALOGV("Writing AFBC header layout %d for format %" PRIu64, layout, base_format);

	for (i = 0; i < n_headers; i++)
	{
		memcpy(buf, headers[layout], sizeof(headers[layout]));
		buf += sizeof(headers[layout]);
	}
}
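
/*
 * Look up a heap id by name using the ION heap-query interface
 * (ion_query_heap_cnt/ion_query_get_heaps). Returns the heap id on success,
 * or -1 if the query fails or no heap with the given name exists.
 */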
static int find_heap_id(int ion_client, char *name)
{
	int i, ret, cnt, heap_id = -1;
	struct ion_heap_data *data;

	ret = ion_query_heap_cnt(ion_client, &cnt);

	if (ret)
	{
		AERR("ion count query failed with %s", strerror(errno));
		return -1;
	}

	data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
	if (!data)
	{
		AERR("Error allocating data %s\n", strerror(errno));
		return -1;
	}

	ret = ion_query_get_heaps(ion_client, cnt, data);
	if (ret)
	{
		AERR("Error querying heaps from ion %s", strerror(errno));
	}
	else
	{
		for (i = 0; i < cnt; i++) {
			if (strcmp(data[i].name, name) == 0) {
				heap_id = data[i].heap_id;
				break;
			}
		}

		if (i == cnt)
		{
			AERR("No %s Heap Found amongst %d heaps\n", name, cnt);
			heap_id = -1;
		}
	}

	free(data);
	return heap_id;
}

#define DEVPATH "/dev/dma_heap"
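
/*
 * Open the named DMA-BUF heap character device under /dev/dma_heap.
 * Returns an open fd on success, or a negative value on failure.
 */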
int dma_heap_open(const char* name)
{
	int ret, fd;
	char buf[256];

	ret = sprintf(buf, "%s/%s", DEVPATH, name);
	if (ret < 0) {
		AERR("sprintf failed!\n");
		return ret;
	}

	fd = open(buf, O_RDONLY);
	if (fd < 0)
		AERR("open %s failed!\n", buf);
	return fd;
}
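
/*
 * Allocate 'len' bytes from an already-open DMA-BUF heap fd via
 * DMA_HEAP_IOCTL_ALLOC. On success the resulting dma-buf fd is returned
 * through *dmabuf_fd.
 */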
int dma_heap_alloc(int fd, size_t len, unsigned int flags, int *dmabuf_fd)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
		.heap_flags = flags,
	};
	int ret;

	if (dmabuf_fd == NULL)
		return -EINVAL;

	ret = ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &data);
	if (ret < 0)
		return ret;
	*dmabuf_fd = (int)data.fd;
	return ret;
}
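
/*
 * Dispatch an allocation to whichever allocator interface was detected at
 * init time. For DMA-BUF heaps and modern ION, ION_HEAP_TYPE_DMA_MASK selects
 * the CMA heap and everything else the system heap; legacy ION passes the
 * heap mask through unchanged.
 */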
static int alloc_ion_fd(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *shared_fd)
{
	int heap;

	if (interface_ver == INTERFACE_DMABUF_HEAPS) {
		int fd = system_heap_id;
		unsigned long flg = 0;
		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
			fd = cma_heap_id;

		return dma_heap_alloc(fd, size, flg, shared_fd);
	}

	if (interface_ver == INTERFACE_ION_MODERN) {
		heap = 1 << system_heap_id;
		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
			heap = 1 << cma_heap_id;
	} else {
		heap = heap_mask;
	}
	return ion_alloc_fd(ion_fd, size, 0, heap, flags, shared_fd);
}
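
/*
 * Allocate a dma-buf of 'size' bytes from the heap described by heap_mask,
 * falling back to the system heap if the first attempt fails (except for
 * secure allocations). The minimum page size backing the allocation is
 * reported through *min_pgsz.
 */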
static int alloc_from_ion_heap(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *min_pgsz)
{
	ion_user_handle_t ion_hnd = -1;
	int shared_fd, ret;

	if ((interface_ver != INTERFACE_DMABUF_HEAPS) && (ion_fd < 0))
		return -1;

	if ((size <= 0) || (heap_mask == 0) || (min_pgsz == NULL))
		return -1;

	ret = alloc_ion_fd(ion_fd, size, heap_mask, flags, &(shared_fd));
	if (ret < 0)
	{
#if defined(ION_HEAP_SECURE_MASK)
		if (heap_mask == ION_HEAP_SECURE_MASK)
		{
			return -1;
		}
		else
#endif
		{
			/* If everything else failed try system heap */
			flags = 0; /* Fallback option; flags are no longer valid */
			heap_mask = ION_HEAP_SYSTEM_MASK;
			ret = alloc_ion_fd(ion_fd, size, heap_mask, flags, &(shared_fd));
		}
	}

	if (ret >= 0)
	{
		switch (heap_mask)
		{
		case ION_HEAP_SYSTEM_MASK:
			*min_pgsz = SZ_4K;
			break;

		case ION_HEAP_SYSTEM_CONTIG_MASK:
		case ION_HEAP_CARVEOUT_MASK:
#ifdef ION_HEAP_TYPE_DMA_MASK
		case ION_HEAP_TYPE_DMA_MASK:
#endif
			*min_pgsz = size;
			break;

#ifdef ION_HEAP_CHUNK_MASK
		/* NOTE: if you have this heap, make sure your ION chunk size is 2M */
		case ION_HEAP_CHUNK_MASK:
			*min_pgsz = SZ_2M;
			break;
#endif

#ifdef ION_HEAP_COMPOUND_PAGE_MASK
		case ION_HEAP_COMPOUND_PAGE_MASK:
			*min_pgsz = SZ_2M;
			break;
#endif

		/* If you have a customized heap, set a suitable page size according to
		 * the customized ION implementation
		 */
#ifdef ION_HEAP_CUSTOM_MASK
		case ION_HEAP_CUSTOM_MASK:
			*min_pgsz = SZ_4K;
			break;
#endif

		default:
			*min_pgsz = SZ_4K;
			break;
		}
	}

	return shared_fd;
}
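
/*
 * Map gralloc usage bits to an ION heap mask: protected buffers go to the
 * secure heap (when available), framebuffer/composer targets may use the
 * compound-page or DMA (CMA) heap depending on build options, and everything
 * else uses the system heap.
 */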
unsigned int pick_ion_heap(uint64_t usage)
{
	unsigned int heap_mask;

	if (usage & GRALLOC_USAGE_PROTECTED)
	{
#if defined(ION_HEAP_SECURE_MASK)
		heap_mask = ION_HEAP_SECURE_MASK;
#else
		AERR("Protected ION memory is not supported on this platform.");
		return 0;
#endif
	}
#if defined(ION_HEAP_TYPE_COMPOUND_PAGE_MASK) && GRALLOC_USE_ION_COMPOUND_PAGE_HEAP
	else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
	{
		heap_mask = ION_HEAP_TYPE_COMPOUND_PAGE_MASK;
	}
#elif defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
	else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB)))
	{
		heap_mask = ION_HEAP_TYPE_DMA_MASK;
	}
#endif
	else
	{
		heap_mask = ION_HEAP_SYSTEM_MASK;
	}

	return heap_mask;
}
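
/*
 * Derive the private handle flag and ION allocation flags for a given heap
 * and usage: DMA-heap allocations are tagged with PRIV_FLAGS_USES_ION_DMA_HEAP,
 * and CPU-read-often buffers on other heaps are allocated cached with explicit
 * sync.
 */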
void set_ion_flags(unsigned int heap_mask, uint64_t usage, unsigned int *priv_heap_flag, int *ion_flags)
{
#if !GRALLOC_USE_ION_DMA_HEAP
	GRALLOC_UNUSED(heap_mask);
#endif

	if (priv_heap_flag)
	{
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
		{
			*priv_heap_flag = private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP;
		}
#endif
	}

	if (ion_flags)
	{
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
		if (heap_mask != ION_HEAP_TYPE_DMA_MASK)
		{
#endif
			if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
			{
				*ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
			}
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
		}
#endif
	}
}
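
/*
 * Decide whether several buffer descriptors can share one backing dma-buf:
 * true only when there is more than one descriptor and all of them resolve
 * to the same heap mask and ION flags.
 */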
static bool check_buffers_sharable(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
	unsigned int shared_backend_heap_mask = 0;
	int shared_ion_flags = 0;
	uint64_t usage;
	uint32_t i;

	if (numDescriptors <= 1)
	{
		return false;
	}

	for (i = 0; i < numDescriptors; i++)
	{
		unsigned int heap_mask;
		int ion_flags = 0; /* set_ion_flags() only writes this for cached allocations */
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
		heap_mask = pick_ion_heap(usage);

		if (0 == heap_mask)
		{
			return false;
		}

		set_ion_flags(heap_mask, usage, NULL, &ion_flags);

		if (0 != shared_backend_heap_mask)
		{
			if (shared_backend_heap_mask != heap_mask || shared_ion_flags != ion_flags)
			{
				return false;
			}
		}
		else
		{
			shared_backend_heap_mask = heap_mask;
			shared_ion_flags = ion_flags;
		}
	}

	return true;
}

static int get_max_buffer_descriptor_index(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
	uint32_t i, max_buffer_index = 0;
	size_t max_buffer_size = 0;

	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

		if (max_buffer_size < bufDescriptor->size)
		{
			max_buffer_index = i;
			max_buffer_size = bufDescriptor->size;
		}
	}

	return max_buffer_index;
}
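
/*
 * Probe the kernel allocator interface once, in order of preference:
 * DMA-BUF heaps first, then modern ION (heaps resolved by name), and finally
 * legacy ION. The result is cached in interface_ver.
 */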
static int initialize_interface(mali_gralloc_module *m)
{
	int fd;

	if (interface_ver != INTERFACE_UNKNOWN)
		return 0;

	/* test for dma-heaps */
	fd = dma_heap_open(DMABUF_SYSTEM);
	if (fd >= 0) {
		AINF("Using DMA-BUF Heaps.\n");
		interface_ver = INTERFACE_DMABUF_HEAPS;
		system_heap_id = fd;
		cma_heap_id = dma_heap_open(DMABUF_CMA);
		/* Open other dma heaps here */
		return 0;
	}

	/* test for modern vs legacy ION */
	m->ion_client = ion_open();
	if (m->ion_client < 0) {
		AERR("ion_open failed with %s", strerror(errno));
		return -1;
	}
	if (!ion_is_legacy(m->ion_client)) {
		system_heap_id = find_heap_id(m->ion_client, ION_SYSTEM);
		cma_heap_id = find_heap_id(m->ion_client, ION_CMA);
		if (system_heap_id < 0) {
			ion_close(m->ion_client);
			m->ion_client = -1;
			AERR("ion_open failed: no system heap found");
			return -1;
		}
		if (cma_heap_id < 0) {
			AERR("No cma heap found, falling back to system");
			cma_heap_id = system_heap_id;
		}
		AINF("Using ION Modern interface.\n");
		interface_ver = INTERFACE_ION_MODERN;
	} else {
		AINF("Using ION Legacy interface.\n");
		interface_ver = INTERFACE_ION_LEGACY;
	}
	return 0;
}
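
/*
 * Allocate backing storage for one or more buffer descriptors. When the
 * descriptors are sharable, a single dma-buf sized for the largest descriptor
 * is allocated and dup'd for the others; otherwise each descriptor gets its
 * own allocation. Non-protected buffers are also mmap'd (and AFBC headers
 * initialized when required) before the private handles are returned.
 */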
int mali_gralloc_ion_allocate(mali_gralloc_module *m, const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle, bool *shared_backend)
{
	static int support_protected = 1; /* initially, assume we support protected memory */
	unsigned int heap_mask, priv_heap_flag = 0;
	unsigned char *cpu_ptr = NULL;
	uint64_t usage;
	uint32_t i, max_buffer_index = 0;
	int shared_fd, ret, ion_flags = 0;
	int min_pgsz = 0;

	ret = initialize_interface(m);
	if (ret)
		return ret;

	/* we may need to reopen the /dev/ion device */
	if ((interface_ver != INTERFACE_DMABUF_HEAPS) && (m->ion_client < 0)) {
		m->ion_client = ion_open();
		if (m->ion_client < 0) {
			AERR("ion_open failed with %s", strerror(errno));
			return -1;
		}
	}

	*shared_backend = check_buffers_sharable(descriptors, numDescriptors);

	if (*shared_backend)
	{
		buffer_descriptor_t *max_bufDescriptor;

		max_buffer_index = get_max_buffer_descriptor_index(descriptors, numDescriptors);
		max_bufDescriptor = (buffer_descriptor_t *)(descriptors[max_buffer_index]);
		usage = max_bufDescriptor->consumer_usage | max_bufDescriptor->producer_usage;

		heap_mask = pick_ion_heap(usage);

		if (heap_mask == 0)
		{
			AERR("Failed to find an appropriate ion heap");
			return -1;
		}

		set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

		shared_fd = alloc_from_ion_heap(m->ion_client, max_bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);

		if (shared_fd < 0)
		{
			AERR("ion_alloc failed from client: ( %d )", m->ion_client);
			return -1;
		}

		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			int tmp_fd;

			if (i != max_buffer_index)
			{
				tmp_fd = dup(shared_fd);

				if (tmp_fd < 0)
				{
					/* need to free already allocated memory. */
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
			}
			else
			{
				tmp_fd = shared_fd;
			}

			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage, tmp_fd, bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
			    bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
			    max_bufDescriptor->size);

			if (NULL == hnd)
			{
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}
	else
	{
		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

			heap_mask = pick_ion_heap(usage);

			if (heap_mask == 0)
			{
				AERR("Failed to find an appropriate ion heap");
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

			shared_fd = alloc_from_ion_heap(m->ion_client, bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);

			if (shared_fd < 0)
			{
				AERR("ion_alloc failed from client ( %d )", m->ion_client);

				/* need to free already allocated memory. not just this one */
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);

				return -1;
			}

			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage, shared_fd, bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
			    bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
			    bufDescriptor->size);

			if (NULL == hnd)
			{
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}

	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
		private_handle_t *hnd = (private_handle_t *)(pHandle[i]);

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

		if (!(usage & GRALLOC_USAGE_PROTECTED))
		{
			cpu_ptr =
			    (unsigned char *)mmap(NULL, bufDescriptor->size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

			if (MAP_FAILED == cpu_ptr)
			{
				AERR("mmap failed from client ( %d ), fd ( %d )", m->ion_client, hnd->share_fd);
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

#if GRALLOC_INIT_AFBC == 1
			if ((bufDescriptor->internal_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK) && (!(*shared_backend)))
			{
				init_afbc(cpu_ptr, bufDescriptor->internal_format, bufDescriptor->width, bufDescriptor->height);
			}
#endif

			hnd->base = cpu_ptr;
		}
	}

	return 0;
}

void mali_gralloc_ion_free(private_handle_t const *hnd)
{
	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		return;
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
		/* Buffer might be unregistered already, so we need to ensure we have a valid handle */
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		close(hnd->share_fd);
		memset((void *)hnd, 0, sizeof(*hnd));
	}
}

static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds)
{
	uint32_t i = 0;

	for (i = 0; i < num_hnds; i++)
	{
		if (NULL != pHandle[i])
		{
			mali_gralloc_ion_free((private_handle_t *)(pHandle[i]));
		}
	}

	return;
}
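
/*
 * Flush CPU caches for an ION-allocated buffer via ion_sync_fd(). Only
 * performed on the legacy ION interface (newer interfaces return immediately),
 * and skipped for buffers backed by the ION DMA heap.
 */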
void mali_gralloc_ion_sync(const mali_gralloc_module *m, private_handle_t *hnd)
{
	if (interface_ver != INTERFACE_ION_LEGACY)
		return;

	if (m != NULL && hnd != NULL)
	{
		switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
		{
		case private_handle_t::PRIV_FLAGS_USES_ION:
			if (!(hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP))
			{
				ion_sync_fd(m->ion_client, hnd->share_fd);
			}

			break;
		}
	}
}

int mali_gralloc_ion_map(private_handle_t *hnd)
{
	int retval = -EINVAL;

	switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
	case private_handle_t::PRIV_FLAGS_USES_ION:
		unsigned char *mappedAddress;
		size_t size = hnd->size;
		hw_module_t *pmodule = NULL;
		private_module_t *m = NULL;

		if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
		{
			m = reinterpret_cast<private_module_t *>(pmodule);
		}
		else
		{
			AERR("Could not get gralloc module for handle: %p", hnd);
			retval = -errno;
			break;
		}

		mappedAddress = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

		if (MAP_FAILED == mappedAddress)
		{
			AERR("mmap( share_fd:%d ) failed with %s", hnd->share_fd, strerror(errno));
			retval = -errno;
			break;
		}

		hnd->base = (void *)(uintptr_t(mappedAddress) + hnd->offset);
		retval = 0;
		break;
	}

	return retval;
}

void mali_gralloc_ion_unmap(private_handle_t *hnd)
{
	switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
	case private_handle_t::PRIV_FLAGS_USES_ION:
		void *base = (void *)hnd->base;
		size_t size = hnd->size;

		if (munmap(base, size) < 0)
		{
			AERR("Could not munmap base:%p size:%zd '%s'", base, size, strerror(errno));
		}

		break;
	}
}

int mali_gralloc_ion_device_close(struct hw_device_t *device)
{
#if GRALLOC_USE_GRALLOC1_API == 1
	gralloc1_device_t *dev = reinterpret_cast<gralloc1_device_t *>(device);
#else
	alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
#endif

	if (dev)
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

		if (m->ion_client != -1)
		{
			if (0 != ion_close(m->ion_client))
			{
				AERR("Failed to close ion_client: %d err=%s", m->ion_client, strerror(errno));
			}

			m->ion_client = -1;
		}

		delete dev;
	}

	return 0;
}