/*
 * Copyright (C) 2016-2017 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>

#include <log/log.h>
#include <cutils/atomic.h>

#include <ion/ion.h>
#include <sys/ioctl.h>

#include <hardware/hardware.h>

#if GRALLOC_USE_GRALLOC1_API == 1
#include <hardware/gralloc1.h>
#else
#include <hardware/gralloc.h>
#endif

#include "mali_gralloc_module.h"
#include "mali_gralloc_private_interface_types.h"
#include "mali_gralloc_buffer.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"
#include "mali_gralloc_formats.h"
#include "mali_gralloc_usages.h"
#include "mali_gralloc_bufferdescriptor.h"
#include "ion_4.12.h"

#define ION_SYSTEM (char*)"ion_system_heap"
#define ION_CMA (char*)"linux,cma"

static bool gralloc_legacy_ion;
static int system_heap_id;
static int cma_heap_id;

static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds);

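/*
 * Fills a freshly allocated buffer with AFBC header blocks matching the
 * requested format, so the buffer starts out as a valid (empty) AFBC surface.
 */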
static void init_afbc(uint8_t *buf, uint64_t internal_format, int w, int h)
{
    uint32_t n_headers = (w * h) / 256;
    uint32_t body_offset = n_headers * 16;
    uint32_t headers[][4] = {
        { body_offset, 0x1, 0x10000, 0x0 }, /* Layouts 0, 3, 4 */
        { (body_offset + (1 << 28)), 0x80200040, 0x1004000, 0x20080 } /* Layouts 1, 5 */
    };
    uint32_t i, layout;

    /* For AFBC 1.2, the header buffer can be initialized to 0 for Layouts 0, 3, 4 */
    if (internal_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
    {
        memset(headers[0], 0, sizeof(uint32_t) * 4);
    }

    /* Map the format if necessary (this also removes internal extension bits) */
    uint64_t base_format = internal_format & MALI_GRALLOC_INTFMT_FMT_MASK;

    switch (base_format)
    {
    case MALI_GRALLOC_FORMAT_INTERNAL_RGBA_8888:
    case MALI_GRALLOC_FORMAT_INTERNAL_RGBX_8888:
    case MALI_GRALLOC_FORMAT_INTERNAL_RGB_888:
    case MALI_GRALLOC_FORMAT_INTERNAL_RGB_565:
    case MALI_GRALLOC_FORMAT_INTERNAL_BGRA_8888:
        layout = 0;
        break;

    case MALI_GRALLOC_FORMAT_INTERNAL_YV12:
    case MALI_GRALLOC_FORMAT_INTERNAL_NV12:
    case MALI_GRALLOC_FORMAT_INTERNAL_NV21:
        layout = 1;
        break;

    default:
        layout = 0;
    }

    ALOGV("Writing AFBC header layout %d for format %" PRIu64, layout, base_format);

    for (i = 0; i < n_headers; i++)
    {
        memcpy(buf, headers[layout], sizeof(headers[layout]));
        buf += sizeof(headers[layout]);
    }
}
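/*
 * Looks up the numeric id of the named heap via the non-legacy ION heap query
 * interface. Returns the heap id on success, or -1 if the query fails or no
 * heap with that name exists.
 */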
static int find_heap_id(int ion_client, char *name)
{
    int i, ret, cnt, heap_id = -1;
    struct ion_heap_data *data;

    ret = ion_query_heap_cnt(ion_client, &cnt);

    if (ret)
    {
        AERR("ion count query failed with %s", strerror(errno));
        return -1;
    }

    data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
    if (!data)
    {
        AERR("Error allocating data %s\n", strerror(errno));
        return -1;
    }

    ret = ion_query_get_heaps(ion_client, cnt, data);
    if (ret)
    {
        AERR("Error querying heaps from ion %s", strerror(errno));
    }
    else
    {
        for (i = 0; i < cnt; i++)
        {
            if (strcmp(data[i].name, name) == 0)
            {
                heap_id = data[i].heap_id;
                break;
            }
        }

        if (i == cnt)
        {
            AERR("No %s heap found amongst %d heaps\n", name, cnt);
            heap_id = -1;
        }
    }

    free(data);
    return heap_id;
}

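/*
 * Wrapper around ion_alloc_fd() that, on non-legacy ION kernels, replaces the
 * legacy heap mask with the heap ids discovered at init time (the CMA heap for
 * DMA-heap requests, the system heap otherwise).
 */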
static int alloc_ion_fd(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *shared_fd)
{
    int heap;

    if (!gralloc_legacy_ion)
    {
        heap = 1 << system_heap_id;
        if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
            heap = 1 << cma_heap_id;
    }
    else
    {
        heap = heap_mask;
    }

    return ion_alloc_fd(ion_fd, size, 0, heap, flags, shared_fd);
}

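/*
 * Allocates 'size' bytes from the requested ION heap, falling back to the
 * system heap (except for secure allocations) if the first attempt fails.
 * On success, reports the minimum page size backing the allocation through
 * 'min_pgsz' and returns the shared dma-buf fd; returns -1 on failure.
 */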
static int alloc_from_ion_heap(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *min_pgsz)
{
    ion_user_handle_t ion_hnd = -1;
    int shared_fd, ret;

    if ((ion_fd < 0) || (size <= 0) || (heap_mask == 0) || (min_pgsz == NULL))
    {
        return -1;
    }

    ret = alloc_ion_fd(ion_fd, size, heap_mask, flags, &(shared_fd));
    if (ret < 0)
    {
#if defined(ION_HEAP_SECURE_MASK)
        if (heap_mask == ION_HEAP_SECURE_MASK)
        {
            return -1;
        }
        else
#endif
        {
            /* If everything else failed, try the system heap */
            flags = 0; /* Fallback option: the original flags are no longer valid */
            heap_mask = ION_HEAP_SYSTEM_MASK;
            ret = alloc_ion_fd(ion_fd, size, heap_mask, flags, &(shared_fd));
        }
    }

    if (ret >= 0)
    {
        switch (heap_mask)
        {
        case ION_HEAP_SYSTEM_MASK:
            *min_pgsz = SZ_4K;
            break;

        case ION_HEAP_SYSTEM_CONTIG_MASK:
        case ION_HEAP_CARVEOUT_MASK:
#ifdef ION_HEAP_TYPE_DMA_MASK
        case ION_HEAP_TYPE_DMA_MASK:
#endif
            *min_pgsz = size;
            break;

#ifdef ION_HEAP_CHUNK_MASK
        /* NOTE: if you have this heap, make sure your ION chunk size is 2M */
        case ION_HEAP_CHUNK_MASK:
            *min_pgsz = SZ_2M;
            break;
#endif

#ifdef ION_HEAP_COMPOUND_PAGE_MASK
        case ION_HEAP_COMPOUND_PAGE_MASK:
            *min_pgsz = SZ_2M;
            break;
#endif

        /* If you have a customized heap, set a suitable page size here,
         * according to the customized ION implementation.
         */
#ifdef ION_HEAP_CUSTOM_MASK
        case ION_HEAP_CUSTOM_MASK:
            *min_pgsz = SZ_4K;
            break;
#endif

        default:
            *min_pgsz = SZ_4K;
            break;
        }
    }

    return shared_fd;
}

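/*
 * Selects an ION heap mask from the gralloc usage flags: the secure heap for
 * protected buffers, a compound-page or DMA heap for framebuffer/composer
 * targets where the platform provides one, and the system heap otherwise.
 * Returns 0 when no suitable heap is available.
 */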
unsigned int pick_ion_heap(uint64_t usage)
{
    unsigned int heap_mask;

    if (usage & GRALLOC_USAGE_PROTECTED)
    {
#if defined(ION_HEAP_SECURE_MASK)
        heap_mask = ION_HEAP_SECURE_MASK;
#else
        AERR("Protected ION memory is not supported on this platform.");
        return 0;
#endif
    }
#if defined(ION_HEAP_TYPE_COMPOUND_PAGE_MASK) && GRALLOC_USE_ION_COMPOUND_PAGE_HEAP
    else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
    {
        heap_mask = ION_HEAP_TYPE_COMPOUND_PAGE_MASK;
    }
#elif defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
    else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB)))
    {
        heap_mask = ION_HEAP_TYPE_DMA_MASK;
    }
#endif
    else
    {
        heap_mask = ION_HEAP_SYSTEM_MASK;
    }

    return heap_mask;
}

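/*
 * Derives the private handle heap flag and the ION allocation flags from the
 * chosen heap and the usage bits. Cached allocations are requested only for
 * buffers that are read often by the CPU and are not backed by the DMA heap.
 */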
void set_ion_flags(unsigned int heap_mask, uint64_t usage, unsigned int *priv_heap_flag, int *ion_flags)
{
#if !GRALLOC_USE_ION_DMA_HEAP
    GRALLOC_UNUSED(heap_mask);
#endif

    if (priv_heap_flag)
    {
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
        if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
        {
            *priv_heap_flag = private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP;
        }
#endif
    }

    if (ion_flags)
    {
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
        if (heap_mask != ION_HEAP_TYPE_DMA_MASK)
        {
#endif
            if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
            {
                *ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
            }
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
        }
#endif
    }
}

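/*
 * Returns true when all descriptors of a multi-buffer allocation resolve to
 * the same heap mask and ION flags, so that a single shared backing store can
 * be used for all of them.
 */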
static bool check_buffers_sharable(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
    unsigned int shared_backend_heap_mask = 0;
    int shared_ion_flags = 0;
    uint64_t usage;
    uint32_t i;

    if (numDescriptors <= 1)
    {
        return false;
    }

    for (i = 0; i < numDescriptors; i++)
    {
        unsigned int heap_mask;
        int ion_flags = 0;
        buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

        usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
        heap_mask = pick_ion_heap(usage);

        if (0 == heap_mask)
        {
            return false;
        }

        set_ion_flags(heap_mask, usage, NULL, &ion_flags);

        if (0 != shared_backend_heap_mask)
        {
            if (shared_backend_heap_mask != heap_mask || shared_ion_flags != ion_flags)
            {
                return false;
            }
        }
        else
        {
            shared_backend_heap_mask = heap_mask;
            shared_ion_flags = ion_flags;
        }
    }

    return true;
}

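/*
 * Returns the index of the descriptor with the largest size, which is used to
 * size the shared backing allocation.
 */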
static int get_max_buffer_descriptor_index(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
    uint32_t i, max_buffer_index = 0;
    size_t max_buffer_size = 0;

    for (i = 0; i < numDescriptors; i++)
    {
        buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

        if (max_buffer_size < bufDescriptor->size)
        {
            max_buffer_index = i;
            max_buffer_size = bufDescriptor->size;
        }
    }

    return max_buffer_index;
}

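/*
 * Allocates ION backing store for each descriptor. If the descriptors can
 * share a backend, a single buffer sized for the largest descriptor is
 * allocated and dup'ed fds are handed to the remaining handles; otherwise each
 * descriptor gets its own allocation. Non-protected buffers are also mapped
 * for CPU access and, when enabled, pre-initialized with AFBC headers.
 * Returns 0 on success and -1 on failure, freeing any partial allocations.
 */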
int mali_gralloc_ion_allocate(mali_gralloc_module *m, const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle, bool *shared_backend)
{
    static int support_protected = 1; /* initially, assume we support protected memory */
    unsigned int heap_mask, priv_heap_flag = 0;
    unsigned char *cpu_ptr = NULL;
    uint64_t usage;
    uint32_t i, max_buffer_index = 0;
    int shared_fd, ret, ion_flags = 0;
    int min_pgsz = 0;

    if (m->ion_client < 0)
    {
        m->ion_client = ion_open();

        if (m->ion_client < 0)
        {
            AERR("ion_open failed with %s", strerror(errno));
            return -1;
        }

        gralloc_legacy_ion = ion_is_legacy(m->ion_client);
        if (!gralloc_legacy_ion)
        {
            system_heap_id = find_heap_id(m->ion_client, ION_SYSTEM);
            cma_heap_id = find_heap_id(m->ion_client, ION_CMA);
            if (system_heap_id < 0)
            {
                ion_close(m->ion_client);
                m->ion_client = -1;
                AERR("ion_open failed: no system heap found");
                return -1;
            }
            if (cma_heap_id < 0)
            {
                AERR("No cma heap found, falling back to system");
                cma_heap_id = system_heap_id;
            }
        }
    }

    *shared_backend = check_buffers_sharable(descriptors, numDescriptors);

    if (*shared_backend)
    {
        buffer_descriptor_t *max_bufDescriptor;

        max_buffer_index = get_max_buffer_descriptor_index(descriptors, numDescriptors);
        max_bufDescriptor = (buffer_descriptor_t *)(descriptors[max_buffer_index]);
        usage = max_bufDescriptor->consumer_usage | max_bufDescriptor->producer_usage;

        heap_mask = pick_ion_heap(usage);

        if (heap_mask == 0)
        {
            AERR("Failed to find an appropriate ion heap");
            return -1;
        }

        set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

        shared_fd = alloc_from_ion_heap(m->ion_client, max_bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);

        if (shared_fd < 0)
        {
            AERR("ion_alloc failed from client ( %d )", m->ion_client);
            return -1;
        }

        for (i = 0; i < numDescriptors; i++)
        {
            buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
            int tmp_fd;

            if (i != max_buffer_index)
            {
                tmp_fd = dup(shared_fd);

                if (tmp_fd < 0)
                {
                    /* Need to free already allocated memory. */
                    mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                    return -1;
                }
            }
            else
            {
                tmp_fd = shared_fd;
            }

            private_handle_t *hnd = new private_handle_t(
                private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
                bufDescriptor->consumer_usage, bufDescriptor->producer_usage, tmp_fd, bufDescriptor->hal_format,
                bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
                bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
                max_bufDescriptor->size);

            if (NULL == hnd)
            {
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

            pHandle[i] = hnd;
        }
    }
    else
    {
        for (i = 0; i < numDescriptors; i++)
        {
            buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
            usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

            heap_mask = pick_ion_heap(usage);

            if (heap_mask == 0)
            {
                AERR("Failed to find an appropriate ion heap");
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

            set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

            shared_fd = alloc_from_ion_heap(m->ion_client, bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);

            if (shared_fd < 0)
            {
                AERR("ion_alloc failed from client ( %d )", m->ion_client);

                /* Need to free already allocated memory, not just this one */
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);

                return -1;
            }

            private_handle_t *hnd = new private_handle_t(
                private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
                bufDescriptor->consumer_usage, bufDescriptor->producer_usage, shared_fd, bufDescriptor->hal_format,
                bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
                bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
                bufDescriptor->size);

            if (NULL == hnd)
            {
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

            pHandle[i] = hnd;
        }
    }

    for (i = 0; i < numDescriptors; i++)
    {
        buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
        private_handle_t *hnd = (private_handle_t *)(pHandle[i]);

        usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

        if (!(usage & GRALLOC_USAGE_PROTECTED))
        {
            cpu_ptr =
                (unsigned char *)mmap(NULL, bufDescriptor->size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

            if (MAP_FAILED == cpu_ptr)
            {
                AERR("mmap failed from client ( %d ), fd ( %d )", m->ion_client, hnd->share_fd);
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

#if GRALLOC_INIT_AFBC == 1
            if ((bufDescriptor->internal_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK) && (!(*shared_backend)))
            {
                init_afbc(cpu_ptr, bufDescriptor->internal_format, bufDescriptor->width, bufDescriptor->height);
            }
#endif

            hnd->base = cpu_ptr;
        }
    }

    return 0;
}

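/*
 * Releases a single ION-backed buffer: unmaps the CPU mapping if one exists,
 * closes the shared fd, and clears the handle. Framebuffer handles are left
 * untouched, as they are managed by the framebuffer device.
 */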
void mali_gralloc_ion_free(private_handle_t const *hnd)
{
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
    {
        return;
    }
    else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
    {
        /* The buffer might already be unregistered, so make sure there is a valid mapping before unmapping */
        if (0 != hnd->base)
        {
            if (0 != munmap((void *)hnd->base, hnd->size))
            {
                AERR("Failed to munmap handle %p", hnd);
            }
        }

        close(hnd->share_fd);
        memset((void *)hnd, 0, sizeof(*hnd));
    }
}

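/*
 * Frees every non-NULL handle in the array; used to unwind partially
 * completed multi-buffer allocations.
 */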
static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds)
{
    uint32_t i = 0;

    for (i = 0; i < num_hnds; i++)
    {
        if (NULL != pHandle[i])
        {
            mali_gralloc_ion_free((private_handle_t *)(pHandle[i]));
        }
    }

    return;
}

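/*
 * Performs cache synchronization for a cached ION buffer via ion_sync_fd().
 * This is a no-op on non-legacy ION and for DMA-heap-backed buffers.
 */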
void mali_gralloc_ion_sync(const mali_gralloc_module *m, private_handle_t *hnd)
{
    if (!gralloc_legacy_ion)
        return;

    if (m != NULL && hnd != NULL)
    {
        switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
        {
        case private_handle_t::PRIV_FLAGS_USES_ION:
            if (!(hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP))
            {
                ion_sync_fd(m->ion_client, hnd->share_fd);
            }

            break;
        }
    }
}

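/*
 * Maps an ION buffer into the calling process: opens an ion client on first
 * use in this process, mmaps the shared fd, and stores the resulting CPU
 * address (plus any offset) in the handle. Returns 0 on success or a negative
 * errno value on failure.
 */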
int mali_gralloc_ion_map(private_handle_t *hnd)
{
    int retval = -EINVAL;

    switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
    {
    case private_handle_t::PRIV_FLAGS_USES_ION:
        unsigned char *mappedAddress;
        size_t size = hnd->size;
        hw_module_t *pmodule = NULL;
        private_module_t *m = NULL;

        if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
        {
            m = reinterpret_cast<private_module_t *>(pmodule);
        }
        else
        {
            AERR("Could not get gralloc module for handle: %p", hnd);
            retval = -errno;
            break;
        }

        /* The test condition is m->ion_client <= 0 here because:
         * 1) the module structure is zero-initialized if no initial value is supplied, and
         * 2) a second user process should get an ion fd greater than 0.
         */
        if (m->ion_client <= 0)
        {
            /* A second user process must obtain a client handle via ion_open before it can obtain the shared ion buffer */
            m->ion_client = ion_open();

            if (m->ion_client < 0)
            {
                AERR("Could not open ion device for handle: %p", hnd);
                retval = -errno;
                break;
            }
        }

        mappedAddress = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

        if (MAP_FAILED == mappedAddress)
        {
            AERR("mmap( share_fd:%d ) failed with %s", hnd->share_fd, strerror(errno));
            retval = -errno;
            break;
        }

        hnd->base = (void *)(uintptr_t(mappedAddress) + hnd->offset);
        retval = 0;
        break;
    }

    return retval;
}

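/*
 * Removes the CPU mapping created by mali_gralloc_ion_map() for an
 * ION-backed handle.
 */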
void mali_gralloc_ion_unmap(private_handle_t *hnd)
{
    switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
    {
    case private_handle_t::PRIV_FLAGS_USES_ION:
        void *base = (void *)hnd->base;
        size_t size = hnd->size;

        if (munmap(base, size) < 0)
        {
            AERR("Could not munmap base:%p size:%zd '%s'", base, size, strerror(errno));
        }

        break;
    }
}

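/*
 * Closes the gralloc allocator device, releasing the module's ion client
 * (if it is still open) and deleting the device object.
 */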
int mali_gralloc_ion_device_close(struct hw_device_t *device)
{
#if GRALLOC_USE_GRALLOC1_API == 1
    gralloc1_device_t *dev = reinterpret_cast<gralloc1_device_t *>(device);
#else
    alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
#endif

    if (dev)
    {
        private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

        if (m->ion_client != -1)
        {
            if (0 != ion_close(m->ion_client))
            {
                AERR("Failed to close ion_client: %d err=%s", m->ion_client, strerror(errno));
            }

            m->ion_client = -1;
        }

        delete dev;
    }

    return 0;
}