/*
 * Copyright (C) 2016-2017 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>

#include <cutils/log.h>
#include <cutils/atomic.h>

#include <linux/ion.h>
#include <ion/ion.h>
#include <sys/ioctl.h>

#include <hardware/hardware.h>

#if GRALLOC_USE_GRALLOC1_API == 1
#include <hardware/gralloc1.h>
#else
#include <hardware/gralloc.h>
#endif

#include "mali_gralloc_module.h"
#include "mali_gralloc_private_interface_types.h"
#include "mali_gralloc_buffer.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"
#include "mali_gralloc_formats.h"
#include "mali_gralloc_usages.h"
#include "mali_gralloc_bufferdescriptor.h"
#include "ion_4.12.h"

/* Heap names used to look up heap ids on the new (4.12+) ION interface */
#define ION_SYSTEM (char*)"ion_system_heap"
#define ION_CMA (char*)"linux,cma"

/* Detected on first use: whether the kernel speaks the legacy ION interface,
 * and the queried heap ids for the new interface. */
static bool gralloc_legacy_ion;
static int system_heap_id;
static int cma_heap_id;

static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds);

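/*
 * Pre-fill an AFBC buffer with valid, empty headers. Each 16x16 superblock
 * gets a 16-byte header (n_headers = (w * h) / 256) whose payload differs
 * between the RGB layouts (0, 3, 4) and the YUV layouts (1, 5), and which
 * points past the header area at body_offset.
 */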
static void init_afbc(uint8_t *buf, uint64_t internal_format, int w, int h)
{
	uint32_t n_headers = (w * h) / 256;
	uint32_t body_offset = n_headers * 16;
	uint32_t headers[][4] = {
		{ body_offset, 0x1, 0x10000, 0x0 }, /* Layouts 0, 3, 4 */
		{ (body_offset + (1 << 28)), 0x80200040, 0x1004000, 0x20080 } /* Layouts 1, 5 */
	};
	uint32_t i, layout;

	/* For AFBC 1.2, the header buffer can be initialized to 0 for Layouts 0, 3, 4 */
	if (internal_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
	{
		memset(headers[0], 0, sizeof(uint32_t) * 4);
	}

	/* map format if necessary (also removes internal extension bits) */
	uint64_t base_format = internal_format & MALI_GRALLOC_INTFMT_FMT_MASK;

	switch (base_format)
	{
	case MALI_GRALLOC_FORMAT_INTERNAL_RGBA_8888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGBX_8888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGB_888:
	case MALI_GRALLOC_FORMAT_INTERNAL_RGB_565:
	case MALI_GRALLOC_FORMAT_INTERNAL_BGRA_8888:
		layout = 0;
		break;

	case MALI_GRALLOC_FORMAT_INTERNAL_YV12:
	case MALI_GRALLOC_FORMAT_INTERNAL_NV12:
	case MALI_GRALLOC_FORMAT_INTERNAL_NV21:
		layout = 1;
		break;

	default:
		layout = 0;
	}

	ALOGV("Writing AFBC header layout %d for format %" PRIu64, layout, base_format);

	for (i = 0; i < n_headers; i++)
	{
		memcpy(buf, headers[layout], sizeof(headers[layout]));
		buf += sizeof(headers[layout]);
	}
}
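/*
 * Look up the numeric heap id for a named heap by querying the kernel's
 * heap list (new ION interface only). Returns -1 if the query fails or no
 * heap with that name exists.
 */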
static int find_heap_id(int ion_client, char *name)
{
	int i, ret, cnt, heap_id = -1;
	struct ion_heap_data *data;

	ret = ion_query_heap_cnt(ion_client, &cnt);

	if (ret)
	{
		AERR("ion count query failed with %s", strerror(errno));
		return -1;
	}

	data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));

	if (!data)
	{
		AERR("Error allocating data %s\n", strerror(errno));
		return -1;
	}

	ret = ion_query_get_heaps(ion_client, cnt, data);

	if (ret)
	{
		AERR("Error querying heaps from ion %s", strerror(errno));
	}
	else
	{
		for (i = 0; i < cnt; i++)
		{
			if (strcmp(data[i].name, name) == 0)
			{
				heap_id = data[i].heap_id;
				break;
			}
		}

		if (i == cnt)
		{
			AERR("No %s Heap Found amongst %d heaps\n", name, cnt);
			heap_id = -1;
		}
	}

	free(data);
	return heap_id;
}
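/*
 * Allocate from a legacy ION heap and return the buffer as a shareable
 * dma-buf fd. Falls back to the system heap if the requested heap fails,
 * and reports the minimum page size implied by the heap actually used.
 */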
static int alloc_from_ion_heap(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *min_pgsz)
{
	ion_user_handle_t ion_hnd = -1;
	int shared_fd, ret;

	if ((ion_fd < 0) || (size <= 0) || (heap_mask == 0) || (min_pgsz == NULL))
	{
		return -1;
	}

	/**
	 * step 1: ion_alloc new ion_hnd
	 * step 2: ion_share from ion_hnd and get shared_fd
	 * step 3: ion_free the given ion_hnd
	 * step 4: when we need to free this ion buffer, just close the shared_fd;
	 *         the kernel counts references on the file struct, so the fd is
	 *         safe to transfer between processes.
	 */
	ret = ion_alloc(ion_fd, size, 0, heap_mask, flags, &ion_hnd);

	if (ret < 0)
	{
#if defined(ION_HEAP_SECURE_MASK)

		if (heap_mask == ION_HEAP_SECURE_MASK)
		{
			return -1;
		}
		else
#endif
		{
			/* If everything else failed try system heap */
			flags = 0; /* Fallback option; the requested flags are no longer valid */
			heap_mask = ION_HEAP_SYSTEM_MASK;
			ret = ion_alloc(ion_fd, size, 0, heap_mask, flags, &ion_hnd);
		}
	}

	/* Bail out if the fallback allocation failed too, rather than calling
	 * ion_share() with an invalid handle. */
	if (ret < 0)
	{
		AERR("ion_alloc( %d ) failed", ion_fd);
		return -1;
	}

	ret = ion_share(ion_fd, ion_hnd, &shared_fd);

	if (ret != 0)
	{
		AERR("ion_share( %d ) failed", ion_fd);
		shared_fd = -1;
	}

	ret = ion_free(ion_fd, ion_hnd);

	if (0 != ret)
	{
		AERR("ion_free( %d ) failed", ion_fd);
		close(shared_fd);
		shared_fd = -1;
	}

	if (ret >= 0)
	{
		switch (heap_mask)
		{
		case ION_HEAP_SYSTEM_MASK:
			*min_pgsz = SZ_4K;
			break;

		case ION_HEAP_SYSTEM_CONTIG_MASK:
		case ION_HEAP_CARVEOUT_MASK:
#ifdef ION_HEAP_TYPE_DMA_MASK
		case ION_HEAP_TYPE_DMA_MASK:
#endif
			*min_pgsz = size;
			break;

#ifdef ION_HEAP_CHUNK_MASK
		/* NOTE: if you have this heap, make sure your ION chunk size is 2M */
		case ION_HEAP_CHUNK_MASK:
			*min_pgsz = SZ_2M;
			break;
#endif

#ifdef ION_HEAP_COMPOUND_PAGE_MASK
		case ION_HEAP_COMPOUND_PAGE_MASK:
			*min_pgsz = SZ_2M;
			break;
#endif

		/* If you have a customized heap, set a suitable page size according
		 * to your ION implementation.
		 */
#ifdef ION_HEAP_CUSTOM_MASK
		case ION_HEAP_CUSTOM_MASK:
			*min_pgsz = SZ_4K;
			break;
#endif

		default:
			*min_pgsz = SZ_4K;
			break;
		}
	}

	return shared_fd;
}

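/*
 * Map gralloc usage bits to an ION heap mask: the secure heap for protected
 * buffers, a physically contiguous heap (compound-page or DMA) for
 * FB/composer buffers where available, and the system heap otherwise.
 */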
unsigned int pick_ion_heap(uint64_t usage)
{
	unsigned int heap_mask;

	if (usage & GRALLOC_USAGE_PROTECTED)
	{
#if defined(ION_HEAP_SECURE_MASK)
		heap_mask = ION_HEAP_SECURE_MASK;
#else
		AERR("Protected ION memory is not supported on this platform.");
		return 0;
#endif
	}
#if defined(ION_HEAP_TYPE_COMPOUND_PAGE_MASK) && GRALLOC_USE_ION_COMPOUND_PAGE_HEAP
	else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
	{
		heap_mask = ION_HEAP_TYPE_COMPOUND_PAGE_MASK;
	}
#elif defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
	else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
	{
		heap_mask = ION_HEAP_TYPE_DMA_MASK;
	}
#endif
	else
	{
		heap_mask = ION_HEAP_SYSTEM_MASK;
	}

	return heap_mask;
}

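/*
 * Derive the private-handle heap flag and the ION allocation flags for a
 * heap/usage pair. Buffers the CPU reads often are allocated cacheable;
 * DMA-heap buffers are left uncached.
 */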
void set_ion_flags(unsigned int heap_mask, uint64_t usage, unsigned int *priv_heap_flag, int *ion_flags)
{
#if !GRALLOC_USE_ION_DMA_HEAP
	GRALLOC_UNUSED(heap_mask);
#endif

	if (priv_heap_flag)
	{
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP

		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
		{
			*priv_heap_flag = private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP;
		}

#endif
	}

	if (ion_flags)
	{
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP

		if (heap_mask != ION_HEAP_TYPE_DMA_MASK)
		{
#endif

			if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
			{
				*ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
			}

#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
		}

#endif
	}
}

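/*
 * Decide whether a set of buffer descriptors can share one backing store:
 * they must all resolve to the same ION heap and allocation flags.
 */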
static bool check_buffers_sharable(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
	unsigned int shared_backend_heap_mask = 0;
	int shared_ion_flags = 0;
	uint64_t usage;
	uint32_t i;

	if (numDescriptors <= 1)
	{
		return false;
	}

	for (i = 0; i < numDescriptors; i++)
	{
		unsigned int heap_mask;
		int ion_flags = 0; /* set_ion_flags() writes this only conditionally */
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
		heap_mask = pick_ion_heap(usage);

		if (0 == heap_mask)
		{
			return false;
		}

		set_ion_flags(heap_mask, usage, NULL, &ion_flags);

		if (0 != shared_backend_heap_mask)
		{
			if (shared_backend_heap_mask != heap_mask || shared_ion_flags != ion_flags)
			{
				return false;
			}
		}
		else
		{
			shared_backend_heap_mask = heap_mask;
			shared_ion_flags = ion_flags;
		}
	}

	return true;
}

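/*
 * Return the index of the largest descriptor; a shared backing store is
 * sized for (and its handles reference) this buffer.
 */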
static int get_max_buffer_descriptor_index(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
	uint32_t i, max_buffer_index = 0;
	size_t max_buffer_size = 0;

	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

		if (max_buffer_size < bufDescriptor->size)
		{
			max_buffer_index = i;
			max_buffer_size = bufDescriptor->size;
		}
	}

	return max_buffer_index;
}

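/*
 * Allocate ION buffers for all descriptors. On the first call this opens the
 * ION client and detects whether the kernel exposes the legacy or the new
 * (4.12+) ION interface. If the descriptors are sharable, one dma-buf backs
 * them all (non-primary handles get dup'd fds); otherwise each descriptor
 * gets its own allocation. Non-protected buffers are then mmap'd and, when
 * enabled, AFBC headers are initialized.
 */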
int mali_gralloc_ion_allocate(mali_gralloc_module *m, const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle, bool *shared_backend)
{
	static int support_protected = 1; /* initially, assume we support protected memory */
	unsigned int heap_mask, priv_heap_flag = 0;
	unsigned char *cpu_ptr = NULL;
	uint64_t usage;
	uint32_t i, max_buffer_index = 0;
	int shared_fd, ret, ion_flags = 0;
	int min_pgsz = 0;

	/* First allocation: open the ION client and probe the kernel's ION interface */
	if (m->ion_client < 0)
	{
		m->ion_client = ion_open();

		if (m->ion_client < 0)
		{
			AERR("ion_open failed with %s", strerror(errno));
			return -1;
		}

		gralloc_legacy_ion = ion_is_legacy(m->ion_client);

		if (!gralloc_legacy_ion)
		{
			system_heap_id = find_heap_id(m->ion_client, ION_SYSTEM);
			cma_heap_id = find_heap_id(m->ion_client, ION_CMA);

			if (system_heap_id < 0)
			{
				ion_close(m->ion_client);
				m->ion_client = -1;
				AERR("ion_open failed: no system heap found");
				return -1;
			}

			if (cma_heap_id < 0)
			{
				AERR("No cma heap found, falling back to system");
				cma_heap_id = system_heap_id;
			}
		}
	}

	*shared_backend = check_buffers_sharable(descriptors, numDescriptors);

	if (*shared_backend)
	{
		buffer_descriptor_t *max_bufDescriptor;

		max_buffer_index = get_max_buffer_descriptor_index(descriptors, numDescriptors);
		max_bufDescriptor = (buffer_descriptor_t *)(descriptors[max_buffer_index]);
		usage = max_bufDescriptor->consumer_usage | max_bufDescriptor->producer_usage;

		heap_mask = pick_ion_heap(usage);

		if (heap_mask == 0)
		{
			AERR("Failed to find an appropriate ion heap");
			return -1;
		}

		set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

		if (gralloc_legacy_ion)
		{
			shared_fd = alloc_from_ion_heap(m->ion_client, max_bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);
		}
		else
		{
			int heap = 1 << system_heap_id;

			if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
				heap = 1 << cma_heap_id;

			ret = ion_alloc_fd(m->ion_client, max_bufDescriptor->size, 0, heap, 0, &shared_fd);
			if (ret != 0)
			{
				AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
				return -1;
			}
			min_pgsz = SZ_4K;
		}

		if (shared_fd < 0)
		{
			AERR("ion_alloc failed from client ( %d )", m->ion_client);
			return -1;
		}

		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			int tmp_fd;

			if (i != max_buffer_index)
			{
				tmp_fd = dup(shared_fd);

				if (tmp_fd < 0)
				{
					/* need to free already allocated memory. */
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
			}
			else
			{
				tmp_fd = shared_fd;
			}

			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage, tmp_fd, bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
			    bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
			    max_bufDescriptor->size);

			if (NULL == hnd)
			{
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}
	else
	{
		for (i = 0; i < numDescriptors; i++)
		{
			buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
			usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

			heap_mask = pick_ion_heap(usage);

			if (heap_mask == 0)
			{
				AERR("Failed to find an appropriate ion heap");
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

			if (gralloc_legacy_ion)
			{
				shared_fd = alloc_from_ion_heap(m->ion_client, bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);
			}
			else
			{
				int heap = 1 << system_heap_id;

				if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
					heap = 1 << cma_heap_id;

				ret = ion_alloc_fd(m->ion_client, bufDescriptor->size, 0, heap, 0, &shared_fd);
				if (ret != 0)
				{
					AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
					mali_gralloc_ion_free_internal(pHandle, numDescriptors);
					return -1;
				}
				min_pgsz = SZ_4K;
			}

			if (shared_fd < 0)
			{
				AERR("ion_alloc failed from client ( %d )", m->ion_client);

				/* need to free already allocated memory. not just this one */
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);

				return -1;
			}

			private_handle_t *hnd = new private_handle_t(
			    private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
			    bufDescriptor->consumer_usage, bufDescriptor->producer_usage, shared_fd, bufDescriptor->hal_format,
			    bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
			    bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
			    bufDescriptor->size);

			if (NULL == hnd)
			{
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

			pHandle[i] = hnd;
		}
	}

	/* Map each non-protected buffer for CPU access */
	for (i = 0; i < numDescriptors; i++)
	{
		buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
		private_handle_t *hnd = (private_handle_t *)(pHandle[i]);

		usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

		if (!(usage & GRALLOC_USAGE_PROTECTED))
		{
			cpu_ptr =
			    (unsigned char *)mmap(NULL, bufDescriptor->size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

			if (MAP_FAILED == cpu_ptr)
			{
				AERR("mmap failed from client ( %d ), fd ( %d )", m->ion_client, hnd->share_fd);
				mali_gralloc_ion_free_internal(pHandle, numDescriptors);
				return -1;
			}

#if GRALLOC_INIT_AFBC == 1

			if ((bufDescriptor->internal_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK) && (!(*shared_backend)))
			{
				init_afbc(cpu_ptr, bufDescriptor->internal_format, bufDescriptor->width, bufDescriptor->height);
			}

#endif
			hnd->base = cpu_ptr;
		}
	}

	return 0;
}

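/*
 * Release an ION-backed handle: unmap it if it is still mapped, close the
 * dma-buf fd and scrub the handle. Framebuffer-backed handles are not owned
 * here and are left alone.
 */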
void mali_gralloc_ion_free(private_handle_t const *hnd)
{
	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		return;
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
		/* The buffer might already be unregistered, so only unmap a valid base address */
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		close(hnd->share_fd);
		memset((void *)hnd, 0, sizeof(*hnd));
	}
}

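/* Free every non-NULL handle in the array (used to unwind failed allocations). */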
static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds)
{
	uint32_t i = 0;

	for (i = 0; i < num_hnds; i++)
	{
		if (NULL != pHandle[i])
		{
			mali_gralloc_ion_free((private_handle_t *)(pHandle[i]));
		}
	}

	return;
}

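/*
 * Flush CPU caches for an ION buffer. This uses the legacy ION sync ioctl,
 * so it is a no-op on the new (4.12+) interface, and uncached DMA-heap
 * buffers never need it.
 */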
void mali_gralloc_ion_sync(const mali_gralloc_module *m, private_handle_t *hnd)
{
	if (!gralloc_legacy_ion)
		return;

	if (m != NULL && hnd != NULL)
	{
		switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
		{
		case private_handle_t::PRIV_FLAGS_USES_ION:
			if (!(hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP))
			{
				ion_sync_fd(m->ion_client, hnd->share_fd);
			}

			break;
		}
	}
}

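/*
 * Map an imported ION handle into this process: open a process-local ION
 * client on first use, mmap the dma-buf fd and store the CPU address in
 * hnd->base. Returns 0 on success or a negative errno value.
 */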
int mali_gralloc_ion_map(private_handle_t *hnd)
{
	int retval = -EINVAL;

	switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
	case private_handle_t::PRIV_FLAGS_USES_ION:
		unsigned char *mappedAddress;
		size_t size = hnd->size;
		hw_module_t *pmodule = NULL;
		private_module_t *m = NULL;

		if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
		{
			m = reinterpret_cast<private_module_t *>(pmodule);
		}
		else
		{
			AERR("Could not get gralloc module for handle: %p", hnd);
			retval = -errno;
			break;
		}

		/* The test condition is m->ion_client <= 0 because:
		 * 1) the module structure is zero-initialized if no initial value is applied
		 * 2) a second user process should get an ion fd greater than 0.
		 */
		if (m->ion_client <= 0)
		{
			/* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer */
			m->ion_client = ion_open();

			if (m->ion_client < 0)
			{
				AERR("Could not open ion device for handle: %p", hnd);
				retval = -errno;
				break;
			}
		}

		mappedAddress = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

		if (MAP_FAILED == mappedAddress)
		{
			AERR("mmap( share_fd:%d ) failed with %s", hnd->share_fd, strerror(errno));
			retval = -errno;
			break;
		}

		hnd->base = (void *)(uintptr_t(mappedAddress) + hnd->offset);
		retval = 0;
		break;
	}

	return retval;
}

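/* Unmap a previously mapped ION handle from this process. */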
void mali_gralloc_ion_unmap(private_handle_t *hnd)
{
	switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
	case private_handle_t::PRIV_FLAGS_USES_ION:
		void *base = (void *)hnd->base;
		size_t size = hnd->size;

		if (munmap(base, size) < 0)
		{
			AERR("Could not munmap base:%p size:%zd '%s'", base, size, strerror(errno));
		}

		break;
	}
}

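/*
 * hw_device_t close hook: closes the module's ION client and destroys the
 * device object.
 */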
int mali_gralloc_ion_device_close(struct hw_device_t *device)
{
#if GRALLOC_USE_GRALLOC1_API == 1
	gralloc1_device_t *dev = reinterpret_cast<gralloc1_device_t *>(device);
#else
	alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
#endif

	if (dev)
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

		if (m->ion_client != -1)
		{
			if (0 != ion_close(m->ion_client))
			{
				AERR("Failed to close ion_client: %d err=%s", m->ion_client, strerror(errno));
			}

			m->ion_client = -1;
		}

		delete dev;
	}

	return 0;
}