/*
 * Copyright (C) 2016-2017 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>

#include <log/log.h>
#include <cutils/atomic.h>

#include <ion/ion.h>
#include <sys/ioctl.h>

#include <hardware/hardware.h>

#if GRALLOC_USE_GRALLOC1_API == 1
#include <hardware/gralloc1.h>
#else
#include <hardware/gralloc.h>
#endif

#include "mali_gralloc_module.h"
#include "mali_gralloc_private_interface_types.h"
#include "mali_gralloc_buffer.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"
#include "mali_gralloc_formats.h"
#include "mali_gralloc_usages.h"
#include "mali_gralloc_bufferdescriptor.h"
#include "ion_4.12.h"

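/*
 * Heap names used to look up heap IDs when running on the newer (4.12+) ION
 * interface; with legacy ION the static heap masks below are used instead.
 */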
#define ION_SYSTEM     (char*)"ion_system_heap"
#define ION_CMA        (char*)"linux,cma"
static bool gralloc_legacy_ion;
static int system_heap_id;
static int cma_heap_id;

static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds);

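/*
 * Pre-fills an AFBC buffer with valid header blocks so that the contents
 * decode as a defined (cleared) image before the first producer write.
 */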
static void init_afbc(uint8_t *buf, uint64_t internal_format, int w, int h)
{
    uint32_t n_headers = (w * h) / 256;
    uint32_t body_offset = n_headers * 16;
    uint32_t headers[][4] = {
        { body_offset, 0x1, 0x10000, 0x0 }, /* Layouts 0, 3, 4 */
        { (body_offset + (1 << 28)), 0x80200040, 0x1004000, 0x20080 } /* Layouts 1, 5 */
    };
    uint32_t i, layout;

    /* For AFBC 1.2, the header buffer can be initialized to 0 for Layouts 0, 3, 4 */
    if (internal_format & MALI_GRALLOC_INTFMT_AFBC_TILED_HEADERS)
    {
        memset(headers[0], 0, sizeof(uint32_t) * 4);
    }
    /* map format if necessary (also removes internal extension bits) */
    uint64_t base_format = internal_format & MALI_GRALLOC_INTFMT_FMT_MASK;

    switch (base_format)
    {
    case MALI_GRALLOC_FORMAT_INTERNAL_RGBA_8888:
    case MALI_GRALLOC_FORMAT_INTERNAL_RGBX_8888:
    case MALI_GRALLOC_FORMAT_INTERNAL_RGB_888:
    case MALI_GRALLOC_FORMAT_INTERNAL_RGB_565:
    case MALI_GRALLOC_FORMAT_INTERNAL_BGRA_8888:
        layout = 0;
        break;

    case MALI_GRALLOC_FORMAT_INTERNAL_YV12:
    case MALI_GRALLOC_FORMAT_INTERNAL_NV12:
    case MALI_GRALLOC_FORMAT_INTERNAL_NV21:
        layout = 1;
        break;

    default:
        layout = 0;
    }

    ALOGV("Writing AFBC header layout %d for format %" PRIu64, layout, base_format);

    for (i = 0; i < n_headers; i++)
    {
        memcpy(buf, headers[layout], sizeof(headers[layout]));
        buf += sizeof(headers[layout]);
    }
}

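/*
 * Looks up an ION heap ID by name using the heap query interface.
 * Returns the heap ID on success or -1 if the heap is not present.
 */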
static int find_heap_id(int ion_client, char *name)
{
    int i, ret, cnt, heap_id = -1;
    struct ion_heap_data *data;

    ret = ion_query_heap_cnt(ion_client, &cnt);

    if (ret)
    {
        AERR("ion count query failed with %s", strerror(errno));
        return -1;
    }

    data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
    if (!data)
    {
        AERR("Error allocating data %s\n", strerror(errno));
        return -1;
    }

    ret = ion_query_get_heaps(ion_client, cnt, data);
    if (ret)
    {
        AERR("Error querying heaps from ion %s", strerror(errno));
    }
    else
    {
        for (i = 0; i < cnt; i++) {
            if (strcmp(data[i].name, name) == 0) {
                heap_id = data[i].heap_id;
                break;
            }
        }

        if (i == cnt)
        {
            AERR("No %s Heap Found amongst %d heaps\n", name, cnt);
            heap_id = -1;
        }
    }

    free(data);
    return heap_id;
}

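/*
 * Allocates 'size' bytes from the requested legacy ION heap, falling back to
 * the system heap when the preferred heap cannot satisfy the request, and
 * returns a shareable dma-buf fd (or -1 on failure). *min_pgsz is set to the
 * minimum page size implied by the chosen heap.
 */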
static int alloc_from_ion_heap(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *min_pgsz)
{
    ion_user_handle_t ion_hnd = -1;
    int shared_fd, ret;

    if ((ion_fd < 0) || (size <= 0) || (heap_mask == 0) || (min_pgsz == NULL))
    {
        return -1;
    }

    /**
     * step 1: ion_alloc new ion_hnd
     * step 2: ion_share from ion_hnd and get shared_fd
     * step 3: ion_free the given ion_hnd
     * step 4: when we need to free this ion buffer, just close the shared_fd;
     *         the kernel reference-counts the file struct, so the fd is safe
     *         to be transferred between processes.
     */
    ret = ion_alloc(ion_fd, size, 0, heap_mask, flags, &ion_hnd);

    if (ret < 0)
    {
#if defined(ION_HEAP_SECURE_MASK)

        if (heap_mask == ION_HEAP_SECURE_MASK)
        {
            return -1;
        }
        else
#endif
        {
            /* If everything else failed try system heap */
            flags = 0; /* Fallback option: flags are no longer valid */
            heap_mask = ION_HEAP_SYSTEM_MASK;
            ret = ion_alloc(ion_fd, size, 0, heap_mask, flags, &ion_hnd);
        }

        if (ret < 0)
        {
            /* Both the requested heap and the system heap fallback failed */
            return -1;
        }
    }

    ret = ion_share(ion_fd, ion_hnd, &shared_fd);

    if (ret != 0)
    {
        AERR("ion_share( %d ) failed", ion_fd);
        shared_fd = -1;
    }

    ret = ion_free(ion_fd, ion_hnd);

    if (0 != ret)
    {
        AERR("ion_free( %d ) failed", ion_fd);
        close(shared_fd);
        shared_fd = -1;
    }

    if (ret >= 0)
    {
        switch (heap_mask)
        {
        case ION_HEAP_SYSTEM_MASK:
            *min_pgsz = SZ_4K;
            break;

        case ION_HEAP_SYSTEM_CONTIG_MASK:
        case ION_HEAP_CARVEOUT_MASK:
#ifdef ION_HEAP_TYPE_DMA_MASK
        case ION_HEAP_TYPE_DMA_MASK:
#endif
            *min_pgsz = size;
            break;

#ifdef ION_HEAP_CHUNK_MASK
        /* NOTE: if you use this heap, make sure your ION chunk size is 2M */
        case ION_HEAP_CHUNK_MASK:
            *min_pgsz = SZ_2M;
            break;
#endif

#ifdef ION_HEAP_COMPOUND_PAGE_MASK
        case ION_HEAP_COMPOUND_PAGE_MASK:
            *min_pgsz = SZ_2M;
            break;
#endif

        /* If you have a customized heap, set a suitable page size according to
         * the customized ION implementation.
         */
#ifdef ION_HEAP_CUSTOM_MASK
        case ION_HEAP_CUSTOM_MASK:
            *min_pgsz = SZ_4K;
            break;
#endif

        default:
            *min_pgsz = SZ_4K;
            break;
        }
    }

    return shared_fd;
}

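/*
 * Maps gralloc usage flags to a legacy ION heap mask. Returns 0 when the
 * requested usage (e.g. protected memory) cannot be served on this platform.
 */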
unsigned int pick_ion_heap(uint64_t usage)
{
    unsigned int heap_mask;

    if (usage & GRALLOC_USAGE_PROTECTED)
    {
#if defined(ION_HEAP_SECURE_MASK)
        heap_mask = ION_HEAP_SECURE_MASK;
#else
        AERR("Protected ION memory is not supported on this platform.");
        return 0;
#endif
    }
#if defined(ION_HEAP_TYPE_COMPOUND_PAGE_MASK) && GRALLOC_USE_ION_COMPOUND_PAGE_HEAP
    else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
    {
        heap_mask = ION_HEAP_TYPE_COMPOUND_PAGE_MASK;
    }
#elif defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
    else if (!(usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) && (usage & (GRALLOC_USAGE_HW_FB | GRALLOC_USAGE_HW_COMPOSER)))
    {
        heap_mask = ION_HEAP_TYPE_DMA_MASK;
    }
#endif
    else
    {
        heap_mask = ION_HEAP_SYSTEM_MASK;
    }

    return heap_mask;
}

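/*
 * Derives the private handle heap flag and the ION allocation flags
 * (cacheability) for a given heap mask and usage combination.
 */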
void set_ion_flags(unsigned int heap_mask, uint64_t usage, unsigned int *priv_heap_flag, int *ion_flags)
{
#if !GRALLOC_USE_ION_DMA_HEAP
    GRALLOC_UNUSED(heap_mask);
#endif

    if (priv_heap_flag)
    {
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
        if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
        {
            *priv_heap_flag = private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP;
        }
#endif
    }

    if (ion_flags)
    {
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
        if (heap_mask != ION_HEAP_TYPE_DMA_MASK)
        {
#endif
            if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
            {
                *ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
            }
#if defined(ION_HEAP_TYPE_DMA_MASK) && GRALLOC_USE_ION_DMA_HEAP
        }
#endif
    }
}

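/*
 * Returns true when all descriptors in the batch resolve to the same ION
 * heap and allocation flags, so a single shared backing buffer can be used.
 */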
static bool check_buffers_sharable(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
    unsigned int shared_backend_heap_mask = 0;
    int shared_ion_flags = 0;
    uint64_t usage;
    uint32_t i;

    if (numDescriptors <= 1)
    {
        return false;
    }

    for (i = 0; i < numDescriptors; i++)
    {
        unsigned int heap_mask;
        int ion_flags = 0; /* set_ion_flags() only writes this for cached allocations */
        buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

        usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;
        heap_mask = pick_ion_heap(usage);

        if (0 == heap_mask)
        {
            return false;
        }

        set_ion_flags(heap_mask, usage, NULL, &ion_flags);

        if (0 != shared_backend_heap_mask)
        {
            if (shared_backend_heap_mask != heap_mask || shared_ion_flags != ion_flags)
            {
                return false;
            }
        }
        else
        {
            shared_backend_heap_mask = heap_mask;
            shared_ion_flags = ion_flags;
        }
    }

    return true;
}

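/*
 * Returns the index of the largest buffer descriptor in the batch; that
 * buffer is the one actually allocated when a shared backend is used.
 */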
static int get_max_buffer_descriptor_index(const gralloc_buffer_descriptor_t *descriptors, uint32_t numDescriptors)
{
    uint32_t i, max_buffer_index = 0;
    size_t max_buffer_size = 0;

    for (i = 0; i < numDescriptors; i++)
    {
        buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)descriptors[i];

        if (max_buffer_size < bufDescriptor->size)
        {
            max_buffer_index = i;
            max_buffer_size = bufDescriptor->size;
        }
    }

    return max_buffer_index;
}

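/*
 * Allocates ION backing storage for each buffer descriptor, either as one
 * shared dma-buf (when the descriptors are compatible) or one per buffer,
 * wraps the fds in private_handle_t objects and CPU-maps non-protected
 * buffers (optionally pre-initializing AFBC headers).
 */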
int mali_gralloc_ion_allocate(mali_gralloc_module *m, const gralloc_buffer_descriptor_t *descriptors,
                              uint32_t numDescriptors, buffer_handle_t *pHandle, bool *shared_backend)
{
    static int support_protected = 1; /* initially, assume we support protected memory */
    unsigned int heap_mask, priv_heap_flag = 0;
    unsigned char *cpu_ptr = NULL;
    uint64_t usage;
    uint32_t i, max_buffer_index = 0;
    int shared_fd, ret, ion_flags = 0;
    int min_pgsz = 0;

    if (m->ion_client < 0)
    {
        m->ion_client = ion_open();

        if (m->ion_client < 0)
        {
            AERR("ion_open failed with %s", strerror(errno));
            return -1;
        }

        gralloc_legacy_ion = ion_is_legacy(m->ion_client);
        if (!gralloc_legacy_ion)
        {
            system_heap_id = find_heap_id(m->ion_client, ION_SYSTEM);
            cma_heap_id = find_heap_id(m->ion_client, ION_CMA);
            if (system_heap_id < 0)
            {
                ion_close(m->ion_client);
                m->ion_client = -1;
                AERR("ion_open failed: no system heap found");
                return -1;
            }
            if (cma_heap_id < 0)
            {
                AERR("No cma heap found, falling back to system");
                cma_heap_id = system_heap_id;
            }
        }
    }

    *shared_backend = check_buffers_sharable(descriptors, numDescriptors);

    if (*shared_backend)
    {
        buffer_descriptor_t *max_bufDescriptor;

        max_buffer_index = get_max_buffer_descriptor_index(descriptors, numDescriptors);
        max_bufDescriptor = (buffer_descriptor_t *)(descriptors[max_buffer_index]);
        usage = max_bufDescriptor->consumer_usage | max_bufDescriptor->producer_usage;

        heap_mask = pick_ion_heap(usage);

        if (heap_mask == 0)
        {
            AERR("Failed to find an appropriate ion heap");
            return -1;
        }

        set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

        if (gralloc_legacy_ion)
        {
            shared_fd = alloc_from_ion_heap(m->ion_client, max_bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);
        }
        else
        {
            int heap = 1 << system_heap_id;
            if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
                heap = 1 << cma_heap_id;

            ret = ion_alloc_fd(m->ion_client, max_bufDescriptor->size, 0, heap, 0, &(shared_fd));
            if (ret != 0)
            {
                AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
                return -1;
            }
            min_pgsz = SZ_4K;
        }

        if (shared_fd < 0)
        {
            AERR("ion_alloc failed from client: ( %d )", m->ion_client);
            return -1;
        }

        for (i = 0; i < numDescriptors; i++)
        {
            buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
            int tmp_fd;

            if (i != max_buffer_index)
            {
                tmp_fd = dup(shared_fd);

                if (tmp_fd < 0)
                {
                    /* need to free already allocated memory. */
                    mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                    return -1;
                }
            }
            else
            {
                tmp_fd = shared_fd;
            }

            private_handle_t *hnd = new private_handle_t(
                private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
                bufDescriptor->consumer_usage, bufDescriptor->producer_usage, tmp_fd, bufDescriptor->hal_format,
                bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
                bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
                max_bufDescriptor->size);

            if (NULL == hnd)
            {
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

            pHandle[i] = hnd;
        }
    }
    else
    {
        for (i = 0; i < numDescriptors; i++)
        {
            buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
            usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

            heap_mask = pick_ion_heap(usage);

            if (heap_mask == 0)
            {
                AERR("Failed to find an appropriate ion heap");
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

            set_ion_flags(heap_mask, usage, &priv_heap_flag, &ion_flags);

            if (gralloc_legacy_ion)
            {
                shared_fd = alloc_from_ion_heap(m->ion_client, bufDescriptor->size, heap_mask, ion_flags, &min_pgsz);
            }
            else
            {
                int heap = 1 << system_heap_id;
                if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
                    heap = 1 << cma_heap_id;

                ret = ion_alloc_fd(m->ion_client, bufDescriptor->size, 0, heap, 0, &(shared_fd));
                if (ret != 0)
                {
                    AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
                    mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                    return -1;
                }
                min_pgsz = SZ_4K;
            }

            if (shared_fd < 0)
            {
                AERR("ion_alloc failed from client ( %d )", m->ion_client);

                /* need to free already allocated memory. not just this one */
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);

                return -1;
            }

            private_handle_t *hnd = new private_handle_t(
                private_handle_t::PRIV_FLAGS_USES_ION | priv_heap_flag, bufDescriptor->size, min_pgsz,
                bufDescriptor->consumer_usage, bufDescriptor->producer_usage, shared_fd, bufDescriptor->hal_format,
                bufDescriptor->internal_format, bufDescriptor->byte_stride, bufDescriptor->width, bufDescriptor->height,
                bufDescriptor->pixel_stride, bufDescriptor->internalWidth, bufDescriptor->internalHeight,
                bufDescriptor->size);

            if (NULL == hnd)
            {
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

            pHandle[i] = hnd;
        }
    }

    for (i = 0; i < numDescriptors; i++)
    {
        buffer_descriptor_t *bufDescriptor = (buffer_descriptor_t *)(descriptors[i]);
        private_handle_t *hnd = (private_handle_t *)(pHandle[i]);

        usage = bufDescriptor->consumer_usage | bufDescriptor->producer_usage;

        if (!(usage & GRALLOC_USAGE_PROTECTED))
        {
            cpu_ptr =
                (unsigned char *)mmap(NULL, bufDescriptor->size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

            if (MAP_FAILED == cpu_ptr)
            {
                AERR("mmap failed from client ( %d ), fd ( %d )", m->ion_client, hnd->share_fd);
                mali_gralloc_ion_free_internal(pHandle, numDescriptors);
                return -1;
            }

#if GRALLOC_INIT_AFBC == 1
            if ((bufDescriptor->internal_format & MALI_GRALLOC_INTFMT_AFBCENABLE_MASK) && (!(*shared_backend)))
            {
                init_afbc(cpu_ptr, bufDescriptor->internal_format, bufDescriptor->width, bufDescriptor->height);
            }
#endif

            hnd->base = cpu_ptr;
        }
    }

    return 0;
}

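/*
 * Releases an ION-backed buffer: unmaps the CPU mapping (if any) and closes
 * the dma-buf fd. Framebuffer-backed handles are left untouched.
 */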
void mali_gralloc_ion_free(private_handle_t const *hnd)
{
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
    {
        return;
    }
    else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
    {
        /* Buffer might be unregistered already, so we need to ensure we have a valid handle */
        if (0 != hnd->base)
        {
            if (0 != munmap((void *)hnd->base, hnd->size))
            {
                AERR("Failed to munmap handle %p", hnd);
            }
        }

        close(hnd->share_fd);
        memset((void *)hnd, 0, sizeof(*hnd));
    }
}

static void mali_gralloc_ion_free_internal(buffer_handle_t *pHandle, uint32_t num_hnds)
{
    uint32_t i = 0;

    for (i = 0; i < num_hnds; i++)
    {
        if (NULL != pHandle[i])
        {
            mali_gralloc_ion_free((private_handle_t *)(pHandle[i]));
        }
    }

    return;
}

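/*
 * Flushes CPU caches for cached ION buffers. Only relevant on the legacy ION
 * interface; with the newer ION interface this is a no-op here.
 */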
void mali_gralloc_ion_sync(const mali_gralloc_module *m, private_handle_t *hnd)
{
    if (!gralloc_legacy_ion)
        return;

    if (m != NULL && hnd != NULL)
    {
        switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
        {
        case private_handle_t::PRIV_FLAGS_USES_ION:
            if (!(hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP))
            {
                ion_sync_fd(m->ion_client, hnd->share_fd);
            }

            break;
        }
    }
}

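/*
 * Maps an imported ION buffer into the calling process, opening a
 * process-local ion client first if necessary. Returns 0 on success or a
 * negative errno value on failure.
 */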
int mali_gralloc_ion_map(private_handle_t *hnd)
{
    int retval = -EINVAL;

    switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
    {
    case private_handle_t::PRIV_FLAGS_USES_ION:
        unsigned char *mappedAddress;
        size_t size = hnd->size;
        hw_module_t *pmodule = NULL;
        private_module_t *m = NULL;

        if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0)
        {
            m = reinterpret_cast<private_module_t *>(pmodule);
        }
        else
        {
            AERR("Could not get gralloc module for handle: %p", hnd);
            retval = -errno;
            break;
        }

        /* The test condition is m->ion_client <= 0 here because:
         * 1) the module structure is zero-initialized if no initial value is applied
         * 2) a second user process should get an ion fd greater than 0.
         */
        if (m->ion_client <= 0)
        {
            /* a second user process must obtain a client handle first via ion_open before it can obtain the shared ion buffer */
            m->ion_client = ion_open();

            if (m->ion_client < 0)
            {
                AERR("Could not open ion device for handle: %p", hnd);
                retval = -errno;
                break;
            }
        }

        mappedAddress = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hnd->share_fd, 0);

        if (MAP_FAILED == mappedAddress)
        {
            AERR("mmap( share_fd:%d ) failed with %s", hnd->share_fd, strerror(errno));
            retval = -errno;
            break;
        }

        hnd->base = (void *)(uintptr_t(mappedAddress) + hnd->offset);
        retval = 0;
        break;
    }

    return retval;
}

void mali_gralloc_ion_unmap(private_handle_t *hnd)
{
    switch (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
    {
    case private_handle_t::PRIV_FLAGS_USES_ION:
        void *base = (void *)hnd->base;
        size_t size = hnd->size;

        if (munmap(base, size) < 0)
        {
            AERR("Could not munmap base:%p size:%zd '%s'", base, size, strerror(errno));
        }

        break;
    }
}

int mali_gralloc_ion_device_close(struct hw_device_t *device)
{
#if GRALLOC_USE_GRALLOC1_API == 1
    gralloc1_device_t *dev = reinterpret_cast<gralloc1_device_t *>(device);
#else
    alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
#endif

    if (dev)
    {
        private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

        if (m->ion_client != -1)
        {
            if (0 != ion_close(m->ion_client))
            {
                AERR("Failed to close ion_client: %d err=%s", m->ion_client, strerror(errno));
            }

            m->ion_client = -1;
        }

        delete dev;
    }

    return 0;
}