/*
 * Copyright (C) 2010 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdlib>
#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>

#include <sys/ioctl.h>

#include "alloc_device.h"
#include "gralloc_priv.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"

#if GRALLOC_ARM_UMP_MODULE
#include <ump/ump.h>
#include <ump/ump_ref_drv.h>
#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
#include <ion/ion.h>
#include "ion_4.12.h"

#define ION_SYSTEM (char*)"ion_system_heap"
#define ION_CMA    (char*)"linux,cma"

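/*
 * ION heap bookkeeping: on legacy ION kernels the heap masks passed by callers
 * are used directly, while on ION 4.12+ kernels heap IDs are discovered by name
 * when the device is opened (see find_ion_heap_id()) and cached here for use by
 * alloc_ion_fd().
 */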
static int system_heap_id;
static int cma_heap_id;
static bool gralloc_legacy_ion;
#endif

#if GRALLOC_SIMULATE_FAILURES
#include <cutils/properties.h>

/* system property keys for controlling simulated UMP allocation failures */
#define PROP_MALI_TEST_GRALLOC_FAIL_FIRST    "mali.test.gralloc.fail_first"
#define PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL "mali.test.gralloc.fail_interval"

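/*
 * Illustrative example: with GRALLOC_SIMULATE_FAILURES compiled in, setting
 *   adb shell setprop mali.test.gralloc.fail_first 10
 *   adb shell setprop mali.test.gralloc.fail_interval 5
 * makes the 10th UMP allocation fail and every 5th one after that, which is
 * useful for exercising the error paths in gralloc_alloc_buffer().
 */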
static int __ump_alloc_should_fail()
{

	static unsigned int call_count = 0;
	unsigned int first_fail = 0;
	unsigned int fail_period = 0;
	int fail = 0;

	++call_count;

	/* read the system properties that control failure simulation */
	{
		char prop_value[PROPERTY_VALUE_MAX];

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_FIRST, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11u", &first_fail);
		}

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11u", &fail_period);
		}
	}

	/* failure simulation is enabled by setting the first_fail property to non-zero */
	if (first_fail > 0)
	{
		LOGI("iteration %u (fail=%u, period=%u)\n", call_count, first_fail, fail_period);

		fail = (call_count == first_fail) ||
		       (call_count > first_fail && fail_period > 0 && 0 == (call_count - first_fail) % fail_period);

		if (fail)
		{
			AERR("failed ump_ref_drv_allocate on iteration #%u\n", call_count);
		}
	}

	return fail;
}
#endif

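/*
 * If the kernel framebuffer driver can export its memory as a dma-buf
 * (FBIOGET_DMABUF), store the exported fd in the gralloc handle; when the
 * ioctl is not supported, the caller falls back to a regular buffer.
 */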
#ifdef FBIOGET_DMABUF
static int fb_get_framebuffer_dmabuf(private_module_t *m, private_handle_t *hnd)
{
	struct fb_dmabuf_export fb_dma_buf;
	int res;
	res = ioctl(m->framebuffer->fd, FBIOGET_DMABUF, &fb_dma_buf);

	if (res == 0)
	{
		hnd->share_fd = fb_dma_buf.fd;
		return 0;
	}
	else
	{
		AINF("FBIOGET_DMABUF ioctl failed(%d). See gralloc_priv.h and the integration manual for vendor framebuffer "
		     "integration",
		     res);
		return -1;
	}
}
#endif

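/*
 * Map the heap mask requested by gralloc_alloc_buffer() onto an actual ION heap
 * and allocate a dma-buf fd from it. On legacy ION the mask is used as-is; on
 * newer kernels only two heaps are handled, so ION_HEAP_TYPE_DMA_MASK selects
 * the CMA heap and everything else falls back to the system heap.
 */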
#if GRALLOC_ARM_DMA_BUF_MODULE
static int alloc_ion_fd(int ion_fd, size_t size, unsigned int heap_mask, unsigned int flags, int *shared_fd)
{
	int heap;

	if (!gralloc_legacy_ion) {
		/* We only support two heaps, so mapping between CMA/System is simple */
		heap = 1 << system_heap_id;
		if (heap_mask == ION_HEAP_TYPE_DMA_MASK)
			heap = 1 << cma_heap_id;
	} else {
		heap = heap_mask;
	}
	return ion_alloc_fd(ion_fd, size, 0, heap, flags, shared_fd);
}
#endif

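/*
 * Allocate a non-framebuffer graphics buffer. The ION path picks a heap from
 * the usage flags (secure heap for GRALLOC_USAGE_PROTECTED, CMA for
 * GRALLOC_USAGE_HW_FB, system heap otherwise), maps the dma-buf into the
 * process and wraps the result in a private_handle_t. The UMP path allocates
 * through ump_ref_drv_allocate() with cacheability derived from the SW read
 * usage flags.
 */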
static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		void *cpu_ptr = MAP_FAILED;
		int shared_fd;
		int ret;
		unsigned int heap_mask;
		int lock_state = 0;
		int map_mask = 0;

		if (usage & GRALLOC_USAGE_PROTECTED) {
#if defined(ION_HEAP_SECURE_MASK)
			heap_mask = ION_HEAP_SECURE_MASK;
#else
			AERR("The platform does NOT support protected ION memory.");
			return -1;
#endif
		}
		else if (usage & GRALLOC_USAGE_HW_FB) {
			heap_mask = ION_HEAP_TYPE_DMA_MASK;
		}
		else {
			heap_mask = ION_HEAP_SYSTEM_MASK;
		}

		ret = alloc_ion_fd(m->ion_client, size, heap_mask, 0, &shared_fd);
		if (ret != 0) {
			AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
			return -1;
		}

		if (!(usage & GRALLOC_USAGE_PROTECTED))
		{
			map_mask = PROT_READ | PROT_WRITE;
		}
		else
		{
			map_mask = PROT_WRITE;
		}

		cpu_ptr = mmap(NULL, size, map_mask, MAP_SHARED, shared_fd, 0);

		if (MAP_FAILED == cpu_ptr)
		{
			AERR("ion_map( %d ) failed", m->ion_client);

			close(shared_fd);
			return -1;
		}

		lock_state = private_handle_t::LOCK_STATE_MAPPED;

		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, cpu_ptr, lock_state);

		if (NULL != hnd)
		{
			hnd->share_fd = shared_fd;
			*pHandle = hnd;
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}

		close(shared_fd);

		ret = munmap(cpu_ptr, size);

		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %lu", cpu_ptr, (unsigned long)size);
		}

		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	MALI_IGNORE(dev);
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		ump_alloc_constraints constraints;

		size = round_up_to_page_size(size);

		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}

#ifdef GRALLOC_SIMULATE_FAILURES

		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			if (usage & GRALLOC_USAGE_PROTECTED)
			{
				AERR("gralloc_alloc_buffer() does not support allocating protected UMP memory.");
			}
			else
			{
				ump_mem_handle = ump_ref_drv_allocate(size, constraints);

				if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
				{
					cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

					if (NULL != cpu_ptr)
					{
						ump_id = ump_secure_id_get(ump_mem_handle);

						if (UMP_INVALID_SECURE_ID != ump_id)
						{
							private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, cpu_ptr,
							        private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

							if (NULL != hnd)
							{
								*pHandle = hnd;
								return 0;
							}
							else
							{
								AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
							}
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
						}

						ump_mapped_pointer_release(ump_mem_handle);
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
					}

					ump_reference_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
				}
			}
		}

		return -1;
	}
#endif

}

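/*
 * Framebuffer allocation: the whole framebuffer is mapped once by
 * init_frame_buffer_locked(), and each GRALLOC_USAGE_HW_FB request hands out
 * one line_length * yres slice of it. The bufferMask bitfield tracks which of
 * the numBuffers slices are in use so that page flipping can cycle through them.
 */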
#ifndef DISABLE_FRAMEBUFFER_HAL
static int gralloc_alloc_framebuffer_locked(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

	// allocate the framebuffer
	if (m->framebuffer == NULL)
	{
		// initialize the framebuffer, the framebuffer is mapped once and forever.
		int err = init_frame_buffer_locked(m);

		if (err < 0)
		{
			return err;
		}
	}

	uint32_t bufferMask = m->bufferMask;
	const uint32_t numBuffers = m->numBuffers;
	const size_t bufferSize = m->finfo.line_length * m->info.yres;

	if (numBuffers == 1)
	{
		// If we have only one buffer, we never use page-flipping. Instead,
		// we return a regular buffer which will be memcpy'ed to the main
		// screen when post is called.
		int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
		AERR("fallback to single buffering. Virtual Y-res too small %d", m->info.yres);
		return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
	}

	if (bufferMask >= ((1LU << numBuffers) - 1))
	{
		// We ran out of buffers, reset bufferMask.
		bufferMask = 0;
		m->bufferMask = 0;
	}

	void *vaddr = m->framebuffer->base;

	// find a free slot
	for (uint32_t i = 0 ; i < numBuffers ; i++)
	{
		if ((bufferMask & (1LU << i)) == 0)
		{
			m->bufferMask |= (1LU << i);
			break;
		}

		vaddr = (void *)((uintptr_t)vaddr + bufferSize);
	}

	// The entire framebuffer memory is already mapped, now create a buffer object for parts of this memory
	private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_FRAMEBUFFER, usage, size, vaddr,
	        0, m->framebuffer->fd, (uintptr_t)vaddr - (uintptr_t)m->framebuffer->base, m->framebuffer->fb_paddr);

#if GRALLOC_ARM_UMP_MODULE
	hnd->ump_id = m->framebuffer->ump_id;

	/* create a backing ump memory handle if the framebuffer is exposed as a secure ID */
	if ((int)UMP_INVALID_SECURE_ID != hnd->ump_id)
	{
		hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

		if ((int)UMP_INVALID_MEMORY_HANDLE == hnd->ump_mem_handle)
		{
			AINF("warning: unable to create UMP handle from secure ID %i\n", hnd->ump_id);
		}
	}

#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
	{
#ifdef FBIOGET_DMABUF
		/*
		 * Perform allocator specific actions. If these fail we fall back to a regular buffer
		 * which will be memcpy'ed to the main screen when fb_post is called.
		 */
		if (fb_get_framebuffer_dmabuf(m, hnd) == -1)
		{
			int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;

			AINF("Fallback to single buffering. Unable to map framebuffer memory to handle:%p", hnd);
			return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
		}
#endif
	}

	// correct numFds/numInts when there is no dmabuf fd
	if (hnd->share_fd < 0)
	{
		hnd->numFds--;
		hnd->numInts++;
	}
#endif

	*pHandle = hnd;

	return 0;
}

static int gralloc_alloc_framebuffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
	pthread_mutex_lock(&m->lock);
	int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
	pthread_mutex_unlock(&m->lock);
	return err;
}
#endif /* DISABLE_FRAMEBUFFER_HAL */

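/*
 * Compute per-format size/stride and then allocate either a framebuffer slice
 * or a regular buffer. As an illustration (assuming GRALLOC_ALIGN_BASE_16 == 16):
 * a 1280x720 YV12 request takes the h % 8 == 0 branch, so
 *   stride = GRALLOC_ALIGN(1280, 16) = 1280
 *   size   = GRALLOC_ALIGN(720, 2) * (1280 + GRALLOC_ALIGN(640, 16)) = 720 * 1920 = 1382400 bytes,
 * while a 1280x720 RGBA_8888 request uses a 64-byte aligned byte pitch:
 *   bpr = GRALLOC_ALIGN(1280 * 4, 64) = 5120, size = bpr * 720, stride = bpr / 4 = 1280.
 */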
static int alloc_device_alloc(alloc_device_t *dev, int w, int h, int format, int usage, buffer_handle_t *pHandle, int *pStride)
{
	if (!pHandle || !pStride)
	{
		return -EINVAL;
	}

	size_t size;
	size_t stride;
	int bpp = 1;

	if (format == HAL_PIXEL_FORMAT_YCrCb_420_SP || format == HAL_PIXEL_FORMAT_YV12
	    /* HAL_PIXEL_FORMAT_YCbCr_420_SP, HAL_PIXEL_FORMAT_YCbCr_420_P, HAL_PIXEL_FORMAT_YCbCr_422_I are not defined in Android.
	     * To enable Mali DDK EGLImage support for those formats, first add them to Android's
	     * system/core/include/system/graphics.h, then define SUPPORT_LEGACY_FORMAT in the same
	     * header file (the Mali DDK also checks this definition).
	     */
#ifdef SUPPORT_LEGACY_FORMAT
	    || format == HAL_PIXEL_FORMAT_YCbCr_420_SP || format == HAL_PIXEL_FORMAT_YCbCr_420_P || format == HAL_PIXEL_FORMAT_YCbCr_422_I
#endif
	   )
	{
		switch (format)
		{
			case HAL_PIXEL_FORMAT_YCrCb_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YV12:
#ifdef SUPPORT_LEGACY_FORMAT
			case HAL_PIXEL_FORMAT_YCbCr_420_P:
#endif
				/*
				 * Utgard enforces 64-byte alignment on texture and mipmap addresses, so make
				 * sure the V and U plane start addresses are 64-byte aligned.
				 */
				stride = GRALLOC_ALIGN(w, (h % 8 == 0) ? GRALLOC_ALIGN_BASE_16 :
				                       ((h % 4 == 0) ? GRALLOC_ALIGN_BASE_64 : GRALLOC_ALIGN_BASE_128));
				size = GRALLOC_ALIGN(h, 2) * (stride + GRALLOC_ALIGN(stride / 2, 16));

				break;
#ifdef SUPPORT_LEGACY_FORMAT

			case HAL_PIXEL_FORMAT_YCbCr_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YCbCr_422_I:
				stride = GRALLOC_ALIGN(w, 16);
				size = h * stride * 2;

				break;
#endif

			default:
				return -EINVAL;
		}
	}
	else
	{

		switch (format)
		{
			case HAL_PIXEL_FORMAT_RGBA_8888:
			case HAL_PIXEL_FORMAT_RGBX_8888:
			case HAL_PIXEL_FORMAT_BGRA_8888:
				bpp = 4;
				break;

			case HAL_PIXEL_FORMAT_RGB_888:
				bpp = 3;
				break;

			case HAL_PIXEL_FORMAT_RGB_565:
#if PLATFORM_SDK_VERSION < 19
			case HAL_PIXEL_FORMAT_RGBA_5551:
			case HAL_PIXEL_FORMAT_RGBA_4444:
#endif
				bpp = 2;
				break;

			case HAL_PIXEL_FORMAT_BLOB:
				if (h != 1) {
					AERR("Height for HAL_PIXEL_FORMAT_BLOB must be 1. h=%d", h);
					return -EINVAL;
				}
				break;

			default:
				AERR("The format is not supported yet: format=%d\n", format);
				return -EINVAL;
		}

		if (format == HAL_PIXEL_FORMAT_BLOB) {
			stride = 0; /* No 'rows', it's effectively a long one-dimensional array */
			size = w;
		} else {
			size_t bpr = GRALLOC_ALIGN(w * bpp, 64);
			size = bpr * h;
			stride = bpr / bpp;
		}
	}

	int err;

#ifndef DISABLE_FRAMEBUFFER_HAL

	if (usage & GRALLOC_USAGE_HW_FB)
	{
		err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
	}
	else
#endif

	{
		err = gralloc_alloc_buffer(dev, size, usage, pHandle);
	}

	if (err < 0)
	{
		return err;
	}

	/* match the framebuffer format */
	if (usage & GRALLOC_USAGE_HW_FB)
	{
#ifdef GRALLOC_16_BITS
		format = HAL_PIXEL_FORMAT_RGB_565;
#else
		format = HAL_PIXEL_FORMAT_BGRA_8888;
#endif
	}

	private_handle_t *hnd = (private_handle_t *)*pHandle;
	int private_usage = usage & (GRALLOC_USAGE_PRIVATE_0 |
	                             GRALLOC_USAGE_PRIVATE_1);

	switch (private_usage)
	{
		case 0:
			hnd->yuv_info = MALI_YUV_BT601_NARROW;
			break;

		case GRALLOC_USAGE_PRIVATE_1:
			hnd->yuv_info = MALI_YUV_BT601_WIDE;
			break;

		case GRALLOC_USAGE_PRIVATE_0:
			hnd->yuv_info = MALI_YUV_BT709_NARROW;
			break;

		case (GRALLOC_USAGE_PRIVATE_0 | GRALLOC_USAGE_PRIVATE_1):
			hnd->yuv_info = MALI_YUV_BT709_WIDE;
			break;
	}

	hnd->width = w;
	hnd->height = h;
	hnd->format = format;
	hnd->stride = stride;
	hnd->byte_stride = GRALLOC_ALIGN(w * bpp, 64);
	*pStride = stride;
	return 0;
}

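/*
 * Release a buffer previously returned by alloc_device_alloc(): framebuffer
 * slices only drop their UMP reference, UMP buffers are unmapped and released,
 * and ION buffers are munmap'ed and their dma-buf fd closed before the handle
 * is deleted.
 */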
static int alloc_device_free(alloc_device_t __unused *dev, buffer_handle_t handle)
{
	if (private_handle_t::validate(handle) < 0)
	{
		return -EINVAL;
	}

	private_handle_t const *hnd = reinterpret_cast<private_handle_t const *>(handle);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
#if GRALLOC_ARM_UMP_MODULE

		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE

		/* Buffer might be unregistered, so check for an invalid UMP handle. */
		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#else
		AERR("Can't free ump memory for handle:%p. Not supported.", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		/* Buffer might be unregistered, so check that it is mapped before unmapping. */
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		close(hnd->share_fd);

		memset((void *)hnd, 0, sizeof(*hnd));
#else
		AERR("Can't free dma_buf memory for handle:0x%x. Not supported.", (unsigned int)hnd);
#endif

	}

	delete hnd;

	return 0;
}

static int alloc_device_close(struct hw_device_t *device)
{
	alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);

	if (dev)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

		if (0 != ion_close(m->ion_client))
		{
			AERR("Failed to close ion_client: %d", m->ion_client);
		}

#endif
		delete dev;
#if GRALLOC_ARM_UMP_MODULE
		ump_close(); // Our UMP memory refs will be released automatically here...
#endif
	}

	return 0;
}

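/*
 * Walk the heaps reported by the kernel (ion_query_heap_cnt/ion_query_get_heaps)
 * and return the heap ID whose name matches, or -1 if it is not present. Used on
 * non-legacy ION to locate the system and CMA heaps by name.
 */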
#if GRALLOC_ARM_DMA_BUF_MODULE
static int find_ion_heap_id(int ion_client, char *name)
{
	int i, ret, cnt, heap_id = -1;
	struct ion_heap_data *data;

	ret = ion_query_heap_cnt(ion_client, &cnt);

	if (ret)
	{
		AERR("ion count query failed with %s", strerror(errno));
		return -1;
	}

	data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
	if (!data)
	{
		AERR("Error allocating data %s\n", strerror(errno));
		return -1;
	}

	ret = ion_query_get_heaps(ion_client, cnt, data);
	if (ret)
	{
		AERR("Error querying heaps from ion %s", strerror(errno));
	}
	else
	{
		for (i = 0; i < cnt; i++) {
			struct ion_heap_data *dat = (struct ion_heap_data *)data;
			if (strcmp(dat[i].name, name) == 0) {
				heap_id = dat[i].heap_id;
				break;
			}
		}

		if (i == cnt)
		{
			AERR("No heap matching '%s' found amongst %d heaps\n", name, cnt);
			heap_id = -1;
		}
	}

	free(data);
	return heap_id;
}
#endif

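/*
 * Illustrative sketch (not part of this module): how a client would typically
 * obtain and use this allocator through the wrappers declared in
 * <hardware/gralloc.h>. Error handling is omitted for brevity.
 *
 *   hw_module_t const *module;
 *   alloc_device_t *alloc;
 *   buffer_handle_t handle;
 *   int stride;
 *
 *   hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);       // loads the board's gralloc module
 *   gralloc_open(module, &alloc);                             // ends up in alloc_device_open()
 *   alloc->alloc(alloc, 1280, 720, HAL_PIXEL_FORMAT_RGBA_8888,
 *                GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
 *                &handle, &stride);                           // alloc_device_alloc()
 *   alloc->free(alloc, handle);                               // alloc_device_free()
 *   gralloc_close(alloc);                                     // alloc_device_close()
 */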
int alloc_device_open(hw_module_t const *module, const char *name, hw_device_t **device)
{
	MALI_IGNORE(name);
	alloc_device_t *dev;

	dev = new alloc_device_t;

	if (NULL == dev)
	{
		return -1;
	}

#if GRALLOC_ARM_UMP_MODULE
	ump_result ump_res = ump_open();

	if (UMP_OK != ump_res)
	{
		AERR("UMP open failed with %d", ump_res);
		delete dev;
		return -1;
	}

#endif

	/* initialize our state here */
	memset(dev, 0, sizeof(*dev));

	/* initialize the procs */
	dev->common.tag = HARDWARE_DEVICE_TAG;
	dev->common.version = 0;
	dev->common.module = const_cast<hw_module_t *>(module);
	dev->common.close = alloc_device_close;
	dev->alloc = alloc_device_alloc;
	dev->free = alloc_device_free;

#if GRALLOC_ARM_DMA_BUF_MODULE
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
	m->ion_client = ion_open();

	if (m->ion_client < 0)
	{
		AERR("ion_open failed with %s", strerror(errno));
		delete dev;
		return -1;
	}

	gralloc_legacy_ion = ion_is_legacy(m->ion_client);

	if (!gralloc_legacy_ion)
	{
		system_heap_id = find_ion_heap_id(m->ion_client, ION_SYSTEM);
		cma_heap_id = find_ion_heap_id(m->ion_client, ION_CMA);
		if (system_heap_id < 0 || cma_heap_id < 0)
		{
			delete dev;
			ion_close(m->ion_client);
			m->ion_client = -1;
			return -1;
		}
	}

#endif

	*device = &dev->common;

	return 0;
}