blob: fe3f14f0b7c188f8e711676a9e59b644ac174d1e [file] [log] [blame]
Vishal Bhoj78e90492015-12-07 01:36:32 +05301/*
2 * Copyright (C) 2010 ARM Limited. All rights reserved.
3 *
4 * Copyright (C) 2008 The Android Open Source Project
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
#include <cstdlib>
#include <new>
#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>

#include <sys/ioctl.h>

#include "alloc_device.h"
#include "gralloc_priv.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"
35
36#if GRALLOC_ARM_UMP_MODULE
37#include <ump/ump.h>
38#include <ump/ump_ref_drv.h>
39#endif
40
41#if GRALLOC_ARM_DMA_BUF_MODULE
Vishal Bhoj78e90492015-12-07 01:36:32 +053042#include <ion/ion.h>
Laura Abbott311955b2017-06-30 11:39:35 +053043#include "ion_4.12.h"
Vishal Bhoj78e90492015-12-07 01:36:32 +053044#endif
45
#if GRALLOC_SIMULATE_FAILURES
#include <cutils/properties.h>

/* system property keys for controlling simulated UMP allocation failures */
#define PROP_MALI_TEST_GRALLOC_FAIL_FIRST "mali.test.gralloc.fail_first"
#define PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL "mali.test.gralloc.fail_interval"

/*
 * Decide whether the current UMP allocation attempt should be reported as a
 * simulated failure, for testing allocation error paths.
 *
 * Controlled by two system properties:
 *   - fail_first:    iteration number of the first simulated failure
 *                    (0, the default, disables the simulation entirely)
 *   - fail_interval: after the first failure, fail every Nth iteration
 *
 * Returns non-zero when this call should be treated as a failed allocation.
 */
static int __ump_alloc_should_fail()
{
	/* counts every allocation attempt across the process lifetime */
	static unsigned int call_count = 0;
	unsigned int first_fail = 0;
	/* FIX: was 'int'; it is parsed with the unsigned "%11u" conversion below,
	 * and a mismatched scanf conversion/argument type is undefined behavior. */
	unsigned int fail_period = 0;
	int fail = 0;

	++call_count;

	/* read the system properties that control failure simulation */
	{
		char prop_value[PROPERTY_VALUE_MAX];

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_FIRST, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11u", &first_fail);
		}

		if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL, prop_value, "0") > 0)
		{
			sscanf(prop_value, "%11u", &fail_period);
		}
	}

	/* failure simulation is enabled by setting the first_fail property to non-zero */
	if (first_fail > 0)
	{
		LOGI("iteration %u (fail=%u, period=%u)\n", call_count, first_fail, fail_period);

		/* fail on the first_fail-th call, then every fail_period calls after it */
		fail = (call_count == first_fail) ||
		       (call_count > first_fail && fail_period > 0 && 0 == (call_count - first_fail) % fail_period);

		if (fail)
		{
			/* FIX: call_count is unsigned — print with %u, not %d */
			AERR("failed ump_ref_drv_allocate on iteration #%u\n", call_count);
		}
	}

	return fail;
}
#endif
95
#ifdef FBIOGET_DMABUF
/*
 * Ask the framebuffer driver to export the framebuffer as a dma-buf and
 * record the resulting fd in hnd->share_fd.
 *
 * Returns 0 on success, -1 when the FBIOGET_DMABUF ioctl fails (e.g. the
 * vendor fbdev driver does not implement the export hook).
 */
static int fb_get_framebuffer_dmabuf(private_module_t *m, private_handle_t *hnd)
{
	struct fb_dmabuf_export fb_dma_buf;
	int res = ioctl(m->framebuffer->fd, FBIOGET_DMABUF, &fb_dma_buf);

	if (res != 0)
	{
		AINF("FBIOGET_DMABUF ioctl failed(%d). See gralloc_priv.h and the integration manual for vendor framebuffer "
		     "integration",
		     res);
		return -1;
	}

	hnd->share_fd = fb_dma_buf.fd;
	return 0;
}
#endif
Vishal Bhoj78e90492015-12-07 01:36:32 +0530117
118static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
119{
120#if GRALLOC_ARM_DMA_BUF_MODULE
121 {
122 private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
123 ion_user_handle_t ion_hnd;
John Stultze5c5bb32018-03-20 18:16:49 -0700124 void *cpu_ptr = MAP_FAILED;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530125 int shared_fd;
126 int ret;
John Stultze5c5bb32018-03-20 18:16:49 -0700127 unsigned int heap_mask;
128 int lock_state = 0;
129 int map_mask = 0;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530130
John Stultze5c5bb32018-03-20 18:16:49 -0700131 if (usage & GRALLOC_USAGE_PROTECTED)
Vishal Bhoj78e90492015-12-07 01:36:32 +0530132 {
John Stultze5c5bb32018-03-20 18:16:49 -0700133#if defined(ION_HEAP_SECURE_MASK)
134 heap_mask = ION_HEAP_SECURE_MASK;
135#else
136 AERR("The platform does NOT support protected ION memory.");
137 return -1;
138#endif
Chia-I Wu06695a62017-05-08 12:55:28 -0700139 }
Laura Abbott6ed00bc2017-06-30 11:39:35 +0530140 else
141 {
John Stultze5c5bb32018-03-20 18:16:49 -0700142 heap_mask = ION_HEAP_SYSTEM_MASK;
Laura Abbott6ed00bc2017-06-30 11:39:35 +0530143 }
Chia-I Wu06695a62017-05-08 12:55:28 -0700144
Laura Abbott311955b2017-06-30 11:39:35 +0530145 if (m->gralloc_legacy_ion)
John Stultze5c5bb32018-03-20 18:16:49 -0700146 {
Laura Abbott311955b2017-06-30 11:39:35 +0530147 ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &(ion_hnd));
John Stultze5c5bb32018-03-20 18:16:49 -0700148
Laura Abbott311955b2017-06-30 11:39:35 +0530149 if (ret != 0)
John Stultze5c5bb32018-03-20 18:16:49 -0700150 {
Laura Abbott311955b2017-06-30 11:39:35 +0530151 AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
152 return -1;
John Stultze5c5bb32018-03-20 18:16:49 -0700153 }
154
Laura Abbott311955b2017-06-30 11:39:35 +0530155 ret = ion_share(m->ion_client, ion_hnd, &shared_fd);
156
157 if (ret != 0)
158 {
159 AERR("ion_share( %d ) failed", m->ion_client);
160
161 if (0 != ion_free(m->ion_client, ion_hnd))
162 {
163 AERR("ion_free( %d ) failed", m->ion_client);
164 }
165
166 return -1;
167 }
168
169 // we do not need ion_hnd once we have shared_fd
170 if (0 != ion_free(m->ion_client, ion_hnd))
171 {
172 AWAR("ion_free( %d ) failed", m->ion_client);
173 }
174 ion_hnd = ION_INVALID_HANDLE;
175 }
176 else
177 {
178 ret = ion_alloc_fd(m->ion_client, size, 0, 1 << m->system_heap_id, 0, &(shared_fd));
179
180 if (ret != 0)
181 {
182 AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
183 return -1;
184 }
John Stultze5c5bb32018-03-20 18:16:49 -0700185 }
186
187 if (!(usage & GRALLOC_USAGE_PROTECTED))
188 {
189 map_mask = PROT_READ | PROT_WRITE;
190 }
191 else
192 {
193 map_mask = PROT_WRITE;
194 }
John Stultze5c5bb32018-03-20 18:16:49 -0700195
196 cpu_ptr = mmap(NULL, size, map_mask, MAP_SHARED, shared_fd, 0);
Vishal Bhoj78e90492015-12-07 01:36:32 +0530197
198 if (MAP_FAILED == cpu_ptr)
199 {
200 AERR("ion_map( %d ) failed", m->ion_client);
201
Vishal Bhoj78e90492015-12-07 01:36:32 +0530202 close(shared_fd);
203 return -1;
204 }
205
John Stultze5c5bb32018-03-20 18:16:49 -0700206 lock_state = private_handle_t::LOCK_STATE_MAPPED;
207
208 private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, cpu_ptr, lock_state);
Vishal Bhoj78e90492015-12-07 01:36:32 +0530209
210 if (NULL != hnd)
211 {
212 hnd->share_fd = shared_fd;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530213 *pHandle = hnd;
214 return 0;
215 }
216 else
217 {
218 AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
219 }
220
221 close(shared_fd);
John Stultze5c5bb32018-03-20 18:16:49 -0700222
Vishal Bhoj78e90492015-12-07 01:36:32 +0530223 ret = munmap(cpu_ptr, size);
224
225 if (0 != ret)
226 {
227 AERR("munmap failed for base:%p size: %lu", cpu_ptr, (unsigned long)size);
228 }
229
Vishal Bhoj78e90492015-12-07 01:36:32 +0530230 return -1;
231 }
232#endif
233
234#if GRALLOC_ARM_UMP_MODULE
235 MALI_IGNORE(dev);
236 {
237 ump_handle ump_mem_handle;
238 void *cpu_ptr;
239 ump_secure_id ump_id;
240 ump_alloc_constraints constraints;
241
242 size = round_up_to_page_size(size);
243
244 if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
245 {
246 constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE;
247 }
248 else
249 {
250 constraints = UMP_REF_DRV_CONSTRAINT_NONE;
251 }
252
253#ifdef GRALLOC_SIMULATE_FAILURES
254
255 /* if the failure condition matches, fail this iteration */
256 if (__ump_alloc_should_fail())
257 {
258 ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
259 }
260 else
261#endif
262 {
John Stultze5c5bb32018-03-20 18:16:49 -0700263 if (usage & GRALLOC_USAGE_PROTECTED)
Vishal Bhoj78e90492015-12-07 01:36:32 +0530264 {
John Stultze5c5bb32018-03-20 18:16:49 -0700265 AERR("gralloc_alloc_buffer() does not support to allocate protected UMP memory.");
Vishal Bhoj78e90492015-12-07 01:36:32 +0530266 }
267 else
268 {
John Stultze5c5bb32018-03-20 18:16:49 -0700269 ump_mem_handle = ump_ref_drv_allocate(size, constraints);
270
271 if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
272 {
273 cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);
274
275 if (NULL != cpu_ptr)
276 {
277 ump_id = ump_secure_id_get(ump_mem_handle);
278
279 if (UMP_INVALID_SECURE_ID != ump_id)
280 {
281 private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, cpu_ptr,
282 private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);
283
284 if (NULL != hnd)
285 {
286 *pHandle = hnd;
287 return 0;
288 }
289 else
290 {
291 AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
292 }
293 }
294 else
295 {
296 AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
297 }
298
299 ump_mapped_pointer_release(ump_mem_handle);
300 }
301 else
302 {
303 AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
304 }
305
306 ump_reference_release(ump_mem_handle);
307 }
308 else
309 {
310 AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
311 }
Vishal Bhoj78e90492015-12-07 01:36:32 +0530312 }
313 }
314
315 return -1;
316 }
317#endif
318
319}
320
/*
 * Allocate one page-flipping buffer out of the (already mapped) framebuffer
 * memory. Caller must hold m->lock — see gralloc_alloc_framebuffer().
 *
 * On success stores a new handle in *pHandle and returns 0. When
 * page-flipping is impossible (single buffer, or no dma-buf export) it falls
 * back to an ordinary gralloc buffer that fb_post will memcpy to the screen.
 */
static int gralloc_alloc_framebuffer_locked(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

	// allocate the framebuffer
	if (m->framebuffer == NULL)
	{
		// initialize the framebuffer, the framebuffer is mapped once and forever.
		int err = init_frame_buffer_locked(m);

		if (err < 0)
		{
			return err;
		}
	}

	uint32_t bufferMask = m->bufferMask;
	const uint32_t numBuffers = m->numBuffers;
	// one buffer slot spans a full visible screen: line stride * visible rows
	const size_t bufferSize = m->finfo.line_length * m->info.yres;

	if (numBuffers == 1)
	{
		// If we have only one buffer, we never use page-flipping. Instead,
		// we return a regular buffer which will be memcpy'ed to the main
		// screen when post is called.
		int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
		AERR("fallback to single buffering. Virtual Y-res too small %d", m->info.yres);
		return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
	}

	// bufferMask has one bit per slot; all-ones means every slot is taken
	if (bufferMask >= ((1LU << numBuffers) - 1))
	{
		// We ran out of buffers, reset bufferMask.
		bufferMask = 0;
		m->bufferMask = 0;
	}

	void *vaddr = m->framebuffer->base;

	// find a free slot
	for (uint32_t i = 0 ; i < numBuffers ; i++)
	{
		if ((bufferMask & (1LU << i)) == 0)
		{
			m->bufferMask |= (1LU << i);
			break;
		}

		// advance to the next slot's base address within the mapping
		vaddr = (void *)((uintptr_t)vaddr + bufferSize);
	}

	// The entire framebuffer memory is already mapped, now create a buffer object for parts of this memory
	private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_FRAMEBUFFER, usage, size, vaddr,
	        0, m->framebuffer->fd, (uintptr_t)vaddr - (uintptr_t) m->framebuffer->base, m->framebuffer->fb_paddr);

#if GRALLOC_ARM_UMP_MODULE
	hnd->ump_id = m->framebuffer->ump_id;

	/* create a backing ump memory handle if the framebuffer is exposed as a secure ID */
	if ((int)UMP_INVALID_SECURE_ID != hnd->ump_id)
	{
		hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

		if ((int)UMP_INVALID_MEMORY_HANDLE == hnd->ump_mem_handle)
		{
			AINF("warning: unable to create UMP handle from secure ID %i\n", hnd->ump_id);
		}
	}

#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
	{
#ifdef FBIOGET_DMABUF
		/*
		 * Perform allocator specific actions. If these fail we fall back to a regular buffer
		 * which will be memcpy'ed to the main screen when fb_post is called.
		 */
		// NOTE(review): on this fallback path the 'hnd' created above (and its
		// claimed bufferMask bit) is leaked — confirm whether intentional.
		if (fb_get_framebuffer_dmabuf(m, hnd) == -1)
		{
			int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;

			AINF("Fallback to single buffering. Unable to map framebuffer memory to handle:%p", hnd);
			return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
		}
#endif
	}

	// correct numFds/numInts when there is no dmabuf fd
	if (hnd->share_fd < 0)
	{
		hnd->numFds--;
		hnd->numInts++;
	}
#endif

	*pHandle = hnd;

	return 0;
}
421
422static int gralloc_alloc_framebuffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
423{
424 private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
425 pthread_mutex_lock(&m->lock);
426 int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
427 pthread_mutex_unlock(&m->lock);
428 return err;
429}
430
/*
 * gralloc "alloc" entry point: compute the byte size and row stride (in
 * pixels) for the requested w/h/format, allocate either a framebuffer slot
 * (GRALLOC_USAGE_HW_FB) or a regular buffer, then fill in the handle's
 * geometry and YUV color-space info.
 *
 * Returns 0 on success (storing the handle in *pHandle and the stride in
 * *pStride), -EINVAL on bad arguments/unsupported format, or the allocator's
 * negative error code.
 */
static int alloc_device_alloc(alloc_device_t *dev, int w, int h, int format, int usage, buffer_handle_t *pHandle, int *pStride)
{
	if (!pHandle || !pStride)
	{
		return -EINVAL;
	}

	size_t size;
	size_t stride;

	// YUV formats get per-format size math; everything else is bytes-per-pixel based
	if (format == HAL_PIXEL_FORMAT_YCrCb_420_SP || format == HAL_PIXEL_FORMAT_YV12
	    /* HAL_PIXEL_FORMAT_YCbCr_420_SP, HAL_PIXEL_FORMAT_YCbCr_420_P, HAL_PIXEL_FORMAT_YCbCr_422_I are not defined in Android.
	     * To enable Mali DDK EGLImage support for those formats, firstly, you have to add them in Android system/core/include/system/graphics.h.
	     * Then, define SUPPORT_LEGACY_FORMAT in the same header file(Mali DDK will also check this definition).
	     */
#ifdef SUPPORT_LEGACY_FORMAT
	    || format == HAL_PIXEL_FORMAT_YCbCr_420_SP || format == HAL_PIXEL_FORMAT_YCbCr_420_P || format == HAL_PIXEL_FORMAT_YCbCr_422_I
#endif
	   )
	{
		switch (format)
		{
			case HAL_PIXEL_FORMAT_YCrCb_420_SP:
				// NV21: 16-aligned luma stride, half-stride chroma also 16-aligned
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YV12:
#ifdef SUPPORT_LEGACY_FORMAT
			case HAL_PIXEL_FORMAT_YCbCr_420_P:
#endif
				/*
				 * Since Utgard has limitation that "64-byte alignment is enforced on texture and mipmap addresses", here to make sure
				 * the v, u plane start addresses are 64-byte aligned.
				 */
				stride = GRALLOC_ALIGN(w, (h % 8 == 0) ? GRALLOC_ALIGN_BASE_16 :
				                       ((h % 4 == 0) ? GRALLOC_ALIGN_BASE_64 : GRALLOC_ALIGN_BASE_128));
				size = GRALLOC_ALIGN(h, 2) * (stride + GRALLOC_ALIGN(stride / 2, 16));

				break;
#ifdef SUPPORT_LEGACY_FORMAT

			case HAL_PIXEL_FORMAT_YCbCr_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YCbCr_422_I:
				// interleaved 4:2:2 — 2 bytes per pixel, no separate planes
				stride = GRALLOC_ALIGN(w, 16);
				size = h * stride * 2;

				break;
#endif

			default:
				return -EINVAL;
		}
	}
	else
	{
		int bpp = 0;

		switch (format)
		{
			case HAL_PIXEL_FORMAT_RGBA_8888:
			case HAL_PIXEL_FORMAT_RGBX_8888:
			case HAL_PIXEL_FORMAT_BGRA_8888:
				bpp = 4;
				break;

			case HAL_PIXEL_FORMAT_RGB_888:
				bpp = 3;
				break;

			case HAL_PIXEL_FORMAT_RGB_565:
#if PLATFORM_SDK_VERSION < 19
			case HAL_PIXEL_FORMAT_RGBA_5551:
			case HAL_PIXEL_FORMAT_RGBA_4444:
#endif
				bpp = 2;
				break;

			case HAL_PIXEL_FORMAT_BLOB:
				// BLOB buffers are one-dimensional: w bytes, height must be 1
				if (h != 1) {
					AERR("Height for HAL_PIXEL_FORMAT_BLOB must be 1. h=%d", h);
					return -EINVAL;
				}
				break;

			default:
				AERR("The format is not supported yet: format=%d\n", format);
				return -EINVAL;
		}

		if (format == HAL_PIXEL_FORMAT_BLOB) {
			stride = 0; /* No 'rows', it's effectively a long one dimensional array */
			size = w;
		}else{
			// rows are padded to 64 bytes; stride is reported back in pixels
			size_t bpr = GRALLOC_ALIGN(w * bpp, 64);
			size = bpr * h;
			stride = bpr / bpp;
		}
	}

	int err;

#ifndef MALI_600

	if (usage & GRALLOC_USAGE_HW_FB)
	{
		err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
	}
	else
#endif

	{
		err = gralloc_alloc_buffer(dev, size, usage, pHandle);
	}

	if (err < 0)
	{
		return err;
	}

	/* match the framebuffer format */
	if (usage & GRALLOC_USAGE_HW_FB)
	{
#ifdef GRALLOC_16_BITS
		format = HAL_PIXEL_FORMAT_RGB_565;
#else
		format = HAL_PIXEL_FORMAT_BGRA_8888;
#endif
	}

	private_handle_t *hnd = (private_handle_t *)*pHandle;
	// the two private usage bits select the YUV color space / range
	int private_usage = usage & (GRALLOC_USAGE_PRIVATE_0 |
	                             GRALLOC_USAGE_PRIVATE_1);

	switch (private_usage)
	{
		case 0:
			hnd->yuv_info = MALI_YUV_BT601_NARROW;
			break;

		case GRALLOC_USAGE_PRIVATE_1:
			hnd->yuv_info = MALI_YUV_BT601_WIDE;
			break;

		case GRALLOC_USAGE_PRIVATE_0:
			hnd->yuv_info = MALI_YUV_BT709_NARROW;
			break;

		case (GRALLOC_USAGE_PRIVATE_0 | GRALLOC_USAGE_PRIVATE_1):
			hnd->yuv_info = MALI_YUV_BT709_WIDE;
			break;
	}

	hnd->width = w;
	hnd->height = h;
	hnd->format = format;
	hnd->stride = stride;

	*pStride = stride;
	return 0;
}
596
/*
 * gralloc "free" entry point: release the backing resources of a buffer
 * previously returned by alloc_device_alloc, then delete the handle.
 *
 * The teardown path depends on how the buffer was allocated (framebuffer
 * slot, UMP, or ion/dma-buf), as recorded in hnd->flags.
 * Returns 0 on success, -EINVAL for an invalid handle.
 */
static int alloc_device_free(alloc_device_t __unused *dev, buffer_handle_t handle)
{
	if (private_handle_t::validate(handle) < 0)
	{
		return -EINVAL;
	}

	private_handle_t const *hnd = reinterpret_cast<private_handle_t const *>(handle);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		// framebuffer memory itself stays mapped; only drop the UMP reference
#if GRALLOC_ARM_UMP_MODULE

		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE

		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#else
		AERR("Can't free ump memory for handle:%p. Not supported.", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		// releases the dma-buf reference held by this process
		close(hnd->share_fd);

		// NOTE(review): zeroing the handle before 'delete hnd' below runs the
		// destructor on a wiped object — appears to rely on private_handle_t
		// being trivially destructible; confirm before restructuring.
		memset((void *)hnd, 0, sizeof(*hnd));
#else
		AERR("Can't free dma_buf memory for handle:0x%x. Not supported.", (unsigned int)hnd);
#endif

	}

	delete hnd;

	return 0;
}
657
658static int alloc_device_close(struct hw_device_t *device)
659{
660 alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
661
662 if (dev)
663 {
664#if GRALLOC_ARM_DMA_BUF_MODULE
665 private_module_t *m = reinterpret_cast<private_module_t *>(device);
666
667 if (0 != ion_close(m->ion_client))
668 {
669 AERR("Failed to close ion_client: %d", m->ion_client);
670 }
671
672 close(m->ion_client);
673#endif
674 delete dev;
675#if GRALLOC_ARM_UMP_MODULE
676 ump_close(); // Our UMP memory refs will be released automatically here...
677#endif
678 }
679
680 return 0;
681}
682
#if GRALLOC_ARM_DMA_BUF_MODULE
/*
 * Query a non-legacy ion driver for the heap id of "ion_system_heap".
 * Returns the heap id on success, or -1 when the queries fail or no system
 * heap is present.
 */
static int find_system_heap_id(int ion_client)
{
	int i, ret, cnt, system_heap_id = -1;
	struct ion_heap_data *data;

	ret = ion_query_heap_cnt(ion_client, &cnt);

	if (ret)
	{
		AERR("ion count query failed with %s", strerror(errno));
		return -1;
	}

	data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));

	if (!data)
	{
		AERR("Error allocating data %s\n", strerror(errno));
		return -1;
	}

	ret = ion_query_get_heaps(ion_client, cnt, data);

	if (ret)
	{
		AERR("Error querying heaps from ion %s", strerror(errno));
	}
	else
	{
		for (i = 0; i < cnt; i++)
		{
			/* (redundant per-iteration re-cast of 'data' removed) */
			if (strcmp(data[i].name, "ion_system_heap") == 0)
			{
				system_heap_id = data[i].heap_id;
				break;
			}
		}

		/* FIX: was 'i > cnt', which can never be true (the loop exits with
		 * i == cnt at most), so the not-found diagnostic was dead code. */
		if (i == cnt)
		{
			AERR("No System Heap Found amongst %d heaps\n", cnt);
			system_heap_id = -1;
		}
	}

	free(data);
	return system_heap_id;
}
#endif
730
Vishal Bhoj78e90492015-12-07 01:36:32 +0530731int alloc_device_open(hw_module_t const *module, const char *name, hw_device_t **device)
732{
733 MALI_IGNORE(name);
734 alloc_device_t *dev;
735
736 dev = new alloc_device_t;
737
738 if (NULL == dev)
739 {
740 return -1;
741 }
742
743#if GRALLOC_ARM_UMP_MODULE
744 ump_result ump_res = ump_open();
745
746 if (UMP_OK != ump_res)
747 {
748 AERR("UMP open failed with %d", ump_res);
749 delete dev;
750 return -1;
751 }
752
753#endif
754
755 /* initialize our state here */
756 memset(dev, 0, sizeof(*dev));
757
758 /* initialize the procs */
759 dev->common.tag = HARDWARE_DEVICE_TAG;
760 dev->common.version = 0;
761 dev->common.module = const_cast<hw_module_t *>(module);
762 dev->common.close = alloc_device_close;
763 dev->alloc = alloc_device_alloc;
764 dev->free = alloc_device_free;
765
766#if GRALLOC_ARM_DMA_BUF_MODULE
767 private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
768 m->ion_client = ion_open();
769
770 if (m->ion_client < 0)
771 {
772 AERR("ion_open failed with %s", strerror(errno));
773 delete dev;
774 return -1;
775 }
776
Laura Abbott311955b2017-06-30 11:39:35 +0530777 m->gralloc_legacy_ion = ion_is_legacy(m->ion_client);
778
779 if (!m->gralloc_legacy_ion)
780 {
781 m->system_heap_id = find_system_heap_id(m->ion_client);
782 if (m->system_heap_id < 0)
783 {
784 delete dev;
785 ion_close(m->ion_client);
786 m->ion_client = -1;
787 return -1;
788 }
789 }
790
Vishal Bhoj78e90492015-12-07 01:36:32 +0530791#endif
792
793 *device = &dev->common;
794
795 return 0;
796}