blob: 076cb5ced2813b80058ec74967dc9be870e4a6ec [file] [log] [blame]
Vishal Bhoj78e90492015-12-07 01:36:32 +05301/*
2 * Copyright (C) 2010 ARM Limited. All rights reserved.
3 *
4 * Copyright (C) 2008 The Android Open Source Project
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
Laura Abbott311955b2017-06-30 11:39:35 +053019#include <cstdlib>
Vishal Bhoj78e90492015-12-07 01:36:32 +053020#include <string.h>
21#include <errno.h>
22#include <pthread.h>
23
24#include <cutils/log.h>
25#include <cutils/atomic.h>
26#include <hardware/hardware.h>
27#include <hardware/gralloc.h>
28
29#include <sys/ioctl.h>
30
31#include "alloc_device.h"
32#include "gralloc_priv.h"
33#include "gralloc_helper.h"
34#include "framebuffer_device.h"
35
36#if GRALLOC_ARM_UMP_MODULE
37#include <ump/ump.h>
38#include <ump/ump_ref_drv.h>
39#endif
40
41#if GRALLOC_ARM_DMA_BUF_MODULE
42#include <linux/ion.h>
43#include <ion/ion.h>
Laura Abbott311955b2017-06-30 11:39:35 +053044#include "ion_4.12.h"
Vishal Bhoj78e90492015-12-07 01:36:32 +053045#endif
46
Vishal Bhoj78e90492015-12-07 01:36:32 +053047#if GRALLOC_SIMULATE_FAILURES
48#include <cutils/properties.h>
49
50/* system property keys for controlling simulated UMP allocation failures */
51#define PROP_MALI_TEST_GRALLOC_FAIL_FIRST "mali.test.gralloc.fail_first"
52#define PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL "mali.test.gralloc.fail_interval"
53
54static int __ump_alloc_should_fail()
55{
56
57 static unsigned int call_count = 0;
58 unsigned int first_fail = 0;
59 int fail_period = 0;
60 int fail = 0;
61
62 ++call_count;
63
64 /* read the system properties that control failure simulation */
65 {
66 char prop_value[PROPERTY_VALUE_MAX];
67
68 if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_FIRST, prop_value, "0") > 0)
69 {
70 sscanf(prop_value, "%11u", &first_fail);
71 }
72
73 if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL, prop_value, "0") > 0)
74 {
75 sscanf(prop_value, "%11u", &fail_period);
76 }
77 }
78
79 /* failure simulation is enabled by setting the first_fail property to non-zero */
80 if (first_fail > 0)
81 {
82 LOGI("iteration %u (fail=%u, period=%u)\n", call_count, first_fail, fail_period);
83
84 fail = (call_count == first_fail) ||
85 (call_count > first_fail && fail_period > 0 && 0 == (call_count - first_fail) % fail_period);
86
87 if (fail)
88 {
89 AERR("failed ump_ref_drv_allocate on iteration #%d\n", call_count);
90 }
91 }
92
93 return fail;
94}
95#endif
96
John Stultze5c5bb32018-03-20 18:16:49 -070097#ifdef FBIOGET_DMABUF
98static int fb_get_framebuffer_dmabuf(private_module_t *m, private_handle_t *hnd)
99{
100 struct fb_dmabuf_export fb_dma_buf;
101 int res;
102 res = ioctl(m->framebuffer->fd, FBIOGET_DMABUF, &fb_dma_buf);
103
104 if (res == 0)
105 {
106 hnd->share_fd = fb_dma_buf.fd;
107 return 0;
108 }
109 else
110 {
111 AINF("FBIOGET_DMABUF ioctl failed(%d). See gralloc_priv.h and the integration manual for vendor framebuffer "
112 "integration",
113 res);
114 return -1;
115 }
116}
117#endif
Vishal Bhoj78e90492015-12-07 01:36:32 +0530118
119static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
120{
121#if GRALLOC_ARM_DMA_BUF_MODULE
122 {
123 private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
124 ion_user_handle_t ion_hnd;
John Stultze5c5bb32018-03-20 18:16:49 -0700125 void *cpu_ptr = MAP_FAILED;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530126 int shared_fd;
127 int ret;
John Stultze5c5bb32018-03-20 18:16:49 -0700128 unsigned int heap_mask;
129 int lock_state = 0;
130 int map_mask = 0;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530131
John Stultze5c5bb32018-03-20 18:16:49 -0700132 if (usage & GRALLOC_USAGE_PROTECTED)
Vishal Bhoj78e90492015-12-07 01:36:32 +0530133 {
John Stultze5c5bb32018-03-20 18:16:49 -0700134#if defined(ION_HEAP_SECURE_MASK)
135 heap_mask = ION_HEAP_SECURE_MASK;
136#else
137 AERR("The platform does NOT support protected ION memory.");
138 return -1;
139#endif
Chia-I Wu06695a62017-05-08 12:55:28 -0700140 }
Laura Abbott6ed00bc2017-06-30 11:39:35 +0530141 else
142 {
John Stultze5c5bb32018-03-20 18:16:49 -0700143 heap_mask = ION_HEAP_SYSTEM_MASK;
Laura Abbott6ed00bc2017-06-30 11:39:35 +0530144 }
Chia-I Wu06695a62017-05-08 12:55:28 -0700145
Laura Abbott311955b2017-06-30 11:39:35 +0530146 if (m->gralloc_legacy_ion)
John Stultze5c5bb32018-03-20 18:16:49 -0700147 {
Laura Abbott311955b2017-06-30 11:39:35 +0530148 ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &(ion_hnd));
John Stultze5c5bb32018-03-20 18:16:49 -0700149
Laura Abbott311955b2017-06-30 11:39:35 +0530150 if (ret != 0)
John Stultze5c5bb32018-03-20 18:16:49 -0700151 {
Laura Abbott311955b2017-06-30 11:39:35 +0530152 AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
153 return -1;
John Stultze5c5bb32018-03-20 18:16:49 -0700154 }
155
Laura Abbott311955b2017-06-30 11:39:35 +0530156 ret = ion_share(m->ion_client, ion_hnd, &shared_fd);
157
158 if (ret != 0)
159 {
160 AERR("ion_share( %d ) failed", m->ion_client);
161
162 if (0 != ion_free(m->ion_client, ion_hnd))
163 {
164 AERR("ion_free( %d ) failed", m->ion_client);
165 }
166
167 return -1;
168 }
169
170 // we do not need ion_hnd once we have shared_fd
171 if (0 != ion_free(m->ion_client, ion_hnd))
172 {
173 AWAR("ion_free( %d ) failed", m->ion_client);
174 }
175 ion_hnd = ION_INVALID_HANDLE;
176 }
177 else
178 {
179 ret = ion_alloc_fd(m->ion_client, size, 0, 1 << m->system_heap_id, 0, &(shared_fd));
180
181 if (ret != 0)
182 {
183 AERR("Failed to ion_alloc_fd from ion_client:%d", m->ion_client);
184 return -1;
185 }
John Stultze5c5bb32018-03-20 18:16:49 -0700186 }
187
188 if (!(usage & GRALLOC_USAGE_PROTECTED))
189 {
190 map_mask = PROT_READ | PROT_WRITE;
191 }
192 else
193 {
194 map_mask = PROT_WRITE;
195 }
John Stultze5c5bb32018-03-20 18:16:49 -0700196
197 cpu_ptr = mmap(NULL, size, map_mask, MAP_SHARED, shared_fd, 0);
Vishal Bhoj78e90492015-12-07 01:36:32 +0530198
199 if (MAP_FAILED == cpu_ptr)
200 {
201 AERR("ion_map( %d ) failed", m->ion_client);
202
Vishal Bhoj78e90492015-12-07 01:36:32 +0530203 close(shared_fd);
204 return -1;
205 }
206
John Stultze5c5bb32018-03-20 18:16:49 -0700207 lock_state = private_handle_t::LOCK_STATE_MAPPED;
208
209 private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, cpu_ptr, lock_state);
Vishal Bhoj78e90492015-12-07 01:36:32 +0530210
211 if (NULL != hnd)
212 {
213 hnd->share_fd = shared_fd;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530214 *pHandle = hnd;
215 return 0;
216 }
217 else
218 {
219 AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
220 }
221
222 close(shared_fd);
John Stultze5c5bb32018-03-20 18:16:49 -0700223
Vishal Bhoj78e90492015-12-07 01:36:32 +0530224 ret = munmap(cpu_ptr, size);
225
226 if (0 != ret)
227 {
228 AERR("munmap failed for base:%p size: %lu", cpu_ptr, (unsigned long)size);
229 }
230
Vishal Bhoj78e90492015-12-07 01:36:32 +0530231 return -1;
232 }
233#endif
234
235#if GRALLOC_ARM_UMP_MODULE
236 MALI_IGNORE(dev);
237 {
238 ump_handle ump_mem_handle;
239 void *cpu_ptr;
240 ump_secure_id ump_id;
241 ump_alloc_constraints constraints;
242
243 size = round_up_to_page_size(size);
244
245 if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
246 {
247 constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE;
248 }
249 else
250 {
251 constraints = UMP_REF_DRV_CONSTRAINT_NONE;
252 }
253
254#ifdef GRALLOC_SIMULATE_FAILURES
255
256 /* if the failure condition matches, fail this iteration */
257 if (__ump_alloc_should_fail())
258 {
259 ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
260 }
261 else
262#endif
263 {
John Stultze5c5bb32018-03-20 18:16:49 -0700264 if (usage & GRALLOC_USAGE_PROTECTED)
Vishal Bhoj78e90492015-12-07 01:36:32 +0530265 {
John Stultze5c5bb32018-03-20 18:16:49 -0700266 AERR("gralloc_alloc_buffer() does not support to allocate protected UMP memory.");
Vishal Bhoj78e90492015-12-07 01:36:32 +0530267 }
268 else
269 {
John Stultze5c5bb32018-03-20 18:16:49 -0700270 ump_mem_handle = ump_ref_drv_allocate(size, constraints);
271
272 if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
273 {
274 cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);
275
276 if (NULL != cpu_ptr)
277 {
278 ump_id = ump_secure_id_get(ump_mem_handle);
279
280 if (UMP_INVALID_SECURE_ID != ump_id)
281 {
282 private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, cpu_ptr,
283 private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);
284
285 if (NULL != hnd)
286 {
287 *pHandle = hnd;
288 return 0;
289 }
290 else
291 {
292 AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
293 }
294 }
295 else
296 {
297 AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
298 }
299
300 ump_mapped_pointer_release(ump_mem_handle);
301 }
302 else
303 {
304 AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
305 }
306
307 ump_reference_release(ump_mem_handle);
308 }
309 else
310 {
311 AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
312 }
Vishal Bhoj78e90492015-12-07 01:36:32 +0530313 }
314 }
315
316 return -1;
317 }
318#endif
319
320}
321
322static int gralloc_alloc_framebuffer_locked(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
323{
324 private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
325
326 // allocate the framebuffer
327 if (m->framebuffer == NULL)
328 {
329 // initialize the framebuffer, the framebuffer is mapped once and forever.
330 int err = init_frame_buffer_locked(m);
331
332 if (err < 0)
333 {
334 return err;
335 }
336 }
337
John Stultze5c5bb32018-03-20 18:16:49 -0700338 uint32_t bufferMask = m->bufferMask;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530339 const uint32_t numBuffers = m->numBuffers;
340 const size_t bufferSize = m->finfo.line_length * m->info.yres;
341
342 if (numBuffers == 1)
343 {
344 // If we have only one buffer, we never use page-flipping. Instead,
345 // we return a regular buffer which will be memcpy'ed to the main
346 // screen when post is called.
347 int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
348 AERR("fallback to single buffering. Virtual Y-res too small %d", m->info.yres);
349 return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
350 }
351
352 if (bufferMask >= ((1LU << numBuffers) - 1))
353 {
John Stultze5c5bb32018-03-20 18:16:49 -0700354 // We ran out of buffers, reset bufferMask.
355 bufferMask = 0;
356 m->bufferMask = 0;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530357 }
358
359 void *vaddr = m->framebuffer->base;
360
361 // find a free slot
362 for (uint32_t i = 0 ; i < numBuffers ; i++)
363 {
364 if ((bufferMask & (1LU << i)) == 0)
365 {
366 m->bufferMask |= (1LU << i);
367 break;
368 }
369
370 vaddr = (void *)((uintptr_t)vaddr + bufferSize);
371 }
372
373 // The entire framebuffer memory is already mapped, now create a buffer object for parts of this memory
374 private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_FRAMEBUFFER, usage, size, vaddr,
John Stultze5c5bb32018-03-20 18:16:49 -0700375 0, m->framebuffer->fd, (uintptr_t)vaddr - (uintptr_t) m->framebuffer->base, m->framebuffer->fb_paddr);
376
Vishal Bhoj78e90492015-12-07 01:36:32 +0530377#if GRALLOC_ARM_UMP_MODULE
378 hnd->ump_id = m->framebuffer->ump_id;
379
380 /* create a backing ump memory handle if the framebuffer is exposed as a secure ID */
381 if ((int)UMP_INVALID_SECURE_ID != hnd->ump_id)
382 {
383 hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);
384
385 if ((int)UMP_INVALID_MEMORY_HANDLE == hnd->ump_mem_handle)
386 {
387 AINF("warning: unable to create UMP handle from secure ID %i\n", hnd->ump_id);
388 }
389 }
390
391#endif
392
393#if GRALLOC_ARM_DMA_BUF_MODULE
394 {
395#ifdef FBIOGET_DMABUF
John Stultze5c5bb32018-03-20 18:16:49 -0700396 /*
397 * Perform allocator specific actions. If these fail we fall back to a regular buffer
398 * which will be memcpy'ed to the main screen when fb_post is called.
399 */
400 if (fb_get_framebuffer_dmabuf(m, hnd) == -1)
Vishal Bhoj78e90492015-12-07 01:36:32 +0530401 {
John Stultze5c5bb32018-03-20 18:16:49 -0700402 int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
Vishal Bhoj78e90492015-12-07 01:36:32 +0530403
John Stultze5c5bb32018-03-20 18:16:49 -0700404 AINF("Fallback to single buffering. Unable to map framebuffer memory to handle:%p", hnd);
405 return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
406 }
Vishal Bhoj78e90492015-12-07 01:36:32 +0530407#endif
408 }
Chia-I Wua12e37d2017-05-08 12:46:13 -0700409
410 // correct numFds/numInts when there is no dmabuf fd
John Stultze5c5bb32018-03-20 18:16:49 -0700411 if (hnd->share_fd < 0)
412 {
Chia-I Wua12e37d2017-05-08 12:46:13 -0700413 hnd->numFds--;
414 hnd->numInts++;
415 }
Vishal Bhoj78e90492015-12-07 01:36:32 +0530416#endif
417
418 *pHandle = hnd;
419
420 return 0;
421}
422
423static int gralloc_alloc_framebuffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
424{
425 private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
426 pthread_mutex_lock(&m->lock);
427 int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
428 pthread_mutex_unlock(&m->lock);
429 return err;
430}
431
/*
 * alloc_device_t::alloc implementation.
 *
 * Derives the stride (in pixels) and the allocation size (in bytes) from
 * (w, h, format), then allocates either a framebuffer slot (when usage has
 * GRALLOC_USAGE_HW_FB and MALI_600 is not defined) or a regular buffer.
 * On success stores the new handle in *pHandle and the stride in *pStride
 * and returns 0; returns a negative errno-style value on failure.
 */
static int alloc_device_alloc(alloc_device_t *dev, int w, int h, int format, int usage, buffer_handle_t *pHandle, int *pStride)
{
	if (!pHandle || !pStride)
	{
		return -EINVAL;
	}

	size_t size;
	size_t stride;

	if (format == HAL_PIXEL_FORMAT_YCrCb_420_SP || format == HAL_PIXEL_FORMAT_YV12
	    /* HAL_PIXEL_FORMAT_YCbCr_420_SP, HAL_PIXEL_FORMAT_YCbCr_420_P, HAL_PIXEL_FORMAT_YCbCr_422_I are not defined in Android.
	     * To enable Mali DDK EGLImage support for those formats, firstly, you have to add them in Android system/core/include/system/graphics.h.
	     * Then, define SUPPORT_LEGACY_FORMAT in the same header file(Mali DDK will also check this definition).
	     */
#ifdef SUPPORT_LEGACY_FORMAT
	    || format == HAL_PIXEL_FORMAT_YCbCr_420_SP || format == HAL_PIXEL_FORMAT_YCbCr_420_P || format == HAL_PIXEL_FORMAT_YCbCr_422_I
#endif
	   )
	{
		/* YUV formats: per-format stride/size rules (chroma planes are half
		 * the luma stride, aligned to 16). */
		switch (format)
		{
			case HAL_PIXEL_FORMAT_YCrCb_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YV12:
#ifdef SUPPORT_LEGACY_FORMAT
			case HAL_PIXEL_FORMAT_YCbCr_420_P:
#endif
				/*
				 * Since Utgard has limitation that "64-byte alignment is enforced on texture and mipmap addresses", here to make sure
				 * the v, u plane start addresses are 64-byte aligned.
				 */
				stride = GRALLOC_ALIGN(w, (h % 8 == 0) ? GRALLOC_ALIGN_BASE_16 :
				         ((h % 4 == 0) ? GRALLOC_ALIGN_BASE_64 : GRALLOC_ALIGN_BASE_128));
				size = GRALLOC_ALIGN(h, 2) * (stride + GRALLOC_ALIGN(stride / 2, 16));

				break;
#ifdef SUPPORT_LEGACY_FORMAT

			case HAL_PIXEL_FORMAT_YCbCr_420_SP:
				stride = GRALLOC_ALIGN(w, 16);
				size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
				break;

			case HAL_PIXEL_FORMAT_YCbCr_422_I:
				/* Interleaved 4:2:2 - two bytes per pixel, no separate planes. */
				stride = GRALLOC_ALIGN(w, 16);
				size = h * stride * 2;

				break;
#endif

			default:
				return -EINVAL;
		}
	}
	else
	{
		/* RGB and BLOB formats: derive bytes-per-pixel first. */
		int bpp = 0;

		switch (format)
		{
			case HAL_PIXEL_FORMAT_RGBA_8888:
			case HAL_PIXEL_FORMAT_RGBX_8888:
			case HAL_PIXEL_FORMAT_BGRA_8888:
				bpp = 4;
				break;

			case HAL_PIXEL_FORMAT_RGB_888:
				bpp = 3;
				break;

			case HAL_PIXEL_FORMAT_RGB_565:
#if PLATFORM_SDK_VERSION < 19
			case HAL_PIXEL_FORMAT_RGBA_5551:
			case HAL_PIXEL_FORMAT_RGBA_4444:
#endif
				bpp = 2;
				break;

			case HAL_PIXEL_FORMAT_BLOB:
				/* BLOB buffers are one-dimensional byte arrays; the HAL
				 * requires height == 1 (bpp intentionally stays 0). */
				if (h != 1) {
					AERR("Height for HAL_PIXEL_FORMAT_BLOB must be 1. h=%d", h);
					return -EINVAL;
				}
				break;

			default:
				AERR("The format is not supported yet: format=%d\n", format);
				return -EINVAL;
		}

		if (format == HAL_PIXEL_FORMAT_BLOB) {
			stride = 0; /* No 'rows', it's effectively a long one dimensional array */
			size = w;
		}else{
			/* Rows are padded to a 64-byte boundary; stride is reported
			 * back to the caller in pixels, not bytes. */
			size_t bpr = GRALLOC_ALIGN(w * bpp, 64);
			size = bpr * h;
			stride = bpr / bpp;
		}
	}

	int err;

#ifndef MALI_600

	if (usage & GRALLOC_USAGE_HW_FB)
	{
		err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
	}
	else
#endif

	{
		err = gralloc_alloc_buffer(dev, size, usage, pHandle);
	}

	if (err < 0)
	{
		return err;
	}

	/* match the framebuffer format */
	if (usage & GRALLOC_USAGE_HW_FB)
	{
#ifdef GRALLOC_16_BITS
		format = HAL_PIXEL_FORMAT_RGB_565;
#else
		format = HAL_PIXEL_FORMAT_BGRA_8888;
#endif
	}

	/* Record the YUV colorspace implied by the two private usage bits:
	 * PRIVATE_0 selects BT709 (vs BT601), PRIVATE_1 selects wide range. */
	private_handle_t *hnd = (private_handle_t *)*pHandle;
	int private_usage = usage & (GRALLOC_USAGE_PRIVATE_0 |
	                             GRALLOC_USAGE_PRIVATE_1);

	switch (private_usage)
	{
		case 0:
			hnd->yuv_info = MALI_YUV_BT601_NARROW;
			break;

		case GRALLOC_USAGE_PRIVATE_1:
			hnd->yuv_info = MALI_YUV_BT601_WIDE;
			break;

		case GRALLOC_USAGE_PRIVATE_0:
			hnd->yuv_info = MALI_YUV_BT709_NARROW;
			break;

		case (GRALLOC_USAGE_PRIVATE_0 | GRALLOC_USAGE_PRIVATE_1):
			hnd->yuv_info = MALI_YUV_BT709_WIDE;
			break;
	}

	/* Publish the final geometry on the handle and report the stride. */
	hnd->width = w;
	hnd->height = h;
	hnd->format = format;
	hnd->stride = stride;

	*pStride = stride;
	return 0;
}
597
/*
 * alloc_device_t::free implementation: releases a buffer previously returned
 * by alloc_device_alloc.  The release path depends on the handle's flags:
 *  - PRIV_FLAGS_FRAMEBUFFER: only drop the backing UMP reference (the
 *    framebuffer mapping itself stays mapped for the module's lifetime);
 *  - PRIV_FLAGS_USES_UMP: release the CPU mapping and the UMP reference;
 *  - PRIV_FLAGS_USES_ION: munmap the CPU mapping and close the dma-buf fd.
 * Returns 0 on success, -EINVAL for an invalid handle.
 */
static int alloc_device_free(alloc_device_t __unused *dev, buffer_handle_t handle)
{
	if (private_handle_t::validate(handle) < 0)
	{
		return -EINVAL;
	}

	private_handle_t const *hnd = reinterpret_cast<private_handle_t const *>(handle);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
#if GRALLOC_ARM_UMP_MODULE

		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE

		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#else
		AERR("Can't free ump memory for handle:%p. Not supported.", hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle %p", hnd);
			}
		}

		close(hnd->share_fd);

		/* NOTE(review): the handle is zeroed here and then deleted below;
		 * this assumes private_handle_t has a trivial destructor (wiping the
		 * object before delete would otherwise corrupt it) — confirm. */
		memset((void *)hnd, 0, sizeof(*hnd));
#else
		AERR("Can't free dma_buf memory for handle:0x%x. Not supported.", (unsigned int)hnd);
#endif

	}

	delete hnd;

	return 0;
}
658
659static int alloc_device_close(struct hw_device_t *device)
660{
661 alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);
662
663 if (dev)
664 {
665#if GRALLOC_ARM_DMA_BUF_MODULE
666 private_module_t *m = reinterpret_cast<private_module_t *>(device);
667
668 if (0 != ion_close(m->ion_client))
669 {
670 AERR("Failed to close ion_client: %d", m->ion_client);
671 }
672
673 close(m->ion_client);
674#endif
675 delete dev;
676#if GRALLOC_ARM_UMP_MODULE
677 ump_close(); // Our UMP memory refs will be released automatically here...
678#endif
679 }
680
681 return 0;
682}
683
Laura Abbott311955b2017-06-30 11:39:35 +0530684#if GRALLOC_ARM_DMA_BUF_MODULE
685static int find_system_heap_id(int ion_client)
686{
687 int i, ret, cnt, system_heap_id = -1;
688 struct ion_heap_data *data;
689
690 ret = ion_query_heap_cnt(ion_client, &cnt);
691
692 if (ret)
693 {
694 AERR("ion count query failed with %s", strerror(errno));
695 return -1;
696 }
697
698 data = (struct ion_heap_data *)malloc(cnt * sizeof(*data));
699 if (!data)
700 {
701 AERR("Error allocating data %s\n", strerror(errno));
702 return -1;
703 }
704
705 ret = ion_query_get_heaps(ion_client, cnt, data);
706 if (ret)
707 {
708 AERR("Error querying heaps from ion %s", strerror(errno));
709 }
710 else
711 {
712 for (i = 0; i < cnt; i++) {
713 struct ion_heap_data *dat = (struct ion_heap_data *)data;
714 if (strcmp(dat[i].name, "ion_system_heap") == 0) {
715 system_heap_id = dat[i].heap_id;
716 break;
717 }
718 }
719
720 if (i > cnt)
721 {
722 AERR("No System Heap Found amongst %d heaps\n", cnt);
723 system_heap_id = -1;
724 }
725 }
726
727 free(data);
728 return system_heap_id;
729}
730#endif
731
/*
 * hw_module_t open hook: creates and initializes the alloc device.
 *
 * Opens the UMP session (UMP builds) and the ion client (DMA-BUF builds),
 * detects legacy vs new ion and, for new ion, caches the system heap id on
 * the module.  On success stores the device in *device and returns 0;
 * returns -1 on any failure, with partial state torn down.
 */
int alloc_device_open(hw_module_t const *module, const char *name, hw_device_t **device)
{
	MALI_IGNORE(name);
	alloc_device_t *dev;

	/* NOTE(review): plain new throws on failure rather than returning NULL,
	 * so the check below only fires with a nothrow allocator — confirm. */
	dev = new alloc_device_t;

	if (NULL == dev)
	{
		return -1;
	}

#if GRALLOC_ARM_UMP_MODULE
	ump_result ump_res = ump_open();

	if (UMP_OK != ump_res)
	{
		AERR("UMP open failed with %d", ump_res);
		delete dev;
		return -1;
	}

#endif

	/* initialize our state here */
	memset(dev, 0, sizeof(*dev));

	/* initialize the procs */
	dev->common.tag = HARDWARE_DEVICE_TAG;
	dev->common.version = 0;
	dev->common.module = const_cast<hw_module_t *>(module);
	dev->common.close = alloc_device_close;
	dev->alloc = alloc_device_alloc;
	dev->free = alloc_device_free;

#if GRALLOC_ARM_DMA_BUF_MODULE
	private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
	m->ion_client = ion_open();

	/* NOTE(review): on the ion failure paths below, ump_close() is never
	 * called even though ump_open() succeeded above when both modules are
	 * compiled in — possible session leak, confirm intended. */
	if (m->ion_client < 0)
	{
		AERR("ion_open failed with %s", strerror(errno));
		delete dev;
		return -1;
	}

	/* Legacy ion uses handle+share; new (4.12+) ion allocates fds directly
	 * from a heap id discovered via find_system_heap_id(). */
	m->gralloc_legacy_ion = ion_is_legacy(m->ion_client);

	if (!m->gralloc_legacy_ion)
	{
		m->system_heap_id = find_system_heap_id(m->ion_client);

		if (m->system_heap_id < 0)
		{
			delete dev;
			ion_close(m->ion_client);
			m->ion_client = -1;
			return -1;
		}
	}

#endif

	*device = &dev->common;

	return 0;
}