[cef] Stop using cros-toolchain 19/29419/3
author Roger Zanoni <rzanoni@igalia.com>
Wed, 22 Nov 2023 22:59:02 +0000 (22:59 +0000)
committer Jan-Simon Moeller <jsmoeller@linuxfoundation.org>
Thu, 23 Nov 2023 14:48:51 +0000 (14:48 +0000)
Previous cef and chromium recipes used Chromium's Chrome OS toolchain, as it
was the closest to what was needed to build Chromium for target devices.
However, that toolchain contains additional settings and logic that may
conflict with the cef recipe's compiler settings and change executable paths,
so this fix switches to a custom toolchain that can be configured according to
AGL's needs and preserves the paths set in the recipe.
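
For illustration, the //build/toolchain/yocto/BUILD.gn that the new
do_write_toolchain_file task generates looks roughly like the sketch below,
assuming an aarch64 target; the tool names and flags are placeholders standing
in for the expanded CC/CXX/AR/NM/READELF and *FLAGS variables, not literal
generated output:

    # This file has been generated automatically.

    import("//build/toolchain/gcc_toolchain.gni")

    gcc_toolchain("yocto_target") {
      cc = "aarch64-agl-linux-gcc"            # placeholder for ${CC}
      cxx = "aarch64-agl-linux-g++"           # placeholder for ${CXX}
      ar = "aarch64-agl-linux-ar"             # placeholder for ${AR}
      ld = cxx  # GN expects a compiler, not a linker.
      nm = "aarch64-agl-linux-nm"             # placeholder for ${NM}
      readelf = "aarch64-agl-linux-readelf"   # placeholder for ${READELF}
      extra_cflags = "-O2 -pipe"              # placeholder for ${CFLAGS}
      extra_cppflags = ""                     # placeholder for ${CPPFLAGS}
      extra_cxxflags = "-O2 -pipe"            # placeholder for ${CXXFLAGS}
      extra_ldflags = ""                      # placeholder for ${LDFLAGS}
      toolchain_args = {
        current_cpu = "arm64"   # via the GN_TARGET_ARCH_NAME:aarch64 override
        current_os = "linux"
        is_clang = true
      }
    }

A matching gcc_toolchain("yocto_native") block is emitted from the BUILD_CC,
BUILD_CXX, BUILD_AR and related variables, so host and target tool settings
stay fully under the recipe's control.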

Bug-AGL: SPEC-4976

Signed-off-by: Roger Zanoni <rzanoni@igalia.com>
Change-Id: Idcb7f6232b8d9989be5daa7ed83930a8b9f02df3
Reviewed-on: https://gerrit.automotivelinux.org/gerrit/c/AGL/meta-agl-demo/+/29419
Reviewed-by: Jan-Simon Moeller <jsmoeller@linuxfoundation.org>
Tested-by: Jenkins Job builder account
recipes-wam/cef/cef_git.bb
recipes-wam/cef/files/chromium/0034-v8-qemu-wrapper.patch [new file with mode: 0644]
recipes-wam/cef/gn-utils.inc

diff --git a/recipes-wam/cef/cef_git.bb b/recipes-wam/cef/cef_git.bb
index 63897f3..fda37d3 100644 (file)
--- a/recipes-wam/cef/cef_git.bb
+++ b/recipes-wam/cef/cef_git.bb
@@ -1,5 +1,7 @@
 require gn-utils.inc
 
+inherit qemu
+
 LICENSE = "Apache-2.0 & BSD-3-Clause & LGPL-2.0-only & LGPL-2.1-only"
 
 LIC_FILES_CHKSUM = "\
@@ -12,8 +14,6 @@ CHROMIUM_VERSION = "118.0.5993.80"
 BRANCH = "5993"
 SRCREV = "3cffa575446727e2fe1f6499efa21f8e096e8ca0"
 
-GN_TARGET_CPU = "${@gn_arch_name('${TUNE_ARCH}')}"
-
 PV = "${CHROMIUM_VERSION}.${BRANCH}+git"
 
 FILESEXTRAPATHS:prepend := "${THISDIR}/files/cef:"
@@ -60,6 +60,7 @@ SRC_URI = "\
     file://0031-M118-fix-Add-a-way-to-set-different-lib-paths-host-a.patch \
     file://0032-M118-fix-zlib-Fix-arm-build.patch \
     file://0033-M118-fix-Fix-skia-linker-issues-for-arm-neon.patch \
+    file://0034-v8-qemu-wrapper.patch \
     \
     git://bitbucket.org/chromiumembedded/cef.git;branch=${BRANCH};protocol=https;rev=${SRCREV};name=cef;destsuffix=chromium-${CHROMIUM_VERSION}/cef \
     file://0001-Add-an-option-to-use-an-output-directory-outside-src.patch;patchdir=cef \
@@ -83,8 +84,8 @@ DEPOT_TOOLS_DIR="${STAGING_DIR_NATIVE}${datadir}/depot_tools"
 S = "${CHROMIUM_DIR}"
 B = "${WORKDIR}/build"
 
-OUT_PATH = "${B}/out/Release_GN_${GN_TARGET_CPU}"
-DIST_PATH = "${OUT_PATH}/dist/cef-minimal_${GN_TARGET_CPU}"
+OUT_PATH = "${B}/out/Release_GN_${GN_TARGET_ARCH_NAME}"
+DIST_PATH = "${OUT_PATH}/dist/cef-minimal_${GN_TARGET_ARCH_NAME}"
 CEF_DATA_PATH = "${datadir}/cef"
 
 DEPENDS:append = " curl clang clang-native gperf-native gn-native dbus libcxx libcxx-native libpng libxslt jpeg compiler-rt libxkbcommon nss nss-native atk at-spi2-atk libdrm pango cairo virtual/egl qemu-native pciutils glib-2.0 pkgconfig-native pulseaudio xz-native compiler-rt compiler-rt-native"
@@ -232,34 +233,16 @@ GN_DEFINES:append = ' \
               current_os="linux" \
               clang_use_chrome_plugins=false \
               clang_base_path="${STAGING_DIR_NATIVE}/usr" \
-              clang_base_path_target="${STAGING_DIR_TARGET}/usr" \
               clang_version="14.0.6" \
+              clang_base_path_target="${STAGING_DIR_TARGET}/usr" \
               custom_toolchain="//build/toolchain/cros:target" \
               host_toolchain="//build/toolchain/cros:host" \
               v8_snapshot_toolchain="//build/toolchain/cros:v8_snapshot" \
-              target_cpu="${GN_TARGET_CPU}" \
+              target_cpu="${@gn_target_arch_name(d)}" \
               use_v8_context_snapshot=false \
-              cros_host_ar=\"${BUILD_AR}\" \
-              cros_host_cc=\"${BUILD_CC}\" \
-              cros_host_cxx=\"${BUILD_CXX}\" \
-              cros_host_ld=\"${BUILD_CXX}\" \
-              cros_host_extra_cppflags=\"${BUILD_CPPFLAGS}\" \
-              cros_host_extra_cxxflags=\"${BUILD_CXXFLAGS}\" \
-              cros_host_extra_ldflags=\"${BUILD_LDFLAGS}\" \
-              cros_target_ar=\"${AR}\" \
-              cros_target_cc=\"${CC}\" \
-              cros_target_cxx=\"${CXX}\" \
-              cros_target_ld=\"${CXX}\" \
-              cros_target_extra_cppflags=\"${CPPFLAGS}\" \
-              cros_target_extra_cxxflags=\"${CXXFLAGS}\" \
-              cros_target_extra_ldflags=\"${LDFLAGS}\" \
-              cros_v8_snapshot_ar=\"${BUILD_AR}\" \
-              cros_v8_snapshot_cc=\"${BUILD_CC}\" \
-              cros_v8_snapshot_cxx=\"${BUILD_CXX}\" \
-              cros_v8_snapshot_ld=\"${BUILD_CXX}\" \
-              cros_v8_snapshot_cppflags=\"${BUILD_CXXFLAGS}\" \
-              cros_v8_snapshot_cxxflags=\"${BUILD_CXXFLAGS}\" \
-              cros_v8_snapshot_ldflags=\"${BUILD_LDFLAGS}\" \
+              custom_toolchain="//build/toolchain/yocto:yocto_target" \
+              host_toolchain="//build/toolchain/yocto:yocto_native" \
+              v8_snapshot_toolchain="//build/toolchain/yocto:yocto_target" \
 '
 
 PACKAGECONFIG ??= "upower use-egl"
@@ -270,6 +253,43 @@ GN_DEFINES:append = ' \
               ${PACKAGECONFIG_CONFARGS} \
 '
 
+python do_write_toolchain_file () {
+    """Writes a BUILD.gn file for Yocto detailing its toolchains."""
+    toolchain_dir = d.expand("${S}/build/toolchain/yocto")
+    bb.utils.mkdirhier(toolchain_dir)
+    toolchain_file = os.path.join(toolchain_dir, "BUILD.gn")
+    write_toolchain_file(d, toolchain_file)
+}
+addtask write_toolchain_file after do_patch before do_configure
+
+# V8's JIT infrastructure requires binaries such as mksnapshot and
+# mkpeephole to be run on the host during the build. However, these
+# binaries must have the same bit-width as the target (e.g. an x86_64
+# host targeting ARMv6 needs to produce a 32-bit binary). Instead of
+# depending on a third Yocto toolchain, we just build those binaries
+# for the target and run them on the host with QEMU.
+python do_create_v8_qemu_wrapper () {
+    """Creates a small wrapper that invokes QEMU to run some target V8 binaries
+    on the host."""
+    qemu_libdirs = [d.expand('${STAGING_DIR_HOST}${libdir}'),
+                    d.expand('${STAGING_DIR_HOST}${base_libdir}')]
+    qemu_cmd = qemu_wrapper_cmdline(d, d.getVar('STAGING_DIR_HOST', True),
+                                    qemu_libdirs)
+    wrapper_path = d.expand('${OUT_PATH}/v8-qemu-wrapper.sh')
+    with open(wrapper_path, 'w') as wrapper_file:
+        wrapper_file.write("""#!/bin/sh
+
+# This file has been generated automatically.
+# It invokes QEMU to run binaries built for the target in the host during the
+# build process.
+
+%s "$@"
+""" % qemu_cmd)
+    os.chmod(wrapper_path, 0o755)
+}
+do_create_v8_qemu_wrapper[dirs] = "${OUT_PATH}"
+addtask create_v8_qemu_wrapper after do_patch before do_configure
+
 do_configure () {
     bbnote "do_configure:"
     bbnote "Base out path: ${B}"
@@ -313,7 +333,7 @@ do_install () {
                                   --no-archive \
                                   --ninja-build \
                                   --minimal \
-                                  --${GN_TARGET_CPU}-build \
+                                  --${GN_TARGET_ARCH_NAME}-build \
                                   --ozone
 
     install -d ${D}${CEF_DATA_PATH}
diff --git a/recipes-wam/cef/files/chromium/0034-v8-qemu-wrapper.patch b/recipes-wam/cef/files/chromium/0034-v8-qemu-wrapper.patch
new file mode 100644 (file)
index 0000000..dec3db3
--- /dev/null
+++ b/recipes-wam/cef/files/chromium/0034-v8-qemu-wrapper.patch
@@ -0,0 +1,66 @@
+From 733559e6f2c26ccbce97354a2341b14c63563dab Mon Sep 17 00:00:00 2001
+From: Raphael Kubo da Costa <raphael.kubo.da.costa@intel.com>
+Date: Tue, 7 Nov 2017 15:24:32 +0100
+Subject: [PATCH] v8: qemu wrapper
+
+The patch below makes the V8 binaries that run during the build be invoked
+through QEMU, as they are built for the target.
+
+Upstream-Status: Inappropriate [embedder specific]
+
+Signed-off-by: Raphael Kubo da Costa <raphael.kubo.da.costa@intel.com>
+Signed-off-by: Maksim Sisov <msisov@igalia.com>
+
+---
+ tools/v8_context_snapshot/BUILD.gn | 1 +
+ v8/BUILD.gn                        | 4 ++++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/tools/v8_context_snapshot/BUILD.gn b/tools/v8_context_snapshot/BUILD.gn
+index 52504f4..40425d6 100644
+--- a/tools/v8_context_snapshot/BUILD.gn
++++ b/tools/v8_context_snapshot/BUILD.gn
+@@ -44,6 +44,7 @@ if (use_v8_context_snapshot) {
+       output_path = rebase_path(output_file, root_build_dir)
+
+       args = [
++        "./v8-qemu-wrapper.sh",
+         "./" + rebase_path(get_label_info(":v8_context_snapshot_generator",
+                                           "root_out_dir") +
+                                "/v8_context_snapshot_generator",
+diff --git a/v8/BUILD.gn b/v8/BUILD.gn
+index 025203d..0c196bb 100644
+--- a/v8/BUILD.gn
++++ b/v8/BUILD.gn
+@@ -2134,6 +2134,7 @@ template("run_torque") {
+     }
+
+     args = [
++      "./v8-qemu-wrapper.sh",
+       "./" + rebase_path(
+               get_label_info(":torque($toolchain)", "root_out_dir") + "/torque",
+               root_build_dir),
+@@ -2291,6 +2292,7 @@ action("generate_bytecode_builtins_list") {
+   outputs = [ "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h" ]
+   deps = [ ":bytecode_builtins_list_generator($v8_generator_toolchain)" ]
+   args = [
++    "./v8-qemu-wrapper.sh",
+     "./" + rebase_path(
+             get_label_info(
+                     ":bytecode_builtins_list_generator($v8_generator_toolchain)",
+@@ -2330,6 +2332,7 @@ template("run_mksnapshot") {
+     data = []
+
+     args = [
++      "./v8-qemu-wrapper.sh",
+       "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
+                                         "root_out_dir") + "/mksnapshot",
+                          root_build_dir),
+@@ -6931,6 +6934,7 @@ if (v8_enable_i18n_support) {
+     outputs = [ output_file ]
+
+     args = [
++      "./v8-qemu-wrapper.sh",
+       "./" + rebase_path(
+               get_label_info(
+                       ":gen-regexp-special-case($v8_generator_toolchain)",
diff --git a/recipes-wam/cef/gn-utils.inc b/recipes-wam/cef/gn-utils.inc
index 156b56d..34f8a12 100644 (file)
--- a/recipes-wam/cef/gn-utils.inc
+++ b/recipes-wam/cef/gn-utils.inc
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
+# GN host architecture helpers.
 #
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
+# Copied from https://github.com/OSSystems/meta-browser
 #
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-
-def gn_arch_name(yocto_arch):
-    """Translates between Yocto's architecture values and the corresponding
-    ones used by GN."""
-    translation_table = {
-        'aarch64': 'arm64',
-        'arm': 'arm',
-        'i586': 'x86',
-        'i686': 'x86',
-        'x86_64': 'x64',
+# BUILD_ARCH's value corresponds to what uname returns as the machine name.
+# The mapping in gn_host_arch_name() translates the possible machine names
+# returned by the Linux kernel in uname(2) into the corresponding values GN
+# understands.
+
+def gn_host_arch_name(d):
+    """Returns a GN architecture name corresponding to the build host's machine
+    architecture."""
+    import re
+    arch_translations = {
+        r'aarch64.*': 'arm64',
+        r'arm.*': 'arm',
+        r'i[3456]86$': 'x86',
+        r'x86_64$': 'x64',
     }
-    try:
-        return translation_table[yocto_arch]
-    except KeyError:
-        bb.fatal('"%s" is not a supported architecture.' % yocto_arch)
+    build_arch = d.getVar("BUILD_ARCH")
+    for arch_regexp, gn_arch_name in arch_translations.items():
+        if re.match(arch_regexp, build_arch):
+            return gn_arch_name
+    bb.fatal('Unsupported BUILD_ARCH value: "%s"' % build_arch)
+
+# GN target architecture helpers.
+#
+# Determining the target architecture is more difficult, as there are many
+# different values we can use on the Yocto side (e.g. TUNE_ARCH, TARGET_ARCH,
+# MACHINEOVERRIDES etc). What we do is define the mapping with regular,
+# non-Python variables with overrides that are generic enough (i.e. "x86"
+# instead of "i586") and then use gn_target_arch_name() to return the right
+# value with some validation.
+GN_TARGET_ARCH_NAME:aarch64 = "arm64"
+GN_TARGET_ARCH_NAME:arm = "arm"
+GN_TARGET_ARCH_NAME:x86 = "x86"
+GN_TARGET_ARCH_NAME:x86-64 = "x64"
+
+def clang_install_path(d):
+    """Return clang compiler install path."""
+    return d.getVar("STAGING_BINDIR_NATIVE")
+
+def gn_target_arch_name(d):
+    """Returns a GN architecture name corresponding to the target machine's
+    architecture."""
+    name = d.getVar("GN_TARGET_ARCH_NAME")
+    if name is None:
+        bb.fatal('Unsupported target architecture. A valid override for the '
+                 'GN_TARGET_ARCH_NAME variable could not be found.')
+    return name
+
+def write_toolchain_file(d, file_path):
+    """Creates a complete GN toolchain file in |file_path|."""
+    import string
+    # Even though we always use clang, the "clang_toolchain" GN template is too
+    # restrictive in the way it sets variables such as |cxx|. Since it is just
+    # a wrapper on top of the "gcc_toolchain" template, we keep using the
+    # latter directly to accommodate our cross-compilation needs.
+    toolchain_tmpl = string.Template(
+        'gcc_toolchain("${toolchain_name}") {\n'
+        '  cc = "${cc}"\n'
+        '  cxx = "${cxx}"\n'
+        '  ar = "${ar}"\n'
+        '  ld = cxx  # GN expects a compiler, not a linker.\n'
+        '  nm = "${nm}"\n'
+        '  readelf = "${readelf}"\n'
+        '  extra_cflags = "${extra_cflags}"\n'
+        '  extra_cppflags = "${extra_cppflags}"\n'
+        '  extra_cxxflags = "${extra_cxxflags}"\n'
+        '  extra_ldflags = "${extra_ldflags}"\n'
+        '  toolchain_args = {\n'
+        '    current_cpu = "${current_cpu}"\n'
+        '    current_os = "linux"\n'
+        '    is_clang = true\n'
+        '  }\n'
+        '}\n'
+    )
+
+    native_toolchain = {
+        'toolchain_name': 'yocto_native',
+        'current_cpu': gn_host_arch_name(d),
+        'cc': d.expand('${BUILD_CC}'),
+        'cxx': d.expand('${BUILD_CXX}'),
+        'ar': d.expand('${BUILD_AR}'),
+        'nm': d.expand('${BUILD_NM}'),
+        'readelf': d.expand('${BUILD_PREFIX}readelf'),
+        'extra_cflags': d.expand('${BUILD_CFLAGS}'),
+        'extra_cppflags': d.expand('${BUILD_CPPFLAGS}'),
+        'extra_cxxflags': d.expand('${BUILD_CXXFLAGS}'),
+        'extra_ldflags': d.expand('${BUILD_LDFLAGS}'),
+    }
+    target_toolchain = {
+        'toolchain_name': 'yocto_target',
+        'current_cpu': gn_target_arch_name(d),
+        'cc': d.expand('${CC}'),
+        'cxx': d.expand('${CXX}'),
+        'ar': d.expand('${AR}'),
+        'nm': d.expand('${NM}'),
+        'readelf': d.expand('${READELF}'),
+        'extra_cflags': d.expand('${CFLAGS}'),
+        'extra_cppflags': d.expand('${CPPFLAGS}'),
+        'extra_cxxflags': d.expand('${CXXFLAGS}'),
+        'extra_ldflags': d.expand('${LDFLAGS}'),
+    }
+
+    with open(file_path, 'w') as toolchain_file:
+        toolchain_file.write(
+            '# This file has been generated automatically.\n'
+            '\n'
+            'import("//build/toolchain/gcc_toolchain.gni")\n'
+            '\n'
+        )
+        toolchain_file.write(toolchain_tmpl.substitute(native_toolchain))
+        toolchain_file.write(toolchain_tmpl.substitute(target_toolchain))
+